diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 8e8c440b3d..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -public -node_modules -resources diff --git a/.gitignore b/.gitignore index e50d424efc..e6e9ac43d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,15 +1,20 @@ -/public -/resources +# Dependencies /node_modules -package-lock.json -*.tern-port -*/**/.tern-port + +# Production +/build + +# Generated files +.docusaurus +.cache-loader + +# Misc .DS_Store -.vscode/settings.json -/scripts/converters/output -/scripts/converters/results_to_markdown/*.json -/scripts/converters/results_to_markdown/.terraform -/scripts/converters/results_to_markdown/terraform.tfstate* -/scripts/converters/results_to_markdown/*.tfvars - -.idea/ +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* \ No newline at end of file diff --git a/.drone.yml b/.old-drone.yml similarity index 100% rename from .drone.yml rename to .old-drone.yml diff --git a/Dockerfile.algolia b/Dockerfile.algolia deleted file mode 100644 index 6c334240e0..0000000000 --- a/Dockerfile.algolia +++ /dev/null @@ -1,10 +0,0 @@ -FROM rancher/docs:latest as prod - -FROM rancher/docs:build - -COPY --from=prod /usr/share/nginx/html/docs/final.algolia.json /run -WORKDIR /run -COPY package.json package.json -COPY scripts scripts - -ENTRYPOINT ["yarn","run","publish-algolia"] diff --git a/Dockerfile.build b/Dockerfile.build deleted file mode 100644 index a50a0e1e47..0000000000 --- a/Dockerfile.build +++ /dev/null @@ -1,19 +0,0 @@ -FROM node:16-alpine - -RUN apk update && apk add py-pygments bash git asciidoc gcompat && rm -rf /var/cache/apk/* - -# Download and install hugo -ENV HUGO_VERSION 0.54.0 -ENV HUGO_BINARY hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz - -ADD https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/${HUGO_BINARY} /tmp/hugo.tar.gz -RUN tar xzf /tmp/hugo.tar.gz -C /tmp \ - && mv /tmp/hugo /usr/local/bin/ \ - && rm /tmp/hugo.tar.gz \ - && mkdir -p /run - -WORKDIR /run -COPY package.json /run/ -COPY yarn.lock /run/ - -RUN yarn diff --git a/Dockerfile.dev b/Dockerfile.dev deleted file mode 100644 index af34fcf57d..0000000000 --- a/Dockerfile.dev +++ /dev/null @@ -1,13 +0,0 @@ -FROM rancher/docs:build -ENV HUGO_ENV dev - -VOLUME ["/run/archetypes", "/run/assets", "/run/content", "/run/data", "/run/layouts", "/run/scripts", "/run/static", "/run/.git"] -WORKDIR /run - -ADD https://github.com/rancherlabs/website-theme/archive/master.tar.gz /run/master.tar.gz -RUN mkdir -p /output /theme/rancher-website-theme && tar -xzf /run/master.tar.gz -C /run/node_modules/rancher-website-theme --strip=1 && rm /run/master.tar.gz - -# Expose default hugo port -EXPOSE 9001 - -ENTRYPOINT ["hugo", "serve", "--bind=0.0.0.0", "--buildDrafts", "--buildFuture", "--baseURL=" ] diff --git a/Dockerfile.prod b/Dockerfile.prod deleted file mode 100644 index 38ba46de61..0000000000 --- a/Dockerfile.prod +++ /dev/null @@ -1,28 +0,0 @@ -FROM rancher/docs:build as build -ENV HUGO_ENV production - -WORKDIR /run - -COPY config.toml /run/ -COPY archetypes archetypes -COPY assets assets -COPY data data -COPY layouts layouts -COPY scripts scripts -COPY content content -COPY static static -COPY .git .git - -ADD https://github.com/rancherlabs/website-theme/archive/master.tar.gz /run/master.tar.gz -RUN mkdir -p /output 
/theme/rancher-website-theme && tar -xzf /run/master.tar.gz -C /run/node_modules/rancher-website-theme --strip=1 && rm /run/master.tar.gz - -RUN ["hugo", "--buildFuture", "--baseURL=https://rancher.com/docs", "--destination=/output"] - -# Make sure something got built -RUN stat /output/index.html - -RUN ["npm","run","build-algolia"] - -FROM nginx:alpine -COPY --from=build /output /usr/share/nginx/html/docs/ -COPY nginx.conf /etc/nginx/conf.d/default.conf diff --git a/Dockerfile.staging b/Dockerfile.staging deleted file mode 100644 index 5dc3076863..0000000000 --- a/Dockerfile.staging +++ /dev/null @@ -1,26 +0,0 @@ -FROM rancher/docs:build as build -ENV HUGO_ENV staging - -WORKDIR /run - -COPY config.toml /run/ -COPY archetypes archetypes -COPY assets assets -COPY data data -COPY layouts layouts -COPY scripts scripts -COPY content content -COPY static static -COPY .git .git - -ADD https://github.com/rancherlabs/website-theme/archive/master.tar.gz /run/master.tar.gz -RUN mkdir -p /output /theme/rancher-website-theme && tar -xzf /run/master.tar.gz -C /run/node_modules/rancher-website-theme --strip=1 && rm /run/master.tar.gz - -RUN ["hugo", "--buildDrafts", "--buildFuture", "--baseURL=https://staging.rancher.com/docs", "--destination=/output"] - -# Make sure something got built -RUN stat /output/index.html - -FROM nginx:alpine -COPY --from=build /output /usr/share/nginx/html/docs/ -COPY nginx.conf /etc/nginx/conf.d/default.conf diff --git a/README.md b/README.md index 07d12120a5..aaba2fa1e1 100644 --- a/README.md +++ b/README.md @@ -1,57 +1,41 @@ -Rancher Docs ------------- +# Website -## Contributing +This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator. -We have transitioned to versioned documentation for Rancher (files within `content/rancher`). +### Installation -New contributions should be made to the applicable versioned directories (e.g., `content/rancher/v2.5` and `content/rancher/v2.0-v2.4`). +``` +$ yarn +``` + +### Local Development -Contents under the `content/rancher/v2.x` directory are no longer maintained after v2.5.6. +``` +$ yarn start +``` -## Running for development/editing +This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. -The `rancher/docs:dev` docker image runs a live-updating server. To run on your workstation, run: +### Build -Linux -```bash - ./scripts/dev ``` - -Windows -```powershell -./scripts/dev-windows.ps1 +$ yarn build ``` -and then navigate to http://localhost:9001/. Click the link on the card associated with a given Rancher version to -access the documentation. For example, clicking on the link of the Rancher v2.5 card will redirect to -http://localhost:9001/rancher/v2.5/en/. Note that due to the way the Rancher website is built, links in the top -navigation panel will not work. +This command generates static content into the `build` directory and can be served using any static contents hosting service. 
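To preview the production output before deploying, the generated `build` directory can be served locally. This is a minimal sketch assuming the default Docusaurus `serve` script is present in `package.json`; any static file server pointed at `build/` would work just as well:

```
$ yarn build
$ yarn serve
```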
-You can customize the port by passing it as an argument: +### Deployment -Linux -```bash - ./scripts/dev 8080 -``` +Using SSH: -Windows -```powershell -./scripts/dev-windows.ps1 -port 8080 +``` +$ USE_SSH=true yarn deploy ``` -License -======= -Copyright (c) 2014-2019 [Rancher Labs, Inc.](https://rancher.com) - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Not using SSH: -[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) +``` +$ GIT_USER= yarn deploy +``` -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. diff --git a/archetypes/default.md b/archetypes/default.md deleted file mode 100644 index 312b86f5ab..0000000000 --- a/archetypes/default.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "{{ replace .TranslationBaseName "-" " " | title }}" -date: {{ .Date }} -draft: true -layout: single ---- - diff --git a/assets/img/os/Rancher_aws1.png b/assets/img/os/Rancher_aws1.png deleted file mode 100644 index d4ba26637c..0000000000 Binary files a/assets/img/os/Rancher_aws1.png and /dev/null differ diff --git a/assets/img/os/Rancher_aws2.png b/assets/img/os/Rancher_aws2.png deleted file mode 100644 index ccf2ab2af3..0000000000 Binary files a/assets/img/os/Rancher_aws2.png and /dev/null differ diff --git a/assets/img/os/Rancher_aws3.png b/assets/img/os/Rancher_aws3.png deleted file mode 100644 index 70b0bf2f1a..0000000000 Binary files a/assets/img/os/Rancher_aws3.png and /dev/null differ diff --git a/assets/img/os/Rancher_aws4.png b/assets/img/os/Rancher_aws4.png deleted file mode 100644 index 6cb7655623..0000000000 Binary files a/assets/img/os/Rancher_aws4.png and /dev/null differ diff --git a/assets/img/os/Rancher_aws5.png b/assets/img/os/Rancher_aws5.png deleted file mode 100644 index 5f12f339b2..0000000000 Binary files a/assets/img/os/Rancher_aws5.png and /dev/null differ diff --git a/assets/img/os/Rancher_aws6.png b/assets/img/os/Rancher_aws6.png deleted file mode 100644 index c7f0b6fdc3..0000000000 Binary files a/assets/img/os/Rancher_aws6.png and /dev/null differ diff --git a/assets/img/os/Rancher_busydash.png b/assets/img/os/Rancher_busydash.png deleted file mode 100644 index 1d330b5343..0000000000 Binary files a/assets/img/os/Rancher_busydash.png and /dev/null differ diff --git a/assets/img/os/rancheroshowitworks.png b/assets/img/os/rancheroshowitworks.png deleted file mode 100644 index 9e6164793d..0000000000 Binary files a/assets/img/os/rancheroshowitworks.png and /dev/null differ diff --git a/assets/img/rke/rke-etcd-backup.png b/assets/img/rke/rke-etcd-backup.png deleted file mode 100644 index c6f04f6e9c..0000000000 Binary files a/assets/img/rke/rke-etcd-backup.png and /dev/null differ diff --git a/assets/img/rke/vsphere-advanced-parameters.png b/assets/img/rke/vsphere-advanced-parameters.png deleted file mode 100644 index 77c558ffc5..0000000000 Binary files a/assets/img/rke/vsphere-advanced-parameters.png and /dev/null differ diff 
--git a/assets/js/app.js b/assets/js/app.js deleted file mode 100644 index ce9b1c4de7..0000000000 --- a/assets/js/app.js +++ /dev/null @@ -1,142 +0,0 @@ -// This is for any custom JS that may need to be added to individual apps. -// Main JS is located in Rancher Website Theme -const bootstrapDocsSearch = function () { - - var firstSearchRender = true; - - const search = instantsearch({ - appId: '30NEY6C9UY', - apiKey: 'b7f43c16886fec97b87981e9e62ef1a5', - indexName: window.location.host === 'rancher.com' ? 'prod_docs' : 'dev_docs', - routing: true, - searchFunction: (helper) => { - - if (helper.state.query === "" && firstSearchRender) { - - firstSearchRender = false; - - return; - } - - helper.search(); - } - }); - - search.addWidget( - instantsearch.widgets.searchBox({ - autofocus: true, - loadingIndicator: true, - container: '#search-box', - placeholder: 'Search Blog, Events, etc...', - magnifier: false, - reset: true, - }) - ); - - - search.addWidget( - instantsearch.widgets.infiniteHits({ - container: '#hits', - templates: { - empty: '
No results', - item: `{{{_highlightResult.title.value}}} - {{{_snippetResult.content.value}}}
` - }, - escapeHits: true, - }) - ); - - search.start(); - - $(document).on('click', '.search-open', (e) => { - let wrapperId = $(e.currentTarget).data('launch-id'); - let wrapper = $(`#${wrapperId}`); - - let content = wrapper.find('div.content'); - - const modal = new tingle.modal({ - closeMethods: ['overlay', 'button', 'escape'], - closeLabel: "Close", - - onOpen: () => { - console.log('Search opened'); - }, - - onClose: () => { - console.log('Search closed'); - }, - - beforeClose: () => { - content.detach() - wrapper.append(content); - return true; - } - }); - - // set content - content.detach(); - modal.setContent(content[0]); - - // add a button - let label = wrapper.find('.footer-button-label').data('footer-label'); - if (label) { - modal.addFooterBtn(label, 'tingle-btn tingle-btn--primary', function () { - // here goes some logic - modal.close(); - }); - } - - modal.open(); - setTimeout(function () { - $('#search-box').focus(); - }, 50); - }); - - //mobile nav toggle - $(document).ready(function () { - $("body").addClass("js"); - var $menu = $("#menu"), - $menulink = $(".menu-link"); - - $menulink.click(function () { - $menulink.toggleClass("active"); - $menu.toggleClass("active"); - return false; - }); - }); -} - -const bootstrapIdLinks = function () { - const $container = $('.main-content') - const selector = 'h2[id], h3[id], h4[id], h5[id], h6[id]'; - - $container.on('mouseenter', selector, function (e) { - $(e.target).append($('').addClass('header-anchor').attr('href', '#' + e.target.id).html('')); - }); - - $container.on('mouseleave', selector, function (e) { - $container.find('.header-anchor').remove(); - }); -} - -const replaceReleaseChannel = function () { - const form = $('#release-channel')[0]; - if (form) { - const val = form.channel.value; - - $('CODE').each((idx, code) => { - const $code = $(code); - const text = $code.data('original') || code.innerHTML; - - if (text.includes('<CHART_REPO>')) { - $code.data('original', text); - code.innerHTML = text.replace(/<CHART_REPO>/g, val); - } - }); - } -}; - -$(document).ready(() => { - bootstrapDocsSearch(); - bootstrapIdLinks(); - replaceReleaseChannel(); -}); diff --git a/assets/js/base.js b/assets/js/base.js deleted file mode 120000 index 1f2dd1cf3c..0000000000 --- a/assets/js/base.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/rancher-website-theme/assets/js/base.js \ No newline at end of file diff --git a/assets/js/empty.js b/assets/js/empty.js deleted file mode 100644 index 4d4280715b..0000000000 --- a/assets/js/empty.js +++ /dev/null @@ -1,3 +0,0 @@ -/*-----*/ -; -/*-----*/ diff --git a/assets/js/instantsearch.js b/assets/js/instantsearch.js deleted file mode 120000 index 84e524bdcd..0000000000 --- a/assets/js/instantsearch.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/instantsearch.js/dist/instantsearch.js \ No newline at end of file diff --git a/assets/js/jquery.js b/assets/js/jquery.js deleted file mode 120000 index 91cbc2acf9..0000000000 --- a/assets/js/jquery.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/jquery/dist/jquery.js \ No newline at end of file diff --git a/assets/js/jquery.lory.js b/assets/js/jquery.lory.js deleted file mode 120000 index 9ce2ad2332..0000000000 --- a/assets/js/jquery.lory.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/lory.js/dist/jquery.lory.js \ No newline at end of file diff --git a/assets/js/moment-timezone.js b/assets/js/moment-timezone.js deleted file mode 120000 index 88eb1b562a..0000000000 --- a/assets/js/moment-timezone.js +++ /dev/null @@ -1 +0,0 @@ 
-../../node_modules/moment-timezone/builds/moment-timezone-with-data-10-year-range.js \ No newline at end of file diff --git a/assets/js/moment.js b/assets/js/moment.js deleted file mode 120000 index 4b73243ead..0000000000 --- a/assets/js/moment.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/moment/moment.js \ No newline at end of file diff --git a/assets/js/scrollreveal.js b/assets/js/scrollreveal.js deleted file mode 100644 index c2b2dcc569..0000000000 --- a/assets/js/scrollreveal.js +++ /dev/null @@ -1,1546 +0,0 @@ -/*! @license ScrollReveal v4.0.5 - - Copyright 2018 Fisssion LLC. - - Licensed under the GNU General Public License 3.0 for - compatible open source projects and non-commercial use. - - For commercial sites, themes, projects, and applications, - keep your source code private/proprietary by purchasing - a commercial license from https://scrollrevealjs.org/ -*/ -(function (global, factory) { - typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : - typeof define === 'function' && define.amd ? define(factory) : - (global.ScrollReveal = factory()); -}(this, (function () { 'use strict'; - -var defaults = { - delay: 0, - distance: '0', - duration: 600, - easing: 'cubic-bezier(0.5, 0, 0, 1)', - interval: 0, - opacity: 0, - origin: 'bottom', - rotate: { - x: 0, - y: 0, - z: 0 - }, - scale: 1, - cleanup: false, - container: document.documentElement, - desktop: true, - mobile: true, - reset: false, - useDelay: 'always', - viewFactor: 0.0, - viewOffset: { - top: 0, - right: 0, - bottom: 0, - left: 0 - }, - afterReset: function afterReset() {}, - afterReveal: function afterReveal() {}, - beforeReset: function beforeReset() {}, - beforeReveal: function beforeReveal() {} -} - -function failure() { - document.documentElement.classList.remove('sr'); - - return { - clean: function clean() {}, - destroy: function destroy() {}, - reveal: function reveal() {}, - sync: function sync() {}, - get noop() { - return true - } - } -} - -function success() { - document.documentElement.classList.add('sr'); - - if (document.body) { - document.body.style.height = '100%'; - } else { - document.addEventListener('DOMContentLoaded', function () { - document.body.style.height = '100%'; - }); - } -} - -var mount = { success: success, failure: failure } - -/*! @license is-dom-node v1.0.4 - - Copyright 2018 Fisssion LLC. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -*/ -function isDomNode(x) { - return typeof window.Node === 'object' - ? 
x instanceof window.Node - : x !== null && - typeof x === 'object' && - typeof x.nodeType === 'number' && - typeof x.nodeName === 'string' -} - -/*! @license is-dom-node-list v1.2.1 - - Copyright 2018 Fisssion LLC. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -*/ -function isDomNodeList(x) { - var prototypeToString = Object.prototype.toString.call(x); - var regex = /^\[object (HTMLCollection|NodeList|Object)\]$/; - - return typeof window.NodeList === 'object' - ? x instanceof window.NodeList - : x !== null && - typeof x === 'object' && - typeof x.length === 'number' && - regex.test(prototypeToString) && - (x.length === 0 || isDomNode(x[0])) -} - -/*! @license Tealight v0.3.6 - - Copyright 2018 Fisssion LLC. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. 
- -*/ -function tealight(target, context) { - if ( context === void 0 ) { context = document; } - - if (target instanceof Array) { return target.filter(isDomNode); } - if (isDomNode(target)) { return [target]; } - if (isDomNodeList(target)) { return Array.prototype.slice.call(target); } - if (typeof target === "string") { - try { - var query = context.querySelectorAll(target); - return Array.prototype.slice.call(query); - } catch (err) { - return []; - } - } - return []; -} - -function isObject(x) { - return ( - x !== null && - x instanceof Object && - (x.constructor === Object || - Object.prototype.toString.call(x) === '[object Object]') - ) -} - -function each(collection, callback) { - if (isObject(collection)) { - var keys = Object.keys(collection); - return keys.forEach(function (key) { return callback(collection[key], key, collection); }) - } - if (collection instanceof Array) { - return collection.forEach(function (item, i) { return callback(item, i, collection); }) - } - throw new TypeError('Expected either an array or object literal.') -} - -function logger(message) { - var details = [], len = arguments.length - 1; - while ( len-- > 0 ) details[ len ] = arguments[ len + 1 ]; - - if (this.constructor.debug && console) { - var report = "%cScrollReveal: " + message; - details.forEach(function (detail) { return (report += "\n — " + detail); }); - console.log(report, 'color: #ea654b;'); // eslint-disable-line no-console - } -} - -function rinse() { - var this$1 = this; - - var struct = function () { return ({ - active: [], - stale: [] - }); }; - - var elementIds = struct(); - var sequenceIds = struct(); - var containerIds = struct(); - - /** - * Take stock of active element IDs. - */ - try { - each(tealight('[data-sr-id]'), function (node) { - var id = parseInt(node.getAttribute('data-sr-id')); - elementIds.active.push(id); - }); - } catch (e) { - throw e - } - /** - * Destroy stale elements. - */ - each(this.store.elements, function (element) { - if (elementIds.active.indexOf(element.id) === -1) { - elementIds.stale.push(element.id); - } - }); - - each(elementIds.stale, function (staleId) { return delete this$1.store.elements[staleId]; }); - - /** - * Take stock of active container and sequence IDs. - */ - each(this.store.elements, function (element) { - if (containerIds.active.indexOf(element.containerId) === -1) { - containerIds.active.push(element.containerId); - } - if (element.hasOwnProperty('sequence')) { - if (sequenceIds.active.indexOf(element.sequence.id) === -1) { - sequenceIds.active.push(element.sequence.id); - } - } - }); - - /** - * Destroy stale containers. - */ - each(this.store.containers, function (container) { - if (containerIds.active.indexOf(container.id) === -1) { - containerIds.stale.push(container.id); - } - }); - - each(containerIds.stale, function (staleId) { - var stale = this$1.store.containers[staleId].node; - stale.removeEventListener('scroll', this$1.delegate); - stale.removeEventListener('resize', this$1.delegate); - delete this$1.store.containers[staleId]; - }); - - /** - * Destroy stale sequences. 
- */ - each(this.store.sequences, function (sequence) { - if (sequenceIds.active.indexOf(sequence.id) === -1) { - sequenceIds.stale.push(sequence.id); - } - }); - - each(sequenceIds.stale, function (staleId) { return delete this$1.store.sequences[staleId]; }); -} - -function clean(target) { - var this$1 = this; - - var dirty; - try { - each(tealight(target), function (node) { - var id = node.getAttribute('data-sr-id'); - if (id !== null) { - dirty = true; - var element = this$1.store.elements[id]; - if (element.callbackTimer) { - window.clearTimeout(element.callbackTimer.clock); - } - node.setAttribute('style', element.styles.inline.generated); - node.removeAttribute('data-sr-id'); - delete this$1.store.elements[id]; - } - }); - } catch (e) { - return logger.call(this, 'Clean failed.', e.message) - } - - if (dirty) { - try { - rinse.call(this); - } catch (e) { - return logger.call(this, 'Clean failed.', e.message) - } - } -} - -function destroy() { - var this$1 = this; - - /** - * Remove all generated styles and element ids - */ - each(this.store.elements, function (element) { - element.node.setAttribute('style', element.styles.inline.generated); - element.node.removeAttribute('data-sr-id'); - }); - - /** - * Remove all event listeners. - */ - each(this.store.containers, function (container) { - var target = - container.node === document.documentElement ? window : container.node; - target.removeEventListener('scroll', this$1.delegate); - target.removeEventListener('resize', this$1.delegate); - }); - - /** - * Clear all data from the store - */ - this.store = { - containers: {}, - elements: {}, - history: [], - sequences: {} - }; -} - -/*! @license Rematrix v0.3.0 - - Copyright 2018 Julian Lloyd. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. -*/ -/** - * @module Rematrix - */ - -/** - * Transformation matrices in the browser come in two flavors: - * - * - `matrix` using 6 values (short) - * - `matrix3d` using 16 values (long) - * - * This utility follows this [conversion guide](https://goo.gl/EJlUQ1) - * to expand short form matrices to their equivalent long form. - * - * @param {array} source - Accepts both short and long form matrices. 
- * @return {array} - */ -function format(source) { - if (source.constructor !== Array) { - throw new TypeError('Expected array.') - } - if (source.length === 16) { - return source - } - if (source.length === 6) { - var matrix = identity(); - matrix[0] = source[0]; - matrix[1] = source[1]; - matrix[4] = source[2]; - matrix[5] = source[3]; - matrix[12] = source[4]; - matrix[13] = source[5]; - return matrix - } - throw new RangeError('Expected array with either 6 or 16 values.') -} - -/** - * Returns a matrix representing no transformation. The product of any matrix - * multiplied by the identity matrix will be the original matrix. - * - * > **Tip:** Similar to how `5 * 1 === 5`, where `1` is the identity. - * - * @return {array} - */ -function identity() { - var matrix = []; - for (var i = 0; i < 16; i++) { - i % 5 == 0 ? matrix.push(1) : matrix.push(0); - } - return matrix -} - -/** - * Returns a 4x4 matrix describing the combined transformations - * of both arguments. - * - * > **Note:** Order is very important. For example, rotating 45° - * along the Z-axis, followed by translating 500 pixels along the - * Y-axis... is not the same as translating 500 pixels along the - * Y-axis, followed by rotating 45° along on the Z-axis. - * - * @param {array} m - Accepts both short and long form matrices. - * @param {array} x - Accepts both short and long form matrices. - * @return {array} - */ -function multiply(m, x) { - var fm = format(m); - var fx = format(x); - var product = []; - - for (var i = 0; i < 4; i++) { - var row = [fm[i], fm[i + 4], fm[i + 8], fm[i + 12]]; - for (var j = 0; j < 4; j++) { - var k = j * 4; - var col = [fx[k], fx[k + 1], fx[k + 2], fx[k + 3]]; - var result = - row[0] * col[0] + row[1] * col[1] + row[2] * col[2] + row[3] * col[3]; - - product[i + k] = result; - } - } - - return product -} - -/** - * Attempts to return a 4x4 matrix describing the CSS transform - * matrix passed in, but will return the identity matrix as a - * fallback. - * - * > **Tip:** This method is used to convert a CSS matrix (retrieved as a - * `string` from computed styles) to its equivalent array format. - * - * @param {string} source - `matrix` or `matrix3d` CSS Transform value. - * @return {array} - */ -function parse(source) { - if (typeof source === 'string') { - var match = source.match(/matrix(3d)?\(([^)]+)\)/); - if (match) { - var raw = match[2].split(', ').map(parseFloat); - return format(raw) - } - } - return identity() -} - -/** - * Returns a 4x4 matrix describing X-axis rotation. - * - * @param {number} angle - Measured in degrees. - * @return {array} - */ -function rotateX(angle) { - var theta = Math.PI / 180 * angle; - var matrix = identity(); - - matrix[5] = matrix[10] = Math.cos(theta); - matrix[6] = matrix[9] = Math.sin(theta); - matrix[9] *= -1; - - return matrix -} - -/** - * Returns a 4x4 matrix describing Y-axis rotation. - * - * @param {number} angle - Measured in degrees. - * @return {array} - */ -function rotateY(angle) { - var theta = Math.PI / 180 * angle; - var matrix = identity(); - - matrix[0] = matrix[10] = Math.cos(theta); - matrix[2] = matrix[8] = Math.sin(theta); - matrix[2] *= -1; - - return matrix -} - -/** - * Returns a 4x4 matrix describing Z-axis rotation. - * - * @param {number} angle - Measured in degrees. 
- * @return {array} - */ -function rotateZ(angle) { - var theta = Math.PI / 180 * angle; - var matrix = identity(); - - matrix[0] = matrix[5] = Math.cos(theta); - matrix[1] = matrix[4] = Math.sin(theta); - matrix[4] *= -1; - - return matrix -} - -/** - * Returns a 4x4 matrix describing 2D scaling. The first argument - * is used for both X and Y-axis scaling, unless an optional - * second argument is provided to explicitly define Y-axis scaling. - * - * @param {number} scalar - Decimal multiplier. - * @param {number} [scalarY] - Decimal multiplier. - * @return {array} - */ -function scale(scalar, scalarY) { - var matrix = identity(); - - matrix[0] = scalar; - matrix[5] = typeof scalarY === 'number' ? scalarY : scalar; - - return matrix -} - -/** - * Returns a 4x4 matrix describing X-axis translation. - * - * @param {number} distance - Measured in pixels. - * @return {array} - */ -function translateX(distance) { - var matrix = identity(); - matrix[12] = distance; - return matrix -} - -/** - * Returns a 4x4 matrix describing Y-axis translation. - * - * @param {number} distance - Measured in pixels. - * @return {array} - */ -function translateY(distance) { - var matrix = identity(); - matrix[13] = distance; - return matrix -} - -var getPrefixedCssProp = (function () { - var properties = {}; - var style = document.documentElement.style; - - function getPrefixedCssProperty(name, source) { - if ( source === void 0 ) source = style; - - if (name && typeof name === 'string') { - if (properties[name]) { - return properties[name] - } - if (typeof source[name] === 'string') { - return (properties[name] = name) - } - if (typeof source[("-webkit-" + name)] === 'string') { - return (properties[name] = "-webkit-" + name) - } - throw new RangeError(("Unable to find \"" + name + "\" style property.")) - } - throw new TypeError('Expected a string.') - } - - getPrefixedCssProperty.clearCache = function () { return (properties = {}); }; - - return getPrefixedCssProperty -})(); - -function style(element) { - var computed = window.getComputedStyle(element.node); - var position = computed.position; - var config = element.config; - - /** - * Generate inline styles - */ - var inline = {}; - var inlineStyle = element.node.getAttribute('style') || ''; - var inlineMatch = inlineStyle.match(/[\w-]+\s*:\s*[^;]+\s*/gi) || []; - - inline.computed = inlineMatch ? inlineMatch.map(function (m) { return m.trim(); }).join('; ') + ';' : ''; - - inline.generated = inlineMatch.some(function (m) { return m.match(/visibility\s?:\s?visible/i); }) - ? inline.computed - : inlineMatch.concat( ['visibility: visible']).map(function (m) { return m.trim(); }).join('; ') + ';'; - - /** - * Generate opacity styles - */ - var computedOpacity = parseFloat(computed.opacity); - var configOpacity = !isNaN(parseFloat(config.opacity)) - ? parseFloat(config.opacity) - : parseFloat(computed.opacity); - - var opacity = { - computed: computedOpacity !== configOpacity ? ("opacity: " + computedOpacity + ";") : '', - generated: computedOpacity !== configOpacity ? ("opacity: " + configOpacity + ";") : '' - }; - - /** - * Generate transformation styles - */ - var transformations = []; - - if (parseFloat(config.distance)) { - var axis = config.origin === 'top' || config.origin === 'bottom' ? 'Y' : 'X'; - - /** - * Let’s make sure our our pixel distances are negative for top and left. - * e.g. { origin: 'top', distance: '25px' } starts at `top: -25px` in CSS. 
- */ - var distance = config.distance; - if (config.origin === 'top' || config.origin === 'left') { - distance = /^-/.test(distance) ? distance.substr(1) : ("-" + distance); - } - - var ref = distance.match(/(^-?\d+\.?\d?)|(em$|px$|%$)/g); - var value = ref[0]; - var unit = ref[1]; - - switch (unit) { - case 'em': - distance = parseInt(computed.fontSize) * value; - break - case 'px': - distance = value; - break - case '%': - /** - * Here we use `getBoundingClientRect` instead of - * the existing data attached to `element.geometry` - * because only the former includes any transformations - * current applied to the element. - * - * If that behavior ends up being unintuitive, this - * logic could instead utilize `element.geometry.height` - * and `element.geoemetry.width` for the distaince calculation - */ - distance = - axis === 'Y' - ? element.node.getBoundingClientRect().height * value / 100 - : element.node.getBoundingClientRect().width * value / 100; - break - default: - throw new RangeError('Unrecognized or missing distance unit.') - } - - if (axis === 'Y') { - transformations.push(translateY(distance)); - } else { - transformations.push(translateX(distance)); - } - } - - if (config.rotate.x) { transformations.push(rotateX(config.rotate.x)); } - if (config.rotate.y) { transformations.push(rotateY(config.rotate.y)); } - if (config.rotate.z) { transformations.push(rotateZ(config.rotate.z)); } - if (config.scale !== 1) { - if (config.scale === 0) { - /** - * The CSS Transforms matrix interpolation specification - * basically disallows transitions of non-invertible - * matrixes, which means browsers won't transition - * elements with zero scale. - * - * That’s inconvenient for the API and developer - * experience, so we simply nudge their value - * slightly above zero; this allows browsers - * to transition our element as expected. - * - * `0.0002` was the smallest number - * that performed across browsers. 
- */ - transformations.push(scale(0.0002)); - } else { - transformations.push(scale(config.scale)); - } - } - - var transform = {}; - if (transformations.length) { - transform.property = getPrefixedCssProp('transform'); - /** - * The default computed transform value should be one of: - * undefined || 'none' || 'matrix()' || 'matrix3d()' - */ - transform.computed = { - raw: computed[transform.property], - matrix: parse(computed[transform.property]) - }; - - transformations.unshift(transform.computed.matrix); - var product = transformations.reduce(multiply); - - transform.generated = { - initial: ((transform.property) + ": matrix3d(" + (product.join(', ')) + ");"), - final: ((transform.property) + ": matrix3d(" + (transform.computed.matrix.join( - ', ' - )) + ");") - }; - } else { - transform.generated = { - initial: '', - final: '' - }; - } - - /** - * Generate transition styles - */ - var transition = {}; - if (opacity.generated || transform.generated.initial) { - transition.property = getPrefixedCssProp('transition'); - transition.computed = computed[transition.property]; - transition.fragments = []; - - var delay = config.delay; - var duration = config.duration; - var easing = config.easing; - - if (opacity.generated) { - transition.fragments.push({ - delayed: ("opacity " + (duration / 1000) + "s " + easing + " " + (delay / 1000) + "s"), - instant: ("opacity " + (duration / 1000) + "s " + easing + " 0s") - }); - } - - if (transform.generated.initial) { - transition.fragments.push({ - delayed: ((transform.property) + " " + (duration / 1000) + "s " + easing + " " + (delay / - 1000) + "s"), - instant: ((transform.property) + " " + (duration / 1000) + "s " + easing + " 0s") - }); - } - - /** - * The default computed transition property should be one of: - * undefined || '' || 'all 0s ease 0s' || 'all 0s 0s cubic-bezier()' - */ - if (transition.computed && !transition.computed.match(/all 0s/)) { - transition.fragments.unshift({ - delayed: transition.computed, - instant: transition.computed - }); - } - - var composed = transition.fragments.reduce( - function (composition, fragment, i) { - composition.delayed += - i === 0 ? fragment.delayed : (", " + (fragment.delayed)); - composition.instant += - i === 0 ? 
fragment.instant : (", " + (fragment.instant)); - return composition - }, - { - delayed: '', - instant: '' - } - ); - - transition.generated = { - delayed: ((transition.property) + ": " + (composed.delayed) + ";"), - instant: ((transition.property) + ": " + (composed.instant) + ";") - }; - } else { - transition.generated = { - delayed: '', - instant: '' - }; - } - - return { - inline: inline, - opacity: opacity, - position: position, - transform: transform, - transition: transition - } -} - -function animate(element, force) { - if ( force === void 0 ) force = {}; - - var pristine = force.pristine || this.pristine; - var delayed = - element.config.useDelay === 'always' || - (element.config.useDelay === 'onload' && pristine) || - (element.config.useDelay === 'once' && !element.seen); - - var shouldReveal = element.visible && !element.revealed; - var shouldReset = !element.visible && element.revealed && element.config.reset; - - if (force.reveal || shouldReveal) { - return triggerReveal.call(this, element, delayed) - } - - if (force.reset || shouldReset) { - return triggerReset.call(this, element) - } -} - -function triggerReveal(element, delayed) { - var styles = [ - element.styles.inline.generated, - element.styles.opacity.computed, - element.styles.transform.generated.final - ]; - if (delayed) { - styles.push(element.styles.transition.generated.delayed); - } else { - styles.push(element.styles.transition.generated.instant); - } - element.revealed = element.seen = true; - element.node.setAttribute('style', styles.filter(function (s) { return s !== ''; }).join(' ')); - registerCallbacks.call(this, element, delayed); -} - -function triggerReset(element) { - var styles = [ - element.styles.inline.generated, - element.styles.opacity.generated, - element.styles.transform.generated.initial, - element.styles.transition.generated.instant - ]; - element.revealed = false; - element.node.setAttribute('style', styles.filter(function (s) { return s !== ''; }).join(' ')); - registerCallbacks.call(this, element); -} - -function registerCallbacks(element, isDelayed) { - var this$1 = this; - - var duration = isDelayed - ? element.config.duration + element.config.delay - : element.config.duration; - - var beforeCallback = element.revealed - ? element.config.beforeReveal - : element.config.beforeReset; - - var afterCallback = element.revealed - ? element.config.afterReveal - : element.config.afterReset; - - var elapsed = 0; - if (element.callbackTimer) { - elapsed = Date.now() - element.callbackTimer.start; - window.clearTimeout(element.callbackTimer.clock); - } - - beforeCallback(element.node); - - element.callbackTimer = { - start: Date.now(), - clock: window.setTimeout(function () { - afterCallback(element.node); - element.callbackTimer = null; - if (element.revealed && !element.config.reset && element.config.cleanup) { - clean.call(this$1, element.node); - } - }, duration - elapsed) - }; -} - -var nextUniqueId = (function () { - var uid = 0; - return function () { return uid++; } -})(); - -function sequence(element, pristine) { - if ( pristine === void 0 ) pristine = this.pristine; - - /** - * We first check if the element should reset. 
- */ - if (!element.visible && element.revealed && element.config.reset) { - return animate.call(this, element, { reset: true }) - } - - var seq = this.store.sequences[element.sequence.id]; - var i = element.sequence.index; - - if (seq) { - var visible = new SequenceModel(seq, 'visible', this.store); - var revealed = new SequenceModel(seq, 'revealed', this.store); - - seq.models = { visible: visible, revealed: revealed }; - - /** - * If the sequence has no revealed members, - * then we reveal the first visible element - * within that sequence. - * - * The sequence then cues a recursive call - * in both directions. - */ - if (!revealed.body.length) { - var nextId = seq.members[visible.body[0]]; - var nextElement = this.store.elements[nextId]; - - if (nextElement) { - cue.call(this, seq, visible.body[0], -1, pristine); - cue.call(this, seq, visible.body[0], +1, pristine); - return animate.call(this, nextElement, { reveal: true, pristine: pristine }) - } - } - - /** - * If our element isn’t resetting, we check the - * element sequence index against the head, and - * then the foot of the sequence. - */ - if ( - !seq.blocked.head && - i === [].concat( revealed.head ).pop() && - i >= [].concat( visible.body ).shift() - ) { - cue.call(this, seq, i, -1, pristine); - return animate.call(this, element, { reveal: true, pristine: pristine }) - } - - if ( - !seq.blocked.foot && - i === [].concat( revealed.foot ).shift() && - i <= [].concat( visible.body ).pop() - ) { - cue.call(this, seq, i, +1, pristine); - return animate.call(this, element, { reveal: true, pristine: pristine }) - } - } -} - -function Sequence(interval) { - var i = Math.abs(interval); - if (!isNaN(i)) { - this.id = nextUniqueId(); - this.interval = Math.max(i, 16); - this.members = []; - this.models = {}; - this.blocked = { - head: false, - foot: false - }; - } else { - throw new RangeError('Invalid sequence interval.') - } -} - -function SequenceModel(seq, prop, store) { - var this$1 = this; - - this.head = []; - this.body = []; - this.foot = []; - - each(seq.members, function (id, index) { - var element = store.elements[id]; - if (element && element[prop]) { - this$1.body.push(index); - } - }); - - if (this.body.length) { - each(seq.members, function (id, index) { - var element = store.elements[id]; - if (element && !element[prop]) { - if (index < this$1.body[0]) { - this$1.head.push(index); - } else { - this$1.foot.push(index); - } - } - }); - } -} - -function cue(seq, i, direction, pristine) { - var this$1 = this; - - var blocked = ['head', null, 'foot'][1 + direction]; - var nextId = seq.members[i + direction]; - var nextElement = this.store.elements[nextId]; - - seq.blocked[blocked] = true; - - setTimeout(function () { - seq.blocked[blocked] = false; - if (nextElement) { - sequence.call(this$1, nextElement, pristine); - } - }, seq.interval); -} - -function initialize() { - var this$1 = this; - - rinse.call(this); - - each(this.store.elements, function (element) { - var styles = [element.styles.inline.generated]; - - if (element.visible) { - styles.push(element.styles.opacity.computed); - styles.push(element.styles.transform.generated.final); - element.revealed = true; - } else { - styles.push(element.styles.opacity.generated); - styles.push(element.styles.transform.generated.initial); - element.revealed = false; - } - - element.node.setAttribute('style', styles.filter(function (s) { return s !== ''; }).join(' ')); - }); - - each(this.store.containers, function (container) { - var target = - container.node === 
document.documentElement ? window : container.node; - target.addEventListener('scroll', this$1.delegate); - target.addEventListener('resize', this$1.delegate); - }); - - /** - * Manually invoke delegate once to capture - * element and container dimensions, container - * scroll position, and trigger any valid reveals - */ - this.delegate(); - - /** - * Wipe any existing `setTimeout` now - * that initialization has completed. - */ - this.initTimeout = null; -} - -function isMobile(agent) { - if ( agent === void 0 ) agent = navigator.userAgent; - - return /Android|iPhone|iPad|iPod/i.test(agent) -} - -function deepAssign(target) { - var sources = [], len = arguments.length - 1; - while ( len-- > 0 ) sources[ len ] = arguments[ len + 1 ]; - - if (isObject(target)) { - each(sources, function (source) { - each(source, function (data, key) { - if (isObject(data)) { - if (!target[key] || !isObject(target[key])) { - target[key] = {}; - } - deepAssign(target[key], data); - } else { - target[key] = data; - } - }); - }); - return target - } else { - throw new TypeError('Target must be an object literal.') - } -} - -function reveal(target, options, syncing) { - var this$1 = this; - if ( options === void 0 ) options = {}; - if ( syncing === void 0 ) syncing = false; - - var containerBuffer = []; - var sequence$$1; - var interval = options.interval || defaults.interval; - - try { - if (interval) { - sequence$$1 = new Sequence(interval); - } - - var nodes = tealight(target); - if (!nodes.length) { - throw new Error('Invalid reveal target.') - } - - var elements = nodes.reduce(function (elementBuffer, elementNode) { - var element = {}; - var existingId = elementNode.getAttribute('data-sr-id'); - - if (existingId) { - deepAssign(element, this$1.store.elements[existingId]); - - /** - * In order to prevent previously generated styles - * from throwing off the new styles, the style tag - * has to be reverted to its pre-reveal state. 
- */ - element.node.setAttribute('style', element.styles.inline.computed); - } else { - element.id = nextUniqueId(); - element.node = elementNode; - element.seen = false; - element.revealed = false; - element.visible = false; - } - - var config = deepAssign({}, element.config || this$1.defaults, options); - - if ((!config.mobile && isMobile()) || (!config.desktop && !isMobile())) { - if (existingId) { - clean.call(this$1, element); - } - return elementBuffer // skip elements that are disabled - } - - var containerNode = tealight(config.container)[0]; - if (!containerNode) { - throw new Error('Invalid container.') - } - if (!containerNode.contains(elementNode)) { - return elementBuffer // skip elements found outside the container - } - - var containerId; - { - containerId = getContainerId( - containerNode, - containerBuffer, - this$1.store.containers - ); - if (containerId === null) { - containerId = nextUniqueId(); - containerBuffer.push({ id: containerId, node: containerNode }); - } - } - - element.config = config; - element.containerId = containerId; - element.styles = style(element); - - if (sequence$$1) { - element.sequence = { - id: sequence$$1.id, - index: sequence$$1.members.length - }; - sequence$$1.members.push(element.id); - } - - elementBuffer.push(element); - return elementBuffer - }, []); - - /** - * Modifying the DOM via setAttribute needs to be handled - * separately from reading computed styles in the map above - * for the browser to batch DOM changes (limiting reflows) - */ - each(elements, function (element) { - this$1.store.elements[element.id] = element; - element.node.setAttribute('data-sr-id', element.id); - }); - } catch (e) { - return logger.call(this, 'Reveal failed.', e.message) - } - - /** - * Now that element set-up is complete... - * Let’s commit any container and sequence data we have to the store. - */ - each(containerBuffer, function (container) { - this$1.store.containers[container.id] = { - id: container.id, - node: container.node - }; - }); - if (sequence$$1) { - this.store.sequences[sequence$$1.id] = sequence$$1; - } - - /** - * If reveal wasn't invoked by sync, we want to - * make sure to add this call to the history. - */ - if (syncing !== true) { - this.store.history.push({ target: target, options: options }); - - /** - * Push initialization to the event queue, giving - * multiple reveal calls time to be interpreted. - */ - if (this.initTimeout) { - window.clearTimeout(this.initTimeout); - } - this.initTimeout = window.setTimeout(initialize.bind(this), 0); - } -} - -function getContainerId(node) { - var collections = [], len = arguments.length - 1; - while ( len-- > 0 ) collections[ len ] = arguments[ len + 1 ]; - - var id = null; - each(collections, function (collection) { - each(collection, function (container) { - if (id === null && container.node === node) { - id = container.id; - } - }); - }); - return id -} - -/** - * Re-runs the reveal method for each record stored in history, - * for capturing new content asynchronously loaded into the DOM. - */ -function sync() { - var this$1 = this; - - each(this.store.history, function (record) { - reveal.call(this$1, record.target, record.options, true); - }); - - initialize.call(this); -} - -var polyfill = function (x) { return (x > 0) - (x < 0) || +x; }; -var mathSign = Math.sign || polyfill - -/*! @license miniraf v1.0.0 - - Copyright 2018 Fisssion LLC. 
- - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. - -*/ -var polyfill$1 = (function () { - var clock = Date.now(); - - return function (callback) { - var currentTime = Date.now(); - if (currentTime - clock > 16) { - clock = currentTime; - callback(currentTime); - } else { - setTimeout(function () { return polyfill$1(callback); }, 0); - } - } -})(); - -var index = window.requestAnimationFrame || - window.webkitRequestAnimationFrame || - window.mozRequestAnimationFrame || - polyfill$1; - -function getGeometry(target, isContainer) { - /** - * We want to ignore padding and scrollbars for container elements. - * More information here: https://goo.gl/vOZpbz - */ - var height = isContainer ? target.node.clientHeight : target.node.offsetHeight; - var width = isContainer ? 
target.node.clientWidth : target.node.offsetWidth; - - var offsetTop = 0; - var offsetLeft = 0; - var node = target.node; - - do { - if (!isNaN(node.offsetTop)) { - offsetTop += node.offsetTop; - } - if (!isNaN(node.offsetLeft)) { - offsetLeft += node.offsetLeft; - } - node = node.offsetParent; - } while (node) - - return { - bounds: { - top: offsetTop, - right: offsetLeft + width, - bottom: offsetTop + height, - left: offsetLeft - }, - height: height, - width: width - } -} - -function getScrolled(container) { - var top, left; - if (container.node === document.documentElement) { - top = window.pageYOffset; - left = window.pageXOffset; - } else { - top = container.node.scrollTop; - left = container.node.scrollLeft; - } - return { top: top, left: left } -} - -function isElementVisible(element) { - if ( element === void 0 ) element = {}; - - var container = this.store.containers[element.containerId]; - if (!container) { return } - - var viewFactor = Math.max(0, Math.min(1, element.config.viewFactor)); - var viewOffset = element.config.viewOffset; - - var elementBounds = { - top: element.geometry.bounds.top + element.geometry.height * viewFactor, - right: element.geometry.bounds.right - element.geometry.width * viewFactor, - bottom: element.geometry.bounds.bottom - element.geometry.height * viewFactor, - left: element.geometry.bounds.left + element.geometry.width * viewFactor - }; - - var containerBounds = { - top: container.geometry.bounds.top + container.scroll.top + viewOffset.top, - right: container.geometry.bounds.right + container.scroll.left - viewOffset.right, - bottom: - container.geometry.bounds.bottom + container.scroll.top - viewOffset.bottom, - left: container.geometry.bounds.left + container.scroll.left + viewOffset.left - }; - - return ( - (elementBounds.top < containerBounds.bottom && - elementBounds.right > containerBounds.left && - elementBounds.bottom > containerBounds.top && - elementBounds.left < containerBounds.right) || - element.styles.position === 'fixed' - ) -} - -function delegate( - event, - elements -) { - var this$1 = this; - if ( event === void 0 ) event = { type: 'init' }; - if ( elements === void 0 ) elements = this.store.elements; - - index(function () { - var stale = event.type === 'init' || event.type === 'resize'; - - each(this$1.store.containers, function (container) { - if (stale) { - container.geometry = getGeometry.call(this$1, container, true); - } - var scroll = getScrolled.call(this$1, container); - if (container.scroll) { - container.direction = { - x: mathSign(scroll.left - container.scroll.left), - y: mathSign(scroll.top - container.scroll.top) - }; - } - container.scroll = scroll; - }); - - /** - * Due to how the sequencer is implemented, it’s - * important that we update the state of all - * elements, before any animation logic is - * evaluated (in the second loop below). 
- */ - each(elements, function (element) { - if (stale) { - element.geometry = getGeometry.call(this$1, element); - } - element.visible = isElementVisible.call(this$1, element); - }); - - each(elements, function (element) { - if (element.sequence) { - sequence.call(this$1, element); - } else { - animate.call(this$1, element); - } - }); - - this$1.pristine = false; - }); -} - -function transformSupported() { - var style = document.documentElement.style; - return 'transform' in style || 'WebkitTransform' in style -} - -function transitionSupported() { - var style = document.documentElement.style; - return 'transition' in style || 'WebkitTransition' in style -} - -var version = "4.0.5"; - -var boundDelegate; -var boundDestroy; -var boundReveal; -var boundClean; -var boundSync; -var config; -var debug; -var instance; - -function ScrollReveal(options) { - if ( options === void 0 ) options = {}; - - var invokedWithoutNew = - typeof this === 'undefined' || - Object.getPrototypeOf(this) !== ScrollReveal.prototype; - - if (invokedWithoutNew) { - return new ScrollReveal(options) - } - - if (!ScrollReveal.isSupported()) { - logger.call(this, 'Instantiation failed.', 'This browser is not supported.'); - return mount.failure() - } - - var buffer; - try { - buffer = config - ? deepAssign({}, config, options) - : deepAssign({}, defaults, options); - } catch (e) { - logger.call(this, 'Invalid configuration.', e.message); - return mount.failure() - } - - try { - var container = tealight(buffer.container)[0]; - if (!container) { - throw new Error('Invalid container.') - } - } catch (e) { - logger.call(this, e.message); - return mount.failure() - } - - config = buffer; - - if ((!config.mobile && isMobile()) || (!config.desktop && !isMobile())) { - logger.call( - this, - 'This device is disabled.', - ("desktop: " + (config.desktop)), - ("mobile: " + (config.mobile)) - ); - return mount.failure() - } - - mount.success(); - - this.store = { - containers: {}, - elements: {}, - history: [], - sequences: {} - }; - - this.pristine = true; - - boundDelegate = boundDelegate || delegate.bind(this); - boundDestroy = boundDestroy || destroy.bind(this); - boundReveal = boundReveal || reveal.bind(this); - boundClean = boundClean || clean.bind(this); - boundSync = boundSync || sync.bind(this); - - Object.defineProperty(this, 'delegate', { get: function () { return boundDelegate; } }); - Object.defineProperty(this, 'destroy', { get: function () { return boundDestroy; } }); - Object.defineProperty(this, 'reveal', { get: function () { return boundReveal; } }); - Object.defineProperty(this, 'clean', { get: function () { return boundClean; } }); - Object.defineProperty(this, 'sync', { get: function () { return boundSync; } }); - - Object.defineProperty(this, 'defaults', { get: function () { return config; } }); - Object.defineProperty(this, 'version', { get: function () { return version; } }); - Object.defineProperty(this, 'noop', { get: function () { return false; } }); - - return instance ? instance : (instance = this) -} - -ScrollReveal.isSupported = function () { return transformSupported() && transitionSupported(); }; - -Object.defineProperty(ScrollReveal, 'debug', { - get: function () { return debug || false; }, - set: function (value) { return (debug = typeof value === 'boolean' ? 
value : debug); } -}); - -ScrollReveal(); - -return ScrollReveal; - -}))); diff --git a/assets/js/tingle.js b/assets/js/tingle.js deleted file mode 120000 index 62cfc9491e..0000000000 --- a/assets/js/tingle.js +++ /dev/null @@ -1 +0,0 @@ -../../node_modules/tingle.js/dist/tingle.js \ No newline at end of file diff --git a/assets/js/vendors.js b/assets/js/vendors.js deleted file mode 100644 index 5993703b75..0000000000 --- a/assets/js/vendors.js +++ /dev/null @@ -1,33 +0,0 @@ -/*! @license ScrollReveal v4.0.5 - - Copyright 2018 Fisssion LLC. - - Licensed under the GNU General Public License 3.0 for - compatible open source projects and non-commercial use. - - For commercial sites, themes, projects, and applications, - keep your source code private/proprietary by purchasing - a commercial license from https://scrollrevealjs.org/ -*/ -var ScrollReveal=function(){"use strict";var r={delay:0,distance:"0",duration:600,easing:"cubic-bezier(0.5, 0, 0, 1)",interval:0,opacity:0,origin:"bottom",rotate:{x:0,y:0,z:0},scale:1,cleanup:!1,container:document.documentElement,desktop:!0,mobile:!0,reset:!1,useDelay:"always",viewFactor:0,viewOffset:{top:0,right:0,bottom:0,left:0},afterReset:function(){},afterReveal:function(){},beforeReset:function(){},beforeReveal:function(){}};var n={success:function(){document.documentElement.classList.add("sr"),document.body?document.body.style.height="100%":document.addEventListener("DOMContentLoaded",function(){document.body.style.height="100%"})},failure:function(){return document.documentElement.classList.remove("sr"),{clean:function(){},destroy:function(){},reveal:function(){},sync:function(){},get noop(){return!0}}}};function o(e){return"object"==typeof window.Node?e instanceof window.Node:null!==e&&"object"==typeof e&&"number"==typeof e.nodeType&&"string"==typeof e.nodeName}function u(e,t){if(void 0===t&&(t=document),e instanceof Array)return e.filter(o);if(o(e))return[e];if(n=e,i=Object.prototype.toString.call(n),"object"==typeof window.NodeList?n instanceof window.NodeList:null!==n&&"object"==typeof n&&"number"==typeof n.length&&/^\[object (HTMLCollection|NodeList|Object)\]$/.test(i)&&(0===n.length||o(n[0])))return Array.prototype.slice.call(e);var n,i;if("string"==typeof e)try{var r=t.querySelectorAll(e);return Array.prototype.slice.call(r)}catch(e){return[]}return[]}function s(e){return null!==e&&e instanceof Object&&(e.constructor===Object||"[object Object]"===Object.prototype.toString.call(e))}function f(n,i){if(s(n))return Object.keys(n).forEach(function(e){return i(n[e],e,n)});if(n instanceof Array)return n.forEach(function(e,t){return i(e,t,n)});throw new TypeError("Expected either an array or object literal.")}function h(e){for(var t=[],n=arguments.length-1;0=[].concat(r.body).shift())return g.call(this,n,i,-1,t),c.call(this,e,{reveal:!0,pristine:t});if(!n.blocked.foot&&i===[].concat(o.foot).shift()&&i<=[].concat(r.body).pop())return g.call(this,n,i,1,t),c.call(this,e,{reveal:!0,pristine:t})}}function v(e){var t=Math.abs(e);if(isNaN(t))throw new RangeError("Invalid sequence interval.");this.id=y(),this.interval=Math.max(t,16),this.members=[],this.models={},this.blocked={head:!1,foot:!1}}function d(e,i,r){var o=this;this.head=[],this.body=[],this.foot=[],f(e.members,function(e,t){var n=r.elements[e];n&&n[i]&&o.body.push(t)}),this.body.length&&f(e.members,function(e,t){var 
[Garbled in extraction: the remainder of the deleted minified vendors.js bundle (ScrollReveal v4.0.5 plus what appears to be a carousel plugin) and the markup of the deleted docs landing page. Only the landing page's card text is recoverable:]

- Rancher 2.0-2.4: Rancher manages all of your Kubernetes clusters everywhere, unifies them under centralized RBAC, monitors them and lets you easily deploy and manage workloads through an intuitive user interface.
- Rancher 1.6: If you haven't yet migrated to Rancher 2.x, you can still find documentation for 1.6 here. This is only for legacy users of the 1.6 product.
- RancherOS: RancherOS is the lightest, easiest way to run Docker in production. Engineered from the ground up for security and speed, it runs all system services and user workloads within Docker containers.
- Rancher Kubernetes Engine (RKE): Rancher Kubernetes Engine (RKE) is an extremely simple, lightning fast Kubernetes installer that works everywhere.
- K3s: Lightweight Kubernetes. Easy to install, half the memory, all in a binary less than 100 MB.
- RKE2: RKE2, also known as RKE Government, is Rancher's next-generation Kubernetes distribution.
- Longhorn: Longhorn is a lightweight, reliable, and powerful distributed block storage system for Kubernetes.
- diff --git a/content/k3s/_index.md b/content/k3s/_index.md deleted file mode 100644 index cff765de06..0000000000 --- a/content/k3s/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: K3S -weight: 1 -showBreadcrumb: false ---- diff --git a/content/k3s/latest/_index.md b/content/k3s/latest/_index.md deleted file mode 100644 index b0a33f9091..0000000000 --- a/content/k3s/latest/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: K3S -showBreadcrumb: false ---- diff --git a/content/k3s/latest/en/_index.md b/content/k3s/latest/en/_index.md deleted file mode 100644 index 4d71be76b3..0000000000 --- a/content/k3s/latest/en/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "K3s - Lightweight Kubernetes" -shortTitle: K3s -name: "menu" ---- - -Lightweight Kubernetes. Easy to install, half the memory, all in a binary of less than 100 MB. - -Great for: - -* Edge -* IoT -* CI -* Development -* ARM -* Embedding K8s -* Situations where a PhD in K8s clusterology is infeasible - -# What is K3s? - -K3s is a fully compliant Kubernetes distribution with the following enhancements: - -* Packaged as a single binary. -* Lightweight storage backend based on sqlite3 as the default storage mechanism. etcd3, MySQL, Postgres also still available. -* Wrapped in simple launcher that handles a lot of the complexity of TLS and options. -* Secure by default with reasonable defaults for lightweight environments. -* Simple but powerful "batteries-included" features have been added, such as: a local storage provider, a service load balancer, a Helm controller, and the Traefik ingress controller. -* Operation of all Kubernetes control plane components is encapsulated in a single binary and process. This allows K3s to automate and manage complex cluster operations like distributing certificates. -* External dependencies have been minimized (just a modern kernel and cgroup mounts needed). K3s packages the required dependencies, including: - * containerd - * Flannel - * CoreDNS - * CNI - * Host utilities (iptables, socat, etc) - * Ingress controller (traefik) - * Embedded service loadbalancer - * Embedded network policy controller - -# What's with the name? - -We wanted an installation of Kubernetes that was half the size in terms of memory footprint. Kubernetes is a 10-letter word stylized as K8s. So something half as big as Kubernetes would be a 5-letter word stylized as K3s. There is no long form of K3s and no official pronunciation. 
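As a minimal illustration of the single-binary claim above (a sketch; the same install command appears again in the Advanced Options page that follows):

```bash
# Install and start K3s as a service; the script auto-detects systemd or openrc
curl -sfL https://get.k3s.io | sh -

# kubectl is bundled in the same binary
sudo k3s kubectl get nodes
```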
diff --git a/content/k3s/latest/en/advanced/_index.md b/content/k3s/latest/en/advanced/_index.md deleted file mode 100644 index 554a0e5469..0000000000 --- a/content/k3s/latest/en/advanced/_index.md +++ /dev/null @@ -1,460 +0,0 @@ ---- -title: "Advanced Options and Configuration" -weight: 45 -aliases: - - /k3s/latest/en/running/ - - /k3s/latest/en/configuration/ ---- - -This section contains advanced information describing the different ways you can run and manage K3s: - -- [Certificate rotation](#certificate-rotation) -- [Auto-deploying manifests](#auto-deploying-manifests) -- [Using Docker as the container runtime](#using-docker-as-the-container-runtime) -- [Using etcdctl](#using-etcdctl) -- [Configuring containerd](#configuring-containerd) -- [Running K3s with Rootless mode (Experimental)](#running-k3s-with-rootless-mode-experimental) -- [Node labels and taints](#node-labels-and-taints) -- [Starting the server with the installation script](#starting-the-server-with-the-installation-script) -- [Additional preparation for Alpine Linux setup](#additional-preparation-for-alpine-linux-setup) -- [Additional preparation for (Red Hat/CentOS) Enterprise Linux](#additional-preparation-for-red-hat/centos-enterprise-linux) -- [Additional preparation for Raspberry Pi OS Setup](#additional-preparation-for-raspberry-pi-os-setup) -- [Enabling vxlan on Ubuntu 21.10+ on Raspberry Pi](#enabling-vxlan-on-ubuntu-21.10+-on-raspberry-pi) -- [Running K3d (K3s in Docker) and docker-compose](#running-k3d-k3s-in-docker-and-docker-compose) -- [SELinux Support](#selinux-support) -- [Enabling Lazy Pulling of eStargz (Experimental)](#enabling-lazy-pulling-of-estargz-experimental) -- [Additional Logging Sources](#additional-logging-sources) -- [Server and agent tokens](#server-and-agent-tokens) - -# Certificate Rotation - -By default, certificates in K3s expire in 12 months. - -If the certificates are expired or have fewer than 90 days remaining before they expire, the certificates are rotated when K3s is restarted. - -# Auto-Deploying Manifests - -Any file found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to Kubernetes in a manner similar to `kubectl apply`, both on startup and when the file is changed on disk. Deleting files out of this directory will not delete the corresponding resources from the cluster. - -For information about deploying Helm charts, refer to the section about [Helm.](../helm) - -# Using Docker as the Container Runtime - -K3s includes and defaults to [containerd,](https://containerd.io/) an industry-standard container runtime. - -To use Docker instead of containerd, - -1. Install Docker on the K3s node. One of Rancher's [Docker installation scripts](https://github.com/rancher/install-docker) can be used to install Docker: - - ``` - curl https://releases.rancher.com/install-docker/19.03.sh | sh - ``` - -1. Install K3s using the `--docker` option: - - ``` - curl -sfL https://get.k3s.io | sh -s - --docker - ``` - -1. 
Confirm that the cluster is available: - - ``` - $ sudo k3s kubectl get pods --all-namespaces - NAMESPACE NAME READY STATUS RESTARTS AGE - kube-system local-path-provisioner-6d59f47c7-lncxn 1/1 Running 0 51s - kube-system metrics-server-7566d596c8-9tnck 1/1 Running 0 51s - kube-system helm-install-traefik-mbkn9 0/1 Completed 1 51s - kube-system coredns-8655855d6-rtbnb 1/1 Running 0 51s - kube-system svclb-traefik-jbmvl 2/2 Running 0 43s - kube-system traefik-758cd5fc85-2wz97 1/1 Running 0 43s - ``` - -1. Confirm that the Docker containers are running: - - ``` - $ sudo docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 3e4d34729602 897ce3c5fc8f "entry" About a minute ago Up About a minute k8s_lb-port-443_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 - bffdc9d7a65f rancher/klipper-lb "entry" About a minute ago Up About a minute k8s_lb-port-80_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 - 436b85c5e38d rancher/library-traefik "/traefik --configfi…" About a minute ago Up About a minute k8s_traefik_traefik-758cd5fc85-2wz97_kube-system_07abe831-ffd6-4206-bfa1-7c9ca4fb39e7_0 - de8fded06188 rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_svclb-traefik-jbmvl_kube-system_d46f10c6-073f-4c7e-8d7a-8e7ac18f9cb0_0 - 7c6a30aeeb2f rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_traefik-758cd5fc85-2wz97_kube-system_07abe831-ffd6-4206-bfa1-7c9ca4fb39e7_0 - ae6c58cab4a7 9d12f9848b99 "local-path-provisio…" About a minute ago Up About a minute k8s_local-path-provisioner_local-path-provisioner-6d59f47c7-lncxn_kube-system_2dbd22bf-6ad9-4bea-a73d-620c90a6c1c1_0 - be1450e1a11e 9dd718864ce6 "/metrics-server" About a minute ago Up About a minute k8s_metrics-server_metrics-server-7566d596c8-9tnck_kube-system_031e74b5-e9ef-47ef-a88d-fbf3f726cbc6_0 - 4454d14e4d3f c4d3d16fe508 "/coredns -conf /etc…" About a minute ago Up About a minute k8s_coredns_coredns-8655855d6-rtbnb_kube-system_d05725df-4fb1-410a-8e82-2b1c8278a6a1_0 - c3675b87f96c rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_coredns-8655855d6-rtbnb_kube-system_d05725df-4fb1-410a-8e82-2b1c8278a6a1_0 - 4b1fddbe6ca6 rancher/pause:3.1 "/pause" About a minute ago Up About a minute k8s_POD_local-path-provisioner-6d59f47c7-lncxn_kube-system_2dbd22bf-6ad9-4bea-a73d-620c90a6c1c1_0 - 64d3517d4a95 rancher/pause:3.1 "/pause" - ``` - -### Optional: Use crictl with Docker - -crictl provides a CLI for CRI-compatible container runtimes. 
- -If you would like to use crictl after installing K3s with the `--docker` option, install crictl using the [official documentation:](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md) - -``` -$ VERSION="v1.17.0" -$ curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz -$ sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin -crictl -``` - -Then start using crictl commands: - -``` -$ sudo crictl version -Version: 0.1.0 -RuntimeName: docker -RuntimeVersion: 19.03.9 -RuntimeApiVersion: 1.40.0 -$ sudo crictl images -IMAGE TAG IMAGE ID SIZE -rancher/coredns-coredns 1.6.3 c4d3d16fe508b 44.3MB -rancher/klipper-helm v0.2.5 6207e2a3f5225 136MB -rancher/klipper-lb v0.1.2 897ce3c5fc8ff 6.1MB -rancher/library-traefik 1.7.19 aa764f7db3051 85.7MB -rancher/local-path-provisioner v0.0.11 9d12f9848b99f 36.2MB -rancher/metrics-server v0.3.6 9dd718864ce61 39.9MB -rancher/pause 3.1 da86e6ba6ca19 742kB -``` - -# Using etcdctl - -etcdctl provides a CLI for etcd. - -If you would like to use etcdctl after installing K3s with embedded etcd, install etcdctl using the [official documentation.](https://etcd.io/docs/latest/install/) - -``` -$ VERSION="v3.5.0" -$ curl -L https://github.com/etcd-io/etcd/releases/download/${VERSION}/etcd-${VERSION}-linux-amd64.tar.gz --output etcdctl-linux-amd64.tar.gz -$ sudo tar -zxvf etcdctl-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-${VERSION}-linux-amd64/etcdctl -``` - -Then start using etcdctl commands with the appropriate K3s flags: - -``` -$ sudo etcdctl --cacert=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --cert=/var/lib/rancher/k3s/server/tls/etcd/client.crt --key=/var/lib/rancher/k3s/server/tls/etcd/client.key version -``` - -# Configuring containerd - -K3s will generate config.toml for containerd in `/var/lib/rancher/k3s/agent/etc/containerd/config.toml`. - -For advanced customization for this file you can create another file called `config.toml.tmpl` in the same directory and it will be used instead. - -The `config.toml.tmpl` will be treated as a Go template file, and the `config.Node` structure is being passed to the template. See [this folder](https://github.com/k3s-io/k3s/blob/master/pkg/agent/templates) for Linux and Windows examples on how to use the structure to customize the configuration file. - - -# Running K3s with Rootless mode (Experimental) - -> **Warning:** This feature is experimental. - -Rootless mode allows running the entire k3s an unprivileged user, so as to protect the real root on the host from potential container-breakout attacks. - -See also https://rootlesscontaine.rs/ to learn about Rootless mode. - -### Known Issues with Rootless mode - -* **Ports** - - When running rootless a new network namespace is created. This means that K3s instance is running with networking fairly detached from the host. The only way to access services run in K3s from the host is to set up port forwards to the K3s network namespace. We have a controller that will automatically bind 6443 and service port below 1024 to the host with an offset of 10000. - - That means service port 80 will become 10080 on the host, but 8080 will become 8080 without any offset. 
- - Currently, only `LoadBalancer` services are automatically bound. - -* **Cgroups** - - Cgroup v1 is not supported. v2 is supported. - -* **Multi-node cluster** - - Multi-cluster installation is untested and undocumented. - -### Running Servers and Agents with Rootless -* Enable cgroup v2 delegation, see https://rootlesscontaine.rs/getting-started/common/cgroup2/ . - This step is optional, but highly recommended for enabling CPU and memory resource limtitation. - -* Download `k3s-rootless.service` from [`https://github.com/k3s-io/k3s/blob//k3s-rootless.service`](https://github.com/k3s-io/k3s/blob/master/k3s-rootless.service). - Make sure to use the same version of `k3s-rootless.service` and `k3s`. - -* Install `k3s-rootless.service` to `~/.config/systemd/user/k3s-rootless.service`. - Installing this file as a system-wide service (`/etc/systemd/...`) is not supported. - Depending on the path of `k3s` binary, you might need to modify the `ExecStart=/usr/local/bin/k3s ...` line of the file. - -* Run `systemctl --user daemon-reload` - -* Run `systemctl --user enable --now k3s-rootless` - -* Run `KUBECONFIG=~/.kube/k3s.yaml kubectl get pods -A`, and make sure the pods are running. - -> **Note:** Don't try to run `k3s server --rootless` on a terminal, as it doesn't enable cgroup v2 delegation. -> If you really need to try it on a terminal, prepend `systemd-run --user -p Delegate=yes --tty` to create a systemd scope. -> -> i.e., `systemd-run --user -p Delegate=yes --tty k3s server --rootless` - -### Troubleshooting - -* Run `systemctl --user status k3s-rootless` to check the daemon status -* Run `journalctl --user -f -u k3s-rootless` to see the daemon log -* See also https://rootlesscontaine.rs/ - -# Node Labels and Taints - -K3s agents can be configured with the options `--node-label` and `--node-taint` which adds a label and taint to the kubelet. The two options only add labels and/or taints [at registration time,]({{}}/k3s/latest/en/installation/install-options/#node-labels-and-taints-for-agents) so they can only be added once and not changed after that again by running K3s commands. - -If you want to change node labels and taints after node registration you should use `kubectl`. Refer to the official Kubernetes documentation for details on how to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) and [node labels.](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node) - -# Starting the Server with the Installation Script - -The installation script will auto-detect if your OS is using systemd or openrc and start the service. -When running with openrc, logs will be created at `/var/log/k3s.log`. - -When running with systemd, logs will be created in `/var/log/syslog` and viewed using `journalctl -u k3s`. 
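For example, to follow the service logs on either init system (a small sketch of the commands implied above):

```bash
# systemd-based distributions
journalctl -u k3s -f

# openrc-based distributions (e.g. Alpine Linux)
tail -f /var/log/k3s.log
```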
- -An example of installing and auto-starting with the install script: - -```bash -curl -sfL https://get.k3s.io | sh - -``` - -When running the server manually you should get an output similar to the following: - -``` -$ k3s server -INFO[2019-01-22T15:16:19.908493986-07:00] Starting k3s dev -INFO[2019-01-22T15:16:19.908934479-07:00] Running kube-apiserver --allow-privileged=true --authorization-mode Node,RBAC --service-account-signing-key-file /var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range 10.43.0.0/16 --advertise-port 6445 --advertise-address 127.0.0.1 --insecure-port 0 --secure-port 6444 --bind-address 127.0.0.1 --tls-cert-file /var/lib/rancher/k3s/server/tls/localhost.crt --tls-private-key-file /var/lib/rancher/k3s/server/tls/localhost.key --service-account-key-file /var/lib/rancher/k3s/server/tls/service.key --service-account-issuer k3s --api-audiences unknown --basic-auth-file /var/lib/rancher/k3s/server/cred/passwd --kubelet-client-certificate /var/lib/rancher/k3s/server/tls/token-node.crt --kubelet-client-key /var/lib/rancher/k3s/server/tls/token-node.key -Flag --insecure-port has been deprecated, This flag will be removed in a future version. -INFO[2019-01-22T15:16:20.196766005-07:00] Running kube-scheduler --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --port 0 --secure-port 0 --leader-elect=false -INFO[2019-01-22T15:16:20.196880841-07:00] Running kube-controller-manager --kubeconfig /var/lib/rancher/k3s/server/cred/kubeconfig-system.yaml --service-account-private-key-file /var/lib/rancher/k3s/server/tls/service.key --allocate-node-cidrs --cluster-cidr 10.42.0.0/16 --root-ca-file /var/lib/rancher/k3s/server/tls/token-ca.crt --port 0 --secure-port 0 --leader-elect=false -Flag --port has been deprecated, see --secure-port instead. -INFO[2019-01-22T15:16:20.273441984-07:00] Listening on :6443 -INFO[2019-01-22T15:16:20.278383446-07:00] Writing manifest: /var/lib/rancher/k3s/server/manifests/coredns.yaml -INFO[2019-01-22T15:16:20.474454524-07:00] Node token is available at /var/lib/rancher/k3s/server/node-token -INFO[2019-01-22T15:16:20.474471391-07:00] To join node to cluster: k3s agent -s https://10.20.0.3:6443 -t ${NODE_TOKEN} -INFO[2019-01-22T15:16:20.541027133-07:00] Wrote kubeconfig /etc/rancher/k3s/k3s.yaml -INFO[2019-01-22T15:16:20.541049100-07:00] Run: k3s kubectl -``` - -The output will likely be much longer as the agent will create a lot of logs. By default the server -will register itself as a node (run the agent). - -# Additional Preparation for Alpine Linux Setup - -In order to set up Alpine Linux, you have to go through the following preparation: - -Update **/etc/update-extlinux.conf** by adding: - -``` -default_kernel_opts="... cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory" -``` - -Then update the config and reboot: - -```bash -update-extlinux -reboot -``` -# Additional preparation for (Red Hat/CentOS) Enterprise Linux - -It is recommended to turn off firewalld: -``` -systemctl disable firewalld --now -``` - -If enabled, it is required to disable nm-cloud-setup and reboot the node: -``` -systemctl disable nm-cloud-setup.service nm-cloud-setup.timer -reboot -``` - -# Additional preparation for Raspberry Pi OS Setup -## Enabling legacy iptables on Raspberry Pi OS -Raspberry Pi OS (formerly Raspbian) defaults to using `nftables` instead of `iptables`. **K3S** networking features require `iptables` and do not work with `nftables`. 
Follow the steps below to switch configure **Buster** to use `legacy iptables`: -``` -sudo iptables -F -sudo update-alternatives --set iptables /usr/sbin/iptables-legacy -sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -sudo reboot -``` - -## Enabling cgroups for Raspberry Pi OS - -Standard Raspberry Pi OS installations do not start with `cgroups` enabled. **K3S** needs `cgroups` to start the systemd service. `cgroups`can be enabled by appending `cgroup_memory=1 cgroup_enable=memory` to `/boot/cmdline.txt`. - -# Enabling vxlan on Ubuntu 21.10+ on Raspberry Pi - -Starting with Ubuntu 21.10, vxlan support on Raspberry Pi has been moved into a seperate kernel module. -``` -sudo apt install linux-modules-extra-raspi -``` - -# Running K3d (K3s in Docker) and docker-compose - -[k3d](https://github.com/rancher/k3d) is a utility designed to easily run K3s in Docker. - -It can be installed via the the [brew](https://brew.sh/) utility on MacOS: - -``` -brew install k3d -``` - -`rancher/k3s` images are also available to run the K3s server and agent from Docker. - -A `docker-compose.yml` is in the root of the K3s repo that serves as an example of how to run K3s from Docker. To run from `docker-compose` from this repo, run: - - docker-compose up --scale agent=3 - # kubeconfig is written to current dir - - kubectl --kubeconfig kubeconfig.yaml get node - - NAME STATUS ROLES AGE VERSION - 497278a2d6a2 Ready 11s v1.13.2-k3s2 - d54c8b17c055 Ready 11s v1.13.2-k3s2 - db7a5a5a5bdd Ready 12s v1.13.2-k3s2 - -To run the agent only in Docker, use `docker-compose up agent`. - -Alternatively the `docker run` command can also be used: - - sudo docker run \ - -d --tmpfs /run \ - --tmpfs /var/run \ - -e K3S_URL=${SERVER_URL} \ - -e K3S_TOKEN=${NODE_TOKEN} \ - --privileged rancher/k3s:vX.Y.Z - - -### example of /boot/cmdline.txt -``` -console=serial0,115200 console=tty1 root=PARTUUID=58b06195-02 rootfstype=ext4 elevator=deadline fsck.repair=yes rootwait cgroup_memory=1 cgroup_enable=memory -``` - -# SELinux Support - -_Supported as of v1.19.4+k3s1. Experimental as of v1.17.4+k3s1._ - -If you are installing K3s on a system where SELinux is enabled by default (such as CentOS), you must ensure the proper SELinux policies have been installed. - -### Automatic Installation - -_Available as of v1.19.3+k3s2_ - -The [install script]({{}}/k3s/latest/en/installation/install-options/#installation-script-options) will automatically install the SELinux RPM from the Rancher RPM repository if on a compatible system if not performing an air-gapped install. Automatic installation can be skipped by setting `INSTALL_K3S_SKIP_SELINUX_RPM=true`. - -### Manual Installation - -The necessary policies can be installed with the following commands: -``` -yum install -y container-selinux selinux-policy-base -yum install -y https://rpm.rancher.io/k3s/latest/common/centos/7/noarch/k3s-selinux-0.2-1.el7_8.noarch.rpm -``` - -To force the install script to log a warning rather than fail, you can set the following environment variable: `INSTALL_K3S_SELINUX_WARN=true`. - -### Enabling and Disabling SELinux Enforcement - -The way that SELinux enforcement is enabled or disabled depends on the K3s version. - -{{% tabs %}} -{{% tab "K3s v1.19.1+k3s1" %}} - -To leverage SELinux, specify the `--selinux` flag when starting K3s servers and agents. 
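For instance, a sketch of passing the flag either to the binary directly or through the install script (the `sh -s - server ...` pattern is the same one used for the datastore example later in these docs):

```bash
# When running the binary directly
k3s server --selinux

# Or when installing via the script
curl -sfL https://get.k3s.io | sh -s - server --selinux
```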
- -This option can also be specified in the K3s [configuration file:]({{}}/k3s/latest/en/installation/install-options/#configuration-file) - -``` -selinux: true -``` - -The `--disable-selinux` option should not be used. It is deprecated and will be either ignored or will be unrecognized, resulting in an error, in future minor releases. - -Using a custom `--data-dir` under SELinux is not supported. To customize it, you would most likely need to write your own custom policy. For guidance, you could refer to the [containers/container-selinux](https://github.com/containers/container-selinux) repository, which contains the SELinux policy files for Container Runtimes, and the [rancher/k3s-selinux](https://github.com/rancher/k3s-selinux) repository, which contains the SELinux policy for K3s . - -{{%/tab%}} -{{% tab "K3s before v1.19.1+k3s1" %}} - -SELinux is automatically enabled for the built-in containerd. - -To turn off SELinux enforcement in the embedded containerd, launch K3s with the `--disable-selinux` flag. - -Using a custom `--data-dir` under SELinux is not supported. To customize it, you would most likely need to write your own custom policy. For guidance, you could refer to the [containers/container-selinux](https://github.com/containers/container-selinux) repository, which contains the SELinux policy files for Container Runtimes, and the [rancher/k3s-selinux](https://github.com/rancher/k3s-selinux) repository, which contains the SELinux policy for K3s . - -{{%/tab%}} -{{% /tabs %}} - -# Enabling Lazy Pulling of eStargz (Experimental) - -### What's lazy pulling and eStargz? - -Pulling images is known as one of the time-consuming steps in the container lifecycle. -According to [Harter, et al.](https://www.usenix.org/conference/fast16/technical-sessions/presentation/harter), - -> pulling packages accounts for 76% of container start time, but only 6.4% of that data is read - -To address this issue, k3s experimentally supports *lazy pulling* of image contents. -This allows k3s to start a container before the entire image has been pulled. -Instead, the necessary chunks of contents (e.g. individual files) are fetched on-demand. -Especially for large images, this technique can shorten the container startup latency. - -To enable lazy pulling, the target image needs to be formatted as [*eStargz*](https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md). -This is an OCI-alternative but 100% OCI-compatible image format for lazy pulling. -Because of the compatibility, eStargz can be pushed to standard container registries (e.g. ghcr.io) as well as this is *still runnable* even on eStargz-agnostic runtimes. - -eStargz is developed based on the [stargz format proposed by Google CRFS project](https://github.com/google/crfs) but comes with practical features including content verification and performance optimization. -For more details about lazy pulling and eStargz, please refer to [Stargz Snapshotter project repository](https://github.com/containerd/stargz-snapshotter). - -### Configure k3s for lazy pulling of eStargz - -As shown in the following, `--snapshotter=stargz` option is needed for k3s server and agent. 
- -``` -k3s server --snapshotter=stargz -``` - -With this configuration, you can perform lazy pulling for eStargz-formatted images. -The following Pod manifest uses eStargz-formatted `node:13.13.0` image (`ghcr.io/stargz-containers/node:13.13.0-esgz`). -k3s performs lazy pulling for this image. - -``` -apiVersion: v1 -kind: Pod -metadata: - name: nodejs -spec: - containers: - - name: nodejs-estargz - image: ghcr.io/stargz-containers/node:13.13.0-esgz - command: ["node"] - args: - - -e - - var http = require('http'); - http.createServer(function(req, res) { - res.writeHead(200); - res.end('Hello World!\n'); - }).listen(80); - ports: - - containerPort: 80 -``` - -# Additional Logging Sources - -[Rancher logging]({{}}//rancher/v2.6/en/logging/helm-chart-options/) for K3s can be installed without using Rancher. The following instructions should be executed to do so: - -``` -helm repo add rancher-charts https://charts.rancher.io -helm repo update -helm install --create-namespace -n cattle-logging-system rancher-logging-crd rancher-charts/rancher-logging-crd -helm install --create-namespace -n cattle-logging-system rancher-logging --set additionalLoggingSources.k3s.enabled=true rancher-charts/rancher-logging -``` - -# Server and agent tokens - -In K3s, there are two types of tokens: K3S_TOKEN and K3S_AGENT_TOKEN. - -K3S_TOKEN: Defines the key required by the server to offer the HTTP config resources. These resources are requested by the other servers before joining the K3s HA cluster. If the K3S_AGENT_TOKEN is not defined, the agents use this token as well to access the required HTTP resources to join the cluster. Note that this token is also used to generate the encryption key for important content in the database (e.g., bootstrap data). - -K3S_AGENT_TOKEN: Optional. Defines the key required by the server to offer HTTP config resources to the agents. If not defined, agents will require K3S_TOKEN. Defining K3S_AGENT_TOKEN is encouraged to avoid agents having to know K3S_TOKEN, which is also used to encrypt data. - -If no K3S_TOKEN is defined, the first K3s server will generate a random one. The result is part of the content in `/var/lib/rancher/k3s/server/token`. For example, `K1070878408e06a827960208f84ed18b65fa10f27864e71a57d9e053c4caff8504b::server:df54383b5659b9280aa1e73e60ef78fc`, where `df54383b5659b9280aa1e73e60ef78fc` is the K3S_TOKEN. diff --git a/content/k3s/latest/en/architecture/_index.md b/content/k3s/latest/en/architecture/_index.md deleted file mode 100644 index 2409ea4c62..0000000000 --- a/content/k3s/latest/en/architecture/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -This page describes the architecture of a high-availability K3s server cluster and how it differs from a single-node server cluster. - -It also describes how agent nodes are registered with K3s servers. - -A server node is defined as a machine (bare-metal or virtual) running the `k3s server` command. A worker node is defined as a machine running the `k3s agent` command. 
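In command terms, the distinction is simply which subcommand the machine runs (a sketch using the agent join syntax shown in the Advanced Options page above; address and token are placeholders):

```bash
# On a server (control plane) node
k3s server

# On a worker node, joining via the server's address and node token
k3s agent -s https://<server>:6443 -t <node-token>
```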
- -This page covers the following topics: - -- [Single-server setup with an embedded database](#single-server-setup-with-an-embedded-db) -- [High-availability K3s server with an external database](#high-availability-k3s-server-with-an-external-db) - - [Fixed registration address for agent nodes](#fixed-registration-address-for-agent-nodes) -- [How agent node registration works](#how-agent-node-registration-works) -- [Automatically deployed manifests](#automatically-deployed-manifests) - -# Single-server Setup with an Embedded DB - -The following diagram shows an example of a cluster that has a single-node K3s server with an embedded SQLite database. - -In this configuration, each agent node is registered to the same server node. A K3s user can manipulate Kubernetes resources by calling the K3s API on the server node. - -
K3s Architecture with a Single Server
-![Architecture]({{}}/img/rancher/k3s-architecture-single-server.png) - -# High-Availability K3s Server with an External DB - -Single server clusters can meet a variety of use cases, but for environments where uptime of the Kubernetes control plane is critical, you can run K3s in an HA configuration. An HA K3s cluster is comprised of: - -* Two or more **server nodes** that will serve the Kubernetes API and run other control plane services -* An **external datastore** (as opposed to the embedded SQLite datastore used in single-server setups) - -
K3s Architecture with a High-availability Server
-![Architecture]({{}}/img/rancher/k3s-architecture-ha-server.png) - -### Fixed Registration Address for Agent Nodes - -In the high-availability server configuration, each node must also register with the Kubernetes API by using a fixed registration address, as shown in the diagram below. - -After registration, the agent nodes establish a connection directly to one of the server nodes. - -![k3s HA]({{}}/img/k3s/k3s-production-setup.svg) - -# How Agent Node Registration Works - -Agent nodes are registered with a websocket connection initiated by the `k3s agent` process, and the connection is maintained by a client-side load balancer running as part of the agent process. - -Agents will register with the server using the node cluster secret along with a randomly generated password for the node, stored at `/etc/rancher/node/password`. The server will store the passwords for individual nodes as Kubernetes secrets, and any subsequent attempts must use the same password. Node password secrets are stored in the `kube-system` namespace with names using the template `.node-password.k3s`. - -Note: Prior to K3s v1.20.2 servers stored passwords on disk at `/var/lib/rancher/k3s/server/cred/node-passwd`. - -If the `/etc/rancher/node` directory of an agent is removed, the password file should be recreated for the agent, or the entry removed from the server. - -A unique node ID can be appended to the hostname by launching K3s servers or agents using the `--with-node-id` flag. - -# Automatically Deployed Manifests - -The [manifests](https://github.com/rancher/k3s/tree/master/manifests) located at the directory path `/var/lib/rancher/k3s/server/manifests` are bundled into the K3s binary at build time. These will be installed at runtime by the [rancher/helm-controller.](https://github.com/rancher/helm-controller#helm-controller) \ No newline at end of file diff --git a/content/k3s/latest/en/backup-restore/_index.md b/content/k3s/latest/en/backup-restore/_index.md deleted file mode 100644 index 13031a06c3..0000000000 --- a/content/k3s/latest/en/backup-restore/_index.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: Backup and Restore -weight: 26 ---- - -The way K3s is backed up and restored depends on which type of datastore is used. - -- [Backup and Restore with External Datastore](#backup-and-restore-with-external-datastore) -- [Backup and Restore with Embedded etcd Datastore (Experimental)](#backup-and-restore-with-embedded-etcd-datastore-experimental) - -# Backup and Restore with External Datastore - -When an external datastore is used, backup and restore operations are handled outside of K3s. The database administrator will need to back up the external database, or restore it from a snapshot or dump. - -We recommend configuring the database to take recurring snapshots. 
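One possible shape for that, shown purely as a hypothetical sketch for a MySQL datastore (host, credentials, database name, and schedule are placeholders):

```bash
# Example cron entry: dump the K3s datastore every 12 hours
# (in a crontab, % must be escaped as \%)
0 */12 * * * mysqldump -h db-host -u k3s -p'secret' kubernetes > /backups/k3s-$(date +\%F_\%H\%M).sql
```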
- -For details on taking database snapshots and restoring your database from them, refer to the official database documentation: - -- [Official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html) -- [Official PostgreSQL documentation](https://www.postgresql.org/docs/8.3/backup-dump.html) -- [Official etcd documentation](https://etcd.io/docs/latest/op-guide/recovery/) - -# Backup and Restore with Embedded etcd Datastore (Experimental) - -_Available as of v1.19.1+k3s1_ - -In this section, you'll learn how to create backups of the K3s cluster data and to restore the cluster from backup. - ->**Note on Single-Server with embedded SQLite:** Currently, backups of SQLite are not supported. Instead, make a copy of `/var/lib/rancher/k3s/server` and then delete K3s. - -### Creating Snapshots - -Snapshots are enabled by default. - -The snapshot directory defaults to `${data-dir}/server/db/snapshots`. The data-dir value defaults to `/var/lib/rancher/k3s` and can be changed by setting the `--data-dir` flag. - -To configure the snapshot interval or the number of retained snapshots, refer to the [options.](#options) - -### Restoring a Cluster from a Snapshot - -When K3s is restored from backup, the old data directory will be moved to `${data-dir}/server/db/etcd-old/`. Then K3s will attempt to restore the snapshot by creating a new data directory, then starting etcd with a new K3s cluster with one etcd member. - -To restore the cluster from backup, run K3s with the `--cluster-reset` option, with the `--cluster-reset-restore-path` also given: - -``` -./k3s server \ - --cluster-reset \ - --cluster-reset-restore-path= -``` - -**Result:** A message in the logs says that K3s can be restarted without the flags. Start k3s again and should run successfully and be restored from the specified snapshot. - -### Options - -These options can be passed in with the command line, or in the [configuration file,]({{}}/k3s/latest/en/installation/install-options/#configuration-file ) which may be easier to use. - -| Options | Description | -| ----------- | --------------- | -| `--etcd-disable-snapshots` | Disable automatic etcd snapshots | -| `--etcd-snapshot-schedule-cron` value | Snapshot interval time in cron spec. eg. every 5 hours `0 */5 * * *`(default: `0 */12 * * *`) | -| `--etcd-snapshot-retention` value | Number of snapshots to retain (default: 5) | -| `--etcd-snapshot-dir` value | Directory to save db snapshots. (Default location: `${data-dir}/db/snapshots`) | -| `--cluster-reset` | Forget all peers and become sole member of a new cluster. This can also be set with the environment variable `[$K3S_CLUSTER_RESET]`. -| `--cluster-reset-restore-path` value | Path to snapshot file to be restored - -### S3 Compatible API Support - -K3s supports writing etcd snapshots to and restoring etcd snapshots from systems with S3-compatible APIs. S3 support is available for both on-demand and scheduled snapshots. - -The arguments below have been added to the `server` subcommand. These flags exist for the `etcd-snapshot` subcommand as well however the `--etcd-s3` portion is removed to avoid redundancy. 
- -| Options | Description | -| ----------- | --------------- | -| `--etcd-s3` | Enable backup to S3 | -| `--etcd-s3-endpoint` | S3 endpoint url | -| `--etcd-s3-endpoint-ca` | S3 custom CA cert to connect to S3 endpoint | -| `--etcd-s3-skip-ssl-verify` | Disables S3 SSL certificate validation | -| `--etcd-s3-access-key` | S3 access key | -| `--etcd-s3-secret-key` | S3 secret key" | -| `--etcd-s3-bucket` | S3 bucket name | -| `--etcd-s3-region` | S3 region / bucket location (optional). defaults to us-east-1 | -| `--etcd-s3-folder` | S3 folder | - -To perform an on-demand etcd snapshot and save it to S3: - -``` -k3s etcd-snapshot \ - --s3 \ - --s3-bucket= \ - --s3-access-key= \ - --s3-secret-key= -``` - -To perform an on-demand etcd snapshot restore from S3, first make sure that K3s isn't running. Then run the following commands: - -``` -k3s server \ - --cluster-init \ - --cluster-reset \ - --etcd-s3 \ - --cluster-reset-restore-path= \ - --etcd-s3-bucket= \ - --etcd-s3-access-key= \ - --etcd-s3-secret-key= -``` - -### Etcd Snapshot and Restore Subcommands - -k3s supports a set of subcommands for working with your etcd snapshots. - -| Subcommand | Description | -| ----------- | --------------- | -| delete | Delete given snapshot(s) | -| ls, list, l | List snapshots | -| prune | Remove snapshots that exceed the configured retention count | -| save | Trigger an immediate etcd snapshot | - -*note* The `save` subcommand is the same as `k3s etcd-snapshot`. The latter will eventually be deprecated in favor of the former. - -These commands will perform as expected whether the etcd snapshots are stored locally or in an S3 compatible object store. - -For additional information on the etcd snapshot subcommands, run `k3s etcd-snapshot`. - -Delete a snapshot from S3. - -``` -k3s etcd-snapshot delete \ - --s3 \ - --s3-bucket= \ - --s3-access-key= \ - --s3-secret-key= \ - -``` - -Prune local snapshots with the default retention policy (5). The `prune` subcommand takes an additional flag `--snapshot-retention` that allows for overriding the default retention policy. - -``` -k3s etcd-snapshot prune -``` - -``` -k3s etcd-snapshot prune --snapshot-retention 10 -``` diff --git a/content/k3s/latest/en/cluster-access/_index.md b/content/k3s/latest/en/cluster-access/_index.md deleted file mode 100644 index 7f861015b5..0000000000 --- a/content/k3s/latest/en/cluster-access/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Cluster Access -weight: 21 ---- - -The kubeconfig file stored at `/etc/rancher/k3s/k3s.yaml` is used to configure access to the Kubernetes cluster. If you have installed upstream Kubernetes command line tools such as kubectl or helm you will need to configure them with the correct kubeconfig path. This can be done by either exporting the `KUBECONFIG` environment variable or by invoking the `--kubeconfig` command line flag. Refer to the examples below for details. - -Leverage the KUBECONFIG environment variable: - -``` -export KUBECONFIG=/etc/rancher/k3s/k3s.yaml -kubectl get pods --all-namespaces -helm ls --all-namespaces -``` - -Or specify the location of the kubeconfig file in the command: - -``` -kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml get pods --all-namespaces -helm --kubeconfig /etc/rancher/k3s/k3s.yaml ls --all-namespaces -``` - -### Accessing the Cluster from Outside with kubectl - -Copy `/etc/rancher/k3s/k3s.yaml` on your machine located outside the cluster as `~/.kube/config`. Then replace the value of the `server` field with the IP or name of your K3s server. 
`kubectl` can now manage your K3s cluster. diff --git a/content/k3s/latest/en/faq/_index.md b/content/k3s/latest/en/faq/_index.md deleted file mode 100644 index ed339e3405..0000000000 --- a/content/k3s/latest/en/faq/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: FAQ -weight: 60 ---- - -The FAQ is updated periodically and designed to answer the questions our users most frequently ask about K3s. - -**Is K3s a suitable replacement for k8s?** - -K3s is capable of nearly everything k8s can do. It is just a more lightweight version. See the [main]({{}}/k3s/latest/en/) docs page for more details. - -**How can I use my own Ingress instead of Traefik?** - -Simply start K3s server with `--disable traefik` and deploy your ingress. - -**Does K3s support Windows?** - -At this time K3s does not natively support Windows, however we are open to the idea in the future. - -**How can I build from source?** - -Please reference the K3s [BUILDING.md](https://github.com/rancher/k3s/blob/master/BUILDING.md) with instructions. - -**Where are the K3s logs?** - -The installation script will auto-detect if your OS is using systemd or openrc and start the service. - -When running with openrc, logs will be created at `/var/log/k3s.log`. - -When running with systemd, logs will be created in `/var/log/syslog` and viewed using `journalctl -u k3s`. \ No newline at end of file diff --git a/content/k3s/latest/en/helm/_index.md b/content/k3s/latest/en/helm/_index.md deleted file mode 100644 index 5d43acbb78..0000000000 --- a/content/k3s/latest/en/helm/_index.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Helm -weight: 42 ---- - -Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/docs/intro/quickstart/](https://helm.sh/docs/intro/quickstart/). - -K3s does not require any special configuration to use with Helm command-line tools. Just be sure you have properly set up your kubeconfig as per the section about [cluster access](../cluster-access). K3s does include some extra functionality to make deploying both traditional Kubernetes resource manifests and Helm Charts even easier with the [rancher/helm-release CRD.](#using-the-helm-crd) - -This section covers the following topics: - -- [Automatically Deploying Manifests and Helm Charts](#automatically-deploying-manifests-and-helm-charts) -- [Using the Helm CRD](#using-the-helm-crd) -- [Customizing Packaged Components with HelmChartConfig](#customizing-packaged-components-with-helmchartconfig) -- [Upgrading from Helm v2](#upgrading-from-helm-v2) - -### Automatically Deploying Manifests and Helm Charts - -Any Kubernetes manifests found in `/var/lib/rancher/k3s/server/manifests` will automatically be deployed to K3s in a manner similar to `kubectl apply`. Manifests deployed in this manner are managed as AddOn custom resources, and can be viewed by running `kubectl get addon -A`. You will find AddOns for packaged components such as CoreDNS, Local-Storage, Traefik, etc. AddOns are created automatically by the deploy controller, and are named based on their filename in the manifests directory. - -It is also possible to deploy Helm charts as AddOns. 
K3s includes a [Helm Controller](https://github.com/rancher/helm-controller/) that manages Helm charts using a HelmChart Custom Resource Definition (CRD). - -### Using the Helm CRD - -> **Note:** K3s versions through v0.5.0 used `k3s.cattle.io/v1` as the apiVersion for HelmCharts. This has been changed to `helm.cattle.io/v1` for later versions. - -The [HelmChart resource definition](https://github.com/rancher/helm-controller#helm-controller) captures most of the options you would normally pass to the `helm` command-line tool. Here's an example of how you might deploy Grafana from the default chart repository, overriding some of the default chart values. Note that the HelmChart resource itself is in the `kube-system` namespace, but the chart's resources will be deployed to the `monitoring` namespace. - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChart -metadata: - name: grafana - namespace: kube-system -spec: - chart: stable/grafana - targetNamespace: monitoring - set: - adminPassword: "NotVerySafePassword" - valuesContent: |- - image: - tag: master - env: - GF_EXPLORE_ENABLED: true - adminUser: admin - sidecar: - datasources: - enabled: true -``` - -#### HelmChart Field Definitions - -| Field | Default | Description | Helm Argument / Flag Equivalent | -|-------|---------|-------------|-------------------------------| -| name | | Helm Chart name | NAME | -| spec.chart | | Helm Chart name in repository, or complete HTTPS URL to chart archive (.tgz) | CHART | -| spec.targetNamespace | default | Helm Chart target namespace | `--namespace` | -| spec.version | | Helm Chart version (when installing from repository) | `--version` | -| spec.repo | | Helm Chart repository URL | `--repo` | -| spec.helmVersion | v3 | Helm version to use (`v2` or `v3`) | | -| spec.bootstrap | False | Set to True if this chart is needed to bootstrap the cluster (Cloud Controller Manager, etc) | | -| spec.set | | Override simple default Chart values. These take precedence over options set via valuesContent. | `--set` / `--set-string` | -| spec.jobImage | | Specify the image to use when installing the helm chart. E.g. rancher/klipper-helm:v0.3.0 . | | -| spec.valuesContent | | Override complex default Chart values via YAML file content | `--values` | -| spec.chartContent | | Base64-encoded chart archive .tgz - overrides spec.chart | CHART | - -Content placed in `/var/lib/rancher/k3s/server/static/` can be accessed anonymously via the Kubernetes APIServer from within the cluster. This URL can be templated using the special variable `%{KUBERNETES_API}%` in the `spec.chart` field. For example, the packaged Traefik component loads its chart from `https://%{KUBERNETES_API}%/static/charts/traefik-1.81.0.tgz`. - -**Note:** The `name` field should follow the Helm chart naming conventions. Refer [here](https://helm.sh/docs/chart_best_practices/conventions/#chart-names) to learn more. - ->**Notice on File Naming Requirements:** `HelmChart` and `HelmChartConfig` manifest filenames should adhere to Kubernetes object [naming restrictions](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/). The Helm Controller uses filenames to create objects; therefore, the filename must also align with the restrictions. Any related errors can be observed in the rke2-server logs. 
The example below is an error generated from using underscores: -``` -level=error msg="Failed to process config: failed to process -/var/lib/rancher/rke2/server/manifests/rke2_ingress_daemonset.yaml: -Addon.k3s.cattle.io \"rke2_ingress_daemonset\" is invalid: metadata.name: -Invalid value: \"rke2_ingress_daemonset\": a lowercase RFC 1123 subdomain -must consist of lower case alphanumeric characters, '-' or '.', and must -start and end with an alphanumeric character (e.g. 'example.com', regex -used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9] -([-a-z0-9]*[a-z0-9])?)*')" -``` - -### Customizing Packaged Components with HelmChartConfig - -To allow overriding values for packaged components that are deployed as HelmCharts (such as Traefik), K3s versions starting with v1.19.0+k3s1 support customizing deployments via a HelmChartConfig resources. The HelmChartConfig resource must match the name and namespace of its corresponding HelmChart, and supports providing additional `valuesContent`, which is passed to the `helm` command as an additional value file. - -> **Note:** HelmChart `spec.set` values override HelmChart and HelmChartConfig `spec.valuesContent` settings. - -For example, to customize the packaged Traefik ingress configuration, you can create a file named `/var/lib/rancher/k3s/server/manifests/traefik-config.yaml` and populate it with the following content: - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChartConfig -metadata: - name: traefik - namespace: kube-system -spec: - valuesContent: |- - image: - name: traefik - tag: v2.6.1 - proxyProtocol: - enabled: true - trustedIPs: - - 10.0.0.0/8 - forwardedHeaders: - enabled: true - trustedIPs: - - 10.0.0.0/8 - ssl: - enabled: true - permanentRedirect: false -``` - -### Upgrading from Helm v2 - -> **Note:** K3s versions starting with v1.17.0+k3s.1 support Helm v3, and will use it by default. Helm v2 charts can be used by setting `helmVersion: v2` in the spec. - -If you were using Helm v2 in previous versions of K3s, you may upgrade to v1.17.0+k3s.1 or newer and Helm 2 will still function. If you wish to migrate to Helm 3, [this](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) blog post by Helm explains how to use a plugin to successfully migrate. Refer to the official Helm 3 documentation [here](https://helm.sh/docs/) for more information. K3s will handle either Helm v2 or Helm v3 as of v1.17.0+k3s.1. Just be sure you have properly set your kubeconfig as per the examples in the section about [cluster access.](../cluster-access) - -Note that Helm 3 no longer requires Tiller and the `helm init` command. Refer to the official documentation for details. diff --git a/content/k3s/latest/en/installation/_index.md b/content/k3s/latest/en/installation/_index.md deleted file mode 100644 index 6d595b68ff..0000000000 --- a/content/k3s/latest/en/installation/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "Installation" -weight: 20 ---- - -This section contains instructions for installing K3s in various environments. Please ensure you have met the [Installation Requirements]({{< baseurl >}}/k3s/latest/en/installation/installation-requirements/) before you begin installing K3s. - -[Installation and Configuration Options]({{}}/k3s/latest/en/installation/install-options/) provides guidance on the options available to you when installing K3s. 
- -[High Availability with an External DB]({{}}/k3s/latest/en/installation/ha/) details how to set up an HA K3s cluster backed by an external datastore such as MySQL, PostgreSQL, or etcd. - -[High Availability with Embedded DB]({{}}/k3s/latest/en/installation/ha-embedded/) details how to set up an HA K3s cluster that leverages a built-in distributed database. - -[Air-Gap Installation]({{}}/k3s/latest/en/installation/airgap/) details how to set up K3s in environments that do not have direct access to the Internet. - -[Disable Components Flags]({{}}/k3s/latest/en/installation/disable-flags/) details how to set up K3s with etcd only nodes and controlplane only nodes - -### Uninstalling - -If you installed K3s with the help of the `install.sh` script, an uninstall script is generated during installation. The script is created on your node at `/usr/local/bin/k3s-uninstall.sh` (or as `k3s-agent-uninstall.sh`). diff --git a/content/k3s/latest/en/installation/airgap/_index.md b/content/k3s/latest/en/installation/airgap/_index.md deleted file mode 100644 index 303cebab4b..0000000000 --- a/content/k3s/latest/en/installation/airgap/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: "Air-Gap Install" -weight: 60 ---- - -You can install K3s in an air-gapped environment using two different methods. An air-gapped environment is any environment that is not directly connected to the Internet. You can either deploy a private registry and mirror docker.io, or you can manually deploy images such as for small clusters. - -# Private Registry Method - -This document assumes you have already created your nodes in your air-gap environment and have a Docker private registry on your bastion host. - -If you have not yet set up a private Docker registry, refer to the official documentation [here](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry). - -### Create the Registry YAML - -Follow the [Private Registry Configuration]({{< baseurl >}}/k3s/latest/en/installation/private-registry) guide to create and configure the registry.yaml file. - -Once you have completed this, you may now go to the [Install K3s](#install-k3s) section below. - - -# Manually Deploy Images Method - -We are assuming you have created your nodes in your air-gap environment. -This method requires you to manually deploy the necessary images to each node and is appropriate for edge deployments where running a private registry is not practical. - -### Prepare the Images Directory and K3s Binary -Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. - -Place the tar file in the `images` directory, for example: - -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` - -Place the k3s binary at `/usr/local/bin/k3s` and ensure it is executable. - -Follow the steps in the next section to install K3s. - -# Install K3s - -### Prerequisites - -- Before installing K3s, complete the the [Private Registry Method](#private-registry-method) or the [Manually Deploy Images Method](#manually-deploy-images-method) above to prepopulate the images that K3s needs to install. -- Download the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images. 
Place the binary in `/usr/local/bin` on each air-gapped node and ensure it is executable. -- Download the K3s install script at https://get.k3s.io. Place the install script anywhere on each air-gapped node, and name it `install.sh`. - -When running the K3s script with the `INSTALL_K3S_SKIP_DOWNLOAD` environment variable, K3s will use the local version of the script and binary. - - -### Installing K3s in an Air-Gapped Environment - -You can install K3s on one or more servers as described below. - -{{% tabs %}} -{{% tab "Single Server Configuration" %}} - -To install K3s on a single server, simply do the following on the server node: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh -``` - -Then, to optionally add additional agents do the following on each agent node. Take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node token from the server typically at `/var/lib/rancher/k3s/server/node-token` - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh -``` - -{{% /tab %}} -{{% tab "High Availability Configuration" %}} - -Reference the [High Availability with an External DB]({{< baseurl >}}/k3s/latest/en/installation/ha) or [High Availability with Embedded DB]({{< baseurl >}}/k3s/latest/en/installation/ha-embedded) guides. You will be tweaking install commands so you specify `INSTALL_K3S_SKIP_DOWNLOAD=true` and run your install script locally instead of via curl. You will also utilize `INSTALL_K3S_EXEC='args'` to supply any arguments to k3s. - -For example, step two of the High Availability with an External DB guide mentions the following: - -``` -curl -sfL https://get.k3s.io | sh -s - server \ - --datastore-endpoint='mysql://username:password@tcp(hostname:3306)/database-name' -``` - -Instead, you would modify such examples like below: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true INSTALL_K3S_EXEC='server' K3S_DATASTORE_ENDPOINT='mysql://username:password@tcp(hostname:3306)/database-name' ./install.sh -``` - -{{% /tab %}} -{{% /tabs %}} - ->**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -# Upgrading - -### Install Script Method - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each -node. Delete the old tar file. -2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past -with the same environment variables. -3. Restart the K3s service (if not restarted automatically by installer). - - -### Automated Upgrades Method - -As of v1.17.4+k3s1 K3s supports [automated upgrades]({{< baseurl >}}/k3s/latest/en/upgrades/automated/). To enable this in air-gapped environments, you must ensure the required images are available in your private registry. - -You will need the version of rancher/k3s-upgrade that corresponds to the version of K3s you intend to upgrade to. 
Note, the image tag replaces the `+` in the K3s release with a `-` because Docker images do not support `+`. - -You will also need the versions of system-upgrade-controller and kubectl that are specified in the system-upgrade-controller manifest YAML that you will deploy. Check for the latest release of the system-upgrade-controller [here](https://github.com/rancher/system-upgrade-controller/releases/latest) and download the system-upgrade-controller.yaml to determine the versions you need to push to your private registry. For example, in release v0.4.0 of the system-upgrade-controller, these images are specified in the manifest YAML: - -``` -rancher/system-upgrade-controller:v0.4.0 -rancher/kubectl:v0.17.0 -``` - -Once you have added the necessary rancher/k3s-upgrade, rancher/system-upgrade-controller, and rancher/kubectl images to your private registry, follow the [automated upgrades]({{< baseurl >}}/k3s/latest/en/upgrades/automated/) guide. diff --git a/content/k3s/latest/en/installation/datastore/_index.md b/content/k3s/latest/en/installation/datastore/_index.md deleted file mode 100644 index b5476b42f3..0000000000 --- a/content/k3s/latest/en/installation/datastore/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "Cluster Datastore Options" -weight: 50 ---- - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available datastore options allow you to select a datastore that best fits your use case. For example: - -* If your team doesn't have expertise in operating etcd, you can choose an enterprise-grade SQL database like MySQL or PostgreSQL -* If you need to run a simple, short-lived cluster in your CI/CD environment, you can use the embedded SQLite database -* If you wish to deploy Kubernetes on the edge and require a highly available solution but can't afford the operational overhead of managing a database at the edge, you can use K3s's embedded HA datastore built on top of embedded etcd. - -K3s supports the following datastore options: - -* Embedded [SQLite](https://www.sqlite.org/index.html) -* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) -* [MySQL](https://www.mysql.com/) (certified against version 5.7) -* [MariaDB](https://mariadb.org/) (certified against version 10.3.20) -* [etcd](https://etcd.io/) (certified against version 3.3.15) -* Embedded etcd for High Availability - -### External Datastore Configuration Parameters -If you wish to use an external datastore such as PostgreSQL, MySQL, or etcd you must set the `datastore-endpoint` parameter so that K3s knows how to connect to it. You may also specify parameters to configure the authentication and encryption of the connection. The below table summarizes these parameters, which can be passed as either CLI flags or environment variables. - - CLI Flag | Environment Variable | Description - ------------|-------------|------------------ - `--datastore-endpoint` | `K3S_DATASTORE_ENDPOINT` | Specify a PostgreSQL, MySQL, or etcd connection string. This is a string used to describe the connection to the datastore. The structure of this string is specific to each backend and is detailed below. 
- `--datastore-cafile` | `K3S_DATASTORE_CAFILE` | TLS Certificate Authority (CA) file used to help secure communication with the datastore. If your datastore serves requests over TLS using a certificate signed by a custom certificate authority, you can specify that CA using this parameter so that the K3s client can properly verify the certificate. | -| `--datastore-certfile` | `K3S_DATASTORE_CERTFILE` | TLS certificate file used for client certificate based authentication to your datastore. To use this feature, your datastore must be configured to support client certificate based authentication. If you specify this parameter, you must also specify the `datastore-keyfile` parameter. | -| `--datastore-keyfile` | `K3S_DATASTORE_KEYFILE` | TLS key file used for client certificate based authentication to your datastore. See the previous `datastore-certfile` parameter for more details. | - -As a best practice we recommend setting these parameters as environment variables rather than command line arguments so that your database credentials or other sensitive information aren't exposed as part of the process info. - -### Datastore Endpoint Format and Functionality -As mentioned, the format of the value passed to the `datastore-endpoint` parameter is dependent upon the datastore backend. The following details this format and functionality for each supported external datastore. - -{{% tabs %}} -{{% tab "PostgreSQL" %}} - -In its most common form, the datastore-endpoint parameter for PostgreSQL has the following format: - -`postgres://username:password@hostname:port/database-name` - -More advanced configuration parameters are available. For more information on these, please see https://godoc.org/github.com/lib/pq. - -If you specify a database name and it does not exist, the server will attempt to create it. - -If you only supply `postgres://` as the endpoint, K3s will attempt to do the following: - -* Connect to localhost using `postgres` as the username and password -* Create a database named `kubernetes` - - -{{% /tab %}} -{{% tab "MySQL / MariaDB" %}} - -In its most common form, the `datastore-endpoint` parameter for MySQL and MariaDB has the following format: - -`mysql://username:password@tcp(hostname:3306)/database-name` - -More advanced configuration parameters are available. For more information on these, please see https://github.com/go-sql-driver/mysql#dsn-data-source-name - -Note that due to a [known issue](https://github.com/rancher/k3s/issues/1093) in K3s, you cannot set the `tls` parameter. TLS communication is supported, but you cannot, for example, set this parameter to "skip-verify" to cause K3s to skip certificate verification. - -If you specify a database name and it does not exist, the server will attempt to create it. - -If you only supply `mysql://` as the endpoint, K3s will attempt to do the following: - -* Connect to the MySQL socket at `/var/run/mysqld/mysqld.sock` using the `root` user and no password -* Create a database with the name `kubernetes` - - -{{% /tab %}} -{{% tab "etcd" %}} - -In its most common form, the `datastore-endpoint` parameter for etcd has the following format: - -`https://etcd-host-1:2379,https://etcd-host-2:2379,https://etcd-host-3:2379` - -The above assumes a typical three node etcd cluster. 
The parameter can accept one or more comma-separated etcd URLs. -
-{{% /tab %}}
-{{% /tabs %}}
-
-
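For the etcd backend, a minimal sketch in the same style as the examples below (the endpoints match the sample connection string above; the certificate paths are placeholders rather than values from this guide, and the three TLS variables are only needed if your etcd cluster requires client certificate authentication):

```
# Placeholder certificate paths; substitute the files issued for your etcd cluster.
K3S_DATASTORE_ENDPOINT='https://etcd-host-1:2379,https://etcd-host-2:2379,https://etcd-host-3:2379' \
K3S_DATASTORE_CAFILE='/path/to/etcd-ca.crt' \
K3S_DATASTORE_CERTFILE='/path/to/etcd-client.crt' \
K3S_DATASTORE_KEYFILE='/path/to/etcd-client.key' \
k3s server
```

With a plain, unauthenticated endpoint, `K3S_DATASTORE_ENDPOINT` alone is sufficient.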
Based on the above, the following example command could be used to launch a server instance that connects to a PostgreSQL database named k3s: -``` -K3S_DATASTORE_ENDPOINT='postgres://username:password@hostname:5432/k3s' k3s server -``` - -And the following example could be used to connect to a MySQL database using client certificate authentication: -``` -K3S_DATASTORE_ENDPOINT='mysql://username:password@tcp(hostname:3306)/k3s' \ -K3S_DATASTORE_CERTFILE='/path/to/client.crt' \ -K3S_DATASTORE_KEYFILE='/path/to/client.key' \ -k3s server -``` - -### Embedded etcd for HA - -Please see [High Availability with Embedded DB]({{}}/k3s/latest/en/installation/ha-embedded/) for instructions on how to run with this option. diff --git a/content/k3s/latest/en/installation/disable-flags/_index.md b/content/k3s/latest/en/installation/disable-flags/_index.md deleted file mode 100644 index 2dbbfeb892..0000000000 --- a/content/k3s/latest/en/installation/disable-flags/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Disable Components Flags" -weight: 60 ---- - -Starting the K3s server with `--cluster-init` will run all control plane components, including the api server, controller manager, scheduler, and etcd. However, you can run server nodes with certain components and exclude others; the following sections will explain how to do that. - -# ETCD Only Nodes - -This document assumes you run K3s server with embedded etcd by passing `--cluster-init` flag to the server process. - -To run a K3s server with only etcd components you can pass `--disable-apiserver --disable-controller-manager --disable-scheduler` flags to k3s, this will result in running a server node with only etcd, for example to run K3s server with those flags: - -``` -curl -fL https://get.k3s.io | sh -s - server --cluster-init --disable-apiserver --disable-controller-manager --disable-scheduler -``` - -You can join other nodes to the cluster normally after that. - -# Disable ETCD - -You can also disable etcd from a server node and this will result in a k3s server running control components other than etcd, that can be accomplished by running k3s server with flag `--disable-etcd` for example to join another node with only control components to the etcd node created in the previous section: - -``` -curl -fL https://get.k3s.io | sh -s - server --token --disable-etcd --server https://:6443 -``` - -The end result will be a two nodes one of them is etcd only node and the other one is controlplane only node, if you check the node list you should see something like the following: - -``` -kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-172-31-13-32 Ready etcd 5h39m v1.20.4+k3s1 -ip-172-31-14-69 Ready control-plane,master 5h39m v1.20.4+k3s1 -``` - -Note that you can run `kubectl` commands only on the k3s server that has the api running, and you can't run `kubectl` commands on etcd only nodes. 
- - -### Re-enabling control components - -In both cases you can re-enable any component that you already disabled simply by removing the corresponding flag that disables them, so for example if you want to revert the etcd only node back to a full k3s server with all components you can just remove the following 3 flags `--disable-apiserver --disable-controller-manager --disable-scheduler`, so in our example to revert back node `ip-172-31-13-32` to a full k3s server you can just re-run the curl command without the disable flags: -``` -curl -fL https://get.k3s.io | sh -s - server --cluster-init -``` - -you will notice that all components started again and you can run kubectl commands again: - -``` -kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-172-31-13-32 Ready control-plane,etcd,master 5h45m v1.20.4+k3s1 -ip-172-31-14-69 Ready control-plane,master 5h45m v1.20.4+k3s1 -``` - -Notice that role labels has been re-added to the node `ip-172-31-13-32` with the correct labels (control-plane,etcd,master). - -# Add disable flags using the config file - -In any of the previous situations you can use the config file instead of running the curl commands with the associated flags, for example to run an etcd only node you can add the following options to the `/etc/rancher/k3s/config.yaml` file: - -``` ---- -disable-apiserver: true -disable-controller-manager: true -disable-scheduler: true -cluster-init: true -``` -and then start K3s using the curl command without any arguents: - -``` -curl -fL https://get.k3s.io | sh - -``` -# Disable components using .skip files - -For any yaml file under `/var/lib/rancher/k3s/server/manifests` (coredns, traefik, local-storeage, etc.) you can add a `.skip` file which will cause K3s to not apply the associated yaml file. -For example, adding `traefik.yaml.skip` in the manifests directory will cause K3s to skip `traefik.yaml`. -``` -ls /var/lib/rancher/k3s/server/manifests -ccm.yaml local-storage.yaml rolebindings.yaml traefik.yaml.skip -coredns.yaml traefik.yaml - -kubectl get pods -A -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system local-path-provisioner-64ffb68fd-xx98j 1/1 Running 0 74s -kube-system metrics-server-5489f84d5d-7zwkt 1/1 Running 0 74s -kube-system coredns-85cb69466-vcq7j 1/1 Running 0 74s -``` diff --git a/content/k3s/latest/en/installation/ha-embedded/_index.md b/content/k3s/latest/en/installation/ha-embedded/_index.md deleted file mode 100644 index 6b7f342181..0000000000 --- a/content/k3s/latest/en/installation/ha-embedded/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "High Availability with Embedded DB" -weight: 40 ---- - ->**Notice:** -K3s has added full support for embedded etcd as of release v1.19.5+k3s1. Versions v1.19.1 thru v1.19.4 provide only experimental support for embedded etcd. -Embedded etcd replaced experimental Dqlite in the K3s v1.19.1 release. This is a breaking change. Please note that upgrades from experimental Dqlite to embedded etcd are not supported. If you attempt an upgrade it will not succeed and data will be lost. - ->**Warning:** -Embedded etcd (HA) may have performance issues on slower disks such as Raspberry Pis running with SD cards. - -To run K3s in this mode, you must have an odd number of server nodes. We recommend starting with three nodes. 
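The odd-number guidance follows from etcd quorum: the cluster stays available only while a majority of server nodes are reachable, so three servers tolerate the loss of one, while four servers still tolerate only one and simply add overhead without adding failure tolerance.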
- -To get started, first launch a server node with the `cluster-init` flag to enable clustering and a token that will be used as a shared secret to join additional servers to the cluster. -``` -curl -sfL https://get.k3s.io | K3S_TOKEN=SECRET sh -s - server --cluster-init -``` - -After launching the first server, join the second and third servers to the cluster using the shared secret: -``` -K3S_TOKEN=SECRET k3s server --server https://:6443 -``` - -Now you have a highly available control plane. Joining additional worker nodes to the cluster follows the same procedure as a single server cluster. - -There are a few config flags that must be the same in all server nodes: - -* Network related flags: `--cluster-dns`, `--cluster-domain`, `--cluster-cidr`, `--service-cidr` -* Flags controlling the deployment of certain components: `--disable-helm-controller`, `--disable-kube-proxy`, `--disable-network-policy` and any component passed to `--disable` -* Feature related flags: `--secrets-encryption` - -## Existing clusters -If you have an existing cluster using the default embedded SQLite database, you can convert it to etcd by simply restarting your K3s server with the `--cluster-init` flag. Once you've done that, you'll be able to add additional instances as described above. - ->**Important:** K3s v1.22.2 and newer support migration from SQLite to etcd. Older versions will create a new empty datastore if you add `--cluster-init` to an existing server. diff --git a/content/k3s/latest/en/installation/ha/_index.md b/content/k3s/latest/en/installation/ha/_index.md deleted file mode 100644 index 4e5072bfff..0000000000 --- a/content/k3s/latest/en/installation/ha/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: High Availability with an External DB -weight: 30 ---- - -> **Note:** Official support for installing Rancher on a Kubernetes cluster was introduced in our v1.0.0 release. - -This section describes how to install a high-availability K3s cluster with an external database. - -Single server clusters can meet a variety of use cases, but for environments where uptime of the Kubernetes control plane is critical, you can run K3s in an HA configuration. An HA K3s cluster is comprised of: - -* Two or more **server nodes** that will serve the Kubernetes API and run other control plane services -* Zero or more **agent nodes** that are designated to run your apps and services -* An **external datastore** (as opposed to the embedded SQLite datastore used in single-server setups) -* A **fixed registration address** that is placed in front of the server nodes to allow agent nodes to register with the cluster - -For more details on how these components work together, refer to the [architecture section.]({{}}/k3s/latest/en/architecture/#high-availability-with-an-external-db) - -Agents register through the fixed registration address, but after registration they establish a connection directly to one of the server nodes. This is a websocket connection initiated by the `k3s agent` process and it is maintained by a client-side load balancer running as part of the agent process. - -# Installation Outline - -Setting up an HA cluster requires the following steps: - -1. [Create an external datastore](#1-create-an-external-datastore) -2. [Launch server nodes](#2-launch-server-nodes) -3. [Configure the fixed registration address](#3-configure-the-fixed-registration-address) -4. [Join agent nodes](#4-optional-join-agent-nodes) - -### 1. 
Create an External Datastore -You will first need to create an external datastore for the cluster. See the [Cluster Datastore Options]({{}}/k3s/latest/en/installation/datastore/) documentation for more details. - -### 2. Launch Server Nodes -K3s requires two or more server nodes for this HA configuration. See the [Installation Requirements]({{}}/k3s/latest/en/installation/installation-requirements/) guide for minimum machine requirements. - -When running the `k3s server` command on these nodes, you must set the `datastore-endpoint` parameter so that K3s knows how to connect to the external datastore. The `token` parameter can also be used to set a deterministic token when adding nodes. When empty, this token will be generated automatically for further use. - -For example, a command like the following could be used to install the K3s server with a MySQL database as the external datastore and [set a token]({{}}/k3s/latest/en/installation/install-options/server-config/#cluster-options): - -```bash -curl -sfL https://get.k3s.io | sh -s - server \ - --token=SECRET \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" -``` - -The datastore endpoint format differs based on the database type. For details, refer to the section on [datastore endpoint formats.]({{}}/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality) - -To configure TLS certificates when launching server nodes, refer to the [datastore configuration guide.]({{}}/k3s/latest/en/installation/datastore/#external-datastore-configuration-parameters) - -> **Note:** The same installation options available to single-server installs are also available for high-availability installs. For more details, see the [Installation and Configuration Options]({{}}/k3s/latest/en/installation/install-options/) documentation. - -By default, server nodes will be schedulable and thus your workloads can get launched on them. If you wish to have a dedicated control plane where no user workloads will run, you can use taints. The `node-taint` parameter will allow you to configure nodes with taints, for example `--node-taint CriticalAddonsOnly=true:NoExecute`. - -Once you've launched the `k3s server` process on all server nodes, ensure that the cluster has come up properly with `k3s kubectl get nodes`. You should see your server nodes in the Ready state. - -### 3. Configure the Fixed Registration Address - -Agent nodes need a URL to register against. This can be the IP or hostname of any of the server nodes, but in many cases those may change over time. For example, if you are running your cluster in a cloud that supports scaling groups, you may scale the server node group up and down over time, causing nodes to be created and destroyed and thus having different IPs from the initial set of server nodes. Therefore, you should have a stable endpoint in front of the server nodes that will not change over time. This endpoint can be set up using any number approaches, such as: - -* A layer-4 (TCP) load balancer -* Round-robin DNS -* Virtual or elastic IP addresses - -This endpoint can also be used for accessing the Kubernetes API. So you can, for example, modify your [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file to point to it instead of a specific node. 
To avoid certificate errors in such a configuration, you should install the server with the `--tls-san YOUR_IP_OR_HOSTNAME_HERE` option. This option adds an additional hostname or IP as a Subject Alternative Name in the TLS cert, and it can be specified multiple times if you would like to access via both the IP and the hostname. - -### 4. Optional: Join Additional Server Nodes - -The same example command in Step 2 can be used to join additional server nodes, where the token from the first node needs to be used. - -If the first server node was started without the `--token` CLI flag or `K3S_TOKEN` variable, the token value can be retrieved from any server already joined to the cluster: -```bash -cat /var/lib/rancher/k3s/server/token -``` - -Additional server nodes can then be added [using the token]({{}}/k3s/latest/en/installation/install-options/server-config/#cluster-options): - -```bash -curl -sfL https://get.k3s.io | sh -s - server \ - --token=SECRET \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" -``` - -There are a few config flags that must be the same in all server nodes: - -* Network related flags: `--cluster-dns`, `--cluster-domain`, `--cluster-cidr`, `--service-cidr` -* Flags controlling the deployment of certain components: `--disable-helm-controller`, `--disable-kube-proxy`, `--disable-network-policy` and any component passed to `--disable` -* Feature related flags: `--secrets-encryption` - -> **Note:** Ensure that you retain a copy of this token as it is required when restoring from backup and adding nodes. Previously, K3s did not enforce the use of a token when using external SQL datastores. - -### 5. Optional: Join Agent Nodes - -Because K3s server nodes are schedulable by default, the minimum number of nodes for an HA K3s server cluster is two server nodes and zero agent nodes. To add nodes designated to run your apps and services, join agent nodes to your cluster. - -Joining agent nodes in an HA cluster is the same as joining agent nodes in a single server cluster. You just need to specify the URL the agent should register to and the token it should use. - -```bash -K3S_TOKEN=SECRET k3s agent --server https://fixed-registration-address:6443 -``` diff --git a/content/k3s/latest/en/installation/install-options/_index.md b/content/k3s/latest/en/installation/install-options/_index.md deleted file mode 100644 index e38f4cb7f0..0000000000 --- a/content/k3s/latest/en/installation/install-options/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "Installation Options" -weight: 20 ---- - -This page focuses on the options that can be used when you set up K3s for the first time: - -- [Options for installation with script](#options-for-installation-with-script) -- [Options for installation from binary](#options-for-installation-from-binary) -- [Registration options for the K3s server](#registration-options-for-the-k3s-server) -- [Registration options for the K3s agent](#registration-options-for-the-k3s-agent) -- [Configuration File](#configuration-file) - -In addition to configuring K3s with environment variables and CLI arguments, K3s can also use a [config file.](#configuration-file) - -For more advanced options, refer to [this page.]({{}}/k3s/latest/en/advanced) - -> Throughout the K3s documentation, you will see some options that can be passed in as both command flags and environment variables. 
For help with passing in options, refer to [How to Use Flags and Environment Variables.]({{}}/k3s/latest/en/installation/install-options/how-to-flags) - -### Options for Installation with Script - -As mentioned in the [Quick-Start Guide]({{}}/k3s/latest/en/quick-start/), you can use the installation script available at https://get.k3s.io to install K3s as a service on systemd and openrc based systems. - -The simplest form of this command is as follows: -```sh -curl -sfL https://get.k3s.io | sh - -``` - -When using this method to install K3s, the following environment variables can be used to configure the installation: - -| Environment Variable | Description | -|-----------------------------|---------------------------------------------| -| `INSTALL_K3S_SKIP_DOWNLOAD` | If set to true will not download K3s hash or binary. | -| `INSTALL_K3S_SYMLINK` | By default will create symlinks for the kubectl, crictl, and ctr binaries if the commands do not already exist in path. If set to 'skip' will not create symlinks and 'force' will overwrite. | -| `INSTALL_K3S_SKIP_ENABLE` | If set to true will not enable or start K3s service. | -| `INSTALL_K3S_SKIP_START` | If set to true will not start K3s service. | -| `INSTALL_K3S_VERSION` | Version of K3s to download from Github. Will attempt to download from the stable channel if not specified. | -| `INSTALL_K3S_BIN_DIR` | Directory to install K3s binary, links, and uninstall script to, or use `/usr/local/bin` as the default. | -| `INSTALL_K3S_BIN_DIR_READ_ONLY` | If set to true will not write files to `INSTALL_K3S_BIN_DIR`, forces setting `INSTALL_K3S_SKIP_DOWNLOAD=true`. | -| `INSTALL_K3S_SYSTEMD_DIR` | Directory to install systemd service and environment files to, or use `/etc/systemd/system` as the default. | -| `INSTALL_K3S_EXEC` | Command with flags to use for launching K3s in the service. If the command is not specified, and the `K3S_URL` is set, it will default to "agent." If `K3S_URL` not set, it will default to "server." For help, refer to [this example.]({{}}/k3s/latest/en/installation/install-options/how-to-flags/#example-b-install-k3s-exec) | -| `INSTALL_K3S_NAME` | Name of systemd service to create, will default to 'k3s' if running k3s as a server and 'k3s-agent' if running k3s as an agent. If specified the name will be prefixed with 'k3s-'. | -| `INSTALL_K3S_TYPE` | Type of systemd service to create, will default from the K3s exec command if not specified. | -| `INSTALL_K3S_SELINUX_WARN` | If set to true will continue if k3s-selinux policy is not found. | -| `INSTALL_K3S_SKIP_SELINUX_RPM` | If set to true will skip automatic installation of the k3s RPM. | -| `INSTALL_K3S_CHANNEL_URL` | Channel URL for fetching K3s download URL. Defaults to https://update.k3s.io/v1-release/channels. | -| `INSTALL_K3S_CHANNEL` | Channel to use for fetching K3s download URL. Defaults to "stable". Options include: `stable`, `latest`, `testing`. | - -This example shows where to place aforementioned environment variables as options (after the pipe): - -``` -curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh - -``` - -Environment variables which begin with `K3S_` will be preserved for the systemd and openrc services to use. - -Setting `K3S_URL` without explicitly setting an exec command will default the command to "agent". - -When running the agent `K3S_TOKEN` must also be set. 
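Putting those last two points together, a minimal agent install driven entirely by environment variables might look like the following sketch. The server URL and token are placeholders in the same style as the air-gap examples earlier on this page; use your own server address and the token found at `/var/lib/rancher/k3s/server/node-token` on the server.

```
# K3S_URL makes the script default to the "agent" command; replace the
# placeholder URL and token with your server address and node token.
curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh -
```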
- -### Options for installation from binary - -As stated, the installation script is primarily concerned with configuring K3s to run as a service. If you choose to not use the script, you can run K3s simply by downloading the binary from our [release page](https://github.com/rancher/k3s/releases/latest), placing it on your path, and executing it. The K3s binary supports the following commands: - -Command | Description ---------|------------------ -`k3s server` | Run the K3s management server, which will also launch Kubernetes control plane components such as the API server, controller-manager, and scheduler. -`k3s agent` | Run the K3s node agent. This will cause K3s to run as a worker node, launching the Kubernetes node services `kubelet` and `kube-proxy`. -`k3s kubectl` | Run an embedded [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) CLI. If the `KUBECONFIG` environment variable is not set, this will automatically attempt to use the config file that is created at `/etc/rancher/k3s/k3s.yaml` when launching a K3s server node. -`k3s crictl` | Run an embedded [crictl](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md). This is a CLI for interacting with Kubernetes's container runtime interface (CRI). Useful for debugging. -`k3s ctr` | Run an embedded [ctr](https://github.com/projectatomic/containerd/blob/master/docs/cli.md). This is a CLI for containerd, the container daemon used by K3s. Useful for debugging. -`k3s help` | Shows a list of commands or help for one command - -The `k3s server` and `k3s agent` commands have additional configuration options that can be viewed with `k3s server --help` or `k3s agent --help`. - -### Registration Options for the K3s Server - -For details on configuring the K3s server, refer to the [server configuration reference.]({{}}/k3s/latest/en/installation/install-options/server-config) - - -### Registration Options for the K3s Agent - -For details on configuring the K3s agent, refer to the [agent configuration reference.]({{}}/k3s/latest/en/installation/install-options/agent-config) - -### Configuration File - -_Available as of v1.19.1+k3s1_ - -In addition to configuring K3s with environment variables and CLI arguments, K3s can also use a config file. - -By default, values present in a YAML file located at `/etc/rancher/k3s/config.yaml` will be used on install. - -An example of a basic `server` config file is below: - -```yaml -write-kubeconfig-mode: "0644" -tls-san: - - "foo.local" -node-label: - - "foo=bar" - - "something=amazing" -``` - -In general, CLI arguments map to their respective YAML key, with repeatable CLI arguments being represented as YAML lists. - -An identical configuration using solely CLI arguments is shown below to demonstrate this: - -```bash -k3s server \ - --write-kubeconfig-mode "0644" \ - --tls-san "foo.local" \ - --node-label "foo=bar" \ - --node-label "something=amazing" -``` - -It is also possible to use both a configuration file and CLI arguments. In these situations, values will be loaded from both sources, but CLI arguments will take precedence. For repeatable arguments such as `--node-label`, the CLI arguments will overwrite all values in the list. - -Finally, the location of the config file can be changed either through the cli argument `--config FILE, -c FILE`, or the environment variable `$K3S_CONFIG_FILE`. 
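To make the precedence rules concrete, here is a small sketch using illustrative values only (the `override=true` label is not from this page): settings are read from the default config file, and any flag repeated on the command line wins, with repeatable flags such as `--node-label` replaced as a whole list rather than merged.

```bash
# Write an illustrative config file to the default location.
cat <<'EOF' | sudo tee /etc/rancher/k3s/config.yaml
write-kubeconfig-mode: "0644"
node-label:
  - "foo=bar"
  - "something=amazing"
EOF

# write-kubeconfig-mode is still taken from the file, but because
# --node-label is given on the CLI, only override=true is applied;
# the two labels from the file are dropped.
sudo k3s server --node-label "override=true"
```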
diff --git a/content/k3s/latest/en/installation/install-options/agent-config/_index.md b/content/k3s/latest/en/installation/install-options/agent-config/_index.md deleted file mode 100644 index a31cb02943..0000000000 --- a/content/k3s/latest/en/installation/install-options/agent-config/_index.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: K3s Agent Configuration Reference -weight: 2 ---- -In this section, you'll learn how to configure the K3s agent. - -> Throughout the K3s documentation, you will see some options that can be passed in as both command flags and environment variables. For help with passing in options, refer to [How to Use Flags and Environment Variables.]({{}}/k3s/latest/en/installation/install-options/how-to-flags) - -- [Config](#config) -- [Logging](#logging) -- [Cluster Options](#cluster-options) -- [Data](#data) -- [Node](#node) -- [Runtime](#runtime) -- [Networking](#networking) -- [Customized Flags](#customized-flags) -- [Experimental](#experimental) -- [Deprecated](#deprecated) -- [Node Labels and Taints for Agents](#node-labels-and-taints-for-agents) -- [K3s Agent CLI Help](#k3s-agent-cli-help) - -### Config - -| Flag | Default | Description | -|------|---------|-------------| -| `--config FILE, -c` FILE | "/etc/rancher/k3s/config.yaml" | Load configuration from FILE | - -### Logging - -| Flag | Default | Environment Variable | Description | -|------|---------|----------------------|-------------| -| `--debug` | N/A | `K3S_DEBUG` | Turn on debug logs | -| `-v` value | 0 | N/A | Number for the log level verbosity | -| `--vmodule` value | N/A | N/A | Comma-separated list of pattern=N settings for file-filtered logging | -| `--log value, -l` value | N/A | N/A | Log to file | -| `--alsologtostderr` | N/A | N/A | Log to standard error as well as file (if set) | - -### Cluster Options -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--token value, -t` value | `K3S_TOKEN` | Token to use for authentication | -| `--token-file` value | `K3S_TOKEN_FILE` | Token file to use for authentication | -| `--server value, -s` value | `K3S_URL` | Server to connect to | - - -### Data -| Flag | Default | Description | -|------|---------|-------------| -| `--data-dir value, -d` value | "/var/lib/rancher/k3s" | Folder to hold state | - -### Node -| Flag | Default | Environment Variable | Description | -|------|---------|----------------------|-------------| -| `--node-name` value | N/A | `K3S_NODE_NAME` | Node name | -| `--with-node-id` | N/A | N/A | Append id to node name | -| `--node-label` value | N/A | N/A | Registering and starting kubelet with set of labels | -| `--node-taint` value | N/A | N/A | Registering kubelet with set of taints | -| `--image-credential-provider-bin-dir` value | "/var/lib/rancher/credentialprovider/bin" | N/A | The path to the directory where credential provider plugin binaries are located | -| `--image-credential-provider-config` value | "/var/lib/rancher/credentialprovider/config.yaml" | N/A | The path to the credential provider plugin config file | -| `--protect-kernel-defaults` | N/A | N/A | Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults | -| `--selinux` | N/A | `K3S_SELINUX` | Enable SELinux in containerd. | -| `--lb-server-port` value | 6444 | `K3S_LB_SERVER_PORT` | Local port for supervisor client load-balancer. If the supervisor and apiserver are not colocated an additional port 1 less than this port will also be used for the apiserver client load-balancer. 
| - -### Runtime -| Flag | Default | Description | -|------|---------|-------------| -| `--docker` | N/A | Use docker instead of containerd | -| `--container-runtime-endpoint` value | N/A | Disable embedded containerd and use alternative CRI implementation | -| `--pause-image` value | "docker.io/rancher/pause:3.1" | Customized pause image for containerd or docker sandbox | (agent/runtime) (default: ) -| `--snapshotter` value | `overlayfs` | Override default containerd snapshotter | -| `--private-registry` value | `/etc/rancher/k3s/registries.yaml` | Private registry configuration file | - -### Networking -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--node-ip value, -i` value | N/A | IP address to advertise for node | -| `--node-external-ip` value | N/A | External IP address to advertise for node | -| `--resolv-conf` value | `K3S_RESOLV_CONF` | Kubelet resolv.conf file | -| `--flannel-iface` value | N/A | Override default flannel interface | -| `--flannel-conf` value | N/A | Override default flannel config file | - -> Note: if you wish to directly set the kubelet `--resolv-conf` value, use `--kubelet-arg=resolv-conf=value` instead. The k3s flag is only passed through to the kubelet if set to the path of a valid resolv.conf file. -### Customized Flags -| Flag | Description | -|------|--------------| -| `--kubelet-arg` value | Customized flag for kubelet process | -| `--kube-proxy-arg` value | Customized flag for kube-proxy process | - -### Experimental -| Flag | Description | -|------|--------------| -| `--rootless` | Run rootless | - -### Deprecated -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--no-flannel` | N/A | Use `--flannel-backend=none` | -| `--cluster-secret` value | `K3S_CLUSTER_SECRET` | Use `--token` | - -### Node Labels and Taints for Agents - -K3s agents can be configured with the options `--node-label` and `--node-taint` which adds a label and taint to the kubelet. The two options only add labels and/or taints at registration time, so they can only be added once and not changed after that again by running K3s commands. - -Below is an example showing how to add labels and a taint: -```bash - --node-label foo=bar \ - --node-label hello=world \ - --node-taint key1=value1:NoExecute -``` - -If you want to change node labels and taints after node registration you should use `kubectl`. Refer to the official Kubernetes documentation for details on how to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) and [node labels.](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node) - -### K3s Agent CLI Help - -> If an option appears in brackets below, for example `[$K3S_URL]`, it means that the option can be passed in as an environment variable of that name. 
- -```bash -NAME: - k3s agent - Run node agent - -USAGE: - k3s agent [OPTIONS] - -OPTIONS: - --config FILE, -c FILE (config) Load configuration from FILE (default: "/etc/rancher/k3s/config.yaml") [$K3S_CONFIG_FILE] - --debug (logging) Turn on debug logs [$K3S_DEBUG] - -v value (logging) Number for the log level verbosity (default: 0) - --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging - --log value, -l value (logging) Log to file - --alsologtostderr (logging) Log to standard error as well as file (if set) - --token value, -t value (cluster) Token to use for authentication [$K3S_TOKEN] - --token-file value (cluster) Token file to use for authentication [$K3S_TOKEN_FILE] - --server value, -s value (cluster) Server to connect to [$K3S_URL] - --data-dir value, -d value (agent/data) Folder to hold state (default: "/var/lib/rancher/k3s") - --node-name value (agent/node) Node name [$K3S_NODE_NAME] - --with-node-id (agent/node) Append id to node name - --node-label value (agent/node) Registering and starting kubelet with set of labels - --node-taint value (agent/node) Registering kubelet with set of taints - --image-credential-provider-bin-dir value (agent/node) The path to the directory where credential provider plugin binaries are located (default: "/var/lib/rancher/credentialprovider/bin") - --image-credential-provider-config value (agent/node) The path to the credential provider plugin config file (default: "/var/lib/rancher/credentialprovider/config.yaml") - --docker (agent/runtime) Use docker instead of containerd - --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation - --pause-image value (agent/runtime) Customized pause image for containerd or docker sandbox (default: "rancher/mirrored-pause:3.6") - --snapshotter value (agent/runtime) Override default containerd snapshotter (default: "overlayfs") - --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") - --node-ip value, -i value (agent/networking) IPv4/IPv6 addresses to advertise for node - --node-external-ip value (agent/networking) IPv4/IPv6 external IP addresses to advertise for node - --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] - --flannel-iface value (agent/networking) Override default flannel interface - --flannel-conf value (agent/networking) Override default flannel config file - --kubelet-arg value (agent/flags) Customized flag for kubelet process - --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process - --protect-kernel-defaults (agent/node) Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults. - --rootless (experimental) Run rootless - --selinux (agent/node) Enable SELinux in containerd [$K3S_SELINUX] - --lb-server-port value (agent/node) Local port for supervisor client load-balancer. If the supervisor and apiserver are not colocated an additional port 1 less than this port will also be used for the apiserver client load-balancer. 
(default: 6444) [$K3S_LB_SERVER_PORT] - --no-flannel (deprecated) use --flannel-backend=none - --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] -``` diff --git a/content/k3s/latest/en/installation/install-options/how-to-flags/_index.md b/content/k3s/latest/en/installation/install-options/how-to-flags/_index.md deleted file mode 100644 index 447dfdc40f..0000000000 --- a/content/k3s/latest/en/installation/install-options/how-to-flags/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: How to Use Flags and Environment Variables -weight: 3 ---- - -Throughout the K3s documentation, you will see some options that can be passed in as both command flags and environment variables. The below examples show how these options can be passed in both ways. - -### Example A: K3S_KUBECONFIG_MODE - -The option to allow writing to the kubeconfig file is useful for allowing a K3s cluster to be imported into Rancher. Below are two ways to pass in the option. - -Using the flag `--write-kubeconfig-mode 644`: - -```bash -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` -Using the environment variable `K3S_KUBECONFIG_MODE`: - -```bash -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -### Example B: INSTALL_K3S_EXEC - -If this command is not specified as a server or agent command, it will default to "agent" if `K3S_URL` is set, or "server" if it is not set. - -The final systemd command resolves to a combination of this environment variable and script args. To illustrate this, the following commands result in the same behavior of registering a server without flannel: - -```bash -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend none" sh -s - -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --flannel-backend none" sh -s - -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server" sh -s - --flannel-backend none -curl -sfL https://get.k3s.io | sh -s - server --flannel-backend none -curl -sfL https://get.k3s.io | sh -s - --flannel-backend none -``` - -### Example C: CONFIG FILE - -Before installing k3s, you can create a file called `config.yaml` containing fields that match CLI flags. That file needs to be in the path: `/etc/rancher/k3s/config.yaml` for k3s to consume it. - -The fields in the config file drop the starting `--` from the matching CLI flag. For example: - -``` -write-kubeconfig-mode: 644 -token: "secret" -node-ip: 10.0.10.22,2a05:d012:c6f:4655:d73c:c825:a184:1b75 -cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56 -service-cidr: 10.43.0.0/16,2001:cafe:42:1::/112 -``` diff --git a/content/k3s/latest/en/installation/install-options/server-config/_index.md b/content/k3s/latest/en/installation/install-options/server-config/_index.md deleted file mode 100644 index 853d2ccb49..0000000000 --- a/content/k3s/latest/en/installation/install-options/server-config/_index.md +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: K3s Server Configuration Reference -weight: 1 ---- - -In this section, you'll learn how to configure the K3s server. - -> Throughout the K3s documentation, you will see some options that can be passed in as both command flags and environment variables. 
For help with passing in options, refer to [How to Use Flags and Environment Variables.]({{}}/k3s/latest/en/installation/install-options/how-to-flags) - -- [Commonly Used Options](#commonly-used-options) - - [Database](#database) - - [Cluster Options](#cluster-options) - - [Client Options](#client-options) -- [Agent Options](#agent-options) - - [Agent Nodes](#agent-nodes) - - [Agent Runtime](#agent-runtime) - - [Agent Networking](#agent-networking) -- [Advanced Options](#advanced-options) - - [Logging](#logging) - - [Listeners](#listeners) - - [Data](#data) - - [Networking](#networking) - - [Customized Options](#customized-options) - - [Storage Class](#storage-class) - - [Kubernetes Components](#kubernetes-components) - - [Customized Flags for Kubernetes Processes](#customized-flags-for-kubernetes-processes) - - [Experimental Options](#experimental-options) - - [Deprecated Options](#deprecated-options) -- [K3s Server Cli Help](#k3s-server-cli-help) - - -# Commonly Used Options - -### Database - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--datastore-endpoint` value | `K3S_DATASTORE_ENDPOINT` | Specify etcd, Mysql, Postgres, or Sqlite (default) data source name | -| `--datastore-cafile` value | `K3S_DATASTORE_CAFILE` | TLS Certificate Authority file used to secure datastore backend communication | -| `--datastore-certfile` value | `K3S_DATASTORE_CERTFILE` | TLS certification file used to secure datastore backend communication | -| `--datastore-keyfile` value | `K3S_DATASTORE_KEYFILE` | TLS key file used to secure datastore backend communication | -| `--etcd-expose-metrics` | N/A | Expose etcd metrics to client interface. (Default false) | -| `--etcd-disable-snapshots` | N/A | Disable automatic etcd snapshots | -| `--etcd-snapshot-name` value | N/A | Set the base name of etcd snapshots. Default: etcd-snapshot- (default: "etcd-snapshot") | -| `--etcd-snapshot-schedule-cron` value | N/A | Snapshot interval time in cron spec. eg. every 5 hours '* */5 * * *' (default: "0 */12 * * *") | -| `--etcd-snapshot-retention` value | N/A | Number of snapshots to retain (Default: 5) | -| `--etcd-snapshot-dir` value | N/A | Directory to save db snapshots. 
(Default location: ${data-dir}/db/snapshots) | -| `--etcd-s3` | N/A | Enable backup to S3 | -| `--etcd-s3-endpoint` value | N/A | S3 endpoint url (default: "s3.amazonaws.com") | -| `--etcd-s3-endpoint-ca` value | N/A | S3 custom CA cert to connect to S3 endpoint | -| `--etcd-s3-skip-ssl-verify` | N/A | Disables S3 SSL certificate validation | -| `--etcd-s3-access-key` value | `AWS_ACCESS_KEY_ID` | S3 access key | -| `--etcd-s3-secret-key` value | `AWS_SECRET_ACCESS_KEY` | S3 secret key | -| `--etcd-s3-bucket` value | N/A | S3 bucket name | -| `--etcd-s3-region` value | N/A | S3 region / bucket location (optional) (default: "us-east-1") | -| `--etcd-s3-folder` value | N/A | S3 folder | - -### Cluster Options - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--token value, -t` value | `K3S_TOKEN` | Shared secret used to join a server or agent to a cluster | -| `--token-file` value | `K3S_TOKEN_FILE` | File containing the cluster-secret/token | - -### Client Options - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--write-kubeconfig value, -o` value | `K3S_KUBECONFIG_OUTPUT` | Write kubeconfig for admin client to this file | -| `--write-kubeconfig-mode` value | `K3S_KUBECONFIG_MODE` | Write kubeconfig with this [mode.](https://en.wikipedia.org/wiki/Chmod) The option to allow writing to the kubeconfig file is useful for allowing a K3s cluster to be imported into Rancher. An example value is 644. | - -# Agent Options - -K3s agent options are available as server options because the server has the agent process embedded within. - -### Agent Nodes - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--node-name` value | `K3S_NODE_NAME` | Node name | -| `--with-node-id` | N/A | Append id to node name | (agent/node) -| `--node-label` value | N/A | Registering and starting kubelet with set of labels | -| `--node-taint` value | N/A | Registering kubelet with set of taints | -| `--image-credential-provider-bin-dir` value | N/A | The path to the directory where credential provider plugin binaries are located (default: "/var/lib/rancher/credentialprovider/bin") | -| `--image-credential-provider-config` value | N/A | The path to the credential provider plugin config file (default: "/var/lib/rancher/credentialprovider/config.yaml") | -| `--selinux` | `K3S_SELINUX` | Enable SELinux in containerd | -| `--lb-server-port` value | `K3S_LB_SERVER_PORT` | Local port for supervisor client load-balancer. If the supervisor and apiserver are not colocated an additional port 1 less than this port will also be used for the apiserver client load-balancer. 
(default: 6444) | - -### Agent Runtime - -| Flag | Default | Description | -|------|---------|-------------| -| `--docker` | N/A | Use docker instead of containerd | (agent/runtime) -| `--container-runtime-endpoint` value | N/A | Disable embedded containerd and use alternative CRI implementation | -| `--pause-image` value | "docker.io/rancher/pause:3.1" | Customized pause image for containerd or Docker sandbox | -| `--snapshotter` value | N/A | Override default containerd snapshotter (default: "overlayfs") | -| `--private-registry` value | "/etc/rancher/k3s/registries.yaml" | Private registry configuration file | - -### Agent Networking - -the agent options are there because the server has the agent process embedded within - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--node-ip value, -i` value | N/A | IP address to advertise for node | -| `--node-external-ip` value | N/A | External IP address to advertise for node | -| `--resolv-conf` value | `K3S_RESOLV_CONF` | Kubelet resolv.conf file | -| `--flannel-iface` value | N/A | Override default flannel interface | -| `--flannel-conf` value | N/A | Override default flannel config file | - -# Advanced Options - -### Logging - -| Flag | Default | Description | -|------|---------|-------------| -| `--debug` | N/A | Turn on debug logs | -| `-v` value | 0 | Number for the log level verbosity | -| `--vmodule` value | N/A | Comma-separated list of pattern=N settings for file-filtered logging | -| `--log value, -l` value | N/A | Log to file | -| `--alsologtostderr` | N/A | Log to standard error as well as file (if set) | - - -### Listeners - -| Flag | Default | Description | -|------|---------|-------------| -| `--bind-address` value | 0.0.0.0 | k3s bind address | -| `--https-listen-port` value | 6443 | HTTPS listen port | -| `--advertise-address` value | node-external-ip/node-ip | IP address that apiserver uses to advertise to members of the cluster | -| `--advertise-port` value | 0 | Port that apiserver uses to advertise to members of the cluster (default: listen-port) | -| `--tls-san` value | N/A | Add additional hostname or IP as a Subject Alternative Name in the TLS cert - -### Data - -| Flag | Default | Description | -|------|---------|-------------| -| `--data-dir value, -d` value | `/var/lib/rancher/k3s` or `${HOME}/.rancher/k3s` if not root | Folder to hold state | - -### Networking - -| Flag | Default | Description | -|------|---------|-------------| -| `--cluster-cidr` value | "10.42.0.0/16" | Network CIDR to use for pod IPs | -| `--service-cidr` value | "10.43.0.0/16" | Network CIDR to use for services IPs | -| `--service-node-port-range` value | "30000-32767" | Port range to reserve for services with NodePort visibility | -| `--cluster-dns` value | "10.43.0.10" | Cluster IP for coredns service. 
Should be in your service-cidr range | -| `--cluster-domain` value | "cluster.local" | Cluster Domain | -| `--flannel-backend` value | "vxlan" | One of 'none', 'vxlan', 'ipsec', 'host-gw', or 'wireguard' | - -### Customized Flags - -| Flag | Description | -|------|--------------| -| `--etcd-arg` value | Customized flag for etcd process | -| `--kube-apiserver-arg` value | Customized flag for kube-apiserver process | -| `--kube-scheduler-arg` value | Customized flag for kube-scheduler process | -| `--kube-controller-manager-arg` value | Customized flag for kube-controller-manager process | -| `--kube-cloud-controller-manager-arg` value | Customized flag for kube-cloud-controller-manager process | - -### Storage Class - -| Flag | Description | -|------|--------------| -| `--default-local-storage-path` value | Default local storage path for local provisioner storage class | - -### Kubernetes Components - -| Flag | Description | -|------|--------------| -| `--disable` value | Do not deploy packaged components and delete any deployed components (valid items: coredns, servicelb, traefik,local-storage, metrics-server) | -| `--disable-scheduler` | Disable Kubernetes default scheduler | -| `--disable-cloud-controller` | Disable k3s default cloud controller manager | -| `--disable-kube-proxy` | Disable running kube-proxy | -| `--disable-network-policy` | Disable k3s default network policy controller | - -### Customized Flags for Kubernetes Processes - -| Flag | Description | -|------|--------------| -| `--kubelet-arg` value | Customized flag for kubelet process | -| `--kube-proxy-arg` value | Customized flag for kube-proxy process | - -### Experimental Options - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--rootless` | N/A | Run rootless | (experimental) -| `--agent-token` value | `K3S_AGENT_TOKEN` | Shared secret used to join agents to the cluster, but not servers | -| `--agent-token-file` value | `K3S_AGENT_TOKEN_FILE` | File containing the agent secret | -| `--server value, -s` value | `K3S_URL` | Server to connect to, used to join a cluster | -| `--cluster-init` | `K3S_CLUSTER_INIT` | Initialize new cluster master | -| `--cluster-reset` | `K3S_CLUSTER_RESET` | Forget all peers and become a single cluster new cluster master | -| `--secrets-encryption` | N/A | Enable Secret encryption at rest | - -### Deprecated Options - -| Flag | Environment Variable | Description | -|------|----------------------|-------------| -| `--no-flannel` | N/A | Use --flannel-backend=none | -| `--no-deploy` value | N/A | Do not deploy packaged components (valid items: coredns, servicelb, traefik, local-storage, metrics-server) | -| `--cluster-secret` value | `K3S_CLUSTER_SECRET` | Use --token | - - -# K3s Server CLI Help - -> If an option appears in brackets below, for example `[$K3S_TOKEN]`, it means that the option can be passed in as an environment variable of that name. 
- -```bash -NAME: - k3s server - Run management server - -USAGE: - k3s server [OPTIONS] - -OPTIONS: - --config FILE, -c FILE (config) Load configuration from FILE (default: "/etc/rancher/k3s/config.yaml") [$K3S_CONFIG_FILE] --debug (logging) Turn on debug logs [$K3S_DEBUG] - -v value (logging) Number for the log level verbosity (default: 0) - --vmodule value (logging) Comma-separated list of pattern=N settings for file-filtered logging - --log value, -l value (logging) Log to file - --alsologtostderr (logging) Log to standard error as well as file (if set) - --bind-address value (listener) k3s bind address (default: 0.0.0.0) - --https-listen-port value (listener) HTTPS listen port (default: 6443) - --advertise-address value (listener) IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip) - --advertise-port value (listener) Port that apiserver uses to advertise to members of the cluster (default: listen-port) (default: 0) - --tls-san value (listener) Add additional hostname or IP as a Subject Alternative Name in the TLS cert - --data-dir value, -d value (data) Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root - --cluster-cidr value (networking) Network CIDR to use for pod IPs (default: "10.42.0.0/16") - --service-cidr value (networking) Network CIDR to use for services IPs (default: "10.43.0.0/16") - --service-node-port-range value (networking) Port range to reserve for services with NodePort visibility (default: "30000-32767") - --cluster-dns value (networking) Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10) - --cluster-domain value (networking) Cluster Domain (default: "cluster.local") - --flannel-backend value (networking) One of 'none', 'vxlan', 'ipsec', 'host-gw', or 'wireguard' (default: "vxlan") - --token value, -t value (cluster) Shared secret used to join a server or agent to a cluster [$K3S_TOKEN] - --token-file value (cluster) File containing the cluster-secret/token [$K3S_TOKEN_FILE] - --write-kubeconfig value, -o value (client) Write kubeconfig for admin client to this file [$K3S_KUBECONFIG_OUTPUT] - --write-kubeconfig-mode value (client) Write kubeconfig with this mode [$K3S_KUBECONFIG_MODE] - --etcd-arg value (flags) Customized flag for etcd process - --kube-apiserver-arg value (flags) Customized flag for kube-apiserver process - --kube-scheduler-arg value (flags) Customized flag for kube-scheduler process - --kube-controller-manager-arg value (flags) Customized flag for kube-controller-manager process - --kube-cloud-controller-manager-arg value (flags) Customized flag for kube-cloud-controller-manager process - --datastore-endpoint value (db) Specify etcd, Mysql, Postgres, or Sqlite (default) data source name [$K3S_DATASTORE_ENDPOINT] - --datastore-cafile value (db) TLS Certificate Authority file used to secure datastore backend communication [$K3S_DATASTORE_CAFILE] - --datastore-certfile value (db) TLS certification file used to secure datastore backend communication [$K3S_DATASTORE_CERTFILE] - --datastore-keyfile value (db) TLS key file used to secure datastore backend communication [$K3S_DATASTORE_KEYFILE] - --etcd-expose-metrics (db) Expose etcd metrics to client interface. (Default false) - --etcd-disable-snapshots (db) Disable automatic etcd snapshots - --etcd-snapshot-name value (db) Set the base name of etcd snapshots. 
Default: etcd-snapshot- (default: "etcd-snapshot") - --etcd-snapshot-schedule-cron value (db) Snapshot interval time in cron spec. eg. every 5 hours '* */5 * * *' (default: "0 */12 * * *") - --etcd-snapshot-retention value (db) Number of snapshots to retain Default: 5 (default: 5) - --etcd-snapshot-dir value (db) Directory to save db snapshots. (Default location: ${data-dir}/db/snapshots) - --etcd-s3 (db) Enable backup to S3 - --etcd-s3-endpoint value (db) S3 endpoint url (default: "s3.amazonaws.com") - --etcd-s3-endpoint-ca value (db) S3 custom CA cert to connect to S3 endpoint - --etcd-s3-skip-ssl-verify (db) Disables S3 SSL certificate validation - --etcd-s3-access-key value (db) S3 access key [$AWS_ACCESS_KEY_ID] - --etcd-s3-secret-key value (db) S3 secret key [$AWS_SECRET_ACCESS_KEY] - --etcd-s3-bucket value (db) S3 bucket name - --etcd-s3-region value (db) S3 region / bucket location (optional) (default: "us-east-1") - --etcd-s3-folder value (db) S3 folder - --default-local-storage-path value (storage) Default local storage path for local provisioner storage class - --disable value (components) Do not deploy packaged components and delete any deployed components (valid items: coredns, servicelb, traefik, local-storage, metrics-server) - --disable-scheduler (components) Disable Kubernetes default scheduler - --disable-cloud-controller (components) Disable k3s default cloud controller manager - --disable-kube-proxy (components) Disable running kube-proxy - --disable-network-policy (components) Disable k3s default network policy controller - --node-name value (agent/node) Node name [$K3S_NODE_NAME] - --with-node-id (agent/node) Append id to node name - --node-label value (agent/node) Registering and starting kubelet with set of labels - --node-taint value (agent/node) Registering kubelet with set of taints - --image-credential-provider-bin-dir value (agent/node) The path to the directory where credential provider plugin binaries are located (default: "/var/lib/rancher/credentialprovider/bin") - --image-credential-provider-config value (agent/node) The path to the credential provider plugin config file (default: "/var/lib/rancher/credentialprovider/config.yaml") - --docker (agent/runtime) Use docker instead of containerd - --container-runtime-endpoint value (agent/runtime) Disable embedded containerd and use alternative CRI implementation - --pause-image value (agent/runtime) Customized pause image for containerd or docker sandbox (default: "docker.io/rancher/pause:3.1") - --snapshotter value (agent/runtime) Override default containerd snapshotter (default: "overlayfs") - --private-registry value (agent/runtime) Private registry configuration file (default: "/etc/rancher/k3s/registries.yaml") - --node-ip value, -i value (agent/networking) IP address to advertise for node - --node-external-ip value (agent/networking) External IP address to advertise for node - --resolv-conf value (agent/networking) Kubelet resolv.conf file [$K3S_RESOLV_CONF] - --flannel-iface value (agent/networking) Override default flannel interface - --flannel-conf value (agent/networking) Override default flannel config file - --kubelet-arg value (agent/flags) Customized flag for kubelet process - --kube-proxy-arg value (agent/flags) Customized flag for kube-proxy process - --protect-kernel-defaults (agent/node) Kernel tuning behavior. If set, error if kernel tunables are different than kubelet defaults. 
- --rootless (experimental) Run rootless - --agent-token value (experimental/cluster) Shared secret used to join agents to the cluster, but not servers [$K3S_AGENT_TOKEN] - --agent-token-file value (experimental/cluster) File containing the agent secret [$K3S_AGENT_TOKEN_FILE] - --server value, -s value (experimental/cluster) Server to connect to, used to join a cluster [$K3S_URL] - --cluster-init (experimental/cluster) Initialize new cluster master [$K3S_CLUSTER_INIT] - --cluster-reset (experimental/cluster) Forget all peers and become a single cluster new cluster master [$K3S_CLUSTER_RESET] - --cluster-reset-restore-path value (db) Path to snapshot file to be restored - --secrets-encryption (experimental) Enable Secret encryption at rest - --system-default-registry value (image) Private registry to be used for all system images [$K3S_SYSTEM_DEFAULT_REGISTRY] - --selinux (agent/node) Enable SELinux in containerd [$K3S_SELINUX] - --lb-server-port value (agent/node) Local port for supervisor client load-balancer. If the supervisor and apiserver are not colocated an additional port 1 less than this port will also be used for the apiserver client load-balancer. (default: 6444) [$K3S_LB_SERVER_PORT] - --no-flannel (deprecated) use --flannel-backend=none - --no-deploy value (deprecated) Do not deploy packaged components (valid items: coredns, servicelb, traefik, local-storage, metrics-server) - --cluster-secret value (deprecated) use --token [$K3S_CLUSTER_SECRET] -``` diff --git a/content/k3s/latest/en/installation/installation-requirements/_index.md b/content/k3s/latest/en/installation/installation-requirements/_index.md deleted file mode 100644 index f4fd336d7f..0000000000 --- a/content/k3s/latest/en/installation/installation-requirements/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Installation Requirements -weight: 1 -aliases: - - /k3s/latest/en/installation/node-requirements/ ---- - -K3s is very lightweight, but has some minimum requirements as outlined below. - -Whether you're configuring a K3s cluster to run in a Docker or Kubernetes setup, each node running K3s should meet the following minimum requirements. You may need more resources to fit your needs. - -## Prerequisites - -Two nodes cannot have the same hostname. - -If all your nodes have the same hostname, use the `--with-node-id` option to append a random suffix for each node, or otherwise devise a unique name to pass with `--node-name` or `$K3S_NODE_NAME` for each node you add to the cluster. - -## Operating Systems - -K3s is expected to work on most modern Linux systems. - -Some OSs have specific requirements: - -- If you are using **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. -- If you are using **Alpine Linux**, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. -- If you are using **(Red Hat/CentOS) Enterprise Linux**, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-red-hat-centos-enterprise-linux) for additional setup. - -For more information on which OSs were tested with Rancher managed K3s clusters, refer to the [Rancher support and maintenance terms.](https://rancher.com/support-maintenance-terms/) - -## Hardware - -Hardware requirements scale based on the size of your deployments. Minimum recommendations are outlined here. 
- -* RAM: 512MB Minimum (we recommend at least 1GB) -* CPU: 1 Minimum - -[This section](./resource-profiling) captures the results of tests to determine minimum resource requirements for the K3s agent, the K3s server with a workload, and the K3s server with one agent. It also contains analysis about what has the biggest impact on K3s server and agent utilization, and how the cluster datastore can be protected from interference from agents and workloads. - -#### Disks - -K3s performance depends on the performance of the database. To ensure optimal speed, we recommend using an SSD when possible. Disk performance will vary on ARM devices utilizing an SD card or eMMC. - -## Networking - -The K3s server needs port 6443 to be accessible by all nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used or over UDP ports 51820 and 51821 (when using IPv6) when Flannel Wireguard backend is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then the ports needed by Flannel are not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -If you plan on achieving high availability with embedded etcd, server nodes must be accessible to each other on ports 2379 and 2380. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. -> **Warning:** Flannel relies on the [Bridge CNI plugin](https://www.cni.dev/plugins/current/main/bridge/) to create a L2 network that switches traffic. Rogue pods with NET_RAW capabilities can abuse that L2 network to launch attacks such as [ARP spoofing](https://static.sched.com/hosted_files/kccncna19/72/ARP%20DNS%20spoof.pdf). Therefore, as documented in the [kubernetes docs](https://kubernetes.io/docs/concepts/security/pod-security-standards/), please set a restricted profile that disables NET_RAW on non-trustable pods. - -
Inbound Rules for K3s Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 6443 | K3s agent nodes | Kubernetes API Server -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN -| UDP | 51820 | K3s server and agent nodes | Required only for Flannel Wireguard backend -| UDP | 51821 | K3s server and agent nodes | Required only for Flannel Wireguard backend with IPv6 -| TCP | 10250 | K3s server and agent nodes | Kubelet metrics -| TCP | 2379-2380 | K3s server nodes | Required only for HA with embedded etcd - -Typically all outbound traffic is allowed. - -## Large Clusters - -Hardware requirements are based on the size of your K3s cluster. For production and large clusters, we recommend using a high-availability setup with an external database. The following options are recommended for the external database in production: - -- MySQL -- PostgreSQL -- etcd - -### CPU and Memory - -The following are the minimum CPU and memory requirements for nodes in a high-availability K3s server: - -| Deployment Size | Nodes | VCPUS | RAM | -|:---------------:|:---------:|:-----:|:-----:| -| Small | Up to 10 | 2 | 4 GB | -| Medium | Up to 100 | 4 | 8 GB | -| Large | Up to 250 | 8 | 16 GB | -| X-Large | Up to 500 | 16 | 32 GB | -| XX-Large | 500+ | 32 | 64 GB | - -### Disks - -The cluster performance depends on database performance. To ensure optimal speed, we recommend always using SSD disks to back your K3s cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. - -### Network - -You should consider increasing the subnet size for the cluster CIDR so that you don't run out of IPs for the pods. You can do that by passing the `--cluster-cidr` option to K3s server upon starting. - -### Database - -K3s supports different databases including MySQL, PostgreSQL, MariaDB, and etcd, the following is a sizing guide for the database resources you need to run large clusters: - -| Deployment Size | Nodes | VCPUS | RAM | -|:---------------:|:---------:|:-----:|:-----:| -| Small | Up to 10 | 1 | 2 GB | -| Medium | Up to 100 | 2 | 8 GB | -| Large | Up to 250 | 4 | 16 GB | -| X-Large | Up to 500 | 8 | 32 GB | -| XX-Large | 500+ | 16 | 64 GB | - diff --git a/content/k3s/latest/en/installation/installation-requirements/resource-profiling/_index.md b/content/k3s/latest/en/installation/installation-requirements/resource-profiling/_index.md deleted file mode 100644 index 53866c533c..0000000000 --- a/content/k3s/latest/en/installation/installation-requirements/resource-profiling/_index.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: K3s Resource Profiling -shortTitle: Resource Profiling -weight: 1 ---- - -This section captures the results of tests to determine minimum resource requirements for K3s. 
- -The results are summarized as follows: - -| Components | Processor | Min CPU | Min RAM with Kine/SQLite | Min RAM with Embedded etcd | -|------------|-----|----------|-------------------------|---------------------------| -| K3s server with a workload | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | 10% of a core | 768 M | 896 M | -| K3s cluster with a single agent | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | 10% of a core | 512 M | 768 M | -| K3s agent | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | 5% of a core | 256 M | 256 M | -| K3s server with a workload | Pi4B BCM2711, 1.50 GHz | 20% of a core | 768 M | 896 M | -| K3s cluster with a single agent | Pi4B BCM2711, 1.50 GHz | 20% of a core | 512 M | 768 M | -| K3s agent | Pi4B BCM2711, 1.50 GHz | 10% of a core | 256 M | 256 M | - -- [Scope of Resource Testing](#scope-of-resource-testing) -- [Components Included for Baseline Measurements](#components-included-for-baseline-measurements) -- [Methodology](#methodology) -- [Environment](#environment) -- [Baseline Resource Requirements](#baseline-resource-requirements) - - [K3s Server with a Workload](#k3s-server-with-a-workload) - - [K3s Cluster with a Single Agent](#k3s-cluster-with-a-single-agent) - - [K3s Agent](#k3s-agent) -- [Analysis](#analysis) - - [Primary Resource Utilization Drivers](#primary-resource-utilization-drivers) - - [Preventing Agents and Workloads from Interfering with the Cluster Datastore](#preventing-agents-and-workloads-from-interfering-with-the-cluster-datastore) - -# Scope of Resource Testing - -The resource tests were intended to address the following problem statements: - -- On a single-node cluster, determine the legitimate minimum amount of CPU, memory, and IOPs that should be set aside to run the entire K3s stack server stack, assuming that a real workload will be deployed on the cluster. -- On an agent (worker) node, determine the legitimate minimum amount of CPU, memory, and IOPs that should be set aside for the Kubernetes and K3s control plane components (the kubelet and k3s agent). - -# Components Included for Baseline Measurements - -The tested components are: - -* K3s 1.19.2 with all packaged components enabled -* Prometheus + Grafana monitoring stack -* Kubernetes Example PHP Guestbook app - -These are baseline figures for a stable system using only K3s packaged components (Traefik Ingress, Klipper lb, local-path storage) running a standard monitoring stack (Prometheus and Grafana) and the Guestbook example app. - -Resource figures including IOPS are for the Kubernetes datastore and control plane only, and do not include overhead for system-level management agents or logging, container image management, or any workload-specific requirements. - -# Methodology - -A standalone instance of Prometheus v2.21.0 was used to collect host CPU, memory, and disk IO statistics using `prometheus-node-exporter` installed via apt. - -`systemd-cgtop` was used to spot-check systemd cgroup-level CPU and memory utilization. `system.slice/k3s.service` tracks resource utilization for both K3s and containerd, while individual pods are under the `kubepods` hierarchy. - -Additional detailed K3s memory utilization data was collected from the `process_resident_memory_bytes` and `go_memstats_alloc_bytes` metrics using the kubelet exporter integrated into the server and agent processes. - -Utilization figures were based on 95th percentile readings from steady state operation on nodes running the described workloads. 
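As a rough illustration of the spot checks described above, the commands below sample cgroup-level usage for the K3s service and pull a couple of host metrics from `prometheus-node-exporter`; the exporter's default port 9100 is an assumption and may differ in your environment.

```bash
# Watch CPU/memory for K3s and containerd at the systemd cgroup level
systemd-cgtop system.slice/k3s.service

# Sample host memory and CPU counters from prometheus-node-exporter
# (assumes the exporter is listening on its default port, 9100)
curl -s http://localhost:9100/metrics | grep -E '^node_memory_MemAvailable_bytes|^node_cpu_seconds_total' | head
```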
- -# Environment - -OS: Ubuntu 20.04 x86_64, aarch64 - -Hardware: - -- AWS c5d.xlarge - 4 core, 8 GB RAM, NVME SSD -- Raspberry Pi 4 Model B - 4 core, 8 GB RAM, Class 10 SDHC - -# Baseline Resource Requirements - -This section captures the results of tests to determine minimum resource requirements for the K3s agent, the K3s server with a workload, and the K3s server with one agent. - -### K3s Server with a Workload - -These are the requirements for a single-node cluster in which the K3s server shares resources with a workload. - -The CPU requirements are: - -| Resource Requirement | Tested Processor | -|-----------|-----------------| -| 10% of a core | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | -| 20% of a core | Low-power processor such as Pi4B BCM2711, 1.50 GHz | - -The IOPS and memory requirements are: - -| Tested Datastore | IOPS | KiB/sec | Latency | RAM | -|-----------|------|---------|---------|--------| -| Kine/SQLite | 10 | 500 | < 10 ms | 768 M | -| Embedded etcd | 50 | 250 | < 5 ms | 896 M | - -### K3s Cluster with a Single Agent - -These are the baseline requirements for a K3s cluster with a K3s server node and a K3s agent, but no workload. - -The CPU requirements are: - -| Resource Requirement | Tested Processor | -|-----------|-----------------| -| 10% of a core | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | -| 20% of a core | Pi4B BCM2711, 1.50 GHz | - -The IOPS and memory requirements are: - -| Datastore | IOPS | KiB/sec | Latency | RAM | -|-----------|------|---------|---------|--------| -| Kine/SQLite | 10 | 500 | < 10 ms | 512 M | -| Embedded etcd | 50 | 250 | < 5 ms | 768 M | - -### K3s Agent - -The CPU requirements are: - - Resource Requirement | Tested Processor | -|-----------|-----------------| -| 5% of a core | Intel(R) Xeon(R) Platinum 8124M CPU, 3.00 GHz | -| 10% of a core | Pi4B BCM2711, 1.50 GHz | - -256 M of RAM is required. - -# Analysis - -This section captures what has the biggest impact on K3s server and agent utilization, and how the cluster datastore can be protected from interference from agents and workloads. - -### Primary Resource Utilization Drivers - -K3s server utilization figures are primarily driven by support of the Kubernetes datastore (kine or etcd), API Server, Controller-Manager, and Scheduler control loops, as well as any management tasks necessary to effect changes to the state of the system. Operations that place additional load on the Kubernetes control plane, such as creating/modifying/deleting resources, will cause temporary spikes in utilization. Using operators or apps that make extensive use of the Kubernetes datastore (such as Rancher or other Operator-type applications) will increase the server's resource requirements. Scaling up the cluster by adding additional nodes or creating many cluster resources will increase the server's resource requirements. - -K3s agent utilization figures are primarily driven by support of container lifecycle management control loops. Operations that involve managing images, provisioning storage, or creating/destroying containers will cause temporary spikes in utilization. Image pulls in particular are typically highly CPU and IO bound, as they involve decompressing image content to disk. If possible, workload storage (pod ephemeral storage and volumes) should be isolated from the agent components (/var/lib/rancher/k3s/agent) to ensure that there are no resource conflicts. 
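One hedged way to achieve that isolation is to mount a dedicated disk at the agent directory before installing K3s; the device name below is an assumption and must be adapted to your hardware.

```bash
# Format a spare disk and mount it at the agent directory so image pulls and
# workload IO use separate storage from the datastore (device name is an example)
sudo mkfs.ext4 /dev/nvme1n1
sudo mkdir -p /var/lib/rancher/k3s/agent
echo '/dev/nvme1n1 /var/lib/rancher/k3s/agent ext4 defaults 0 2' | sudo tee -a /etc/fstab
sudo mount /var/lib/rancher/k3s/agent
```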
- -### Preventing Agents and Workloads from Interfering with the Cluster Datastore - -When running in an environment where the server is also hosting workload pods, care should be taken to ensure that agent and workload IOPS do not interfere with the datastore. - -This can be best accomplished by placing the server components (/var/lib/rancher/k3s/server) on a different storage medium than the agent components (/var/lib/rancher/k3s/agent), which include the containerd image store. - -Workload storage (pod ephemeral storage and volumes) should also be isolated from the datastore. - -Failure to meet datastore throughput and latency requirements may result in delayed response from the control plane and/or failure of the control plane to maintain system state. \ No newline at end of file diff --git a/content/k3s/latest/en/installation/kube-dashboard/_index.md b/content/k3s/latest/en/installation/kube-dashboard/_index.md deleted file mode 100644 index 880a16c630..0000000000 --- a/content/k3s/latest/en/installation/kube-dashboard/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Kubernetes Dashboard" -weight: 60 ---- - -This installation guide will help you to deploy and configure the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) on K3s. - -### Deploying the Kubernetes Dashboard - -```bash -GITHUB_URL=https://github.com/kubernetes/dashboard/releases -VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||') -sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml -``` - -### Dashboard RBAC Configuration - -> **Important:** The `admin-user` created in this guide will have administrative privileges in the Dashboard. - -Create the following resource manifest files: - -`dashboard.admin-user.yml` -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: admin-user - namespace: kubernetes-dashboard -``` - -`dashboard.admin-user-role.yml` -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: admin-user -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: admin-user - namespace: kubernetes-dashboard -``` - -Deploy the `admin-user` configuration: - -```bash -sudo k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml -``` - -### Obtain the Bearer Token - -```bash -sudo k3s kubectl -n kubernetes-dashboard describe secret admin-user-token | grep '^token' -``` - -### Local Access to the Dashboard - -To access the Dashboard you must create a secure channel to your K3s cluster: - -```bash -sudo k3s kubectl proxy -``` - -The Dashboard is now accessible at: - -* http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ -* `Sign In` with the `admin-user` Bearer Token - -#### Advanced: Remote Access to the Dashboard - -Please see the Dashboard documentation: Using [Port Forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to Access Applications in a Cluster. 
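As a minimal sketch, the Dashboard can also be forwarded directly from its Service; the service name and port assume the defaults from the upstream `recommended.yaml` manifest.

```bash
# Forward the Dashboard service to a local port on the node
# (service name/port assume the upstream recommended.yaml defaults)
sudo k3s kubectl -n kubernetes-dashboard port-forward service/kubernetes-dashboard 8443:443 --address 0.0.0.0
# Then browse to https://<node-ip>:8443/ and sign in with the admin-user Bearer Token
```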
- -### Upgrading the Dashboard - -```bash -sudo k3s kubectl delete ns kubernetes-dashboard -GITHUB_URL=https://github.com/kubernetes/dashboard/releases -VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||') -sudo k3s kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml -``` - -### Deleting the Dashboard and admin-user configuration - -```bash -sudo k3s kubectl delete ns kubernetes-dashboard -sudo k3s kubectl delete clusterrolebinding kubernetes-dashboard -sudo k3s kubectl delete clusterrole kubernetes-dashboard -``` diff --git a/content/k3s/latest/en/installation/network-options/_index.md b/content/k3s/latest/en/installation/network-options/_index.md deleted file mode 100644 index 138a061871..0000000000 --- a/content/k3s/latest/en/installation/network-options/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Network Options" -weight: 25 ---- - -> **Note:** Please reference the [Networking]({{}}/k3s/latest/en/networking) page for information about CoreDNS, Traefik, and the Service LB. - -By default, K3s will run with flannel as the CNI, using VXLAN as the default backend. To change the CNI, refer to the section on configuring a [custom CNI](#custom-cni). To change the flannel backend, refer to the flannel options section. - -### Flannel Options - -The default backend for flannel is VXLAN. To enable encryption, pass the IPSec (Internet Protocol Security) or WireGuard options below. - -If you wish to use WireGuard as your flannel backend it may require additional kernel modules. Please see the [WireGuard Install Guide](https://www.wireguard.com/install/) for details. The WireGuard install steps will ensure the appropriate kernel modules are installed for your operating system. You need to install WireGuard on every node, both server and agents before attempting to leverage the WireGuard flannel backend option. - - CLI Flag and Value | Description - -------------------|------------ - `--flannel-backend=vxlan` | (Default) Uses the VXLAN backend. | - `--flannel-backend=ipsec` | Uses the IPSEC backend which encrypts network traffic. | - `--flannel-backend=host-gw` | Uses the host-gw backend. | - `--flannel-backend=wireguard` | Uses the WireGuard backend which encrypts network traffic. May require additional kernel modules and configuration. | - `--flannel-ipv6-masq` | Apply masquerading rules to IPv6 traffic (default for IPv4). Only applies on dual-stack or IPv6-only clusters | - -### Custom CNI - -Run K3s with `--flannel-backend=none` and install your CNI of choice. Most CNI plugins come with their own network policy engine, so it is recommended to set `--disable-network-policy` as well to avoid conflicts. IP Forwarding should be enabled for Canal and Calico. Please reference the steps below. - -{{% tabs %}} -{{% tab "Canal" %}} - -Visit the [Project Calico Docs](https://docs.projectcalico.org/) website. Follow the steps to install Canal. Modify the Canal YAML so that IP forwarding is allowed in the container_settings section, for example: - -``` -"container_settings": { - "allow_ip_forwarding": true - } -``` - -Apply the Canal YAML. 
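A hedged sketch of that workflow is shown below; the manifest URL is an assumption and may differ for your Calico/Canal release, so confirm it in the Project Calico docs.

```bash
# Download the Canal manifest (URL is an example; confirm it in the Calico docs),
# edit it to set "allow_ip_forwarding": true in container_settings, then apply it
curl -fsSL -o canal.yaml https://docs.projectcalico.org/manifests/canal.yaml
sudo k3s kubectl apply -f canal.yaml
```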
- -Ensure the settings were applied by running the following command on the host: - -``` -cat /etc/cni/net.d/10-canal.conflist -``` - -You should see that IP forwarding is set to true. - -{{% /tab %}} -{{% tab "Calico" %}} - -Follow the [Calico CNI Plugins Guide](https://docs.projectcalico.org/master/reference/cni-plugin/configuration). Modify the Calico YAML so that IP forwarding is allowed in the container_settings section, for example: - -``` -"container_settings": { - "allow_ip_forwarding": true - } -``` - -Apply the Calico YAML. - -Ensure the settings were applied by running the following command on the host: - -``` -cat /etc/cni/net.d/10-calico.conflist -``` - -You should see that IP forwarding is set to true. - - -{{% /tab %}} -{{% /tabs %}} - -### Dual-stack installation - -Dual-stack networking must be configured when the cluster is first created. It cannot be enabled on an existing single-stack cluster. - -Dual-stack is supported on k3s v1.21 or above. - -To enable dual-stack in K3s, you must provide valid dual-stack `cluster-cidr` and `service-cidr` on all server nodes. Both servers and agents must provide valid dual-stack `node-ip` settings. Node address auto-detection is not supported on dual-stack clusters, because kubelet fetches only the first IP address that it finds. Additionally, only vxlan backend is supported currently. This is an example of a valid configuration: - -``` -k3s server --node-ip 10.0.10.7,2a05:d012:c6f:4611:5c2:5602:eed2:898c --cluster-cidr 10.42.0.0/16,2001:cafe:42:0::/56 --service-cidr 10.43.0.0/16,2001:cafe:42:1::/112 -``` - -Note that you can choose whatever `cluster-cidr` and `service-cidr` value, however the `node-ip` values must correspond to the ip addresses of your main interface. Remember to allow ipv6 traffic if you are deploying in a public cloud. - -If you are using a custom cni plugin, i.e. a cni plugin different from flannel, the previous configuration might not be enough to enable dual-stack in the cni plugin. Please check how to enable dual-stack in its documentation and verify if network policies can be enabled. - -### IPv6 only installation - -IPv6 only setup is supported on k3s v1.22 or above - -> **Warning:** If your IPv6 default route is set by a router advertisement (RA), you will need to set `net.ipv6.conf.all.accept_ra = 2`; otherwise, the node will drop the default route once it expires. Be aware that accepting RAs could increase the risk of [man-in-the-middle attacks](https://github.com/kubernetes/kubernetes/issues/91507). diff --git a/content/k3s/latest/en/installation/private-registry/_index.md b/content/k3s/latest/en/installation/private-registry/_index.md deleted file mode 100644 index 523ae6c858..0000000000 --- a/content/k3s/latest/en/installation/private-registry/_index.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: "Private Registry Configuration" -weight: 55 ---- -_Available as of v1.0.0_ - -Containerd can be configured to connect to private registries and use them to pull private images on the node. - -Upon startup, K3s will check to see if a `registries.yaml` file exists at `/etc/rancher/k3s/` and instruct containerd to use any registries defined in the file. If you wish to use a private registry, then you will need to create this file as root on each node that will be using the registry. - -Note that server nodes are schedulable by default. 
If you have not tainted the server nodes and will be running workloads on them, please ensure you also create the `registries.yaml` file on each server as well. - -Configuration in containerd can be used to connect to a private registry with a TLS connection and with registries that enable authentication as well. The following section will explain the `registries.yaml` file and give different examples of using private registry configuration in K3s. - -# Registries Configuration File - -The file consists of two main sections: - -- mirrors -- configs - -### Mirrors - -Mirrors is a directive that defines the names and endpoints of the private registries, for example: - -``` -mirrors: - mycustomreg.com: - endpoint: - - "https://mycustomreg.com:5000" -``` - -Each mirror must have a name and set of endpoints. When pulling an image from a registry, containerd will try these endpoint URLs one by one, and use the first working one. - -#### Rewrites - -Each mirror can have a set of rewrites. Rewrites can change the tag of an image based on a regular expression. This is useful if the organization/project structure in the mirror registry is different to the upstream one. - -For example, the following configuration would transparently pull the image `docker.io/rancher/coredns-coredns:1.6.3` from `registry.example.com:5000/mirrorproject/rancher-images/coredns-coredns:1.6.3`: - -``` -mirrors: - docker.io: - endpoint: - - "https://registry.example.com:5000" - rewrite: - "^rancher/(.*)": "mirrorproject/rancher-images/$1" -``` - -The image will still be stored under the original name so that a `crictl image ls` will show `docker.io/rancher/coredns-coredns:1.6.3` as available on the node, even though the image was pulled from the mirrored registry with a different name. - -### Configs - -The `configs` section defines the TLS and credential configuration for each mirror. For each mirror you can define `auth` and/or `tls`. - -The `tls` part consists of: - -| Directive | Description | -|------------------------|--------------------------------------------------------------------------------------| -| `cert_file` | The client certificate path that will be used to authenticate with the registry | -| `key_file` | The client key path that will be used to authenticate with the registry | -| `ca_file` | Defines the CA certificate path to be used to verify the registry's server cert file | -| `insecure_skip_verify` | Boolean that defines if TLS verification should be skipped for the registry | - -The `auth` part consists of either username/password or authentication token: - -| Directive | Description | -|------------|---------------------------------------------------------| -| `username` | user name of the private registry basic auth | -| `password` | user password of the private registry basic auth | -| `auth` | authentication token of the private registry basic auth | - -Below are basic examples of using private registries in different modes: - -### With TLS - -Below are examples showing how you may configure `/etc/rancher/k3s/registries.yaml` on each node when using TLS. 
- -{{% tabs %}} -{{% tab "With Authentication" %}} - -``` -mirrors: - docker.io: - endpoint: - - "https://mycustomreg.com:5000" -configs: - "mycustomreg:5000": - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: # path to the cert file used in the registry - key_file: # path to the key file used in the registry - ca_file: # path to the ca file used in the registry -``` - -{{% /tab %}} -{{% tab "Without Authentication" %}} - -``` -mirrors: - docker.io: - endpoint: - - "https://mycustomreg.com:5000" -configs: - "mycustomreg:5000": - tls: - cert_file: # path to the cert file used in the registry - key_file: # path to the key file used in the registry - ca_file: # path to the ca file used in the registry -``` - -{{% /tab %}} -{{% /tabs %}} - -### Without TLS - -Below are examples showing how you may configure `/etc/rancher/k3s/registries.yaml` on each node when _not_ using TLS. - -{{% tabs %}} -{{% tab "With Authentication" %}} - -``` -mirrors: - docker.io: - endpoint: - - "http://mycustomreg.com:5000" -configs: - "mycustomreg:5000": - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password -``` - -{{% /tab %}} -{{% tab "Without Authentication" %}} - -``` -mirrors: - docker.io: - endpoint: - - "http://mycustomreg.com:5000" -``` - -{{% /tab %}} -{{% /tabs %}} - -> In case of no TLS communication, you need to specify `http://` for the endpoints, otherwise it will default to https. - -In order for the registry changes to take effect, you need to restart K3s on each node. - -# Adding Images to the Private Registry - -First, obtain the k3s-images.txt file from GitHub for the release you are working with. -Pull the K3s images listed on the k3s-images.txt file from docker.io - -Example: `docker pull docker.io/rancher/coredns-coredns:1.6.3` - -Then, retag the images to the private registry. - -Example: `docker tag coredns-coredns:1.6.3 mycustomreg:5000/coredns-coredns` - -Last, push the images to the private registry. - -Example: `docker push mycustomreg.com:5000/coredns-coredns` diff --git a/content/k3s/latest/en/installation/uninstall/_index.md b/content/k3s/latest/en/installation/uninstall/_index.md deleted file mode 100644 index f30b018029..0000000000 --- a/content/k3s/latest/en/installation/uninstall/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Uninstalling K3s -weight: 61 ---- - -If you installed K3s using the installation script, a script to uninstall K3s was generated during installation. - -> Uninstalling K3s deletes the cluster data and all of the scripts. To restart the cluster with different installation options, re-run the installation script with different flags. 
- -To uninstall K3s from a server node, run: - -``` -/usr/local/bin/k3s-uninstall.sh -``` - -To uninstall K3s from an agent node, run: - -``` -/usr/local/bin/k3s-agent-uninstall.sh -``` \ No newline at end of file diff --git a/content/k3s/latest/en/known-issues/_index.md b/content/k3s/latest/en/known-issues/_index.md deleted file mode 100644 index d12fafa2a5..0000000000 --- a/content/k3s/latest/en/known-issues/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Known Issues -weight: 70 ---- -The Known Issues are updated periodically and designed to inform you about any issues that may not be immediately addressed in the next upcoming release. - -**Snap Docker** - -If you plan to use K3s with docker, Docker installed via a snap package is not recommended as it has been known to cause issues running K3s. - -**Iptables** - -If you are running iptables in nftables mode instead of legacy you might encounter issues. We recommend utilizing newer iptables (such as 1.6.1+) to avoid issues. - -**Rootless Mode** - -Running K3s with Rootless mode is experimental and has several [known issues.]({{}}/k3s/latest/en/advanced/#known-issues-with-rootless-mode) diff --git a/content/k3s/latest/en/networking/_index.md b/content/k3s/latest/en/networking/_index.md deleted file mode 100644 index 2dff22e927..0000000000 --- a/content/k3s/latest/en/networking/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: "Networking" -weight: 35 ---- - -This page explains how CoreDNS, the Traefik Ingress controller, and Klipper service load balancer work within K3s. - -Refer to the [Installation Network Options]({{}}/k3s/latest/en/installation/network-options/) page for details on Flannel configuration options and backend selection, or how to set up your own CNI. - -For information on which ports need to be opened for K3s, refer to the [Installation Requirements.]({{}}/k3s/latest/en/installation/installation-requirements/#networking) - -- [CoreDNS](#coredns) -- [Traefik Ingress Controller](#traefik-ingress-controller) -- [Service Load Balancer](#service-load-balancer) - - [How the Service LB Works](#how-the-service-lb-works) - - [Usage](#usage) - - [Excluding the Service LB from Nodes](#excluding-the-service-lb-from-nodes) - - [Disabling the Service LB](#disabling-the-service-lb) -- [Nodes Without a Hostname](#nodes-without-a-hostname) - -# CoreDNS - -CoreDNS is deployed on start of the agent. To disable, run each server with the `--disable coredns` option. - -If you don't install CoreDNS, you will need to install a cluster DNS provider yourself. - -# Traefik Ingress Controller - -[Traefik](https://traefik.io/) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease. It simplifies networking complexity while designing, deploying, and running applications. - -Traefik is deployed by default when starting the server. For more information see [Auto Deploying Manifests]({{}}/k3s/latest/en/advanced/#auto-deploying-manifests). The default config file is found in `/var/lib/rancher/k3s/server/manifests/traefik.yaml`. - -The Traefik ingress controller will use ports 80 and 443 on the host (i.e. these will not be usable for HostPort or NodePort). - -The `traefik.yaml` file should not be edited manually, because k3s would overwrite it again once it is restarted. Instead you can customize Traefik by creating an additional `HelmChartConfig` manifest in `/var/lib/rancher/k3s/server/manifests`. 
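As a hedged sketch, a `HelmChartConfig` dropped into that directory might look like the following; the `valuesContent` keys depend on the Traefik Helm chart version packaged with your K3s release, so treat them as placeholders.

```bash
# Write a HelmChartConfig that overrides values of the packaged Traefik chart
# (the example value keys are placeholders; see the linked docs for real options)
cat <<'EOF' | sudo tee /var/lib/rancher/k3s/server/manifests/traefik-config.yaml
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: traefik
  namespace: kube-system
spec:
  valuesContent: |-
    logs:
      general:
        level: DEBUG
EOF
```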
For more details and an example see [Customizing Packaged Components with HelmChartConfig]({{}}/k3s/latest/en/helm/#customizing-packaged-components-with-helmchartconfig). For more information on the possible configuration values, refer to the official [Traefik Helm Configuration Parameters.](https://github.com/traefik/traefik-helm-chart/tree/master/traefik). - -To disable it, start each server with the `--disable traefik` option. - -If Traefik is not disabled K3s versions 1.20 and earlier will install Traefik v1, while K3s versions 1.21 and later will install Traefik v2 if v1 is not already present. - -To migrate from an older Traefik v1 instance please refer to the [Traefik documentation](https://doc.traefik.io/traefik/migration/v1-to-v2/) and [migration tool](https://github.com/traefik/traefik-migration-tool). - -# Service Load Balancer - -Any service load balancer (LB) can be leveraged in your Kubernetes cluster. K3s provides a load balancer known as [Klipper Load Balancer](https://github.com/k3s-io/klipper-lb) that uses available host ports. - -Upstream Kubernetes allows a Service of type LoadBalancer to be created, but doesn't include the implementation of the LB. Some LB services require a cloud provider such as Amazon EC2 or Microsoft Azure. By contrast, the K3s service LB makes it possible to use an LB service without a cloud provider. - -### How the Service LB Works - -K3s creates a controller that creates a Pod for the service load balancer, which is a Kubernetes object of kind [Service.](https://kubernetes.io/docs/concepts/services-networking/service/) - -For each service load balancer, a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) is created. The DaemonSet creates a pod with the `svc` prefix on each node. - -The Service LB controller listens for other Kubernetes Services. After it finds a Service, it creates a proxy Pod for the service using a DaemonSet on all of the nodes. This Pod becomes a proxy to the other Service, so that for example, requests coming to port 8000 on a node could be routed to your workload on port 8888. - -If the Service LB runs on a node that has an external IP, it uses the external IP. - -If multiple Services are created, a separate DaemonSet is created for each Service. - -It is possible to run multiple Services on the same node, as long as they use different ports. - -If you try to create a Service LB that listens on port 80, the Service LB will try to find a free host in the cluster for port 80. If no host with that port is available, the LB will stay in Pending. - -### Usage - -Create a [Service of type LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) in K3s. - -### Excluding the Service LB from Nodes - -To exclude nodes from using the Service LB, add the following label to the nodes that should not be excluded: - -``` -svccontroller.k3s.cattle.io/enablelb -``` - -If the label is used, the service load balancer only runs on the labeled nodes. - -### Disabling the Service LB - -To disable the embedded LB, run the server with the `--disable servicelb` option. - -This is necessary if you wish to run a different LB, such as MetalLB. 
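For example, when using the installation script, the flag can be passed through the script's `INSTALL_K3S_EXEC` variable (shown here as a minimal sketch):

```bash
# Install K3s with the embedded service LB disabled so another load balancer
# implementation (such as MetalLB) can handle LoadBalancer services
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --disable servicelb" sh -
```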
- -# Nodes Without a Hostname - -Some cloud providers, such as Linode, will create machines with "localhost" as the hostname and others may not have a hostname set at all. This can cause problems with domain name resolution. You can run K3s with the `--node-name` flag or `K3S_NODE_NAME` environment variable and this will pass the node name to resolve this issue. diff --git a/content/k3s/latest/en/quick-start/_index.md b/content/k3s/latest/en/quick-start/_index.md deleted file mode 100644 index 8ec057e36a..0000000000 --- a/content/k3s/latest/en/quick-start/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Quick-Start Guide" -weight: 10 ---- - -This guide will help you quickly launch a cluster with default options. The [installation section](../installation) covers in greater detail how K3s can be set up. - -For information on how K3s components work together, refer to the [architecture section.]({{}}/k3s/latest/en/architecture/#high-availability-with-an-external-db) - -> New to Kubernetes? The official Kubernetes docs already have some great tutorials outlining the basics [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/). - -Install Script --------------- -K3s provides an installation script that is a convenient way to install it as a service on systemd or openrc based systems. This script is available at https://get.k3s.io. To install K3s using this method, just run: -```bash -curl -sfL https://get.k3s.io | sh - -``` - -After running this installation: - -* The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed -* Additional utilities will be installed, including `kubectl`, `crictl`, `ctr`, `k3s-killall.sh`, and `k3s-uninstall.sh` -* A [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file will be written to `/etc/rancher/k3s/k3s.yaml` and the kubectl installed by K3s will automatically use it - -To install on worker nodes and add them to the cluster, run the installation script with the `K3S_URL` and `K3S_TOKEN` environment variables. Here is an example showing how to join a worker node: - -```bash -curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken sh - -``` -Setting the `K3S_URL` parameter causes K3s to run in worker mode. The K3s agent will register with the K3s server listening at the supplied URL. The value to use for `K3S_TOKEN` is stored at `/var/lib/rancher/k3s/server/node-token` on your server node. - -Note: Each machine must have a unique hostname. If your machines do not have unique hostnames, pass the `K3S_NODE_NAME` environment variable and provide a value with a valid and unique hostname for each node. diff --git a/content/k3s/latest/en/security/_index.md b/content/k3s/latest/en/security/_index.md deleted file mode 100644 index ba6ef7ccbd..0000000000 --- a/content/k3s/latest/en/security/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: "Security" -weight: 90 ---- - -This section describes the methodology and means of securing a K3s cluster. It's broken into 2 sections. These guides assume k3s is running with embedded etcd. - -The documents below apply to CIS Kubernetes Benchmark v1.6. 
- -* [Hardening Guide](./hardening_guide/) -* [CIS Benchmark Self-Assessment Guide](./self_assessment/) diff --git a/content/k3s/latest/en/security/hardening_guide/_index.md b/content/k3s/latest/en/security/hardening_guide/_index.md deleted file mode 100644 index e22571f30e..0000000000 --- a/content/k3s/latest/en/security/hardening_guide/_index.md +++ /dev/null @@ -1,677 +0,0 @@ ---- -title: "CIS Hardening Guide" -weight: 80 ---- - -This document provides prescriptive guidance for hardening a production installation of K3s. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS). - -K3s has a number of security mitigations applied and turned on by default and will pass a number of the Kubernetes CIS controls without modification. There are some notable exceptions to this that require manual intervention to fully comply with the CIS Benchmark: - -1. K3s will not modify the host operating system. Any host-level modifications will need to be done manually. -2. Certain CIS policy controls for `PodSecurityPolicies` and `NetworkPolicies` will restrict the functionality of the cluster. You must opt into having K3s configure these by adding the appropriate options (enabling of admission plugins) to your command-line flags or configuration file as well as manually applying appropriate policies. Further details are presented in the sections below. - -The first section (1.1) of the CIS Benchmark concerns itself primarily with pod manifest permissions and ownership. K3s doesn't utilize these for the core components since everything is packaged into a single binary. - -## Host-level Requirements - -There are two areas of host-level requirements: kernel parameters and etcd process/directory configuration. These are outlined in this section. - -### Ensure `protect-kernel-defaults` is set - -This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. - -> **Note:** `protect-kernel-defaults` is exposed as a top-level flag for K3s. - -#### Set kernel parameters - -Create a file called `/etc/sysctl.d/90-kubelet.conf` and add the snippet below. Then run `sysctl -p /etc/sysctl.d/90-kubelet.conf`. - -```bash -vm.panic_on_oom=0 -vm.overcommit_memory=1 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxbytes=25000000 -``` - -## Kubernetes Runtime Requirements - -The runtime requirements to comply with the CIS Benchmark are centered around pod security (PSPs), network policies and API Server auditing logs. These are outlined in this section. K3s doesn't apply any default PSPs or network policies. However, K3s ships with a controller that is meant to apply a given set of network policies. By default, K3s runs with the `NodeRestriction` admission controller. To enable PSPs, add the following to the K3s start command: `--kube-apiserver-arg="enable-admission-plugins=NodeRestriction,PodSecurityPolicy,ServiceAccount"`. This will have the effect of maintaining the `NodeRestriction` plugin as well as enabling the `PodSecurityPolicy`. The same happens with the API Server auditing logs, K3s doesn't enable them by default, so audit log configuration and audit policy must be created manually. - -### Pod Security Policies - -When PSPs are enabled, a policy can be applied to satisfy the necessary controls described in section 5.2 of the CIS Benchmark. - -Here is an example of a compliant PSP. 
- -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: restricted-psp -spec: - privileged: false # CIS - 5.2.1 - allowPrivilegeEscalation: false # CIS - 5.2.5 - requiredDropCapabilities: # CIS - 5.2.7/8/9 - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'csi' - - 'persistentVolumeClaim' - - 'ephemeral' - hostNetwork: false # CIS - 5.2.4 - hostIPC: false # CIS - 5.2.3 - hostPID: false # CIS - 5.2.2 - runAsUser: - rule: 'MustRunAsNonRoot' # CIS - 5.2.6 - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -``` - -For the above PSP to be effective, we need to create a ClusterRole and a ClusterRoleBinding. We also need to include a "system unrestricted policy" which is needed for system-level pods that require additional privileges. - -These can be combined with the PSP yaml above and NetworkPolicy yaml below into a single file and placed in the `/var/lib/rancher/k3s/server/manifests` directory. Below is an example of a `policy.yaml` file. - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: restricted-psp -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'csi' - - 'persistentVolumeClaim' - - 'ephemeral' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: psp:restricted-psp - labels: - addonmanager.kubernetes.io/mode: EnsureExists -rules: -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - restricted-psp ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: default:restricted-psp - labels: - addonmanager.kubernetes.io/mode: EnsureExists -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted-psp -subjects: -- kind: Group - name: system:authenticated - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: system-unrestricted-psp -spec: - allowPrivilegeEscalation: true - allowedCapabilities: - - '*' - fsGroup: - rule: RunAsAny - hostIPC: true - hostNetwork: true - hostPID: true - hostPorts: - - max: 65535 - min: 0 - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system-unrestricted-node-psp-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system-unrestricted-psp-role -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system-unrestricted-psp-role -rules: -- apiGroups: - - policy - resourceNames: - - system-unrestricted-psp - resources: - - podsecuritypolicies - verbs: - - use ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: 
system-unrestricted-svc-acct-psp-rolebinding - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system-unrestricted-psp-role -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts ---- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: intra-namespace - namespace: kube-system -spec: - podSelector: {} - ingress: - - from: - - namespaceSelector: - matchLabels: - name: kube-system ---- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: intra-namespace - namespace: default -spec: - podSelector: {} - ingress: - - from: - - namespaceSelector: - matchLabels: - name: default ---- -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: intra-namespace - namespace: kube-public -spec: - podSelector: {} - ingress: - - from: - - namespaceSelector: - matchLabels: - name: kube-public -``` - -> **Note:** The Kubernetes critical additions such as CNI, DNS, and Ingress are ran as pods in the `kube-system` namespace. Therefore, this namespace will have a policy that is less restrictive so that these components can run properly. - -### NetworkPolicies - -> NOTE: K3s deploys kube-router for network policy enforcement. Support for this in K3s is currently experimental. - -CIS requires that all namespaces have a network policy applied that reasonably limits traffic into namespaces and pods. - -Here is an example of a compliant network policy. - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: intra-namespace - namespace: kube-system -spec: - podSelector: {} - ingress: - - from: - - namespaceSelector: - matchLabels: - name: kube-system -``` - -With the applied restrictions, DNS will be blocked unless purposely allowed. Below is a network policy that will allow for traffic to exist for DNS. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-network-dns-policy - namespace: -spec: - ingress: - - ports: - - port: 53 - protocol: TCP - - port: 53 - protocol: UDP - podSelector: - matchLabels: - k8s-app: kube-dns - policyTypes: - - Ingress -``` - -The metrics-server and Traefik ingress controller will be blocked by default if network policies are not created to allow access. Traefik v1 as packaged in K3s version 1.20 and below uses different labels than Traefik v2. Ensure that you only use the sample yaml below that is associated with the version of Traefik present on your cluster. 
- -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-metrics-server - namespace: kube-system -spec: - podSelector: - matchLabels: - k8s-app: metrics-server - ingress: - - {} - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-svclbtraefik-ingress - namespace: kube-system -spec: - podSelector: - matchLabels: - app: svclb-traefik - ingress: - - {} - policyTypes: - - Ingress ---- -# Below is for 1.20 ONLY -- remove if on 1.21 or above -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-traefik-v120-ingress - namespace: kube-system -spec: - podSelector: - matchLabels: - app: traefik - ingress: - - {} - policyTypes: - - Ingress ---- -# Below is for 1.21 and above ONLY -- remove if on 1.20 or below -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-all-traefik-v121-ingress - namespace: kube-system -spec: - podSelector: - matchLabels: - app.kubernetes.io/name: traefik - ingress: - - {} - policyTypes: - - Ingress -``` - -> **Note:** Operators must manage network policies as normal for additional namespaces that are created. - -### API Server audit configuration - -CIS requirements 1.2.22 to 1.2.25 are related to configuring audit logs for the API Server. K3s doesn't create by default the log directory and audit policy, as auditing requirements are specific to each user's policies and environment. - -The log directory, ideally, must be created before starting K3s. A restrictive access permission is recommended to avoid leaking potential sensitive information. - -```bash -sudo mkdir -p -m 700 /var/lib/rancher/k3s/server/logs -``` - -A starter audit policy to log request metadata is provided below. The policy should be written to a file named `audit.yaml` in `/var/lib/rancher/k3s/server` directory. Detailed information about policy configuration for the API server can be found in the Kubernetes [documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/). - -```yaml -apiVersion: audit.k8s.io/v1 -kind: Policy -rules: -- level: Metadata -``` - -Both configurations must be passed as arguments to the API Server as: - -```bash ---kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' ---kube-apiserver-arg='audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' -``` - -If the configurations are created after K3s is installed, they must be added to K3s' systemd service in `/etc/systemd/system/k3s.service`. - -```bash -ExecStart=/usr/local/bin/k3s \ - server \ - '--kube-apiserver-arg=audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' \ - '--kube-apiserver-arg=audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' \ -``` - -K3s must be restarted to load the new configuration. - -```bash -sudo systemctl daemon-reload -sudo systemctl restart k3s.service -``` - -Additional information about CIS requirements 1.2.22 to 1.2.25 is presented below. - -## Known Issues -The following are controls that K3s currently does not pass by default. Each gap will be explained, along with a note clarifying whether it can be passed through manual operator intervention, or if it will be addressed in a future release of K3s. - -### Control 1.2.15 -Ensure that the admission control plugin `NamespaceLifecycle` is set. -
-Rationale -Setting the admission control policy to `NamespaceLifecycle` ensures that objects cannot be created in non-existent namespaces and that namespaces undergoing termination are not used for creating new objects. This is recommended to enforce the integrity of the namespace termination process and the availability of newly created objects. - -This can be remediated by adding `NamespaceLifecycle` to the `enable-admission-plugins=` value and passing that to the `--kube-apiserver-arg=` argument of `k3s server`. An example can be found below. -
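For instance, a minimal sketch (assuming only the K3s default `NodeRestriction` plugin otherwise needs to stay enabled; extend the list to match the plugins your cluster already uses):

```bash
# Sketch: add NamespaceLifecycle to the API server's enabled admission plugins.
k3s server \
  --kube-apiserver-arg='enable-admission-plugins=NodeRestriction,NamespaceLifecycle'
```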
- -### Control 1.2.16 -Ensure that the admission control plugin `PodSecurityPolicy` is set. -
-Rationale -A Pod Security Policy is a cluster-level resource that controls the actions a pod can perform and what it is able to access. `PodSecurityPolicy` objects define a set of conditions that a pod must run with in order to be accepted into the system. Pod Security Policies are composed of settings and strategies that control the security features a pod has access to, and hence must be used to control pod access permissions. - -This can be remediated by adding `PodSecurityPolicy` to the `enable-admission-plugins=` value and passing that to the `--kube-apiserver-arg=` argument of `k3s server`. An example can be found below. -
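A minimal sketch, again assuming `NodeRestriction` should remain in the plugin list:

```bash
# Sketch: add PodSecurityPolicy to the API server's enabled admission plugins.
k3s server \
  --kube-apiserver-arg='enable-admission-plugins=NodeRestriction,PodSecurityPolicy'
```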
- -### Control 1.2.22 -Ensure that the `--audit-log-path` argument is set. -
-Rationale -Auditing the Kubernetes API Server provides a security-relevant, chronological set of records documenting the sequence of activities that have affected the system, whether initiated by individual users, administrators, or other components of the system. Even though Kubernetes currently provides only basic audit capabilities, auditing should be enabled. You can enable it by setting an appropriate audit log path. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
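For instance, a sketch that reuses the audit log directory created in the audit configuration section above:

```bash
# Sketch: write API server audit logs to the previously created log directory.
k3s server \
  --kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log'
```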
- -### Control 1.2.23 -Ensure that the `--audit-log-maxage` argument is set to 30 or as appropriate. -
-Rationale -Retaining logs for at least 30 days ensures that you can go back in time and investigate or correlate any events. Set your audit log retention period to 30 days or as per your business requirements. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
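A sketch using the 30-day retention recommended by the control:

```bash
# Sketch: retain rotated audit logs for 30 days.
k3s server \
  --kube-apiserver-arg='audit-log-maxage=30'
```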
- -### Control 1.2.24 -Ensure that the `--audit-log-maxbackup` argument is set to 10 or as appropriate. -
-Rationale -Kubernetes automatically rotates the log files. Retaining old log files ensures that you have sufficient log data available for carrying out any investigation or correlation. For example, if you have set the file size to 100 MB and the number of old log files to keep to 10, you would have approximately 1 GB of log data that you could potentially use for your analysis. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
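A sketch using the value of 10 rotated files recommended by the control:

```bash
# Sketch: keep up to 10 rotated audit log files.
k3s server \
  --kube-apiserver-arg='audit-log-maxbackup=10'
```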
- -### Control 1.2.25 -Ensure that the `--audit-log-maxsize` argument is set to 100 or as appropriate. -
-Rationale -Kubernetes automatically rotates the log files. Retaining old log files ensures that you have sufficient log data available for carrying out any investigation or correlation. If you have set the file size to 100 MB and the number of old log files to keep to 10, you would have approximately 1 GB of log data that you could potentially use for your analysis. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
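A sketch using the 100 MB rotation size recommended by the control:

```bash
# Sketch: rotate the audit log once it reaches 100 MB.
k3s server \
  --kube-apiserver-arg='audit-log-maxsize=100'
```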
- -### Control 1.2.26 -Ensure that the `--request-timeout` argument is set as appropriate. -
-Rationale -Setting a global request timeout allows extending the API server request timeout limit to a duration appropriate to the user's connection speed. By default, it is set to 60 seconds, which might be problematic on slower connections, making cluster resources inaccessible once the data volume for requests exceeds what can be transmitted in 60 seconds. However, setting this timeout limit too high can exhaust API server resources, making it prone to a denial-of-service attack. Hence, it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
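A sketch that raises the timeout to 300 seconds; choose a value appropriate to your environment:

```bash
# Sketch: increase the API server request timeout from the 60-second default.
k3s server \
  --kube-apiserver-arg='request-timeout=300s'
```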
- -### Control 1.2.27 -Ensure that the `--service-account-lookup` argument is set to true. -
-Rationale -If `--service-account-lookup` is not enabled, the apiserver only verifies that the authentication token is valid and does not validate that the service account token mentioned in the request is actually present in etcd. This allows a service account token to be used even after the corresponding service account is deleted, which is an example of a time-of-check to time-of-use security issue. - -This can be remediated by passing this argument as a value to the `--kube-apiserver-arg=` argument to `k3s server`. An example can be found below. -
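A sketch that sets the flag explicitly:

```bash
# Sketch: force the API server to validate service account tokens against etcd.
k3s server \
  --kube-apiserver-arg='service-account-lookup=true'
```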
- -### Control 1.2.33 -Ensure that the `--encryption-provider-config` argument is set as appropriate. -
-Rationale -`etcd` is a highly available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. These objects are sensitive in nature and should be encrypted at rest to avoid any disclosures. - -Detailed steps on how to configure secrets encryption in K3s are available in [Secrets Encryption](../secrets_encryption/). -
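As a brief sketch, K3s can generate the encryption configuration and wire it up to the API server itself when started with its secrets encryption flag:

```bash
# Sketch: K3s generates an AES-CBC key and an encryption config file, and passes
# it to the API server as --encryption-provider-config automatically.
k3s server --secrets-encryption=true
```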
- -### Control 1.2.34 -Ensure that encryption providers are appropriately configured. -
-Rationale -Where `etcd` encryption is used, it is important to ensure that the appropriate set of encryption providers is used. Currently, the `aescbc`, `kms` and `secretbox` are likely to be appropriate options. - -This can be remediated by passing a valid configuration to `k3s` as outlined above. Detailed steps on how to configure secrets encryption in K3s are available in [Secrets Encryption](../secrets_encryption/). -
- -### Control 1.3.1 -Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate. -
-Rationale -Garbage collection is important to ensure sufficient resource availability and to avoid degraded performance and availability. In the worst case, the system might crash or be unusable for a long period of time. The current setting for garbage collection is 12,500 terminated pods, which might be too high for your system to sustain. Based on your system resources and tests, choose an appropriate threshold value to activate garbage collection. - -Because `--terminated-pod-gc-threshold` is a controller manager flag, this can be remediated by passing it as a value to the `--kube-controller-manager-arg=` argument to `k3s server`. An example can be found below. -
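A sketch matching the combined hardening command shown later in this guide:

```bash
# Sketch: lower the terminated-pod garbage collection threshold to 10 pods.
k3s server \
  --kube-controller-manager-arg='terminated-pod-gc-threshold=10'
```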
- -### Control 3.2.1 -Ensure that a minimal audit policy is created. -
-Rationale -Logging is an important detective control for all systems, to detect potential unauthorized access. - -This can be remediated by passing controls 1.2.22 - 1.2.25 and verifying their efficacy. -
- -### Control 4.2.7 -Ensure that the `--make-iptables-util-chains` argument is set to true. -
-Rationale -Kubelets can automatically manage the required changes to iptables based on how you choose your networking options for the pods. It is recommended to let kubelets manage the changes to iptables. This ensures that the iptables configuration remains in sync with the pod networking configuration. Manually configuring iptables against dynamic pod network configuration changes might hamper communication between pods/containers and with the outside world, and you might end up with iptables rules that are too restrictive or too open. - -Because `--make-iptables-util-chains` is a kubelet flag, this can be remediated by passing it as a value to the `--kubelet-arg=` argument to `k3s server`. An example can be found below. -
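A sketch, again taken from the combined hardening command at the end of this guide:

```bash
# Sketch: let the kubelet manage the iptables util chains.
k3s server \
  --kubelet-arg='make-iptables-util-chains=true'
```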
- -### Control 5.1.5 -Ensure that default service accounts are not actively used -
-Rationale -Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod. - -Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. - -The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -This can be remediated by updating the `automountServiceAccountToken` field to `false` for the `default` service account in each namespace. - -For `default` service accounts in the built-in namespaces (`kube-system`, `kube-public`, `kube-node-lease`, and `default`), K3s does not automatically do this. You can manually update this field on these service accounts to pass the control. -
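A minimal sketch of the manual remediation for one namespace (repeat for the other built-in namespaces; assumes `kubectl` access to the cluster):

```bash
# Sketch: prevent the default service account's token from being auto-mounted.
kubectl patch serviceaccount default -n default \
  -p '{"automountServiceAccountToken": false}'
```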
- -## Control Plane Execution and Arguments - -Listed below are the K3s control plane components and the arguments they are given at start, by default. Commented to their right is the CIS 1.6 control that they satisfy. - -```bash -kube-apiserver - --advertise-port=6443 - --allow-privileged=true - --anonymous-auth=false # 1.2.1 - --api-audiences=unknown - --authorization-mode=Node,RBAC - --bind-address=127.0.0.1 - --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs - --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt # 1.2.31 - --enable-admission-plugins=NodeRestriction,PodSecurityPolicy # 1.2.17 - --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt # 1.2.32 - --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt # 1.2.29 - --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key # 1.2.29 - --etcd-servers=https://127.0.0.1:2379 - --insecure-port=0 # 1.2.19 - --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt - --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt - --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key - --profiling=false # 1.2.21 - --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt - --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key - --requestheader-allowed-names=system:auth-proxy - --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt - --requestheader-extra-headers-prefix=X-Remote-Extra- - --requestheader-group-headers=X-Remote-Group - --requestheader-username-headers=X-Remote-User - --secure-port=6444 # 1.2.20 - --service-account-issuer=k3s - --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key # 1.2.28 - --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key - --service-cluster-ip-range=10.43.0.0/16 - --storage-backend=etcd3 - --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt # 1.2.30 - --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key # 1.2.30 -``` - -```bash -kube-controller-manager - --address=127.0.0.1 - --allocate-node-cidrs=true - --bind-address=127.0.0.1 # 1.3.7 - --cluster-cidr=10.42.0.0/16 - --cluster-signing-cert-file=/var/lib/rancher/k3s/server/tls/client-ca.crt - --cluster-signing-key-file=/var/lib/rancher/k3s/server/tls/client-ca.key - --kubeconfig=/var/lib/rancher/k3s/server/cred/controller.kubeconfig - --port=10252 - --profiling=false # 1.3.2 - --root-ca-file=/var/lib/rancher/k3s/server/tls/server-ca.crt # 1.3.5 - --secure-port=0 - --service-account-private-key-file=/var/lib/rancher/k3s/server/tls/service.key # 1.3.4 - --use-service-account-credentials=true # 1.3.3 -``` - -```bash -kube-scheduler - --address=127.0.0.1 - --bind-address=127.0.0.1 # 1.4.2 - --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig - --port=10251 - --profiling=false # 1.4.1 - --secure-port=0 -``` - -```bash -kubelet - --address=0.0.0.0 - --anonymous-auth=false # 4.2.1 - --authentication-token-webhook=true - --authorization-mode=Webhook # 4.2.2 - --cgroup-driver=cgroupfs - --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt # 4.2.3 - --cloud-provider=external - --cluster-dns=10.43.0.10 - --cluster-domain=cluster.local - --cni-bin-dir=/var/lib/rancher/k3s/data/223e6420f8db0d8828a8f5ed3c44489bb8eb47aa71485404f8af8c462a29bea3/bin - --cni-conf-dir=/var/lib/rancher/k3s/agent/etc/cni/net.d - 
--container-runtime-endpoint=/run/k3s/containerd/containerd.sock - --container-runtime=remote - --containerd=/run/k3s/containerd/containerd.sock - --eviction-hard=imagefs.available<5%,nodefs.available<5% - --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% - --fail-swap-on=false - --healthz-bind-address=127.0.0.1 - --hostname-override=hostname01 - --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig - --kubelet-cgroups=/systemd/system.slice - --node-labels= - --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests - --protect-kernel-defaults=true # 4.2.6 - --read-only-port=0 # 4.2.4 - --resolv-conf=/run/systemd/resolve/resolv.conf - --runtime-cgroups=/systemd/system.slice - --serialize-image-pulls=false - --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt # 4.2.10 - --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key # 4.2.10 -``` - -The command below is an example of how the outlined remediations can be applied to harden K3s. - -```bash -k3s server \ - --protect-kernel-defaults=true \ - --secrets-encryption=true \ - --kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log' \ - --kube-apiserver-arg='audit-policy-file=/var/lib/rancher/k3s/server/audit.yaml' \ - --kube-apiserver-arg='audit-log-maxage=30' \ - --kube-apiserver-arg='audit-log-maxbackup=10' \ - --kube-apiserver-arg='audit-log-maxsize=100' \ - --kube-apiserver-arg='request-timeout=300s' \ - --kube-apiserver-arg='service-account-lookup=true' \ - --kube-apiserver-arg='enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount' \ - --kube-controller-manager-arg='terminated-pod-gc-threshold=10' \ - --kube-controller-manager-arg='use-service-account-credentials=true' \ - --kubelet-arg='streaming-connection-idle-timeout=5m' \ - --kubelet-arg='make-iptables-util-chains=true' -``` - -## Conclusion - -If you have followed this guide, your K3s cluster will be configured to comply with the CIS Kubernetes Benchmark. You can review the [CIS Benchmark Self-Assessment Guide](../self_assessment/) to understand the expectations of each of the benchmark's checks and how you can do the same on your cluster. 
diff --git a/content/k3s/latest/en/security/secrets_encryption/_index.md b/content/k3s/latest/en/security/secrets_encryption/_index.md deleted file mode 100644 index a7491e2fb6..0000000000 --- a/content/k3s/latest/en/security/secrets_encryption/_index.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: Secrets Encryption -weight: 26 ---- - -# Secrets Encryption Config -_Available as of v1.17.4+k3s1_ - -K3s supports enabling secrets encryption at rest by passing the flag `--secrets-encryption` on a server; this flag will do the following automatically: - -- Generate an AES-CBC key -- Generate an encryption config file with the generated key -- Pass the config to the KubeAPI as encryption-provider-config - -Example of the encryption config file: -``` -{ - "kind": "EncryptionConfiguration", - "apiVersion": "apiserver.config.k8s.io/v1", - "resources": [ - { - "resources": [ - "secrets" - ], - "providers": [ - { - "aescbc": { - "keys": [ - { - "name": "aescbckey", - "secret": "xxxxxxxxxxxxxxxxxxx" - } - ] - } - }, - { - "identity": {} - } - ] - } - ] -} -``` - - -## Secrets Encryption Tool -_Available as of v1.21.8+k3s1_ - -K3s contains a utility tool `secrets-encrypt`, which enables automatic control over the following: - -- Disabling/Enabling secrets encryption -- Adding new encryption keys -- Rotating and deleting encryption keys -- Reencrypting secrets - ->**Warning:** Failure to follow proper procedure for rotating encryption keys can leave your cluster permanently corrupted. Proceed with caution. - -### Single-Server Encryption Key Rotation -To rotate secrets encryption keys on a single-node cluster: - -- Start the K3s server with the flag `--secrets-encryption` - ->**Note:** Starting K3s without encryption and enabling it at a later time is currently *not* supported. - -1. Prepare - - ``` - k3s secrets-encrypt prepare - ``` - -2. Kill and restart the K3s server with same arguments -3. Rotate - - ``` - k3s secrets-encrypt rotate - ``` - -4. Kill and restart the K3s server with same arguments -5. Reencrypt - - ``` - k3s secrets-encrypt reencrypt - ``` - -### High-Availability Encryption Key Rotation -The steps are the same for both embedded DB and external DB clusters. - -To rotate secrets encryption keys on HA setups: - ->**Notes:** -> -> - Starting K3s without encryption and enabling it at a later time is currently *not* supported. -> -> - While not required, it is recommended that you pick one server node from which to run the `secrets-encrypt` commands. - -- Start up all three K3s servers with the `--secrets-encryption` flag. For brevity, the servers will be referred to as S1, S2, S3. - -1. Prepare on S1 - - ``` - k3s secrets-encrypt prepare - ``` - -2. Kill and restart S1 with same arguments -3. Once S1 is up, kill and restart the S2 and S3 - -4. Rotate on S1 - - ``` - k3s secrets-encrypt rotate - ``` - -5. Kill and restart S1 with same arguments -6. Once S1 is up, kill and restart the S2 and S3 - -7. Reencrypt on S1 - - ``` - k3s secrets-encrypt reencrypt - ``` - -8. Kill and restart S1 with same arguments -9. Once S1 is up, kill and restart the S2 and S3 - -### Single-Server Secrets Encryption Disable/Enable -After launching a server with `--secrets-encryption` flag, secrets encryption can be disabled. - -To disable secrets encryption on a single-node cluster: - -1. Disable - - ``` - k3s secrets-encrypt disable - ``` - -2. Kill and restart the K3s server with same arguments - -3. 
Reencrypt with flags - - ``` - k3s secrets-encrypt reencrypt --force --skip - ``` - -To re-enable secrets encryption on a single node cluster: - -1. Enable - - ``` - k3s secrets-encrypt enable - ``` - -2. Kill and restart the K3s server with same arguments - -3. Reencrypt with flags - - ``` - k3s secrets-encrypt reencrypt --force --skip - ``` - -### High-Availability Secrets Encryption Disable/Enable -After launching a HA cluster with `--secrets-encryption` flags, secrets encryption can be disabled. ->**Note:** While not required, it is recommended that you pick one server node from which to run the `secrets-encrypt` commands. - -For brevity, the three servers used in this guide will be referred to as S1, S2, S3. - -To disable secrets encryption on a HA cluster: - -1. Disable on S1 - - ``` - k3s secrets-encrypt disable - ``` - -2. Kill and restart S1 with same arguments -3. Once S1 is up, kill and restart the S2 and S3 - - -4. Reencrypt with flags on S1 - - ``` - k3s secrets-encrypt reencrypt --force --skip - ``` - -To re-enable secrets encryption on a HA cluster: - -1. Enable on S1 - - ``` - k3s secrets-encrypt enable - ``` - -2. Kill and restart S1 with same arguments -3. Once S1 is up, kill and restart the S2 and S3 - -4. Reencrypt with flags on S1 - - ``` - k3s secrets-encrypt reencrypt --force --skip - ``` - - -### Secrets Encryption Status -The secrets-encrypt tool includes a `status` command that displays information about the current status of secrets encryption on the node. - -An example of the command on a single-server node: -``` -$ k3s secrets-encrypt status -Encryption Status: Enabled -Current Rotation Stage: start -Server Encryption Hashes: All hashes match - -Active Key Type Name ------- -------- ---- - * AES-CBC aescbckey - -``` - -Another example on HA cluster, after rotating the keys, but before restarting the servers: -``` -$ k3s secrets-encrypt status -Encryption Status: Enabled -Current Rotation Stage: rotate -Server Encryption Hashes: hash does not match between node-1 and node-2 - -Active Key Type Name ------- -------- ---- - * AES-CBC aescbckey-2021-12-10T22:54:38Z - AES-CBC aescbckey - -``` - -Details on each section are as follows: - -- __Encryption Status__: Displayed whether secrets encryption is disabled or enabled on the node -- __Current Rotation Stage__: Indicates the current rotation stage on the node. - Stages are: `start`, `prepare`, `rotate`, `reencrypt_request`, `reencrypt_active`, `reencrypt_finished` -- __Server Encryption Hashes__: Useful for HA clusters, this indicates whether all servers are on the same stage with their local files. This can be used to identify whether a restart of servers is required before proceeding to the next stage. In the HA example above, node-1 and node-2 have different hashes, indicating that they currently do not have the same encryption configuration. Restarting the servers will sync up their configuration. -- __Key Table__: Summarizes information about the secrets encryption keys found on the node. - * __Active__: The "*" indicates which, if any, of the keys are currently used for secrets encryption. An active key is used by Kubernetes to encrypt any new secrets. - * __Key Type__: All keys using this tool are `AES-CBC` type. See more info [here.](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers) - * __Name__: Name of the encryption key. 
\ No newline at end of file diff --git a/content/k3s/latest/en/security/self_assessment/_index.md b/content/k3s/latest/en/security/self_assessment/_index.md deleted file mode 100644 index 6471a95fb8..0000000000 --- a/content/k3s/latest/en/security/self_assessment/_index.md +++ /dev/null @@ -1,3078 +0,0 @@ ---- -title: CIS Self Assessment Guide -weight: 90 ---- - -### CIS Kubernetes Benchmark v1.6 - K3s with Kubernetes v1.17 to v1.21 - -#### Overview - -This document is a companion to the K3s security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers. - -This guide is specific to the **v1.17**, **v1.18**, **v1.19**, **v1.20** and **v1.21** release line of K3s and the **v1.6** release of the CIS Kubernetes Benchmark. - -For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide. - -Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing. - -These are the possible results for each control: - -- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark. -- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so. -- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed. - -This guide makes the assumption that K3s is running as a Systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. - -### Controls - ---- - -## 1.1 Master Node Configuration Files -### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the -master node. -For example, chmod 644 /etc/kubernetes/manifests/kube-apiserver.yaml - -### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml - -### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /etc/kubernetes/manifests/kube-controller-manager.yaml - -### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml - -### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /etc/kubernetes/manifests/kube-scheduler.yaml - -### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /etc/kubernetes/manifests/kube-scheduler.yaml - -### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /etc/kubernetes/manifests/etcd.yaml - -### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /etc/kubernetes/manifests/etcd.yaml - -### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 - -### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root - -### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). 
For example, -chmod 700 /var/lib/etcd - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 1.1.11 -``` - -**Expected Result**: - -```console -'700' is equal to '700' -``` - -**Returned Value**: - -```console -700 -``` - -### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - - -**Result:** Not Applicable - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). -For example, chown etcd:etcd /var/lib/etcd - -### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig - -### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown root:root /etc/kubernetes/admin.conf - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 scheduler - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c permissions=%a scheduler; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root scheduler - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c %U:%G scheduler; fi' -``` - -**Expected Result**: - -```console -'root:root' is not present -``` - -### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 controllermanager - -**Audit:** - -```bash -/bin/sh -c 'if test -e controllermanager; then stat -c permissions=%a controllermanager; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root controllermanager - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/k3s/server/tls -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown -R root:root /etc/kubernetes/pki/ - -**Audit:** - -```bash -find /etc/kubernetes/pki/ | xargs stat -c %U:%G -``` - -**Expected Result**: - -```console -'root:root' is not present -``` - -### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod -R 644 /etc/kubernetes/pki/*.crt - -**Audit:** - -```bash -stat -c %n %a /var/lib/rancher/k3s/server/tls/*.crt -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod -R 600 /etc/kubernetes/pki/*.key - -**Audit:** - -```bash -stat -c %n %a /var/lib/rancher/k3s/server/tls/*.key -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -## 1.2 API Server -### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---anonymous-auth=false - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' -``` - -### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --basic-auth-file= parameter. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'basic-auth-file' -``` - -**Expected Result**: - -```console -'--basic-auth-file' is not present -``` - -### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --token-auth-file= parameter. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'token-auth-file' -``` - -**Expected Result**: - -```console -'--token-auth-file' is not present -``` - -### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --kubelet-https parameter. - -### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the -kubelet client certificate and key parameters as below. ---kubelet-client-certificate= ---kubelet-client-key= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' -``` - -**Expected Result**: - -```console -'--kubelet-client-certificate' is not present AND '--kubelet-client-key' is not present -``` - -### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the ---kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. 
---kubelet-certificate-authority= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority' -``` - -**Expected Result**: - -```console -'--kubelet-certificate-authority' is not present -``` - -### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. -One such example could be as below. ---authorization-mode=RBAC - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' -``` - -**Expected Result**: - -```console -'--authorization-mode' is not present -``` - -### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes Node. ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' -``` - -**Expected Result**: - -```console -'--authorization-mode' is not present -``` - -### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes RBAC, -for example: ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' -``` - -**Expected Result**: - -```console -'--authorization-mode' is not present -``` - -### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set the desired limits in a configuration file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameters. ---enable-admission-plugins=...,EventRateLimit,... ---admission-control-config-file= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present -``` - -### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --enable-admission-plugins parameter, or set it to a -value that does not include AlwaysAdmit. 
- -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present OR '--enable-admission-plugins' is not present -``` - -### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -AlwaysPullImages. ---enable-admission-plugins=...,AlwaysPullImages,... - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present -``` - -### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -SecurityContextDeny, unless PodSecurityPolicy is already in place. ---enable-admission-plugins=...,SecurityContextDeny,... - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present OR '--enable-admission-plugins' is not present -``` - -### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and ensure that the --disable-admission-plugins parameter is set to a -value that does not include ServiceAccount. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount' -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --disable-admission-plugins parameter to -ensure it does not include NamespaceLifecycle. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'disable-admission-plugins' -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes PodSecurityPolicy: ---enable-admission-plugins=...,PodSecurityPolicy,... -Then restart the API Server. 
- -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present -``` - -### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes NodeRestriction. ---enable-admission-plugins=...,NodeRestriction,... - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins' -``` - -**Expected Result**: - -```console -'--enable-admission-plugins' is not present -``` - -### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --insecure-bind-address parameter. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'insecure-bind-address' -``` - -**Expected Result**: - -```console -'--insecure-bind-address' is not present -``` - -### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---insecure-port=0 - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'insecure-port' -``` - -**Expected Result**: - -```console -'--insecure-port' is not present -``` - -### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --secure-port parameter or -set it to a different (non-zero) desired port. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port' -``` - -**Expected Result**: - -```console -'--secure-port' is not present OR '--secure-port' is not present -``` - -### 1.2.21 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. 
---profiling=false - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling' -``` - -**Expected Result**: - -```console -'--profiling' is not present -``` - -### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-path parameter to a suitable path and -file where you would like audit logs to be written, for example: ---audit-log-path=/var/log/apiserver/audit.log - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path' -``` - -**Expected Result**: - -```console -'--audit-log-path' is not present -``` - -### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: ---audit-log-maxage=30 - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage' -``` - -**Expected Result**: - -```console -'--audit-log-maxage' is not present -``` - -### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate -value. ---audit-log-maxbackup=10 - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup' -``` - -**Expected Result**: - -```console -'--audit-log-maxbackup' is not present -``` - -### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. -For example, to set it as 100 MB: ---audit-log-maxsize=100 - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize' -``` - -**Expected Result**: - -```console -'--audit-log-maxsize' is not present -``` - -### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameter as appropriate and if needed. -For example, ---request-timeout=300s - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'request-timeout' -``` - -**Expected Result**: - -```console -'--request-timeout' is not present OR '--request-timeout' is not present -``` - -### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. 
---service-account-lookup=true -Alternatively, you can delete the --service-account-lookup parameter from this file so -that the default takes effect. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-lookup' -``` - -**Expected Result**: - -```console -'--service-account-lookup' is not present OR '--service-account-lookup' is not present -``` - -### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --service-account-key-file parameter -to the public key file for service accounts: ---service-account-key-file= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file' -``` - -**Expected Result**: - -```console -'--service-account-key-file' is not present -``` - -### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate and key file parameters. ---etcd-certfile= ---etcd-keyfile= - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 1.2.29 -``` - -**Expected Result**: - -```console -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -**Returned Value**: - -```console -Feb 21 
23:13:24 k3s[5223]: time="2022-02-21T23:13:24.847339487Z" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" -``` - -### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the TLS certificate and private key file parameters. 
---tls-cert-file= ---tls-private-key-file= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2 -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -Feb 21 23:13:24 k3s[5223]: time="2022-02-21T23:13:24.847339487Z" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" Feb 21 23:13:24 k3s[5223]: {"level":"info","ts":"2022-02-21T23:13:24.848Z","caller":"raft/raft.go:1530","msg":"b3656202b34887ca switched to configuration voters=(12926846069174208458)"} -``` - -### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the client certificate authority file. 
---client-ca-file= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file' -``` - -**Expected Result**: - -```console -'--client-ca-file' is not present -``` - -### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate authority file parameter. ---etcd-cafile= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile' -``` - -**Expected Result**: - -```console -'--etcd-cafile' is not present -``` - -### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Manual) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config' -``` - -**Expected Result**: - -```console -'--encryption-provider-config' is not present -``` - -### 1.2.34 Ensure that encryption providers are appropriately configured (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -In this file, choose aescbc, kms or secretbox as the encryption provider. - -**Audit:** - -```bash -grep aescbc /path/to/encryption-config.json -``` - -### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. 
---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
-
-**Audit:**
-
-```bash
-journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'
-```
-
-**Expected Result**:
-
-```console
-'--tls-cipher-suites' is not present
-```
-
-## 1.3 Controller Manager
-### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
-on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold,
-for example:
---terminated-pod-gc-threshold=10
-
-**Audit:**
-
-```bash
-journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'
-```
-
-**Expected Result**:
-
-```console
-'--terminated-pod-gc-threshold' is not present
-```
-
-### 1.3.2 Ensure that the --profiling argument is set to false (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
-on the master node and set the below parameter.
---profiling=false
-
-**Audit:**
-
-```bash
-journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'
-```
-
-**Expected Result**:
-
-```console
-'--profiling' is not present
-```
-
-### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
-on the master node to set the below parameter.
---use-service-account-credentials=true
-
-**Audit:**
-
-```bash
-journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'
-```
-
-**Expected Result**:
-
-```console
-'--use-service-account-credentials' is not present
-```
-
-### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
-on the master node and set the --service-account-private-key-file parameter
-to the private key file for service accounts.
---service-account-private-key-file=
-
-**Audit:**
-
-```bash
-journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'
-```
-
-**Expected Result**:
-
-```console
-'--service-account-private-key-file' is not present
-```
-
-### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
-on the master node and set the --root-ca-file parameter to the certificate bundle file.
---root-ca-file= - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file' -``` - -**Expected Result**: - -```console -'--root-ca-file' is not present -``` - -### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. ---feature-gates=RotateKubeletServerCertificate=true - -### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address' -``` - -**Expected Result**: - -```console -'--bind-address' is present OR '--bind-address' is not present -``` - -## 1.4 Scheduler -### 1.4.1 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -Feb 21 23:13:24 k3s[5223]: time="2022-02-21T23:13:24.851975832Z" level=info msg="Running kube-scheduler --address=127.0.0.1 --bind-address=127.0.0.1 --kubeconfig=/var/lib/rancher/k3s/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=0" -``` - -### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address' -``` - -**Expected Result**: - -```console -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration Files -### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml -on the master node and set the below parameters. 
---cert-file= ---key-file= - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.1 -``` - -**Expected Result**: - -```console -'cert-file' is present AND 'key-file' is present -``` - -**Returned Value**: - -```console -cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key -``` - -### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master -node and set the below parameter. 
---client-cert-auth="true" - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.2 -``` - -**Expected Result**: - -```console -'--client-cert-auth' is not present -``` - -**Returned Value**: - -```console -client-cert-auth: true -``` - -### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master -node and either remove the --auto-tls parameter or set it to false. 
- --auto-tls=false - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.3 -``` - -**Expected Result**: - -```console -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -**Returned Value**: - -```console -false -``` - -### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. -Then, edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the -master node and set the below parameters. 
---peer-client-file= ---peer-key-file= - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.4 -``` - -**Expected Result**: - -```console -'cert-file' is present AND 'key-file' is present -``` - -**Returned Value**: - -```console -cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key -``` - -### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master -node and set the below parameter. 
---peer-client-cert-auth=true - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.5 -``` - -**Expected Result**: - -```console -'--client-cert-auth' is not present -``` - -**Returned Value**: - -```console -client-cert-auth: true -``` - -### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the master -node and either remove the --peer-auto-tls parameter or set it to false. 
---peer-auto-tls=false - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.6 -``` - -**Expected Result**: - -```console -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -**Returned Value**: - -```console -false -``` - -### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) - - -**Result:** pass - -**Remediation:** -[Manual test] -Follow the etcd documentation and create a dedicated certificate authority setup for the -etcd service. -Then, edit the etcd pod specification file /var/lib/rancher/k3s/server/db/etcd/config on the -master node and set the below parameter. 
---trusted-ca-file= - -**Audit Script:** `check_for_k3s_etcd.sh` - -```bash -#!/bin/bash - -# This script is used to ensure that k3s is actually running etcd (and not other databases like sqlite3) -# before it checks the requirement -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - - -if [[ "$(journalctl -D /var/log/journal -u k3s | grep 'Managed etcd' | grep -v grep | wc -l)" -gt 0 ]]; then - case $1 in - "1.1.11") - echo $(stat -c %a /var/lib/rancher/k3s/server/db/etcd);; - "1.2.29") - echo $(journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-');; - "2.1") - echo $(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.2") - echo "$(grep -A 5 'client-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.3") - echo $(grep 'auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.4") - echo $(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep -E 'cert-file|key-file');; - "2.5") - echo "$(grep -A 5 'peer-transport-security' /var/lib/rancher/k3s/server/db/etcd/config | grep 'client-cert-auth')";; - "2.6") - echo $(grep 'peer-auto-tls' /var/lib/rancher/k3s/server/db/etcd/config);; - "2.7") - echo $(grep 'trusted-ca-file' /var/lib/rancher/k3s/server/db/etcd/config);; - esac -else -# If another database is running, return whatever is required to pass the scan - case $1 in - "1.1.11") - echo "700";; - "1.2.29") - echo "--etcd-certfile AND --etcd-keyfile";; - "2.1") - echo "cert-file AND key-file";; - "2.2") - echo "true";; - "2.3") - echo "false";; - "2.4") - echo "peer-cert-file AND peer-key-file";; - "2.5") - echo "true";; - "2.6") - echo "--peer-auto-tls=false";; - "2.7") - echo "--trusted-ca-file";; - esac -fi - -``` - -**Audit Execution:** - -```bash -./check_for_k3s_etcd.sh 2.7 -``` - -**Expected Result**: - -```console -'trusted-ca-file' is present -``` - -**Returned Value**: - -```console -trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt -``` - -## 3.1 Authentication and Authorization -### 3.1.1 Client certificate authentication should not be used for users (Manual) - - -**Result:** warn - -**Remediation:** -Alternative mechanisms provided by Kubernetes such as the use of OIDC should be -implemented in place of client certificates. - -## 3.2 Logging -### 3.2.1 Ensure that a minimal audit policy is created (Manual) - - -**Result:** warn - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file' -``` - -### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) - - -**Result:** warn - -**Remediation:** -Consider modification of the audit policy in use on the cluster to include these items, at a -minimum. - -## 4.1 Worker Node Configuration Files -### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. 
-For example, -chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig -``` - -**Expected Result**: - -```console -'permissions' is present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig -``` - -**Expected Result**: - -```console -'root:root' is not present OR '/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' is not present -``` - -**Returned Value**: - -```console -root:root -``` - -### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'644' is equal to '644' -``` - -**Returned Value**: - -```console -644 -``` - -### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig -``` - -### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) - - -**Result:** pass - -**Remediation:** -Run the following command to modify the file permissions of the ---client-ca-file chmod 644 - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt -``` - -**Expected Result**: - -```console -'644' is equal to '644' OR '640' is present OR '600' is present OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -**Returned Value**: - -```console -644 -``` - -### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Manual) - - -**Result:** warn - -**Remediation:** -Run the following command to modify the ownership of the --client-ca-file. 
-chown root:root - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt -``` - -### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chmod 644 /var/lib/kubelet/config.yaml - -### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chown root:root /var/lib/kubelet/config.yaml - -## 4.2 Kubelet -### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to -false. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---anonymous-auth=false -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -Feb 21 23:13:24 k3s[5223]: time="2022-02-21T23:13:24.847339487Z" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key 
--service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" -``` - -### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If -using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---authorization-mode=Webhook -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode' | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' not have 'AlwaysAllow' -``` - -**Returned Value**: - -```console -Feb 21 23:13:24 k3s[5223]: time="2022-02-21T23:13:24.847339487Z" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt 
--tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" -``` - -### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---client-ca-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver'| tail -n1 | grep 'client-ca-file' | grep -v grep -``` - -**Expected Result**: - -```console -'--client-ca-file' is present -``` - -**Returned Value**: - -```console -Feb 21 23:13:24 k3s[5223]: time="2022-02-21T23:13:24.847339487Z" level=info msg="Running kube-apiserver --advertise-port=6443 --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,k3s --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log --authorization-mode=Node,RBAC --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/k3s/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/k3s/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy,NamespaceLifecycle,ServiceAccount --encryption-provider-config=/var/lib/rancher/k3s/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/k3s/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/k3s/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/k3s/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/k3s/server/tls/client-auth-proxy.key --request-timeout=300s --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6444 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-account-lookup=true --service-account-signing-key-file=/var/lib/rancher/k3s/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key" -``` - -### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Manual) - - -**Result:** warn - -**Remediation:** -If using a Kubelet config file, edit the file to set readOnlyPort to 0. 
-If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---read-only-port=0 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' -``` - -### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual) - - -**Result:** warn - -**Remediation:** -If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a -value other than 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---streaming-connection-idle-timeout=5m -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout' -``` - -### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set protectKernelDefaults: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---protect-kernel-defaults=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults' -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -Feb 21 23:13:32 k3s[5223]: time="2022-02-21T23:13:32.581127632Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --cni-bin-dir=/var/lib/rancher/k3s/data/9de9bfcf367b723ef0ac73dd91761165a4a8ad11ad16a758d3a996264e60c612/bin --cni-conf-dir=/var/lib/rancher/k3s/agent/etc/cni/net.d --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels= --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" -``` - -### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) - - -**Result:** pass - 
-**Remediation:** -If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove the --make-iptables-util-chains argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains' -``` - -**Expected Result**: - -```console -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -**Returned Value**: - -```console -Feb 21 23:13:32 k3s[5223]: time="2022-02-21T23:13:32.581127632Z" level=info msg="Running kubelet --address=0.0.0.0 --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/k3s/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --cni-bin-dir=/var/lib/rancher/k3s/data/9de9bfcf367b723ef0ac73dd91761165a4a8ad11ad16a758d3a996264e60c612/bin --cni-conf-dir=/var/lib/rancher/k3s/agent/etc/cni/net.d --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/k3s/agent/kubelet.kubeconfig --make-iptables-util-chains=true --node-labels= --pod-manifest-path=/var/lib/rancher/k3s/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --streaming-connection-idle-timeout=5m --tls-cert-file=/var/lib/rancher/k3s/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/k3s/agent/serving-kubelet.key" -``` - -### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) - - -**Result:** Not Applicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and remove the --hostname-override argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) - - -**Result:** warn - -**Remediation:** -If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC containerd -``` - -### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) - - -**Result:** warn - -**Remediation:** -If using a Kubelet config file, edit the file to set tlsCertFile to the location -of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile -to the location of the corresponding private key file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameters in KUBELET_CERTIFICATE_ARGS variable. ---tls-cert-file= ---tls-private-key-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 -``` - -### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) - - -**Result:** Not Applicable - -**Remediation:** -If using a Kubelet config file, edit the file to add the line rotateCertificates: true or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS -variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) - - -**Result:** Not Applicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. ---feature-gates=RotateKubeletServerCertificate=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) - - -**Result:** warn - -**Remediation:** -If using a Kubelet config file, edit the file to set TLSCipherSuites: to -TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -or to a subset of these values. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the --tls-cipher-suites parameter as follows, or to a subset of these values. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC containerd -``` - -## 5.1 RBAC and Service Accounts -### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) - - -**Result:** warn - -**Remediation:** -Identify all clusterrolebindings to the cluster-admin role. Check if they are used and -if they need this role or if they could use a role with fewer privileges. -Where possible, first bind users to a lower privileged role and then remove the -clusterrolebinding to the cluster-admin role : -kubectl delete clusterrolebinding [name] - -### 5.1.2 Minimize access to secrets (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove get, list and watch access to secret objects in the cluster. - -### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) - - -**Result:** warn - -**Remediation:** -Where possible replace any use of wildcards in clusterroles and roles with specific -objects or actions. - -### 5.1.4 Minimize access to create pods (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove create access to pod objects in the cluster. - -### 5.1.5 Ensure that default service accounts are not actively used. (Manual) - - -**Result:** warn - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value -automountServiceAccountToken: false - -### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) - - -**Result:** warn - -**Remediation:** -Modify the definition of pods and service accounts which do not need to mount service -account tokens to disable it. - -## 5.2 Pod Security Policies -### 5.2.1 Minimize the admission of privileged containers (Manual) - - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that -the .spec.privileged field is omitted or set to false. - -**Audit:** - -```bash -kubectl describe psp global-restricted-psp | grep MustRunAsNonRoot -``` - -### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostPID field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostIPC field is omitted or set to false. 
- -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostNetwork field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.allowPrivilegeEscalation field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.6 Minimize the admission of root containers (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of -UIDs not including 0. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) - - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - -**Audit:** - -```bash -kubectl get psp -``` - -### 5.2.8 Minimize the admission of containers with added capabilities (Manual) - - -**Result:** warn - -**Remediation:** -Ensure that allowedCapabilities is not present in PSPs for the cluster unless -it is set to an empty array. - -### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) - - -**Result:** warn - -**Remediation:** -Review the use of capabilites in applications runnning on your cluster. Where a namespace -contains applicaions which do not require any Linux capabities to operate consider adding -a PSP which forbids the admission of containers which do not drop all capabilities. - -## 5.3 Network Policies and CNI -### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) - - -**Result:** warn - -**Remediation:** -If the CNI plugin in use does not support network policies, consideration should be given to -making use of a different plugin, or finding an alternate mechanism for restricting traffic -in the Kubernetes cluster. 
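As a quick functional spot check, separate from the benchmark's own audit commands, you could apply a default-deny policy in a throwaway namespace and confirm that the CNI in use actually enforces it. This is only a sketch; the namespace name below is an example.

```bash
# Create a scratch namespace with a default-deny ingress NetworkPolicy (example namespace name).
kubectl create namespace np-check
kubectl apply -n np-check -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes:
    - Ingress
EOF
# With an enforcing CNI, pods created in np-check should now reject all inbound traffic.
kubectl delete namespace np-check
```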
- -### 5.3.2 Ensure that all Namespaces have Network Policies defined (Manual) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create NetworkPolicy objects as you need them. - -**Audit Script:** `check_for_rke2_network_policies.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -for namespace in kube-system kube-public default; do - policy_count=$(/var/lib/rancher/rke2/bin/kubectl get networkpolicy -n ${namespace} -o json | jq -r '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "false" - exit - fi -done - -echo "true" - -``` - -**Audit Execution:** - -```bash -./check_for_rke2_network_policies.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -## 5.4 Secrets Management -### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) - - -**Result:** warn - -**Remediation:** -if possible, rewrite application code to read secrets from mounted secret files, rather than -from environment variables. - -**Audit:** - -```bash -kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {' '}{end}' -A -``` - -### 5.4.2 Consider external secret storage (Manual) - - -**Result:** warn - -**Remediation:** -Refer to the secrets management options offered by your cloud provider or a third-party -secrets management solution. - -## 5.5 Extensible Admission Control -### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and setup image provenance. - -## 5.7 General Policies -### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) - - -**Result:** warn - -**Remediation:** -Follow the documentation and create namespaces for objects in your deployment as you need -them. - -### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) - - -**Result:** warn - -**Remediation:** -Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you -would need to enable alpha features in the apiserver by passing "--feature- -gates=AllAlpha=true" argument. -Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS -parameter to "--feature-gates=AllAlpha=true" -KUBE_API_ARGS="--feature-gates=AllAlpha=true" -Based on your system, restart the kube-apiserver service. For example: -systemctl restart kube-apiserver.service -Use annotations to enable the docker/default seccomp profile in your pod definitions. An -example is as below: -apiVersion: v1 -kind: Pod -metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default -spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - -### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and apply security contexts to your pods. For a -suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker -Containers. - -### 5.7.4 The default namespace should not be used (Manual) - - -**Result:** pass - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. 
- -**Audit:** - -```bash -kubectl get all --no-headers -n default | grep -v service | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -'0' is equal to '0' -``` - -**Returned Value**: - -```console ---count=0 -``` diff --git a/content/k3s/latest/en/storage/_index.md b/content/k3s/latest/en/storage/_index.md deleted file mode 100644 index 883128ee11..0000000000 --- a/content/k3s/latest/en/storage/_index.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: "Volumes and Storage" -weight: 30 ---- - -When deploying an application that needs to retain data, you’ll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application’s pod fails. - -A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. For details on how PVs and PVCs work, refer to the official Kubernetes documentation on [storage.](https://kubernetes.io/docs/concepts/storage/volumes/) - -This page describes how to set up persistent storage with a local storage provider, or with [Longhorn.](#setting-up-longhorn) - -# What's changed in K3s storage? - -K3s removes several optional volume plugins and all built-in (sometimes referred to as "in-tree") cloud providers. We do this in order to achieve a smaller binary size and to avoid dependence on third-party cloud or data center technologies and services, which may not be available in many K3s use cases. We are able to do this because their removal affects neither core Kubernetes functionality nor conformance. - -The following volume plugins have been removed from K3s: - -* cephfs -* fc -* flocker -* git_repo -* glusterfs -* portworx -* quobyte -* rbd -* storageos - -Both components have out-of-tree alternatives that can be used with K3s: The Kubernetes [Container Storage Interface (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) and [Cloud Provider Interface (CPI)](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). - -Kubernetes maintainers are actively migrating in-tree volume plugins to CSI drivers. For more information on this migration, please refer [here](https://kubernetes.io/blog/2021/12/10/storage-in-tree-to-csi-migration-status-update/). - -# Setting up the Local Storage Provider -K3s comes with Rancher's Local Path Provisioner and this enables the ability to create persistent volume claims out of the box using local storage on the respective node. Below we cover a simple example. For more information please reference the official documentation [here](https://github.com/rancher/local-path-provisioner/blob/master/README.md#usage). 
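Before creating the claim, it can help to confirm that the bundled provisioner is running and that the `local-path` StorageClass exists. This is only a quick sanity check and assumes a default K3s installation.

```bash
# Verify the local-path provisioner pod and its StorageClass (default K3s install assumed).
kubectl -n kube-system get pods | grep local-path-provisioner
kubectl get storageclass local-path
```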
- -Create a hostPath backed persistent volume claim and a pod to utilize it: - -### pvc.yaml - -``` -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: local-path-pvc - namespace: default -spec: - accessModes: - - ReadWriteOnce - storageClassName: local-path - resources: - requests: - storage: 2Gi -``` - -### pod.yaml - -``` -apiVersion: v1 -kind: Pod -metadata: - name: volume-test - namespace: default -spec: - containers: - - name: volume-test - image: nginx:stable-alpine - imagePullPolicy: IfNotPresent - volumeMounts: - - name: volv - mountPath: /data - ports: - - containerPort: 80 - volumes: - - name: volv - persistentVolumeClaim: - claimName: local-path-pvc -``` - -Apply the yaml: - -``` -kubectl create -f pvc.yaml -kubectl create -f pod.yaml -``` - -Confirm the PV and PVC are created: - -``` -kubectl get pv -kubectl get pvc -``` - -The status should be Bound for each. - -# Setting up Longhorn - -[comment]: <> (pending change - longhorn may support arm64 and armhf in the future.) - -> **Note:** At this time Longhorn only supports amd64 and arm64 (experimental). - -K3s supports [Longhorn](https://github.com/longhorn/longhorn). Longhorn is an open-source distributed block storage system for Kubernetes. - -Below we cover a simple example. For more information, refer to the official documentation [here](https://github.com/longhorn/longhorn/blob/master/README.md). - -Apply the longhorn.yaml to install Longhorn: - -``` -kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml -``` - -Longhorn will be installed in the namespace `longhorn-system`. - -Apply the yaml to create the PVC and pod: - -``` -kubectl create -f pvc.yaml -kubectl create -f pod.yaml -``` - -### pvc.yaml - -``` -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: longhorn-volv-pvc -spec: - accessModes: - - ReadWriteOnce - storageClassName: longhorn - resources: - requests: - storage: 2Gi -``` - -### pod.yaml - -``` -apiVersion: v1 -kind: Pod -metadata: - name: volume-test - namespace: default -spec: - containers: - - name: volume-test - image: nginx:stable-alpine - imagePullPolicy: IfNotPresent - volumeMounts: - - name: volv - mountPath: /data - ports: - - containerPort: 80 - volumes: - - name: volv - persistentVolumeClaim: - claimName: longhorn-volv-pvc -``` - -Confirm the PV and PVC are created: - -``` -kubectl get pv -kubectl get pvc -``` - -The status should be Bound for each. diff --git a/content/k3s/latest/en/upgrades/_index.md b/content/k3s/latest/en/upgrades/_index.md deleted file mode 100644 index fad0975985..0000000000 --- a/content/k3s/latest/en/upgrades/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "Upgrades" -weight: 25 ---- - -### Upgrading your K3s cluster - -[Upgrade basics]({{< baseurl >}}/k3s/latest/en/upgrades/basic/) describes several techniques for upgrading your cluster manually. It can also be used as a basis for upgrading through third-party Infrastructure-as-Code tools like [Terraform](https://www.terraform.io/). - -[Automated upgrades]({{< baseurl >}}/k3s/latest/en/upgrades/automated/) describes how to perform Kubernetes-native automated upgrades using Rancher's [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller). 
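Whichever approach you choose, it is worth recording the versions you are starting from before planning an upgrade, for example:

```bash
# Note the currently installed K3s version and what each node reports.
k3s --version
kubectl get nodes -o wide
```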
- -### Version-specific caveats - -- **Traefik:** If Traefik is not disabled, K3s versions 1.20 and earlier will install Traefik v1, while K3s versions 1.21 and later will install Traefik v2, if v1 is not already present. To upgrade from the older Traefik v1 to Traefik v2, please refer to the [Traefik documentation](https://doc.traefik.io/traefik/migration/v1-to-v2/) and use the [migration tool](https://github.com/traefik/traefik-migration-tool). - -- **K3s bootstrap data:** If you are using K3s in an HA configuration with an external SQL datastore, and your server (control-plane) nodes were not started with the `--token` CLI flag, you will no longer be able to add additional K3s servers to the cluster without specifying the token. Ensure that you retain a copy of this token, as it is required when restoring from backup. Previously, K3s did not enforce the use of a token when using external SQL datastores. - - The affected versions are <= v1.19.12+k3s1, v1.20.8+k3s1, v1.21.2+k3s1; the patched versions are v1.19.13+k3s1, v1.20.9+k3s1, v1.21.3+k3s1. - - - You may retrieve the token value from any server already joined to the cluster as follows: -``` -cat /var/lib/rancher/k3s/server/token -``` - -- **Experimental Dqlite:** The experimental embedded Dqlite data store was deprecated in K3s v1.19.1. Please note that upgrades from experimental Dqlite to experimental embedded etcd are not supported. If you attempt an upgrade, it will not succeed, and data will be lost. diff --git a/content/k3s/latest/en/upgrades/automated/_index.md b/content/k3s/latest/en/upgrades/automated/_index.md deleted file mode 100644 index f95da7de0e..0000000000 --- a/content/k3s/latest/en/upgrades/automated/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "Automated Upgrades" -weight: 20 ---- - ->**Note:** This feature is available as of [v1.17.4+k3s1](https://github.com/rancher/k3s/releases/tag/v1.17.4%2Bk3s1) - -### Overview - -You can manage K3s cluster upgrades using Rancher's system-upgrade-controller. This is a Kubernetes-native approach to cluster upgrades. It leverages a [custom resource definition (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#custom-resources), the `plan`, and a [controller](https://kubernetes.io/docs/concepts/architecture/controller/) that schedules upgrades based on the configured plans. - -A plan defines upgrade policies and requirements. This documentation will provide plans with defaults appropriate for upgrading a K3s cluster. For more advanced plan configuration options, please review the [CRD](https://github.com/rancher/system-upgrade-controller/blob/master/pkg/apis/upgrade.cattle.io/v1/types.go). - -The controller schedules upgrades by monitoring plans and selecting nodes to run upgrade [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) on. A plan defines which nodes should be upgraded through a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). When a job has run to completion successfully, the controller will label the node on which it ran accordingly. - ->**Note:** The upgrade job that is launched must be highly privileged. 
It is configured with the following: -> -- Host `IPC`, `NET`, and `PID` namespaces -- The `CAP_SYS_BOOT` capability -- Host root mounted at `/host` with read and write permissions - -For more details on the design and architecture of the system-upgrade-controller or its integration with K3s, see the following Git repositories: - -- [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) -- [k3s-upgrade](https://github.com/rancher/k3s-upgrade) - -To automate upgrades in this manner, you must do the following: - -1. Install the system-upgrade-controller into your cluster -1. Configure plans - ->**Note:** Users can and should use Rancher to upgrade their K3s cluster if Rancher is managing it. -> -> * If you choose to use Rancher to upgrade, the following steps below are taken care of for you. -> * If you choose not to use Rancher to upgrade, you must use the following steps below to do so. - - -### Install the system-upgrade-controller - The system-upgrade-controller can be installed as a deployment into your cluster. The deployment requires a service-account, clusterRoleBinding, and a configmap. To install these components, run the following command: -``` -kubectl apply -f https://github.com/rancher/system-upgrade-controller/releases/latest/download/system-upgrade-controller.yaml -``` -The controller can be configured and customized via the previously mentioned configmap, but the controller must be redeployed for the changes to be applied. - - -### Configure plans -It is recommended that you minimally create two plans: a plan for upgrading server (master) nodes and a plan for upgrading agent (worker) nodes. As needed, you can create additional plans to control the rollout of the upgrade across nodes. The following two example plans will upgrade your cluster to K3s v1.17.4+k3s1. Once the plans are created, the controller will pick them up and begin to upgrade your cluster. -``` -# Server plan -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: server-plan - namespace: system-upgrade -spec: - concurrency: 1 - cordon: true - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/master - operator: In - values: - - "true" - serviceAccountName: system-upgrade - upgrade: - image: rancher/k3s-upgrade - version: v1.17.4+k3s1 ---- -# Agent plan -apiVersion: upgrade.cattle.io/v1 -kind: Plan -metadata: - name: agent-plan - namespace: system-upgrade -spec: - concurrency: 1 - cordon: true - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/master - operator: DoesNotExist - prepare: - args: - - prepare - - server-plan - image: rancher/k3s-upgrade - serviceAccountName: system-upgrade - upgrade: - image: rancher/k3s-upgrade - version: v1.17.4+k3s1 -``` -There are a few important things to call out regarding these plans: - -First, the plans must be created in the same namespace where the controller was deployed. - -Second, the `concurrency` field indicates how many nodes can be upgraded at the same time. - -Third, the server-plan targets server nodes by specifying a label selector that selects nodes with the `node-role.kubernetes.io/master` label. The agent-plan targets agent nodes by specifying a label selector that select nodes without that label. - -Fourth, the `prepare` step in the agent-plan will cause upgrade jobs for that plan to wait for the server-plan to complete before they execute. 
- -Fifth, both plans have the `version` field set to v1.17.4+k3s1. Alternatively, you can omit the `version` field and set the `channel` field to a URL that resolves to a release of K3s. This will cause the controller to monitor that URL and upgrade the cluster any time it resolves to a new release. This works well with the [release channels]({{< baseurl >}}/k3s/latest/en/upgrades/basic/#release-channels). Thus, you can configure your plans with the following channel to ensure your cluster is always automatically upgraded to the newest stable release of K3s: -``` -apiVersion: upgrade.cattle.io/v1 -kind: Plan -... -spec: - ... - channel: https://update.k3s.io/v1-release/channels/stable - -``` - -As stated, the upgrade will begin as soon as the controller detects that a plan was created. Updating a plan will cause the controller to re-evaluate the plan and determine if another upgrade is needed. - -You can monitor the progress of an upgrade by viewing the plan and jobs via kubectl: -``` -kubectl -n system-upgrade get plans -o yaml -kubectl -n system-upgrade get jobs -o yaml -``` - diff --git a/content/k3s/latest/en/upgrades/basic/_index.md b/content/k3s/latest/en/upgrades/basic/_index.md deleted file mode 100644 index d4f1fb8ebd..0000000000 --- a/content/k3s/latest/en/upgrades/basic/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Upgrade Basics" -weight: 10 ---- - -You can upgrade K3s by using the installation script, or by manually installing the binary of the desired version. - ->**Note:** When upgrading, upgrade server nodes first one at a time, then any worker nodes. - -### Release Channels - -Upgrades performed via the installation script or using our [automated upgrades]({{< baseurl >}}/k3s/latest/en/upgrades/automated/) feature can be tied to different release channels. The following channels are available: - -| Channel | Description | -|---------------|---------| -| stable | (Default) Stable is recommended for production environments. These releases have been through a period of community hardening. | -| latest | Latest is recommended for trying out the latest features. These releases have not yet been through a period of community hardening. | -| v1.18 (example) | There is a release channel tied to each supported Kubernetes minor version. At the time of this writing, they are `v1.18`, `v1.17`, and `v1.16`. These channels will select the latest patch available, not necessarily a stable release. | - -For an exhaustive and up-to-date list of channels, you can visit the [k3s channel service API](https://update.k3s.io/v1-release/channels). For more technical details on how channels work, you see the [channelserver project](https://github.com/rancher/channelserver). - -### Upgrade K3s Using the Installation Script - -To upgrade K3s from an older version you can re-run the installation script using the same flags, for example: - -```sh -curl -sfL https://get.k3s.io | sh - -``` -This will upgrade to a newer version in the stable channel by default. 
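If you want to know which release the stable channel currently resolves to before re-running the script, you can query the channel service. This is only a sketch and assumes the channel URL answers with an HTTP redirect to the matching release.

```bash
# Print the release URL that the stable channel currently redirects to (redirect behaviour assumed).
curl -sIL -o /dev/null -w '%{url_effective}\n' https://update.k3s.io/v1-release/channels/stable
```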
- -If you want to upgrade to a newer version in a specific channel (such as latest) you can specify the channel: -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh - -``` - -If you want to upgrade to a specific version you can run the following command: - -```sh -curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh - -``` - -### Manually Upgrade K3s Using the Binary - -Or to manually upgrade K3s: - -1. Download the desired version of the K3s binary from [releases](https://github.com/rancher/k3s/releases) -2. Copy the downloaded binary to `/usr/local/bin/k3s` (or your desired location) -3. Stop the old k3s binary -4. Launch the new k3s binary - -### Restarting K3s - -Restarting K3s is supported by the installation script for systemd and OpenRC. - -**systemd** - -To restart servers manually: -```sh -sudo systemctl restart k3s -``` - -To restart agents manually: -```sh -sudo systemctl restart k3s-agent -``` - -**OpenRC** - -To restart servers manually: -```sh -sudo service k3s restart -``` - -To restart agents manually: -```sh -sudo service k3s-agent restart -``` diff --git a/content/k3s/latest/en/upgrades/killall/_index.md b/content/k3s/latest/en/upgrades/killall/_index.md deleted file mode 100644 index dabe4cde27..0000000000 --- a/content/k3s/latest/en/upgrades/killall/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: The k3s-killall.sh script -weight: 4 ---- - -To allow high availability during upgrades, the K3s containers continue running when the K3s service is stopped. - -To stop all of the K3s containers and reset the containerd state, the `k3s-killall.sh` script can be used. - -The killall script cleans up containers, K3s directories, and networking components while also removing the iptables chain with all the associated rules. The cluster data will not be deleted. - -To run the killall script from a server node, run: - -``` -/usr/local/bin/k3s-killall.sh -``` \ No newline at end of file diff --git a/content/os/_index.md b/content/os/_index.md deleted file mode 100644 index 2601a529a9..0000000000 --- a/content/os/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: RancherOS -weight: 1 -showBreadcrumb: false ---- diff --git a/content/os/v1.x/_index.md b/content/os/v1.x/_index.md deleted file mode 100644 index 0c15327f26..0000000000 --- a/content/os/v1.x/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: v1.x -showBreadcrumb: false ---- diff --git a/content/os/v1.x/en/_index.md b/content/os/v1.x/en/_index.md deleted file mode 100644 index fcd43aea12..0000000000 --- a/content/os/v1.x/en/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Overview of RancherOS -shortTitle: RancherOS -description: RancherOS is a simplified Linux distribution built from containers, for containers. These documents describe how to install and use RancherOS. -weight: 1 ---- - -> RancherOS 1.x is currently in a maintain-only-as-essential mode. It is no longer being actively maintained at a code level other than addressing critical or security fixes. For more information about the support status of RancherOS, see [this page.](https://rancher.com/docs/os/v1.x/en/support/) - -RancherOS is the smallest, easiest way to run Docker in production. Every process in RancherOS is a container managed by Docker. This includes system services such as `udev` and `syslog`. 
Because it only includes the services necessary to run Docker, RancherOS is significantly smaller than most traditional operating systems. By removing unnecessary libraries and services, requirements for security patches and other maintenance are also reduced. This is possible because, with Docker, users typically package all necessary libraries into their containers. - -Another way in which RancherOS is designed specifically for running Docker is that it always runs the latest version of Docker. This allows users to take advantage of the latest Docker capabilities and bug fixes. - -Like other minimalist Linux distributions, RancherOS boots incredibly quickly. Starting Docker containers is nearly instant, similar to starting any other process. This speed is ideal for organizations adopting microservices and autoscaling. - -Docker is an open-source platform designed for developers, system admins, and DevOps. It is used to build, ship, and run containers, using a simple and powerful command line interface (CLI). To get started with Docker, please visit the [Docker user guide](https://docs.docker.com/engine/userguide/). - -### Hardware Requirements - -* Memory Requirements - -Platform | RAM requirement(>=v1.5.x) | RAM requirement(v1.4.x) --------- | ------------------------ | --------------------------- -Baremetal | 1GB | 1280MB -VirtualBox | 1GB | 1280MB -VMWare | 1GB | 1280MB (rancheros.iso)
2048MB (rancheros-vmware.iso) -GCE | 1GB | 1280MB -AWS | 1GB | 1.7GB - -You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) - -### How RancherOS Works - -Everything in RancherOS is a Docker container. We accomplish this by launching two instances of Docker. One is what we call **System Docker** and is the first process on the system. All other system services, like `ntpd`, `syslog`, and `console`, are running in Docker containers. System Docker replaces traditional init systems like `systemd` and is used to launch [additional system services]({{}}/os/v1.x/en/system-services/). - -System Docker runs a special container called **Docker**, which is another Docker daemon responsible for managing all of the user’s containers. Any containers that you launch as a user from the console will run inside this Docker. This creates isolation from the System Docker containers and ensures that normal user commands don’t impact system services. - - We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. - -{{< img "/img/os/rancheroshowitworks.png" "How it works">}} - -### Running RancherOS - -To get started with RancherOS, head over to our [Quick Start Guide](quick-start-guide/). - -### Latest Release - -Please check our repository for the latest release in our [README](https://github.com/rancher/os/blob/master/README.md). - -
-
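To see the split between System Docker and the user-facing Docker daemon on a running system, you can list the containers managed by each. This is only an illustration; `system-docker` must be run as root.

```bash
# System services (console, syslog, ntpd, ...) run under System Docker.
sudo system-docker ps
# User workloads run under the separate Docker daemon.
docker ps
```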
diff --git a/content/os/v1.x/en/about/_index.md b/content/os/v1.x/en/about/_index.md deleted file mode 100644 index 8b5bf2f852..0000000000 --- a/content/os/v1.x/en/about/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Additional Resources -weight: 200 ---- - -## Developing - -Development is easiest done with QEMU on Linux. OS X works too, although QEMU doesn't have KVM support. If you are running Linux in a virtual machine, then we recommend you run VMWare Fusion/Workstation and enable VT-x support. Then, QEMU will have KVM support and run sufficiently fast inside your Linux VM. - -### Building - -#### Requirements: - -* bash -* make -* Docker 1.10.3+ - -``` -$ make -``` - -The build will run in Docker containers, and when the build is done, the vmlinuz, initrd, and ISO should be in `dist/artifacts`. - -If you're building a version of RancherOS used for development and not for a release, you can instead run `make dev`. This will run faster than the standard build by avoiding building the `installer.tar` and `rootfs.tar.gz` artifacts which are not needed by QEMU. - -### Testing - -Run `make integration-tests` to run the all integration tests in a container, or `./scripts/integration-tests` to run them outside a container (they use QEMU to test the OS.) - -To run just one integration test, or a group of them (using regex's like `.*Console.*`, you can set the `RUNTEST` environment variable: - -``` -$ RUNTEST=TestPreload make integration-test -``` - -### Running - -Prerequisites: QEMU, coreutils, cdrtools/genisoimage/mkisofs. -On OS X, `brew` is recommended to install those. On Linux, use your distro package manager. - -To launch RancherOS in QEMU from your dev version, you can either use `make run`, or customise the vm using `./scripts/run` and its options. You can use `--append your.kernel=params here` and `--cloud-config your-cloud-config.yml` to configure the RancherOS instance you're launching. - -You can SSH in using `./scripts/ssh`. Your SSH keys should have been populated (if you didn't provide your own cloud-config) so you won't need a password. If you don't have SSH keys, or something is wrong with your cloud-config, then the password is "`rancher`". - -If you're on OS X, you can run RancherOS using [_xhyve_](https://github.com/mist64/xhyve) instead of QEMU: just pass `--xhyve` to `./scripts/run` and `./scripts/ssh`. - -### Debugging and logging. - -You can enable extra log information in the console by setting them using `sudo ros config set`, -or as kernel boot parameters. -Enable all logging by setting `rancher.debug` true -or you can set `rancher.docker.debug`, `rancher.system_docker.debug`, `rancher.bootstrap_docker.debug`, or `rancher.log` individually. - -You will also be able to view the debug logging information by running `dmesg` as root. - -## Repositories - -All of repositories are located within our main GitHub [page](https://github.com/rancher). - -[RancherOS Repo](https://github.com/rancher/os): This repo contains the bulk of the RancherOS code. - -[RancherOS Services Repo](https://github.com/rancher/os-services): This repo is where any [system-services]({{< baseurl >}}/os/v1.x/en//system-services/) can be contributed. - -[RancherOS Images Repo](https://github.com/rancher/os-images): This repo is for the corresponding service images. 
- - -## Bugs - -If you find any bugs or are having any trouble, please contact us by filing an [issue](https://github.com/rancher/os/issues/new). - -If you have any updates to our documentation, please make any PRs to our [docs repo](https://github.com/rancher/docs). - -
-
diff --git a/content/os/v1.x/en/about/custom-partition-layout/_index.md b/content/os/v1.x/en/about/custom-partition-layout/_index.md deleted file mode 100644 index a1c43205ed..0000000000 --- a/content/os/v1.x/en/about/custom-partition-layout/_index.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: How to custom partition layout -weight: 305 ---- - -When users use the default `ros install`, ROS will automatically create one partition on the root disk. -It will be the only partition with the label RANCHER_STATE. -But sometimes users want to be able to customize the root disk partition to isolate the data. - -> The following defaults to MBR mode, GPT mode has not been tested. - -### Use RANCHER_STATE partition - -As mentioned above, the default mode is that ROS will automatically create one partition with the label RANCHER_STATE. - -In addition, we can have other partitions, e.g.: two partitions, one is RANCHER_STATE and the other is a normal partition. - -First boot a ROS instance from ISO, then manually format and partition `/dev/sda` , the reference configuration is as follows: - -``` -[root@rancher oem]# fdisk -l -Disk /dev/sda: 5 GiB, 5377622016 bytes, 10503168 sectors -Units: sectors of 1 * 512 = 512 bytes -Sector size (logical/physical): 512 bytes / 512 bytes -I/O size (minimum/optimal): 512 bytes / 512 bytes -Disklabel type: dos -Disk identifier: 0x9fff87e9 - -Device Boot Start End Sectors Size Id Type -/dev/sda1 * 2048 7503167 7501120 3.6G 83 Linux -/dev/sda2 7503872 10503167 2999296 1.4G 83 Linux - -[root@rancher oem]# blkid -/dev/sda1: LABEL="RANCHER_STATE" UUID="512f212b-3130-458e-a2d1-1d601c34d4e4" TYPE="ext4" PARTUUID="9fff87e9-01" -/dev/sda2: UUID="3828e3ac-b825-4898-9072-45da9d37c2a6" TYPE="ext4" PARTUUID="9fff87e9-02" -``` - -Then install ROS to the disk with `ros install -t noformat -d /dev/sda ...`. - -After rebooting, you can use `/dev/sda2`. For example, changing the data root of user-docker: - -``` -$ ros config set mounts '[["/dev/sda2","/mnt/s","ext4",""]]’ -$ ros config set rancher.docker.graph /mnt/s -$ reboot -``` - -> In this mode, the RANCHER_STATE partition capacity cannot exceed 3.8GiB, otherwise the bootloader may not recognize the boot disk. This is the test result on VirtualBox. - -### Use RANCHER_BOOT partition - -When you only use the RANCHER_STATE partition, the bootloader will be installed in the `/boot` directory. - -``` -$ system-docker run -it --rm -v /:/host alpine -ls /host/boot -... 
-``` - -If you want to use a separate boot partition, you also need to boot a ROS instance from ISO, then manually format and partition `/dev/sda`: - -``` -[root@rancher rancher]# fdisk -l -Disk /dev/sda: 5 GiB, 5377622016 bytes, 10503168 sectors -Units: sectors of 1 * 512 = 512 bytes -Sector size (logical/physical): 512 bytes / 512 bytes -I/O size (minimum/optimal): 512 bytes / 512 bytes -Disklabel type: dos -Disk identifier: 0xe32b3025 - -Device Boot Start End Sectors Size Id Type -/dev/sda1 2048 2503167 2501120 1.2G 83 Linux -/dev/sda2 2504704 7503167 4998464 2.4G 83 Linux -/dev/sda3 7503872 10503167 2999296 1.4G 83 Linux - -[root@rancher rancher]# mkfs.ext4 -L RANCHER_BOOT /dev/sda1 -[root@rancher rancher]# mkfs.ext4 -L RANCHER_STATE /dev/sda2 -[root@rancher rancher]# mkfs.ext4 /dev/sda3 - -[root@rancher rancher]# blkid -/dev/sda1: LABEL="RANCHER_BOOT" UUID="43baeac3-11f3-4eed-acfa-64daf66b26c8" TYPE="ext4" PARTUUID="e32b3025-01" -/dev/sda2: LABEL="RANCHER_STATE" UUID="16f1ecef-dbe4-42a2-87a1-611939684e0b" TYPE="ext4" PARTUUID="e32b3025-02" -/dev/sda3: UUID="9f34e161-0eee-48f9-93de-3b7c54dea437" TYPE="ext4" PARTUUID="c9b8f181-03" -``` - -Then install ROS to the disk with `ros install -t noformat -d /dev/sda ...`. - -After rebooting, you can check the boot partition: - -``` -[root@rancher rancher]# mkdir /boot -[root@rancher rancher]# mount /dev/sda1 /boot -[root@rancher rancher]# ls -ahl /boot/ -total 175388 -drwxr-xr-x 4 root root 4.0K Sep 27 03:35 . -drwxr-xr-x 1 root root 4.0K Sep 27 03:38 .. --rw-r--r-- 1 root root 24 Sep 27 03:05 append --rw-r--r-- 1 root root 128 Sep 27 03:35 global.cfg --rw-r--r-- 1 root root 96.8M Sep 27 03:05 initrd -``` - -If you are not using the first partition as a BOOT partition, you need to set BOOT flag via the fdisk tool. - -> In this mode, the RANCHER_BOOT partition capacity cannot exceed 3.8GiB, otherwise the bootloader may not recognize the boot disk. This is the test result on VirtualBox. - -### Use RANCHER_OEM partition - -If you format any partition with the label RANCHER_OEM, ROS will mount this partition to `/usr/share/ros/oem`: - -``` -[root@rancher rancher]# blkid -/dev/sda2: LABEL="RANCHER_OEM" UUID="4f438455-63a3-4d29-ac90-50adbeced412" TYPE="ext4" PARTUUID="9fff87e9-02" - -[root@rancher rancher]# df -hT | grep sda2 -/dev/sda2 ext4 1.4G 4.3M 1.3G 0% /usr/share/ros/oem -``` - -Currently, this OEM directory is hardcoded and not configurable. - -### Use RANCHER_SWAP partition - -Suppose you have a partition(`/dev/sda2`) and you want to use it as a SWAP partition: - -``` -$ mkswap -L RANCHER_SWAP /dev/sda2 - -$ blkid -/dev/sda1: LABEL="RANCHER_STATE" UUID="512f212b-3130-458e-a2d1-1d601c34d4e4" TYPE="ext4" PARTUUID="9fff87e9-01" -/dev/sda2: LABEL="RANCHER_SWAP" UUID="772b6e76-f89c-458e-931e-10902d78d3e4" TYPE="swap" PARTUUID="9fff87e9-02" -``` - -After you install ROS to the disk, you can add the `runcmd` to enable SWAP: - -``` -runcmd: -- swapon -L RANCHER_SWAP -``` - -Then check the memory information: - -``` -[root@rancher rancher]# free -m - total used free shared buffers cached -Mem: 1996 774 1221 237 20 614 --/+ buffers/cache: 139 1856 -Swap: 487 0 487 -``` diff --git a/content/os/v1.x/en/about/faqs/_index.md b/content/os/v1.x/en/about/faqs/_index.md deleted file mode 100644 index 4caf7c71a8..0000000000 --- a/content/os/v1.x/en/about/faqs/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: FAQs -weight: 301 ---- - -### What is required to run RancherOS? - -RancherOS runs on any laptop, physical, or virtual servers. 
- -### What are some commands? - -Command | Description ---------|------------ -`docker`| Good old Docker, use that to run stuff. -`system-docker` | The Docker instance running the system containers. Must run as root or using `sudo` -`ros` | Control and configure RancherOS - - -### How can I extend my disk size in Amazon? - -Assuming your EC2 instance with RancherOS with more disk space than what's being read, run the following command to extend the disk size. This allows RancherOS to see the disk size. - -``` -$ docker run --privileged --rm --it debian:jessie resize2fs /dev/xvda1 -``` - -`xvda1` should be the right disk for your own setup. In the future, we will be trying to create a system service that would automatically do this on boot in AWS. diff --git a/content/os/v1.x/en/about/microcode-loader/_index.md b/content/os/v1.x/en/about/microcode-loader/_index.md deleted file mode 100644 index 88fe6069d7..0000000000 --- a/content/os/v1.x/en/about/microcode-loader/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: How to update microcode -weight: 306 ---- - -Processor manufacturers release stability and security updates to the processor microcode. While microcode can be updated through the BIOS, the Linux kernel is also able to apply these updates. -These updates provide bug fixes that can be critical to the stability of your system. Without these updates, you may experience spurious crashes or unexpected system halts that can be difficult to track down. - -The microcode loader supports three loading methods: - -- Early load microcode -- Late loading -- Builtin microcode - -You can get more details from [here](https://www.kernel.org/doc/html/latest/x86/microcode.html). - -RancherOS supports `Late loading`. To update the Intel microcode, get the latest Intel microcode. An example is [here](https://downloadcenter.intel.com/download/28087/Linux-Processor-Microcode-Data-File?v=t). Then copy the data files to the firmware directory: - -``` -mkdir -p /lib/firmware/intel-ucode/ -cp -v intel-ucode/* /lib/firmware/intel-ucode/ -``` -Reload the microcode. This file does not exist if you are running RancherOS on the hypervisor. Usually, the VM does not need to update the microcode. - -``` -echo 1 > /sys/devices/system/cpu/microcode/reload -``` -Check the result: - -``` -dmesg | grep microcode -[ 13.659429] microcode: sig=0x306f2, pf=0x1, revision=0x36 -[ 13.665981] microcode: Microcode Update Driver: v2.01 , Peter Oruba -[ 510.899733] microcode: updated to revision 0x3b, date = 2017-11-17 -``` - -You can use `runcmd` to reload the microcode every boot: - -``` -runcmd: -- echo 1 > /sys/devices/system/cpu/microcode/reload -``` diff --git a/content/os/v1.x/en/about/recovery-console/_index.md b/content/os/v1.x/en/about/recovery-console/_index.md deleted file mode 100644 index ad5fea9bc3..0000000000 --- a/content/os/v1.x/en/about/recovery-console/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: How to use recovery console -weight: 304 ---- - -### Test Environment - -In order to demonstrate how to use the recovery console, we choose a scene that the disk space is full and the OS cannot boot. 
- -| Term | Definition | -|-----------------------|--------------------------------------------------| -| RancherOS | v1.4.0 | -| Platform | Virtualbox | -| Root Disk | 2GB | -| CPU | 1C | -| MEM | 2GB | - - -### Fill up the disk - -Start this VM to check disk usage: - -``` -/dev/sda1 ext4 1.8G 567.2M 1.2G 32% /opt -/dev/sda1 ext4 1.8G 567.2M 1.2G 32% /mnt -... -... -``` - -Fill the remaining space with `dd`: - -``` -$ cd /opt/ -$ dd if=/dev/zero of=2GB.img bs=1M count=2000 -dd: writing '2GB.img': No space left on device -1304+0 records in -1302+1 records out - -$ ls -ahl -total 1334036 -drwxr-xr-x 2 root root 4.0K Jul 19 07:32 . -drwxr-xr-x 1 root root 4.0K Jul 19 06:58 .. --rw-r--r-- 1 root root 1.3G Jul 19 07:32 2GB.img -``` - -At this point you cannot reboot in the OS, but you can reboot via Virtualbox: - -``` -$ shutdown -h now -Failed to write to log, write /var/log/boot/shutdown.log: no space left on device -[ ] shutdown:info: Setting shutdown timeout to 60 (rancher.shutdown_timeout set to 60) -Failed to write to log, write /var/log/boot/shutdown.log: no space left on device -Failed to write to log, write /var/log/boot/shutdown.log: no space left on device -.[ ] shutdown:fatal: Error response from daemon: {"message":"mkdir /var/lib/system-docker/overlay2/7c7dffbed40e7b0ed4c68d5630b17a179751643ca7b7a4ac183e48a767071684-init: no space left on device"} -Failed to write to log, write /var/log/boot/shutdown.log: no space left on device -``` - -After rebooting, you will not be able to enter the OS and there will be a kernel panic. - -![](https://ws1.sinaimg.cn/mw1024/006tNc79ly1ftf8071p5sj31kw0s14or.jpg) - -### Boot with recovery console - -When you can access the bootloader, you should select the `Recovery console` and press `` to edit: - -![](https://ws3.sinaimg.cn/mw1024/006tNc79ly1ftf7mpir3fj312u0i4a9z.jpg) - -You need add `rancher.autologin=tty1` to the end, then press ``. If all goes well, you will automatically login to the recovery console. - -### How to recover - -We need to mount the root disk in the recovery console and delete some data: - -``` -# If you couldn't see any disk devices created under `/dev/`, please try this command: -$ ros udev-settle - -$ mkdir /mnt/root-disk -$ mount /dev/sda1 /mnt/root-disk - -# delete data previously generated using dd -$ ls -ahl /mnt/root-disk/opt --rw-r--r-- 1 root root 1.3G Jul 19 07:32 2GB.img -$ rm -f /mnt/root-disk/opt/2GB.img -``` - -After rebooting, you can enter the OS normally. - diff --git a/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md b/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md deleted file mode 100644 index 3fb01def4e..0000000000 --- a/content/os/v1.x/en/about/running-rancher-on-rancherOS/_index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Tips on using Rancher v1.x with RancherOS -weight: 302 ---- - -RancherOS can be used to launch [Rancher](/rancher/) and be used as the OS to add nodes to Rancher. - -### Launching Agents using Cloud-Config - -You can easily add hosts into Rancher by using [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) to launch the rancher/agent container. - -After Rancher is launched and host registration has been saved, you will be able to find use the custom option to add Rancher OS nodes. - -```bash -$ sudo docker run --d --privileged -v /var/run/docker.sock:/var/run/docker.sock \ - rancher/agent:v0.8.2 http://:8080/v1/projects/1a5/scripts/ -``` - -
- -> **Note:** The `rancher/agent` version is correlated to the Rancher server version. You will need to check the custom command to get the appropriate tag for the version to use. - -_Cloud-Config Example_ - -Here's using the command above and converting it into a cloud-config file to launch the rancher/agent in docker when RancherOS boots up. - -```yaml -#cloud-config -rancher: - services: - rancher-agent1: - image: rancher/agent:v0.8.2 - command: http://:8080/v1/projects/1a5/scripts/ - privileged: true - volumes: - - /var/run/docker.sock:/var/run/docker.sock -``` -
- -> **Note:** You can not name the service `rancher-agent` as this will not allow the rancher/agent container to be launched correctly. Please read more about why [you can't name your container as `rancher-agent`]({{}}/rancher/v1.6/en/faqs/agents/#adding-in-name-rancher-agent). - -### Adding in Host Labels - -With each host, you have the ability to add labels to help you organize your hosts. The labels are added as an environment variable when launching the rancher/agent container. The host label in the UI will be a key/value pair and the keys must be unique identifiers. If you added two keys with different values, we'll take the last inputted value to use as the key/value pair. - -By adding labels to hosts, you can use these labels when to schedule services/load balancers/services and create a whitelist or blacklist of hosts for your services to run on. - -When adding a custom host, you can add the labels using the UI and it will automatically add the environment variable (`CATTLE_HOST_LABELS`) with the key/value pair into the command on the UI screen. - -#### Native Docker Commands Example - -```bash -# Adding one host label to the rancher/agent command -$ sudo docker run -e CATTLE_HOST_LABELS='foo=bar' -d --privileged \ - -v /var/run/docker.sock:/var/run/docker.sock rancher/agent:v0.8.2 \ - http://:8080/v1/projects/1a5/scripts/ - -# Adding more than one host label requires joining the additional host labels with an `&` -$ sudo docker run -e CATTLE_HOST_LABELS='foo=bar&hello=world' -d --privileged \ - -v /var/run/docker.sock:/var/run/docker.sock rancher/agent:v0.8.2 \ - http://:8080/v1/projects/1a5/scripts/ -``` - -#### Cloud-Config Example - -Adding one host label - -```yaml -#cloud-config -rancher: - services: - rancher-agent1: - image: rancher/agent:v0.8.2 - command: http://:8080/v1/projects/1a5/scripts/ - privileged: true - volumes: - - /var/run/docker.sock:/var/run/docker.sock - environment: - CATTLE_HOST_LABELS: foo=bar -``` -
- -Adding more than one host label requires joining the additional host labels with an `&` - -```yaml -#cloud-config -rancher: - services: - rancher-agent1: - image: rancher/agent:v0.8.2 - command: http://:8080/v1/projects/1a5/scripts/ - privileged: true - volumes: - - /var/run/docker.sock:/var/run/docker.sock - environment: - CATTLE_HOST_LABELS: foo=bar&hello=world -``` diff --git a/content/os/v1.x/en/about/security/_index.md b/content/os/v1.x/en/about/security/_index.md deleted file mode 100644 index 00286cf1a5..0000000000 --- a/content/os/v1.x/en/about/security/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: RancherOS Security -weight: 303 ---- - - - - - - - -
-**Security policy**
-
-Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame. RancherOS is a minimal Linux distribution, built entirely using open source components.
-
-**Reporting process**
-
-Please submit possible security issues by emailing security@rancher.com.
-
-**Announcements**
-
-Subscribe to the Rancher announcements forum for release updates.
- -### RancherOS Vulnerabilities - -| ID | Description | Date | Resolution | -|----|-------------|------|------------| -| [CVE-2017-6074](http://seclists.org/oss-sec/2017/q1/471) | Local privilege-escalation using a user after free issue in [Datagram Congestion Control Protocol (DCCP)](https://wiki.linuxfoundation.org/networking/dccp). DCCP is built into the RancherOS kernel as a dynamically loaded module, and isn't loaded by default. | 17 Feb 2017 | [RancherOS v0.8.1](https://github.com/rancher/os/releases/tag/v0.8.1) using a [patched 4.9.12 Linux kernel](https://github.com/rancher/os-kernel/releases/tag/v4.9.12-rancher) | -| [CVE-2017-7184](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-7184) | Allows local users to obtain root privileges or cause a denial of service (heap-based out-of-bounds access) by leveraging the CAP_NET_ADMIN capability. | 3 April 2017 | [RancherOS v0.9.2-rc1](https://github.com/rancher/os/releases/tag/v0.9.2-rc1) using Linux 4.9.20 | -| [CVE-2017-1000364](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-1000364) | Linux Kernel is prone to a local memory-corruption vulnerability. Attackers may be able to exploit this issue to execute arbitrary code with elevated privileges | 19 June 2017 | [RancherOS v1.0.3](https://github.com/rancher/os/releases/tag/v1.0.3) | -| [CVE-2017-1000366](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-1000366) | glibc contains a vulnerability that allows manipulation of the heap/stack. Attackers may be able to exploit this issue to execute arbitrary code with elevated privileges | 19 June 2017 | [RancherOS v1.0.3](https://github.com/rancher/os/releases/tag/v1.0.3) | -| [CVE-2017-1000405](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-1000405) | The Linux Kernel versions 2.6.38 through 4.14 have a problematic use of pmd_mkdirty() in the touch_pmd() function inside the THP implementation. touch_pmd() can be reached by get_user_pages(). In such case, the pmd will become dirty. | 10 Dec 2017 | [RancherOS v1.1.1](https://github.com/rancher/os/releases/tag/v1.1.1) | -| [CVE-2017-5754](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5754) | Systems with microprocessors utilizing speculative execution and indirect branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis of the data cache. 
| 5 Jan 2018 | [RancherOS v1.1.3](https://github.com/rancher/os/releases/tag/v1.1.3) using Linux v4.9.75 | -| [CVE-2017-5715](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715) | Systems with microprocessors utilizing speculative execution and indirect branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis | 6 Feb 2018 | [RancherOS v1.1.4](https://github.com/rancher/os/releases/tag/v1.1.4) using Linux v4.9.78 with the Retpoline support | -| [CVE-2017-5753](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5753) | Systems with microprocessors utilizing speculative execution and branch prediction may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | -| [CVE-2018-8897](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-8897) | A statement in the System Programming Guide of the Intel 64 and IA-32 Architectures Software Developer's Manual (SDM) was mishandled in the development of some or all operating-system kernels, resulting in unexpected behavior for #DB exceptions that are deferred by MOV SS or POP SS, as demonstrated by (for example) privilege escalation in Windows, macOS, some Xen configurations, or FreeBSD, or a Linux kernel crash. | 31 May 2018 | [RancherOS v1.4.0](https://github.com/rancher/os/releases/tag/v1.4.0) using Linux v4.14.32 | -| [CVE-2018-3620](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3620) | L1 Terminal Fault is a hardware vulnerability which allows unprivileged speculative access to data which is available in the Level 1 Data Cache when the page table entry controlling the virtual address, which is used for the access, has the Present bit cleared or other reserved bits set. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | -| [CVE-2018-3639](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-3639) | Systems with microprocessors utilizing speculative execution and speculative execution of memory reads before the addresses of all prior memory writes are known may allow unauthorized disclosure of information to an attacker with local user access via a side-channel analysis, aka Speculative Store Bypass (SSB), Variant 4. | 19 Sep 2018 | [RancherOS v1.4.1](https://github.com/rancher/os/releases/tag/v1.4.1) using Linux v4.14.67 | -| [CVE-2018-17182](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-17182) | The vmacache_flush_all function in mm/vmacache.c mishandles sequence number overflows. An attacker can trigger a use-after-free (and possibly gain privileges) via certain thread creation, map, unmap, invalidation, and dereference operations. 
| 18 Oct 2018 | [RancherOS v1.4.2](https://github.com/rancher/os/releases/tag/v1.4.2) using Linux v4.14.73 | -| [CVE-2019-5736](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5736) | runc through 1.0-rc6, as used in Docker before 18.09.2 and other products, allows attackers to overwrite the host runc binary (and consequently obtain host root access) by leveraging the ability to execute a command as root within one of these types of containers: (1) a new container with an attacker-controlled image, or (2) an existing container, to which the attacker previously had write access, that can be attached with docker exec. This occurs because of file-descriptor mishandling, related to /proc/self/exe. | 12 Feb 2019 | [RancherOS v1.5.1](https://github.com/rancher/os/releases/tag/v1.5.1) | -| [Microarchitectural Data Sampling (MDS)](https://www.kernel.org/doc/html/latest/x86/mds.html) | Microarchitectural Data Sampling (MDS) is a family of side channel attacks on internal buffers in Intel CPUs. The variants are: CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 | 31 May 2019 | [RancherOS v1.5.2](https://github.com/rancher/os/releases/tag/v1.5.2) using Linux v4.14.122 | -| [The TCP SACK panic](https://lwn.net/Articles/791409/) | Selective acknowledgment (SACK) is a technique used by TCP to help alleviate congestion that can arise due to the retransmission of dropped packets. It allows the endpoints to describe which pieces of the data they have received, so that only the missing pieces need to be retransmitted. However, a bug was recently found in the Linux implementation of SACK that allows remote attackers to panic the system by sending crafted SACK information. | 11 July 2019 | [RancherOS v1.5.3](https://github.com/rancher/os/releases/tag/v1.5.3) | diff --git a/content/os/v1.x/en/configuration/_index.md b/content/os/v1.x/en/configuration/_index.md deleted file mode 100644 index 15a11fcaaa..0000000000 --- a/content/os/v1.x/en/configuration/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Configuration -weight: 120 -aliases: - - /os/v1.x/en/installation/configuration ---- - -There are two ways that RancherOS can be configured. - -1. A cloud-config file can be used to provide configuration when first booting RancherOS. -2. Manually changing configuration with the `ros config` command. - -Typically, when you first boot the server, you pass in a cloud-config file to configure the initialization of the server. After the first boot, if you have any changes for the configuration, it's recommended that you use `ros config` to set the necessary configuration properties. Any changes will be saved on disk and a reboot will be required for changes to be applied. - -### Cloud-Config - -Cloud-config is a declarative configuration file format supported by many Linux distributions and is the primary configuration mechanism for RancherOS. - -A Linux OS supporting cloud-config will invoke a cloud-init process during startup to parse the cloud-config file and configure the operating system. RancherOS runs its own cloud-init process in a system container. The cloud-init process will attempt to retrieve a cloud-config file from a variety of data sources. 
Once cloud-init obtains a cloud-config file, it configures the Linux OS according to the content of the cloud-config file. - -When you create a RancherOS instance on AWS, for example, you can optionally provide cloud-config passed in the `user-data` field. Inside the RancherOS instance, cloud-init process will retrieve the cloud-config content through its AWS cloud-config data source, which simply extracts the content of user-data received by the VM instance. If the file starts with "`#cloud-config`", cloud-init will interpret that file as a cloud-config file. If the file starts with `#!` (e.g., `#!/bin/sh`), cloud-init will simply execute that file. You can place any configuration commands in the file as scripts. - -A cloud-config file uses the YAML format. YAML is easy to understand and easy to parse. For more information on YAML, please read more at the [YAML site](http://www.yaml.org/). The most important formatting principle is indentation or whitespace. This indentation indicates relationships of the items to one another. If something is indented more than the previous line, it is a sub-item of the top item that is less indented. - -Example: Notice how both are indented underneath `ssh_authorized_keys`. - -```yaml -#cloud-config -ssh_authorized_keys: - - ssh-rsa AAA...ZZZ example1@rancher - - ssh-rsa BBB...ZZZ example2@rancher -``` - -In our example above, we have our `#cloud-config` line to indicate it's a cloud-config file. We have 1 top-level property, `ssh_authorized_keys`. Its value is a list of public keys that are represented as a dashed list under `ssh_authorized_keys:`. - -### Manually Changing Configuration - -To update RancherOS configuration after booting, the `ros config set ` command can be used. -For more complicated settings, like the [sysctl settings]({{< baseurl >}}/os/v1.x/en/configuration/sysctl/), you can also create a small YAML file and then run `sudo ros config merge -i `. - -#### Getting Values - -You can easily get any value that's been set in the `/var/lib/rancher/conf/cloud-config.yml` file. Let's see how easy it is to get the DNS configuration of the system. - -``` -$ sudo ros config get rancher.network.dns.nameservers -- 8.8.8.8 -- 8.8.4.4 -``` - -#### Setting Values - -You can set values in the `/var/lib/rancher/conf/cloud-config.yml` file. - -Setting a simple value in the `/var/lib/rancher/conf/cloud-config.yml` - -``` -$ sudo ros config set rancher.docker.tls true -``` - -Setting a list in the `/var/lib/rancher/conf/cloud-config.yml` - -``` -$ sudo ros config set rancher.network.dns.nameservers "['8.8.8.8','8.8.4.4']" -``` - -#### Exporting the Current Configuration - -To output and review the current configuration state you can use the `ros config export` command. - -``` -$ sudo ros config export -rancher: - docker: - tls: true - network: - dns: - nameservers: - - 8.8.8.8 - - 8.8.4.4 -``` - -#### Validating a Configuration File - -To validate a configuration file you can use the `ros config validate` command. 
- -``` -$ sudo ros config validate -i cloud-config.yml -``` diff --git a/content/os/v1.x/en/configuration/adding-kernel-parameters/_index.md b/content/os/v1.x/en/configuration/adding-kernel-parameters/_index.md deleted file mode 100644 index da82856f3c..0000000000 --- a/content/os/v1.x/en/configuration/adding-kernel-parameters/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kernel boot parameters -weight: 133 -aliases: - - /os/v1.x/en/installation/configuration/adding-kernel-parameters ---- - -RancherOS parses the Linux kernel boot cmdline to add any keys it understands to its configuration. This allows you to modify which cloud-init sources it will use on boot, to enable `rancher.debug` logging, or to change almost any other configuration setting. - -There are two ways to set or modify persistent kernel parameters: in-place (editing the file and rebooting) or during installation to disk. - -### In-place editing - -_Available as of v1.1_ - -To edit the kernel boot parameters of an already installed RancherOS system, use the new `sudo ros config syslinux` editing command (uses `vi`). - -> To activate this setting, you will need to reboot. - -_For v1.0_ - -For in-place editing, you will need to run a container with an editor and a mount to access the `/boot/global.cfg` file containing the kernel parameters. - -> To activate this setting, you will need to reboot. - -```bash -$ sudo system-docker run --rm -it -v /:/host alpine vi /host/boot/global.cfg -``` - -### During installation - -If you want to set extra kernel parameters when you are [Installing RancherOS to Disk]({{< baseurl >}}/os/v1.x/en/installation/server/install-to-disk/), please use the `--append` parameter. - -```bash -$ sudo ros install -d /dev/sda --append "rancher.autologin=tty1" -``` - -### Graphical boot screen - -_Available as of v1.1_ - -RancherOS v1.1.0 added a Syslinux boot menu, which allows you to temporarily edit the boot parameters, or to select "Debug logging", "Autologin", both "Debug logging & Autologin", or "Recovery Console". - -On desktop systems the Syslinux boot menu can be switched to graphical mode by adding `UI vesamenu.c32` on a new line in `global.cfg` (use `sudo ros config syslinux` to edit the file). - -### Useful RancherOS kernel boot parameters - -#### User password - -`rancher.password=` will set the password for the rancher user. If you do not want to use SSH keys, you can consider this parameter. - -#### Recovery console - -`rancher.recovery=true` will start a single-user `root` bash session as early in the boot process as possible, with no network and no persistent filesystem mounted. This can be used to fix disk problems or to debug your system. - -#### Enable/Disable sshd - -`rancher.ssh.daemon=false` (it is enabled in the os-config) can be used to start RancherOS with no sshd daemon. This can be used to further reduce the ports that your system is listening on. - -#### Enable debug logging - -`rancher.debug=true` will log everything to the console for debugging. - -#### Autologin console - -`rancher.autologin=` will automatically log in on the specified console - common values are `tty1`, `ttyS0` and `ttyAMA0`, depending on your platform. - -#### Enable/Disable hypervisor service auto-enable - -RancherOS v1.1.0 added hypervisor detection, and will try to download a service called `-vm-tools`. This may cause boot speed issues, and so it can be disabled by setting `rancher.hypervisor_service=false`.
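- -Several of the parameters above can be combined into a single kernel command line. As a rough sketch (the disk device and the parameter values are illustrative, not a recommendation), they could be appended at install time like this: - -```bash -# Illustrative only: enable autologin and debug logging, and disable the -# hypervisor service auto-enable, all in one --append string. -$ sudo ros install -d /dev/sda --append "rancher.autologin=tty1 rancher.debug=true rancher.hypervisor_service=false" -``` - -On an already installed system, the same string can be added to the kernel line using the in-place editing methods described above.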
- -#### Auto reboot after a kernel panic - -_Available as of v1.3_ - -`panic=10` will automatically reboot after a kernel panic, 10 means wait 10 seconds before reboot. This is a common kernel parameter, pointing out that it is because we set this parameter by default. diff --git a/content/os/v1.x/en/configuration/airgap-configuration/_index.md b/content/os/v1.x/en/configuration/airgap-configuration/_index.md deleted file mode 100644 index 8cc05fc450..0000000000 --- a/content/os/v1.x/en/configuration/airgap-configuration/_index.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: Air Gap Configuration -weight: 138 -aliases: - - /os/v1.x/en/installation/configuration/airgap-configuration ---- - -In the air gap environment, the Docker registry, RancherOS repositories URL, and the RancherOS upgrade URL should be configured to ensure the OS can pull images, update OS services, and upgrade the OS. - - -# Configuring a Private Docker Registry - -You should use a private Docker registry so that `user-docker` and `system-docker` can pull images. - -1. Add the private Docker registry domain to the [images prefix]({{< baseurl >}}/os/v1.x/en/configuration/images-prefix/). -2. Set the private registry certificates for `user-docker`. For details, refer to [Certificates for Private Registries]({{< baseurl >}}/os/v1.x/en/configuration/private-registries/#certificates-for-private-registries) -3. Set the private registry certificates for `system-docker`. There are two ways to set the certificates: - - To set the private registry certificates before RancherOS starts, you can run a script included with RancherOS. For details, refer to [Set Custom Certs in ISO]({{< baseurl >}}/os/v1.x/en/configuration/airgap-configuration/#set-custom-certs-in-iso). - - To set the private registry certificates after RancherOS starts, append your private registry certs to the `/etc/ssl/certs/ca-certificates.crt.rancher` file. Then reboot to make the certs fully take effect. -4. The images used by RancherOS should be pushed to your private registry. - -# Set Custom Certs in ISO - -RancherOS provides a [script](https://github.com/rancher/os/blob/master/scripts/tools/flush_crt_iso.sh) to set your custom certs for an ISO. The following commands show how to use the script: - -```shell -$ git clone https://github.com/rancher/os.git -$ cd os -$ make shell-bind -$ cd scripts/tools/ -$ wget http://link/rancheros-xx.iso -$ wget http://link/custom.crt -$ ./flush_crt_iso.sh --iso rancheros-xx.iso --cert custom.crt -$ exit - -$ ls ./build/ -``` - -# Configuring RancherOS Repositories and Upgrade URL - -The following steps show how to configure RancherOS to update from private repositories. - -By default, RancherOS will update the `engine`, `console`, and `service` list from `https://raw.githubusercontent.com/rancher/os-services` and update the `os` list from `https://releases.rancher.com/os/releases.yml`. So in the air gap environment, you need to change the repository URL and upgrade URL to your own URLs. - -### 1. Clone os-services files - -Clone `github.com/rancher/os-services` to local. The repo has many branches named after the RancherOS versions. Please check out the branch that you are using. 
- -``` -$ git clone https://github.com/rancher/os-services.git -$ cd os-services -$ git checkout v1.5.2 -``` - -### 2. Download the OS releases yaml - -Download the `releases.yml` from `https://releases.rancher.com/os/releases.yml`. - -### 3. Serve these files by HTTP - -Use a HTTP server to serve the cloned `os-services` directory and download `releases.yml`. -Make sure you can access all the files in `os-services` and `releases.yml` by URL. - -### 4. Set the URLs - -In your cloud-config, set `rancher.repositories.core.url` and `rancher.upgrade.url` to your own `os-services` and `releases` URLs: -```yaml -#cloud-config -rancher: - repositories: - core: - url: https://foo.bar.com/os-services - upgrade: - url: https://foo.bar.com/os/releases.yml -``` - -You can also customize `rancher.repositories.core.url` and `rancher.upgrade.url` after it's been started using `ros config`. - -``` -$ sudo ros config set rancher.repositories.core.url https://foo.bar.com/os-services -$ sudo ros config set rancher.upgrade.url https://foo.bar.com/os/releases.yml -``` - -# Example Cloud-config - - -Here is a total cloud-config example for using RancherOS in an air gap environment. - -For `system-docker`, see [Configuring Private Docker Registry]({{< baseurl >}}/os/v1.x/en/configuration/airgap-configuration/#configuring-private-docker-registry). - -```yaml -#cloud-config -write_files: - - path: /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt - permissions: "0644" - owner: root - content: | - -----BEGIN CERTIFICATE----- - MIIDJjCCAg4CCQDLCSjwGXM72TANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJB - VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 - cyBQdHkgTHRkMQ4wDAYDVQQDEwVhbGVuYTAeFw0xNTA3MjMwMzUzMDdaFw0xNjA3 - MjIwMzUzMDdaMFUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEw - HwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDjAMBgNVBAMTBWFsZW5h - MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxdVIDGlAySQmighbfNqb - TtqetENPXjNNq1JasIjGGZdOsmFvNciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg - 1FECgW7oo6DOET74swUywtq/2IOeik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFm - fP5gDgthrWBWlEPTPY1tmPjI2Hepu2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqT - uo6M2QCgSX3E1kXLnipRT6jUh0HokhFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKX - EVP1Tlw0y1ext2ppS1NR9Sg46GP4+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4 - LQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA45V0bnGPhIIkb54Gzjt9jyPJxPVTW - mwTCP+0jtfLxAor5tFuCERVs8+cLw1wASfu4vH/yHJ/N/CW92yYmtqoGLuTsywJt - u1+amECJaLyq0pZ5EjHqLjeys9yW728IifDxbQDX0cj7bBjYYzzUXp0DB/dtWb/U - KdBmT1zYeKWmSxkXDFFSpL/SGKoqx3YLTdcIbgNHwKNMfTgD+wTZ/fvk0CLxye4P - n/1ZWdSeZPAgjkha5MTUw3o1hjo/0H0ekI4erZFrZnG2N3lDaqDPR8djR+x7Gv6E - vloANkUoc1pvzvxKoz2HIHUKf+xFT50xppx6wsQZ01pNMSNF0qgc1vvH - -----END CERTIFICATE----- -rancher: - environment: - REGISTRY_DOMAIN: xxxx.yyy - repositories: - core: - url: https://foo.bar.com/os-services - upgrade: - url: https://foo.bar.com/os/releases.yml -``` diff --git a/content/os/v1.x/en/configuration/date-and-timezone/_index.md b/content/os/v1.x/en/configuration/date-and-timezone/_index.md deleted file mode 100644 index 4f21ba4b3d..0000000000 --- a/content/os/v1.x/en/configuration/date-and-timezone/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Date and time zone -weight: 121 -aliases: - - 
/os/v1.x/en/installation/configuration/date-and-timezone ---- - -The default console keeps time in the Coordinated Universal Time (UTC) zone and synchronizes clocks with the Network Time Protocol (NTP). The Network Time Protocol daemon (ntpd) is an operating system program that keeps the system time in synchronization with time servers using NTP. - -RancherOS can run ntpd in a System Docker container. You can update its configuration by editing `/etc/ntp.conf`. For an example of how to update a file such as `/etc/ntp.conf` within a container, refer to [this page]({{< baseurl >}}/os/v1.x/en/configuration/write-files/#writing-files-in-specific-system-services). - -The default console does not support changing the time zone, because including `tzdata` (time zone data) would increase the ISO size. However, you can set the time zone inside a container by passing the `TZ` environment variable when you run the container: - -``` -$ docker run -e TZ=Europe/Amsterdam debian:jessie date -Tue Aug 20 09:28:19 CEST 2019 -``` - -You may need to install `tzdata` in some images: - -``` -$ docker run -e TZ=Asia/Shanghai -e DEBIAN_FRONTEND=noninteractive -it --rm ubuntu /bin/bash -c "apt-get update && apt-get install -yq tzdata && date" -Thu Aug 29 08:13:02 CST 2019 -``` diff --git a/content/os/v1.x/en/configuration/disable-access-to-system/_index.md b/content/os/v1.x/en/configuration/disable-access-to-system/_index.md deleted file mode 100644 index bcbe845c4a..0000000000 --- a/content/os/v1.x/en/configuration/disable-access-to-system/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Disabling Access to RancherOS -weight: 136 -aliases: - - /os/v1.x/en/installation/configuration/disable-access-to-system ---- - -_Available as of v1.5_ - -In RancherOS, you can set `rancher.password` as a kernel parameter and enable `auto-login`, but there may be cases where you want to disable both of these options. Both options can be disabled in the cloud-config or as part of a `ros` command. - -### How to Disable Options - -If RancherOS has already been started, you can use `ros config set` to specify which options you want to disable. - -``` -# Disabling the `rancher.password` kernel parameter -$ sudo ros config set rancher.disable ["password"] - -# Disabling the `autologin` ability -$ sudo ros config set rancher.disable ["autologin"] -``` - -Alternatively, you can set this in your cloud-config so the options are automatically disabled when you boot RancherOS. - - -```yaml -# cloud-config -rancher: - disable: - - password - - autologin -``` diff --git a/content/os/v1.x/en/configuration/docker/_index.md b/content/os/v1.x/en/configuration/docker/_index.md deleted file mode 100644 index f1c9bc0334..0000000000 --- a/content/os/v1.x/en/configuration/docker/_index.md +++ /dev/null @@ -1,283 +0,0 @@ ---- -title: Configuring Docker or System Docker -weight: 126 -aliases: - - /os/v1.x/en/installation/configuration/docker ---- - -In RancherOS, you can configure the System Docker and Docker daemons by using [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -### Configuring Docker - -In your cloud-config, Docker configuration is located under the `rancher.docker` key.
- -```yaml -#cloud-config -rancher: - docker: - tls: true - tls_args: - - "--tlsverify" - - "--tlscacert=/etc/docker/tls/ca.pem" - - "--tlscert=/etc/docker/tls/server-cert.pem" - - "--tlskey=/etc/docker/tls/server-key.pem" - - "-H=0.0.0.0:2376" - storage_driver: overlay -``` - -You can also customize Docker after it's been started using `ros config`. - -``` -$ sudo ros config set rancher.docker.storage_driver overlay -``` - -#### User Docker settings - -Many of the standard Docker daemon arguments can be placed under the `rancher.docker` key. The command needed to start the Docker daemon will be generated based on these arguments. The following arguments are currently supported. - -Key | Value ----|--- -`bridge` | String -`bip` | String -`config_file` | String -`containerd` | String -`debug` | Boolean -`exec_root` | String -`group` | String -`graph` | String -`host` | List -`insecure_registry` | List -`live_restore` | Boolean -`log_driver` | String -`log_opts` | Map where keys and values are strings -`pid_file` | String -`registry_mirror` | String -`restart` | Boolean -`selinux_enabled` | Boolean -`storage_driver` | String -`userland_proxy` | Boolean - -In addition to the standard daemon arguments, there are a few fields specific to RancherOS. - -Key | Value | Default | Description ----|---|---| --- -`extra_args` | List of Strings | `[]` | Arbitrary daemon arguments, appended to the generated command -`environment` | List of Strings | `[]` | -`tls` | Boolean | `false` | When [setting up TLS]({{< baseurl >}}/os/v1.x/en/configuration/setting-up-docker-tls/), this key needs to be set to true. -`tls_args` | List of Strings (used only if `tls: true`) | `[]` | -`server_key` | String (used only if `tls: true`)| `""` | PEM encoded server TLS key. -`server_cert` | String (used only if `tls: true`) | `""` | PEM encoded server TLS certificate. -`ca_key` | String (used only if `tls: true`) | `""` | PEM encoded CA TLS key. -`storage_context` | String | `console` | Specifies the name of the system container in whose context to run the Docker daemon process. - -#### Example using extra_args for setting MTU - -The following example can be used to set MTU on the Docker daemon: - -```yaml -#cloud-config -rancher: - docker: - extra_args: [--mtu, 1460] -``` - -#### Example using bip for docker0 bridge - -_Available as of v1.4.x_ - -The docker0 bridge can be configured with docker args, it will take effect after reboot. - -``` -$ ros config set rancher.docker.bip 192.168.0.0/16 -``` - -### Configuring System Docker - -In your cloud-config, System Docker configuration is located under the `rancher.system_docker` key. - -```yaml -#cloud-config -rancher: - system_docker: - storage_driver: overlay -``` - -#### System Docker settings - -All daemon arguments shown in the first table are also available to System Docker. The following are also supported. - -Key | Value | Default | Description ----|---|---| --- -`extra_args` | List of Strings | `[]` | Arbitrary daemon arguments, appended to the generated command -`environment` | List of Strings (optional) | `[]` | - -_Available as of v1.4.x_ - -The docker-sys bridge can be configured with system-docker args, it will take effect after reboot. - -``` -$ ros config set rancher.system_docker.bip 172.19.0.0/16 -``` - -_Available as of v1.4.x_ - -The default path of system-docker logs is `/var/log/system-docker.log`. If you want to write the system-docker logs to a separate partition, -e.g. 
[RANCHER_OEM partition]({{}}/os/v1.x/en/about/custom-partition-layout/#use-rancher-oem-partition), you can try `rancher.defaults.system_docker_logs`: - -``` -#cloud-config -rancher: - defaults: - system_docker_logs: /usr/share/ros/oem/system-docker.log -``` - -### Using a pull through registry mirror - -There are 3 Docker engines that can be configured to use the pull-through Docker Hub registry mirror cache: - -``` -#cloud-config -rancher: - bootstrap_docker: - registry_mirror: "http://10.10.10.23:5555" - docker: - registry_mirror: "http://10.10.10.23:5555" - system_docker: - registry_mirror: "http://10.10.10.23:5555" -``` - -`bootstrap_docker` is used to prepare and initial network and pull any cloud-config options that can be used to configure the final network configuration and System-docker - its very unlikely to pull any images. - -A successful pull through mirror cache request by System-docker looks like: - -``` -[root@rancher-dev rancher]# system-docker pull alpine -Using default tag: latest -DEBU[0201] Calling GET /v1.23/info -> WARN[0201] Could not get operating system name: Error opening /usr/lib/os-release: open /usr/lib/os-release: no such file or directory -WARN[0201] Could not get operating system name: Error opening /usr/lib/os-release: open /usr/lib/os-release: no such file or directory -DEBU[0201] Calling POST /v1.23/images/create?fromImage=alpine%3Alatest -DEBU[0201] hostDir: /etc/docker/certs.d/10.10.10.23:5555 -DEBU[0201] Trying to pull alpine from http://10.10.10.23:5555/ v2 -DEBU[0204] Pulling ref from V2 registry: alpine:latest -DEBU[0204] pulling blob "sha256:2aecc7e1714b6fad58d13aedb0639011b37b86f743ba7b6a52d82bd03014b78e" latest: Pulling from library/alpine -DEBU[0204] Downloaded 2aecc7e1714b to tempfile /var/lib/system-docker/tmp/GetImageBlob281102233 2aecc7e1714b: Extracting 1.99 MB/1.99 MB -DEBU[0204] Untar time: 0.161064213s -DEBU[0204] Applied tar sha256:3fb66f713c9fa9debcdaa58bb9858bd04c17350d9614b7a250ec0ee527319e59 to 841c99a5995007d7a66b922be9bafdd38f8090af17295b4a44436ef433a2aecc7e1714b: Pull complete -Digest: sha256:0b94d1d1b5eb130dd0253374552445b39470653fb1a1ec2d81490948876e462c -Status: Downloaded newer image for alpine:latest -``` - -### Using Multiple User Docker Daemons - -_Available as of v1.5.0_ - -When RancherOS is booted, you start with a User Docker service that is running in System Docker. With v1.5.0, RancherOS has the ability to create additional User Docker services that can run at the same time. - -#### Terminology - -Throughout the rest of this documentation, we may simplify to use these terms when describing Docker. - -| Terminology | Definition | -|-----------------------|--------------------------------------------------| -| DinD | Docker in docker | -| User Docker | The user-docker on RancherOS | -| Other User Docker| The other user-docker daemons you create, these user-docker daemons are automatically assumed to be Docker in Docker. | - -#### Pre-Requisites - -User Docker must be set as Docker 17.12.1 or earlier. If it's a later Docker version, it will produce errors when creating a user defined network in System Docker. - -``` -$ ros engine switch docker-17.12.1-ce -``` - -You will need to create a user-defined network, which will be used when creating the Other User Docker. 
- -``` -$ system-docker network create --subnet=172.20.0.0/16 dind -``` - -#### Create the Other User Docker - -In order to create another User Docker, you will use `ros engine create`. Currently, RancherOS only supports Docker `17.12.1` and `18.03.1` for the Other User Docker image. - -``` -$ ros engine create otheruserdockername --network=dind --fixed-ip=172.20.0.2 -``` - -After the Other User Docker service is created, users can query this service like other services. - -``` -$ ros service list -... -... -disabled volume-efs -disabled volume-nfs -enabled otheruserdockername -``` - -You can use `ros service up` to start the Other User Docker service. - -``` -$ ros service up otheruserdockername -``` - -After the Other User Docker service is running, you can interact with it just like you can use the built-in User Docker. You would need to append `-` to `docker`. - -``` -$ docker-otheruserdockername ps -a -``` - -#### SSH into the Other User Docker container - -When creating the Other User Docker, you can set an external SSH port so you can SSH into the Other User Docker container in System Docker. By using `--ssh-port` and adding ssh keys with `--authorized-keys`, you can set up this optional SSH port. - -``` -$ ros engine create --help -... -... -OPTIONS: - --ssh-port value - --authorized-keys value -``` - -When using `--authorized-keys`, you will need to put the key file in one of the following directories: - -``` -/var/lib/rancher/ -/opt/ -/home/ -``` - -RancherOS will generate a random password for each Other User Docker container, which can be viewed in the container logs. If you do not set any SSH keys, the password can be used. - -``` -$ system-docker logs otheruserdockername - -====================================== -chpasswd: password for 'root' changed -password: xCrw6fEG -====================================== -``` - -In System Docker, you can SSH into any Other User Docker Container using `ssh`. - -``` -$ system-docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -2ca07a25799b rancher/os-dind:17.12.1 "docker-entrypoint..." 5 seconds ago Up 3 seconds 2375/tcp, 0.0.0.0:34791->22/tcp otheruserdockername - -$ ssh -p 34791 root@ - -$ ssh root@ - -``` - -#### Removing any Other User Docker Service - -We recommend using `ros engine rm` to remove any Other User Docker service. - -``` -$ ros engine rm otheruserdockername -``` diff --git a/content/os/v1.x/en/configuration/hostname/_index.md b/content/os/v1.x/en/configuration/hostname/_index.md deleted file mode 100644 index d7c6f3636b..0000000000 --- a/content/os/v1.x/en/configuration/hostname/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Setting the Hostname -weight: 124 -aliases: - - /os/v1.x/en/installation/configuration/hostname ---- - -You can set the hostname of the host using [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). The example below shows how to configure it. 
- -```yaml -#cloud-config -hostname: myhost -``` diff --git a/content/os/v1.x/en/configuration/images-prefix/_index.md b/content/os/v1.x/en/configuration/images-prefix/_index.md deleted file mode 100644 index 207595a131..0000000000 --- a/content/os/v1.x/en/configuration/images-prefix/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Images prefix -weight: 121 -aliases: - - /os/v1.x/en/installation/configuration/images-prefix ---- - -_Available as of v1.3_ - -When you have built your own docker registries, and have cached the `rancher/os` and other `os-services` images, -something like a normal `docker pull rancher/os` can be cached as `docker pull dockerhub.mycompanyname.com/docker.io/rancher/os`. - -However, you need a way to inject a prefix into RancherOS for installation or service pulls. -RancherOS supports a global prefix you can add to force ROS to always use your mirror. - -You can config a global image prefix: - -``` -ros config set rancher.environment.REGISTRY_DOMAIN xxxx.yyy - -``` - -Then you check the os list: - -``` -$ ros os list -xxxx.yyy/rancher/os:v1.3.0 remote latest running -xxxx.yyy/rancher/os:v1.2.0 remote available -... -... -``` - -Also you can check consoles: - -``` -$ ros console switch ubuntu -Switching consoles will -1. destroy the current console container -2. log you out -3. restart Docker -Continue [y/N]: y -Pulling console (xxxx.yyy/rancher/os-ubuntuconsole:v1.3.0)... -... -``` - -If you want to reset this setting: - -``` -ros config set rancher.environment.REGISTRY_DOMAIN docker.io -``` diff --git a/content/os/v1.x/en/configuration/kernel-modules-kernel-headers/_index.md b/content/os/v1.x/en/configuration/kernel-modules-kernel-headers/_index.md deleted file mode 100644 index a350c41eff..0000000000 --- a/content/os/v1.x/en/configuration/kernel-modules-kernel-headers/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Installing Kernel Modules that require Kernel Headers -weight: 135 -aliases: - - /os/v1.x/en/installation/configuration/kernel-modules-kernel-headers ---- - -To compile any kernel modules, you will need to download the kernel headers. The kernel headers are available in the form of a system service. Since the kernel headers are a system service, they need to be enabled using the `ros service` command. - -### Installing Kernel Headers - -The following commands can be used to install kernel headers for usage by containers in Docker or System Docker. - -#### Docker - -``` -$ sudo ros service enable kernel-headers -$ sudo ros service up kernel-headers -``` - -#### System Docker - -``` -$ sudo ros service enable kernel-headers-system-docker -$ sudo ros service up kernel-headers-system-docker -``` - -The `ros service` commands will install the kernel headers in `/lib/modules/$(uname -r)/build`. Based on which service you install, the kernel headers will be available to containers, in Docker or System Docker, by bind mounting specific volumes. For any containers that compile a kernel module, the Docker command will need to bind mount in `/usr/src` and `/lib/modules`. - -> **Note:** Since both commands install kernel headers in the same location, the only reason for different services is due to the fact that the storage places for System Docker and Docker are different. Either one or both kernel headers can be installed in the same RancherOS services. 
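- -To confirm that the headers landed where expected, you can run a quick check from the console (output omitted; the path is the install location mentioned above): - -``` -$ ls /lib/modules/$(uname -r)/build -```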
- -### Example of Launching Containers to use Kernel Headers - -``` -# Run a container in Docker and bind mount specific directories -$ docker run -it -v /usr/src:/usr/src -v /lib/modules:/lib/modules ubuntu:15.10 -# Run a container in System Docker and bind mount specific directories -$ sudo system-docker run -it -v /usr/src:/usr/src -v /lib/modules:/lib/modules ubuntu:15.10 -``` diff --git a/content/os/v1.x/en/configuration/loading-kernel-modules/_index.md b/content/os/v1.x/en/configuration/loading-kernel-modules/_index.md deleted file mode 100644 index d7f2b47673..0000000000 --- a/content/os/v1.x/en/configuration/loading-kernel-modules/_index.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Loading Kernel Modules -weight: 134 -aliases: - - /os/v1.x/en/installation/configuration/loading-kernel-modules ---- - -Since RancherOS v0.8, we build our own kernels using an unmodified kernel.org LTS kernel. -We support both loading kernel modules with parameters and loading extra kernel modules. - -### Loading Kernel Modules with parameters - -_Available as of v1.4_ - -The `rancher.modules` key can be used to load kernel modules or set module parameters. - -As an example, I'm going to set a parameter for the kernel module `nbd`: - -``` -sudo ros config set rancher.modules "['nbd nbds_max=1024', 'nfs']" -``` - -Or - -``` -#cloud-config -rancher: - modules: [nbd nbds_max=1024, nfs] -``` - -After rebooting, you can check that the `nbds_max` parameter has been updated. - -``` -# cat /sys/module/nbd/parameters/nbds_max -1024 -``` - -### Loading Extra Kernel Modules - -We also build almost all optional extras as modules - so most in-tree modules are available -in the `kernel-extras` service. - -If you do need to build kernel modules for RancherOS, there are 4 options: - -* Try the `kernel-extras` service -* Ask us to add it into the next release -* If it's out of tree, copy the methods used for the zfs and open-iscsi services -* Build it yourself - -#### Try the kernel-extras service - -We build the RancherOS kernel with most of the optional drivers as kernel modules, packaged -into an optional RancherOS service. - -To install these, run: - -``` -sudo ros service enable kernel-extras -sudo ros service up kernel-extras -``` - -The modules should now be available for you to `modprobe`. - -#### Ask us to do it - -Open a GitHub issue in the https://github.com/rancher/os repository - we'll probably add -it to the kernel-extras service the next time we build a kernel. Tell us if you need the module at initial -configuration or boot, and we can add it to the default kernel modules. - -#### Copy the out of tree build method - -See https://github.com/rancher/os-services/blob/master/z/zfs.yml and -https://github.com/rancher/os-services/tree/master/images/20-zfs - -The build container and build.sh script build the source and then create a tools image, which "wonka.sh" -uses to import those tools into the console container using `docker run`. - -#### Build your own - -As an example, I'm going to build the `intel-ishtp` HID driver using the `rancher/os-zfs:` images to build in, as they should contain the right tools versions for that kernel.
- -``` -sudo docker run --rm -it --entrypoint bash --privileged -v /lib:/host/lib -v $(pwd):/data -w /data rancher/os-zfs:$(ros -v | cut -d ' ' -f 2) - -apt-get update -apt-get install -qy libncurses5-dev bc libssh-dev -curl -SsL -o src.tgz https://github.com/rancher/os-kernel/releases/download/v$(uname -r)/linux-$(uname -r)-src.tgz -tar zxvf src.tgz -zcat /proc/config.gz >.config -# Yes, ignore the name of the directory :/ -cd v* -# enable whatever modules you want to add. -make menuconfig -# I finally found an Intel sound hub that wasn't enabled yet -# CONFIG_INTEL_ISH_HID=m -make modules SUBDIRS=drivers/hid/intel-ish-hid - -# test it -insmod drivers/hid/intel-ish-hid/intel-ishtp.ko -rmmod intel-ishtp - -# install it -ln -s /host/lib/modules/ /lib/ -cp drivers/hid/intel-ish-hid/*.ko /host/lib/modules/$(uname -r)/kernel/drivers/hid/ -depmod - -# done -exit -``` - -Then in your console, you should be able to run - -``` -modprobe intel-ishtp -``` diff --git a/content/os/v1.x/en/configuration/private-registries/_index.md b/content/os/v1.x/en/configuration/private-registries/_index.md deleted file mode 100644 index b231ec4fb6..0000000000 --- a/content/os/v1.x/en/configuration/private-registries/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Private Registries -weight: 128 -aliases: - - /os/v1.x/en/installation/configuration/private-registries ---- - -When launching services through a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config), it is sometimes necessary to pull a private image from DockerHub or from a private registry. Authentication for these can be embedded in your cloud-config. - -For example, to add authentication for DockerHub: - -```yaml -#cloud-config -rancher: - registry_auths: - https://index.docker.io/v1/: - auth: dXNlcm5hbWU6cGFzc3dvcmQ= -``` - -The `auth` key is generated by base64 encoding a string of the form `username:password`. The `docker login` command can be used to generate an `auth` key. After running the command and authenticating successfully, the key can be found in the `$HOME/.docker/config.json` file. - -```json -{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "dXNlcm5hbWU6cGFzc3dvcmQ=" - } - } -} -``` - -Alternatively, a username and password can be specified directly. - -```yaml -#cloud-config -rancher: - registry_auths: - https://index.docker.io/v1/: - username: username - password: password -``` - -### Docker Client Authentication - -Configuring authentication for the Docker client is not handled by the `registry_auth` key. Instead, the `write_files` directive can be used to write credentials to the standard Docker configuration location. - -``` -#cloud-config -write_files: - - path: /home/rancher/.docker/config.json - permissions: "0755" - owner: rancher - content: | - { - "auths": { - "https://index.docker.io/v1/": { - "auth": "asdf=", - "email": "not@val.id" - } - } - } -``` - -### Certificates for Private Registries - -Certificates can be stored in the standard locations (i.e. `/etc/docker/certs.d`) following the [Docker documentation](https://docs.docker.com/registry/insecure). By using the `write_files` directive of the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config), the certificates can be written directly into `/etc/docker/certs.d`. 
- -```yaml -#cloud-config -write_files: - - path: /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt - permissions: "0644" - owner: root - content: | - -----BEGIN CERTIFICATE----- - MIIDJjCCAg4CCQDLCSjwGXM72TANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQGEwJB - VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0 - cyBQdHkgTHRkMQ4wDAYDVQQDEwVhbGVuYTAeFw0xNTA3MjMwMzUzMDdaFw0xNjA3 - MjIwMzUzMDdaMFUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEw - HwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDjAMBgNVBAMTBWFsZW5h - MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxdVIDGlAySQmighbfNqb - TtqetENPXjNNq1JasIjGGZdOsmFvNciroNBgCps/HPJphICQwtHpNeKv4+ZuL0Yg - 1FECgW7oo6DOET74swUywtq/2IOeik+i+7skmpu1o9uNC+Fo+twpgHnGAaGk8IFm - fP5gDgthrWBWlEPTPY1tmPjI2Hepu2hJ28SzdXi1CpjfFYOiWL8cUlvFBdyNqzqT - uo6M2QCgSX3E1kXLnipRT6jUh0HokhFK4htAQ3hTBmzcxRkgTVZ/D0hA5lAocMKX - EVP1Tlw0y1ext2ppS1NR9Sg46GP4+ATgT1m3ae7rWjQGuBEB6DyDgyxdEAvmAEH4 - LQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA45V0bnGPhIIkb54Gzjt9jyPJxPVTW - mwTCP+0jtfLxAor5tFuCERVs8+cLw1wASfu4vH/yHJ/N/CW92yYmtqoGLuTsywJt - u1+amECJaLyq0pZ5EjHqLjeys9yW728IifDxbQDX0cj7bBjYYzzUXp0DB/dtWb/U - KdBmT1zYeKWmSxkXDFFSpL/SGKoqx3YLTdcIbgNHwKNMfTgD+wTZ/fvk0CLxye4P - n/1ZWdSeZPAgjkha5MTUw3o1hjo/0H0ekI4erZFrZnG2N3lDaqDPR8djR+x7Gv6E - vloANkUoc1pvzvxKoz2HIHUKf+xFT50xppx6wsQZ01pNMSNF0qgc1vvH - -----END CERTIFICATE----- -``` diff --git a/content/os/v1.x/en/configuration/resizing-device-partition/_index.md b/content/os/v1.x/en/configuration/resizing-device-partition/_index.md deleted file mode 100644 index dc21dc1d6a..0000000000 --- a/content/os/v1.x/en/configuration/resizing-device-partition/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Resizing a Device Partition -weight: 131 -aliases: - - /os/v1.x/en/installation/configuration/resizing-device-partition ---- - -The `resize_device` cloud config option can be used to automatically extend the first partition (assuming its `ext4`) to fill the size of it's device. - -Once the partition has been resized to fill the device, a `/var/lib/rancher/resizefs.done` file will be written to prevent the resize tools from being run again. If you need it to run again, delete that file and reboot. - -```yaml -#cloud-config -rancher: - resize_device: /dev/sda -``` - -This behavior is the default when launching RancherOS on AWS. diff --git a/content/os/v1.x/en/configuration/running-commands/_index.md b/content/os/v1.x/en/configuration/running-commands/_index.md deleted file mode 100644 index b13fee7e04..0000000000 --- a/content/os/v1.x/en/configuration/running-commands/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Running Commands -weight: 123 -aliases: - - /os/v1.x/en/installation/configuration/running-commands ---- - -You can automate running commands on boot using the `runcmd` cloud-config directive. Commands can be specified as either a list or a string. In the latter case, the command is executed with `sh`. - -```yaml -#cloud-config -runcmd: -- [ touch, /home/rancher/test1 ] -- echo "test" > /home/rancher/test2 -``` - -Commands specified using `runcmd` will be executed within the context of the `console` container. - -### Running Docker commands - -When using `runcmd`, RancherOS will wait for all commands to complete before starting Docker. As a result, any `docker run` command should not be placed under `runcmd`. Instead, the `/etc/rc.local` script can be used. 
RancherOS will not wait for commands in this script to complete, so you can use the `wait-for-docker` command to ensure that the Docker daemon is running before performing any `docker run` commands. - -```yaml -#cloud-config -rancher: -write_files: - - path: /etc/rc.local - permissions: "0755" - owner: root - content: | - #!/bin/bash - wait-for-docker - docker run -d nginx -``` - -Running Docker commands in this manner is useful when pieces of the `docker run` command are dynamically generated. For services whose configuration is static, [adding a system service]({{< baseurl >}}/os/v1.x/en/system-services/) is recommended. diff --git a/content/os/v1.x/en/configuration/setting-up-docker-tls/_index.md b/content/os/v1.x/en/configuration/setting-up-docker-tls/_index.md deleted file mode 100644 index 0fb44180b0..0000000000 --- a/content/os/v1.x/en/configuration/setting-up-docker-tls/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Setting up Docker TLS -weight: 127 -aliases: - - /os/v1.x/en/installation/configuration/setting-up-docker-tls ---- - -`ros tls generate` is used to generate both the client and server TLS certificates for Docker. - -Remember, all `ros` commands need to be used with `sudo` or as a `root` user. - -### End to end example - -#### Enable TLS for Docker and Generate Server Certificate - -To have docker secured by TLS you need to set `rancher.docker.tls` to `true`, and generate a set of server and client keys and certificates: - -``` -$ sudo ros config set rancher.docker.tls true -$ sudo ros tls gen --server -H localhost -H -H ... -H -$ sudo system-docker restart docker -``` - -Here, ``s are the hostnames that you will be able to use as your docker host names. A `` can be a wildcard pattern, e.g. "`*.*.*.*.*`". It is recommended to have `localhost` as one of the hostnames, so that you can test docker TLS connectivity locally. - -When you've done that, all the necessary server certificate and key files have been saved to `/etc/docker/tls` directory, and the `docker` service has been started with `--tlsverify` option. - -#### Generate Client Certificates - -You also need client cert and key to access Docker via a TCP socket now: - - -``` -$ sudo ros tls gen - INFO[0000] Out directory (-d, --dir) not specified, using default: /home/rancher/.docker -``` - -All the docker client TLS files are in `~/.docker` dir now. - -#### Test docker TLS connection - -Now you can use your client cert to check if you can access Docker via TCP: - -``` -$ docker --tlsverify version -``` - -Because all the necessary files are in the `~/.docker` dir, you don't need to specify them using `--tlscacert` `--tlscert` and `--tlskey` options. You also don't need `-H` to access Docker on localhost. - -Copy the files from `/home/rancher/.docker` to `$HOME/.docker` on your client machine if you need to access Docker on your RancherOS host from there. - -On your client machine, set the Docker host and test out if Docker commands work. - - -``` -$ export DOCKER_HOST=tcp://:2376 DOCKER_TLS_VERIFY=1 -$ docker ps -``` diff --git a/content/os/v1.x/en/configuration/ssh-keys/_index.md b/content/os/v1.x/en/configuration/ssh-keys/_index.md deleted file mode 100644 index 25dbfe72cf..0000000000 --- a/content/os/v1.x/en/configuration/ssh-keys/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: SSH Settings -weight: 121 -aliases: - - /os/v1.x/en/installation/configuration/ssh-keys ---- - -RancherOS supports adding SSH keys through the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file. 
Within the cloud-config file, you simply add the ssh keys within the `ssh_authorized_keys` key. - -```yaml -#cloud-config -ssh_authorized_keys: - - ssh-rsa AAA...ZZZ example1@rancher - - ssh-rsa BBB...ZZZ example2@rancher -``` - -When we pass the cloud-config file during the `ros install` command, it will allow these ssh keys to be associated with the **rancher** user. You can ssh into RancherOS using the key. - -``` -$ ssh -i /path/to/private/key rancher@ -``` - -Please note that OpenSSH 7.0 and greater similarly disable the ssh-dss (DSA) public key algorithm. It too is weak and we recommend against its use. - -### SSHD Port and IP - -_Available as of v1.3_ - -RancherOS supports changing the sshd port and IP, you can use these in the cloud-config file: - -``` -rancher: - ssh: - port: 10022 - listen_address: 172.22.100.100 -``` - -These settings are only designed for default console. -Because if you change sshd-config, restart the host will restore the default, the new configuration will not take effect. - -For other consoles, all files are persistent, you can modify sshd-config by yourself. diff --git a/content/os/v1.x/en/configuration/switching-consoles/_index.md b/content/os/v1.x/en/configuration/switching-consoles/_index.md deleted file mode 100644 index 4668878f6d..0000000000 --- a/content/os/v1.x/en/configuration/switching-consoles/_index.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Switching Consoles -weight: 125 -aliases: - - /os/v1.x/en/installation/configuration/switching-consoles ---- - -When [booting from the ISO]({{< baseurl >}}/os/v1.x/en/installation/workstation//boot-from-iso/), RancherOS starts with the default console, which is based on busybox. - -You can select which console you want RancherOS to start with using the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -### Enabling Consoles using Cloud-Config - -When launching RancherOS with a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file, you can select which console you want to use. - -Currently, the list of available consoles are: - -* default -* alpine -* centos -* debian -* fedora -* ubuntu - -Here is an example cloud-config file that can be used to enable the debian console. - -```yaml -#cloud-config -rancher: - console: debian -``` - -### Listing Available Consoles - -You can easily list the available consoles in RancherOS and what their status is with `sudo ros console list`. - -``` -$ sudo ros console list -disabled alpine -disabled centos -disabled debian -current default -disabled fedora -disabled ubuntu -``` - -### Changing Consoles after RancherOS has started - -You can view which console is being used by RancherOS by checking which console container is running in System Docker. If you wanted to switch consoles, you just need to run a simple command and select your new console. - -For our example, we'll switch to the Ubuntu console. - -``` -$ sudo ros console switch ubuntu -Switching consoles will -1. destroy the current console container -2. log you out -3. restart Docker -Continue [y/N]:y -Pulling console (rancher/os-ubuntuconsole:v0.5.0-3)... 
-v0.5.0-3: Pulling from rancher/os-ubuntuconsole -6d3a6d998241: Pull complete -606b08bdd0f3: Pull complete -1d99b95ffc1c: Pull complete -a3ed95caeb02: Pull complete -3fc2f42db623: Pull complete -2fb84911e8d2: Pull complete -fff5d987b31c: Pull complete -e7849ae8f782: Pull complete -de375d40ae05: Pull complete -8939c16614d1: Pull complete -Digest: sha256:37224c3964801d633ea8b9629137bc9d4a8db9d37f47901111b119d3e597d15b -Status: Downloaded newer image for rancher/os-ubuntuconsole:v0.5.0-3 -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="Project [os]: Starting project " -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="[0/18] [console]: Starting " -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="Recreating console" -Connection to 127.0.0.1 closed by remote host. -``` - -
- -After logging back in, you'll be in the Ubuntu console. - -``` -$ sudo system-docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -6bf33541b2dc rancher/os-ubuntuconsole:v0.5.0-rc3 "/usr/sbin/entry.sh /" About a minute ago Up About a minute -```
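- -You can also confirm the active console with `ros console list`; the output below is only a sketch of what you might expect after the switch (the statuses on your system may differ): - -``` -$ sudo ros console list -disabled alpine -disabled centos -disabled debian -disabled default -disabled fedora -current ubuntu -```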
- -> **Note:** When switching between consoles, the currently running console container is destroyed, Docker is restarted and you will be logged out. - -### Console persistence - -All consoles except the default (busybox) console are persistent. Persistent console means that the console container will remain the same and preserves changes made to its filesystem across reboots. If a container is deleted/rebuilt, state in the console will be lost except what is in the persisted directories. - -``` -/home -/opt -/var/lib/docker -/var/lib/rancher -``` - -
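- -As a rough illustration of what persistence means in practice (the file names here are arbitrary examples): - -``` -# Survives even if the console container is deleted and rebuilt, because /home is persisted -$ echo "keep me" > /home/rancher/note.txt - -# Survives reboots in a persistent console, but is lost if the container is rebuilt, -# because /etc is not one of the persisted directories -$ sudo touch /etc/scratch-file -```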
- -> **Note:** When using a persistent console and in the current version's console, [rolling back]({{}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. - -### Enabling Consoles - -You can also enable a console that will be changed at the next reboot. - -For our example, we'll switch to the Debian console. - -``` -# Check the console running in System Docker -$ sudo system-docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -95d548689e82 rancher/os-docker:v0.5.0 "/usr/sbin/entry.sh /" About an hour ago Up About an hour docker -# Enable the Debian console -$ sudo ros console enable debian -Pulling console (rancher/os-debianconsole:v0.5.0-3)... -v0.5.0-3: Pulling from rancher/os-debianconsole -7268d8f794c4: Pull complete -a3ed95caeb02: Pull complete -21cb8a645d75: Pull complete -5ee1d288a088: Pull complete -c09f41c2bd29: Pull complete -02b48ce40553: Pull complete -38a4150e7e9c: Pull complete -Digest: sha256:5dbca5ba6c3b7ba6cd6ac75a1d054145db4b4ea140db732bfcbd06f17059c5d0 -Status: Downloaded newer image for rancher/os-debianconsole:v0.5.0-3 -``` - -
- -At the next reboot, RancherOS will be using the Debian console. diff --git a/content/os/v1.x/en/configuration/switching-docker-versions/_index.md b/content/os/v1.x/en/configuration/switching-docker-versions/_index.md deleted file mode 100644 index d1df6deeba..0000000000 --- a/content/os/v1.x/en/configuration/switching-docker-versions/_index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Switching Docker Versions -weight: 129 -aliases: - - /os/v1.x/en/installation/configuration/switching-docker-versions ---- - -The version of User Docker used in RancherOS can be configured using a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file or by using the `ros engine` command. - -> **Note:** There are known issues in Docker when switching between versions. For production systems, we recommend setting the Docker engine only once [using a cloud-config](#setting-the-docker-engine-using-cloud-config). - -### Available Docker engines - -The `ros engine list` command can be used to show which Docker engines are available to switch to. This command will also provide details of which Docker engine is currently being used. - -``` -$ sudo ros engine list -disabled docker-1.10.3 -disabled docker-1.11.2 -current docker-1.12.1 -``` - -### Setting the Docker engine using cloud-config - -RancherOS supports defining which Docker engine to use through the cloud-config file. To change the Docker version from the default packaged version, you can use the following cloud-config setting and select one of the available engines. In the following example, we'll use the cloud-config file to set RancherOS to use Docker 1.10.3 for User Docker. - -```yaml -#cloud-config -rancher: - docker: - engine: docker-1.10.3 -``` - -### Changing Docker engines after RancherOS has started - -If you've already started RancherOS and want to switch Docker engines, you can change the Docker engine by using the `ros engine switch` command. In our example, we'll switch to Docker 1.11.2. - -``` -$ sudo ros engine switch docker-1.11.2 -INFO[0000] Project [os]: Starting project -INFO[0000] [0/19] [docker]: Starting -Pulling docker (rancher/os-docker:1.11.2)... -1.11.2: Pulling from rancher/os-docker -2a6bbb293656: Pull complete -Digest: sha256:ec57fb24f6d4856d737e14c81a20f303afbeef11fc896d31b4e498829f5d18b2 -Status: Downloaded newer image for rancher/os-docker:1.11.2 -INFO[0007] Recreating docker -INFO[0007] [1/19] [docker]: Started -INFO[0007] Project [os]: Project started -$ docker version -Client: - Version: 1.11.2 - API version: 1.23 - Go version: go1.5.4 - Git commit: b9f10c9 - Built: Wed Jun 1 21:20:08 2016 - OS/Arch: linux/amd64 - -Server: - Version: 1.11.2 - API version: 1.23 - Go version: go1.5.4 - Git commit: b9f10c9 - Built: Wed Jun 1 21:20:08 2016 - OS/Arch: linux/amd64 - -``` - -### Enabling Docker engines - -If you don't want to automatically switch Docker engines, you can also set which version of Docker to use after the next reboot by enabling a Docker engine. - -``` -$ sudo ros engine enable docker-1.10.3 -``` - -## Using a Custom Version of Docker - -If you're using a version of Docker that isn't available by default or a custom build of Docker then you can create a custom Docker image and service file to distribute it. - -Docker engine images are built by adding the binaries to a folder named `engine` and then adding this folder to a `FROM scratch` image. For example, the following Dockerfile will build a Docker engine image. 
- -``` -FROM scratch -COPY engine /engine -``` - -Once the image is built a [system service]({{< baseurl >}}/os/v1.x/en/system-services/) configuration file must be created. An [example file](https://github.com/rancher/os-services/blob/master/d/docker-18.06.3-ce.yml) can be found in the rancher/os-services repo. Change the `image` field to point to the Docker engine image you've built. - -All of the previously mentioned methods of switching Docker engines are now available. For example, if your service file is located at `https://myservicefile` then the following cloud-config file could be used to use your custom Docker engine. - -```yaml -#cloud-config -rancher: - docker: - engine: https://myservicefile -``` diff --git a/content/os/v1.x/en/configuration/sysctl/_index.md b/content/os/v1.x/en/configuration/sysctl/_index.md deleted file mode 100644 index 1a8d6722d6..0000000000 --- a/content/os/v1.x/en/configuration/sysctl/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Sysctl Settings -weight: 132 -aliases: - - /os/v1.x/en/installation/configuration/sysctl ---- - -The `rancher.sysctl` cloud-config key can be used to control sysctl parameters. This works in a manner similar to `/etc/sysctl.conf` for other Linux distros. - -``` -#cloud-config -rancher: - sysctl: - net.ipv4.conf.default.rp_filter: 1 -``` - -You can either add these settings to your `cloud-init.yml`, or use `sudo ros config merge -i somefile.yml` to merge settings into your existing system. diff --git a/content/os/v1.x/en/configuration/users/_index.md b/content/os/v1.x/en/configuration/users/_index.md deleted file mode 100644 index 4612c1cce2..0000000000 --- a/content/os/v1.x/en/configuration/users/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Users -weight: 130 -aliases: - - /os/v1.x/en/installation/configuration/users ---- - -Currently, we don't support adding other users besides `rancher`. - -You _can_ add users in the console container, but these users will only exist as long as the console container exists. It only makes sense to add users in a [persistent consoles]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence). - -If you want the console user to be able to ssh into RancherOS, you need to add them -to the `docker` group. diff --git a/content/os/v1.x/en/configuration/write-files/_index.md b/content/os/v1.x/en/configuration/write-files/_index.md deleted file mode 100644 index 7071d5d892..0000000000 --- a/content/os/v1.x/en/configuration/write-files/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Writing Files -weight: 122 -aliases: - - /os/v1.x/en/installation/configuration/write-files ---- - -You can automate writing files to disk using the `write_files` cloud-config directive. - -```yaml -#cloud-config -write_files: - - path: /etc/rc.local - permissions: "0755" - owner: root - content: | - #!/bin/bash - echo "I'm doing things on start" -``` - -### Writing Files in Specific System Services - -By default, the `write_files` directive will create files in the console container. To write files in other system services, the `container` key can be used. For example, the `container` key could be used to write to `/etc/ntp.conf` in the NTP system service. 
- -```yaml -#cloud-config -write_files: - - container: ntp - path: /etc/ntp.conf - permissions: "0644" - owner: root - content: | - server 0.pool.ntp.org iburst - server 1.pool.ntp.org iburst - server 2.pool.ntp.org iburst - server 3.pool.ntp.org iburst - - # Allow only time queries, at a limited rate, sending KoD when in excess. - # Allow all local queries (IPv4, IPv6) - restrict default nomodify nopeer noquery limited kod - restrict 127.0.0.1 - restrict [::1] -``` - -> **Note:** Currently, writing files to a specific system service is only supported for RancherOS's built-in services. You are unable to write files to any custom system services. diff --git a/content/os/v1.x/en/installation/_index.md b/content/os/v1.x/en/installation/_index.md deleted file mode 100644 index be3cae1d22..0000000000 --- a/content/os/v1.x/en/installation/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Installing and Running RancherOS -weight: 100 -aliases: - - /os/v1.x/en/installation/running-rancheros ---- - -RancherOS runs on virtualization platforms, cloud providers and bare metal servers. We also support running a local VM on your laptop. - -To start running RancherOS as quickly as possible, follow our [Quick Start Guide]({{< baseurl >}}/os/v1.x/en/quick-start-guide/). - -# Platforms -Refer to the below resources for more information on installing Rancher on your platform. - -### Workstation - -- [Docker Machine]({{< baseurl >}}/os/v1.x/en/installation/workstation//docker-machine) -- [Boot from ISO]({{< baseurl >}}/os/v1.x/en/installation/workstation//boot-from-iso) - -### Cloud - -- [Amazon EC2]({{< baseurl >}}/os/v1.x/en/installation/cloud/aws) -- [Google Compute Engine]({{< baseurl >}}/os/v1.x/en/installation/cloud/gce) -- [DigitalOcean]({{< baseurl >}}/os/v1.x/en/installation/cloud/do) -- [Azure]({{< baseurl >}}/os/v1.x/en/installation/cloud/azure) -- [OpenStack]({{< baseurl >}}/os/v1.x/en/installation/cloud/openstack) -- [VMware ESXi]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi) -- [Aliyun]({{< baseurl >}}/os/v1.x/en/installation/cloud/aliyun) - -### Bare Metal & Virtual Servers - -- [PXE]({{< baseurl >}}/os/v1.x/en/installation/server/pxe) -- [Install to Hard Disk]({{< baseurl >}}/os/v1.x/en/installation/server/install-to-disk) -- [Raspberry Pi]({{< baseurl >}}/os/v1.x/en/installation/server/raspberry-pi) diff --git a/content/os/v1.x/en/installation/amazon-ecs/_index.md b/content/os/v1.x/en/installation/amazon-ecs/_index.md deleted file mode 100644 index 1379784c5b..0000000000 --- a/content/os/v1.x/en/installation/amazon-ecs/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Amazon ECS (EC2 Container Service) -weight: 190 ---- - -[Amazon ECS](https://aws.amazon.com/ecs/) is supported, which allows RancherOS EC2 instances to join your cluster. - -### Pre-Requisites - -Before launching RancherOS EC2 instances, the [ECS Container Instance IAM Role](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html) will need to have been created. This `ecsInstanceRole` will need to be used when launching EC2 instances. If you have been using ECS, you created this role if you followed the ECS "Get Started" interactive guide. - -### Launching an instance with ECS - -RancherOS makes it easy to join your ECS cluster. The ECS agent is a [system service]({{< baseurl >}}/os/v1.x/en/system-services/) that is enabled in the ECS enabled AMI. 
There may be other RancherOS AMIs that don't have the ECS agent enabled by default, but it can easily be added in the user data on any RancherOS AMI. - -When launching the RancherOS AMI, you'll need to specify the **IAM Role** and **Advanced Details** -> **User Data** in the **Configure Instance Details** step. - -For the **IAM Role**, you'll need to be sure to select the ECS Container Instance IAM role. - -For the **User Data**, you'll need to pass in the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file. - -```yaml -#cloud-config -rancher: - environment: - ECS_CLUSTER: your-ecs-cluster-name - # Note: You will need to add this variable, if using awslogs for ECS task. - ECS_AVAILABLE_LOGGING_DRIVERS: |- - ["json-file","awslogs"] -# If you have selected a RancherOS AMI that does not have ECS enabled by default, -# you'll need to enable the system service for the ECS agent. - services_include: - amazon-ecs-agent: true -``` - -#### Version - -By default, the ECS agent will be using the `latest` tag for the `amazon-ecs-agent` image. In v0.5.0, we introduced the ability to select which version of the `amazon-ecs-agent`. - -To select the version, you can update your [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file. - -```yaml -#cloud-config -rancher: - environment: - ECS_CLUSTER: your-ecs-cluster-name - # Note: You will need to make sure to include the colon in front of the version. - ECS_AGENT_VERSION: :v1.9.0 - # If you have selected a RancherOS AMI that does not have ECS enabled by default, - # you'll need to enable the system service for the ECS agent. - services_include: - amazon-ecs-agent: true -``` - -
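-With either cloud-config in place, the ECS agent should register the instance with the named cluster shortly after boot. One way to confirm this from a workstation (a sketch; the cluster name and region are placeholders to replace):
-
-```
-$ aws ecs list-container-instances --cluster your-ecs-cluster-name --region us-east-1
-```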
- -> **Note:** The `:` must be in front of the version tag in order for the ECS image to be tagged correctly. - -### Amazon ECS enabled AMIs - -Latest Release: [v1.5.6](https://github.com/rancher/os/releases/tag/v1.5.6) - -Region | Type | AMI ----|--- | --- -eu-north-1 | HVM - ECS enabled | [ami-0539b842146882049](https://eu-north-1.console.aws.amazon.com/ec2/home?region=eu-north-1#launchInstanceWizard:ami=ami-0539b842146882049) -ap-south-1 | HVM - ECS enabled | [ami-0d3b8d8f26c689b4f](https://ap-south-1.console.aws.amazon.com/ec2/home?region=ap-south-1#launchInstanceWizard:ami=ami-0d3b8d8f26c689b4f) -eu-west-3 | HVM - ECS enabled | [ami-0fea51d2e82d132a0](https://eu-west-3.console.aws.amazon.com/ec2/home?region=eu-west-3#launchInstanceWizard:ami=ami-0fea51d2e82d132a0) -eu-west-2 | HVM - ECS enabled | [ami-00c3a19135715c851](https://eu-west-2.console.aws.amazon.com/ec2/home?region=eu-west-2#launchInstanceWizard:ami=ami-00c3a19135715c851) -eu-west-1 | HVM - ECS enabled | [ami-012c49dfd6efe2b69](https://eu-west-1.console.aws.amazon.com/ec2/home?region=eu-west-1#launchInstanceWizard:ami=ami-012c49dfd6efe2b69) -ap-northeast-2 | HVM - ECS enabled | [ami-02c46f0bcf41b4979](https://ap-northeast-2.console.aws.amazon.com/ec2/home?region=ap-northeast-2#launchInstanceWizard:ami=ami-02c46f0bcf41b4979) -ap-northeast-1 | HVM - ECS enabled | [ami-0354daa9f30fe60e6](https://ap-northeast-1.console.aws.amazon.com/ec2/home?region=ap-northeast-1#launchInstanceWizard:ami=ami-0354daa9f30fe60e6) -sa-east-1 | HVM - ECS enabled | [ami-00856c117fa05835f](https://sa-east-1.console.aws.amazon.com/ec2/home?region=sa-east-1#launchInstanceWizard:ami=ami-00856c117fa05835f) -ca-central-1 | HVM - ECS enabled | [ami-0b0cb5188439bf169](https://ca-central-1.console.aws.amazon.com/ec2/home?region=ca-central-1#launchInstanceWizard:ami=ami-0b0cb5188439bf169) -ap-southeast-1 | HVM - ECS enabled | [ami-0d8a0ed617d75bacc](https://ap-southeast-1.console.aws.amazon.com/ec2/home?region=ap-southeast-1#launchInstanceWizard:ami=ami-0d8a0ed617d75bacc) -ap-southeast-2 | HVM - ECS enabled | [ami-029cebde25901dcc9](https://ap-southeast-2.console.aws.amazon.com/ec2/home?region=ap-southeast-2#launchInstanceWizard:ami=ami-029cebde25901dcc9) -eu-central-1 | HVM - ECS enabled | [ami-0d513a54a3e67e38e](https://eu-central-1.console.aws.amazon.com/ec2/home?region=eu-central-1#launchInstanceWizard:ami=ami-0d513a54a3e67e38e) -us-east-1 | HVM - ECS enabled | [ami-0f5cc7a532c3171c3](https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#launchInstanceWizard:ami=ami-0f5cc7a532c3171c3) -us-east-2 | HVM - ECS enabled | [ami-030615e199666fd4b](https://us-east-2.console.aws.amazon.com/ec2/home?region=us-east-2#launchInstanceWizard:ami=ami-030615e199666fd4b) -us-west-1 | HVM - ECS enabled | [ami-0b530a841750e3315](https://us-west-1.console.aws.amazon.com/ec2/home?region=us-west-1#launchInstanceWizard:ami=ami-0b530a841750e3315) -us-west-2 | HVM - ECS 
enabled | [ami-0f1e06359f24f6480](https://us-west-2.console.aws.amazon.com/ec2/home?region=us-west-2#launchInstanceWizard:ami=ami-0f1e06359f24f6480) diff --git a/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md b/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md deleted file mode 100644 index d49a8ac4b5..0000000000 --- a/content/os/v1.x/en/installation/boot-process/built-in-system-services/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Built-in System Services -weight: 150 ---- - -To launch RancherOS, we have built-in system services. They are defined in the [Docker Compose](https://docs.docker.com/compose/compose-file/) format, and can be found in the default system config file, `/usr/share/ros/os-config.yml`. You can [add your own system services]({{< baseurl >}}/os/v1.x/en/system-services/) or override services in the cloud-config. - -### preload-user-images - -Read more about [image preloading]({{}}/os/v1.x/en/installation/boot-process/image-preloading/). - -### network - -During this service, networking is set up, e.g. hostname, interfaces, and DNS. - -It is configured by `hostname` and `rancher.network`settings in [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -### ntp - -Runs `ntpd` in a System Docker container. - -### console - -This service provides the RancherOS user interface by running `sshd` and `getty`. It completes the RancherOS configuration on start up: - -1. If the `rancher.password=` kernel parameter exists, it sets `` as the password for the `rancher` user. -2. If there are no host SSH keys, it generates host SSH keys and saves them under `rancher.ssh.keys` in [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). -3. Runs `cloud-init -execute`, which does the following: - * Updates `.ssh/authorized_keys` in `/home/rancher` and `/home/docker` from [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/ssh-keys/) and metadata. - * Writes files specified by the `write_files` [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/write-files/) setting. - * Resizes the device specified by the `rancher.resize_device` [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/resizing-device-partition/) setting. - * Mount devices specified in the `mounts` [cloud-config]({{< baseurl >}}/os/v1.x/en/storage/additional-mounts/) setting. - * Set sysctl parameters specified in the`rancher.sysctl` [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/sysctl/) setting. -4. If user-data contained a file that started with `#!`, then a file would be saved at `/var/lib/rancher/conf/cloud-config-script` during cloud-init and then executed. Any errors are ignored. -5. Runs `/opt/rancher/bin/start.sh` if it exists and is executable. Any errors are ignored. -6. Runs `/etc/rc.local` if it exists and is executable. Any errors are ignored. - -### docker - -This system service runs the user docker daemon. Normally it runs inside the console system container by running `docker-init` script which, in turn, looks for docker binaries in `/opt/bin`, `/usr/local/bin` and `/usr/bin`, adds the first found directory with docker binaries to PATH and runs `dockerlaunch docker daemon` appending the passed arguments. - -Docker daemon args are read from `rancher.docker.args` cloud-config property (followed by `rancher.docker.extra_args`). 
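-For example, additional daemon flags can be appended with `rancher.docker.extra_args`; a minimal sketch (the registry address is only an illustration):
-
-```yaml
-#cloud-config
-rancher:
-  docker:
-    extra_args: ['--insecure-registry', 'registry.example.com:5000']
-```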
diff --git a/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md b/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md deleted file mode 100644 index 78a9c58327..0000000000 --- a/content/os/v1.x/en/installation/boot-process/cloud-init/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Cloud-Init -weight: 151 ---- - -Userdata and metadata can be fetched from a cloud provider, VM runtime, or management service during the RancherOS boot process. Since v0.8.0, this process occurs while RancherOS is still running from memory and before System Docker starts. It is configured by the `rancher.cloud_init.datasources` configuration parameter. For cloud-provider specific images, such as AWS and GCE, the datasource is pre-configured. - -### Userdata - -Userdata is a file given by users when launching RancherOS hosts. It is stored in different locations depending on its format. If the userdata is a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file, indicated by beginning with `#cloud-config` and being in YAML format, it is stored in `/var/lib/rancher/conf/cloud-config.d/boot.yml`. If the userdata is a script, indicated by beginning with `#!`, it is stored in `/var/lib/rancher/conf/cloud-config-script`. - -### Metadata - -Although the specifics vary based on provider, a metadata file will typically contain information about the RancherOS host and contain additional configuration. Its primary purpose within RancherOS is to provide an alternate source for SSH keys and hostname configuration. For example, AWS launches hosts with a set of authorized keys and RancherOS obtains these via metadata. Metadata is stored in `/var/lib/rancher/conf/metadata`. - -## Configuration Load Order - -[Cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config/) is read by system services when they need to get configuration. Each additional file overwrites and extends the previous configuration file. - -1. `/usr/share/ros/os-config.yml` - This is the system default configuration, which should **not** be modified by users. -2. `/usr/share/ros/oem/oem-config.yml` - This will typically exist by OEM, which should **not** be modified by users. -3. Files in `/var/lib/rancher/conf/cloud-config.d/` ordered by filename. If a file is passed in through user-data, it is written by cloud-init and saved as `/var/lib/rancher/conf/cloud-config.d/boot.yml`. -4. `/var/lib/rancher/conf/cloud-config.yml` - If you set anything with `ros config set`, the changes are saved in this file. -5. Kernel parameters with names starting with `rancher`. -6. `/var/lib/rancher/conf/metadata` - Metadata added by cloud-init. diff --git a/content/os/v1.x/en/installation/boot-process/image-preloading/_index.md b/content/os/v1.x/en/installation/boot-process/image-preloading/_index.md deleted file mode 100644 index d7bde60fbe..0000000000 --- a/content/os/v1.x/en/installation/boot-process/image-preloading/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Image Preloading -weight: 152 ---- - -On boot, RancherOS scans `/var/lib/rancher/preload/docker` and `/var/lib/rancher/preload/system-docker` directories and tries to load container image archives it finds there, with `docker load` and `system-docker load`. - -The archives are `.tar` files, optionally compressed with `xz` or `gzip`. 
These can be produced by `docker save` command, e.g.: - -``` -$ docker save my-image1 my-image2 some-other/image3 | xz > my-images.tar.xz -``` - -The resulting files should be placed into `/var/lib/rancher/preload/docker` or `/var/lib/rancher/preload/system-docker` (depending on whether you want it preloaded into Docker or System Docker). - -Pre-loading process only reads each new archive once, so it won't take time on subsequent boots (`.done` files are created to mark the read archives). If you update the archive (place a newer archive with the same name) it'll get read on the next boot as well. - -Pre-loading process is `asynchronous` by default, optionally this can be set to `synchronous` through the cloud-config file or `ros config set` command. In the following example, we’ll use the cloud-config file and `ros config set` command to set RancherOS pre-loading process to `synchronous`. - -_Available as of v1.4_ - -cloud-config file, e.g.: -``` -#cloud-config -rancher: - preload_wait: true -``` - -`ros config set` command, e.g.: -``` -$ ros config set rancher.preload_wait true -``` - -Pre-packing docker images is handy when you're customizing your RancherOS distribution (perhaps, building cloud VM images for your infrastructure). diff --git a/content/os/v1.x/en/installation/boot-process/logging/_index.md b/content/os/v1.x/en/installation/boot-process/logging/_index.md deleted file mode 100644 index ad370802db..0000000000 --- a/content/os/v1.x/en/installation/boot-process/logging/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: System Logging -weight: 153 ---- - -### System services - -RancherOS uses containers for its system services. This means the logs for `syslog`, `acipd`, `system-cron`, `udev`, `network`, `ntp`, `console` and the user Docker are available using `sudo ros service logs `. - -### Boot logging - -Since v1.1.0, the init process's logs are copied to `/var/log/boot` after the user-space filesystem is made available. These can be used to diagnose initialisation, network, and cloud-init issues. - -### Remote Syslog logging - -The Linux kernel has a `netconsole` logging facility that allows it to send the Kernel level logs to a remote Syslog server. -When you set this kernel boot parameter in RancherOS v1.1.0 and later, the RancherOS debug logs will also be sent to it. - -To set up Linux kernel and RancherOS remote Syslog logging, you need to set both a local, and remote host IP address - even if this address isn't the final IP address of your system. 
The kernel setting looks like: - -``` - netconsole=[+][src-port]@[src-ip]/[],[tgt-port]@/[tgt-macaddr] - - where - + if present, enable extended console support - src-port source for UDP packets (defaults to 6665) - src-ip source IP to use (interface address) - dev network interface (eth0) - tgt-port port for logging agent (6666) - tgt-ip IP address for logging agent - tgt-macaddr ethernet MAC address for logging agent (broadcast) -``` - -For example, on my current test system, I have set the kernel boot line to: - - -``` -printk.devkmsg=on console=tty1 rancher.autologin=tty1 console=ttyS0 rancher.autologin=ttyS0 rancher.state.dev=LABEL=RANCHER_STATE rancher.state.autoformat=[/dev/sda,/dev/vda] rancher.rm_usr loglevel=8 netconsole=+9999@10.0.2.14/,514@192.168.42.223/ -``` - -The kernel boot parameters can be set during installation using `sudo ros install --append "...."`, or on an installed RancherOS system, by running `sudo ros config syslinux` (which will start vi in a container, editing the `global.cfg` boot config file. diff --git a/content/os/v1.x/en/installation/cloud/aliyun/_index.md b/content/os/v1.x/en/installation/cloud/aliyun/_index.md deleted file mode 100644 index bffd35fc0d..0000000000 --- a/content/os/v1.x/en/installation/cloud/aliyun/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Aliyun -weight: 111 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/aliyun ---- - -# Adding the RancherOS Image into Aliyun - -RancherOS is available as an image in Aliyun, and can be easily run in Elastic Compute Service (ECS). Let’s walk through how to upload the ECS image. - -1. Download the most recent RancherOS image. The image `rancheros-aliyun.vhd` can be found in the [release artifacts](https://github.com/rancher/os/releases). -2. Follow Aliyun's instructions on how to [upload the image](https://help.aliyun.com/document_detail/127285.html). Before the image can be added, it must be uploaded into an OSS bucket. -3. Once the image is added to your ECS, we can start creating new instances! - -Example: - -![RancherOS on Aliyun 1]({{}}/img/os/RancherOS_aliyun1.jpg) - -## Options - -| Option | Description | -| --- | --- | -| Root disk size | The size must be greater than 10GB. Note: When booting the instance, the value must be kept the same. | -| Platform | Select `Others Linux` | -| Image Format | Select `VHD` | - -### Launching RancherOS using Aliyun Console - -After the image is uploaded, we can use the `Aliyun Console` to start a new instance. Currently, RancherOS on Aliyun only supports SSH key access, so it can only be deployed through the UI. - -Since the image is private, we need to use the `Custom Images`. - -![RancherOS on Aliyun 2]({{}}/img/os/RancherOS_aliyun2.jpg) - -After the instance is successfully started, we can login with the `rancher` user via SSH. diff --git a/content/os/v1.x/en/installation/cloud/aws/_index.md b/content/os/v1.x/en/installation/cloud/aws/_index.md deleted file mode 100644 index 2547ec84ba..0000000000 --- a/content/os/v1.x/en/installation/cloud/aws/_index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Amazon EC2 -weight: 105 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/aws ---- - -RancherOS is available as an Amazon Web Services AMI, and can be easily run on EC2. You can launch RancherOS either using the AWS Command Line Interface (CLI) or using the AWS console. 
- -### Launching RancherOS through the AWS CLI - -If you haven't installed the AWS CLI, follow the instructions on the [AWS CLI page](http://aws.amazon.com/cli/) to install the CLI and configure access key and secret keys. - -Once you've installed your AWS CLI, use this command to launch an EC2 instance with the RancherOS AMI. You will need to know your SSH key name and security group name for the _region_ that you are configured for. These can be found from the AWS console. - -> **Note:** Check the RancherOS [README](https://github.com/rancher/os/blob/master/README.md) for AMI names for each region. We support PV and HVM types of AMIs. - -``` -$ aws ec2 run-instances --image-id ami-ID# --count 1 --instance-type t2.small --key-name MySSHKeyName --security-groups sg-name -``` - -Your EC2 instance is now running RancherOS! - -### Launching RancherOS through the AWS Console - -Let’s walk through how to import and create a RancherOS on EC2 machine using the AWS console. - - -1. First login to your AWS console, and go to the EC2 dashboard, click on **Launch Instance**: - {{< img "/img/os/Rancher_aws1.png" "RancherOS on AWS 1">}} -2. Select the **Community AMIs** on the sidebar and search for **RancherOS**. Pick the latest version and click **Select**. - {{< img "/img/os/Rancher_aws2.png" "RancherOS on AWS 2">}} -3. Go through the steps of creating the instance type through the AWS console. If you want to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file during boot of RancherOS, you'd pass in the file as **User data** by expanding the **Advanced Details** in **Step 3: Configure Instance Details**. You can pass in the data as text or as a file. - {{< img "/img/os/Rancher_aws6.png" "RancherOS on AWS 6">}} - After going through all the steps, you finally click on **Launch**, and either create a new key pair or choose an existing key pair to be used with the EC2 instance. If you have created a new key pair, download the key pair. If you have chosen an existing key pair, make sure you have the key pair accessible. Click on **Launch Instances**. - {{< img "/img/os/Rancher_aws3.png" "RancherOS on AWS 3">}} -4. Your instance will be launching and you can click on **View Instances** to see it's status. - {{< img "/img/os/Rancher_aws4.png" "RancherOS on AWS 4">}} - Your instance is now running! - {{< img "/img/os/Rancher_aws5.png" "RancherOS on AWS 5">}} - -## Logging into RancherOS - -From a command line, log into the EC2 Instance. If you added ssh keys using a cloud-config, -both those keys, and the one you selected in the AWS UI will be installed. - -``` -$ ssh -i /Directory/of/MySSHKeyName.pem rancher@ -``` - -If you have issues logging into RancherOS, try using this command to help debug the issue. - -``` -$ ssh -v -i /Directory/of/MySSHKeyName.pem rancher@ -``` - -## Latest AMI Releases - -Please check the [README](https://github.com/rancher/os/blob/master/README.md) in our RancherOS repository for our latest AMIs. 
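-You can also look up the newest AMI in a region from the command line; a sketch, assuming the published images keep the `rancheros-` name prefix:
-
-```
-$ aws ec2 describe-images --region us-east-1 \
-    --filters "Name=name,Values=rancheros-*" \
-    --query 'sort_by(Images, &CreationDate)[-1].[ImageId,Name]' \
-    --output text
-```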
diff --git a/content/os/v1.x/en/installation/cloud/azure/_index.md b/content/os/v1.x/en/installation/cloud/azure/_index.md deleted file mode 100644 index 19553b92b0..0000000000 --- a/content/os/v1.x/en/installation/cloud/azure/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Azure -weight: 110 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/azure ---- - -RancherOS has been published in Azure Marketplace, you can get it from [here](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/rancher.rancheros). - -### Launching RancherOS through the Azure Portal - -Using the new Azure Resource Management portal, click on **Marketplace**. Search for **RancherOS**. Click on **Create**. - -Follow the steps to create a virtual machine. - -In the _Basics_ step, provide a **name** for the VM, use _rancher_ as the **user name** and select the **SSH public key** option of authenticating. Add your ssh public key into the appropriate field. Select the **Resource group** that you want to add the VM to or create a new one. Select the **location** for your VM. - -In the _Size_ step, select a virtual machine that has at least **1GB** of memory. - -In the _Settings_ step, you can use all the default settings to get RancherOS running. - -Review your VM and buy it so that you can **Create** your VM. - -After the VM has been provisioned, click on the VM to find the public IP address. SSH into your VM using the _rancher_ username. - -``` -$ ssh rancher@ -p 22 -``` - -### Launching RancherOS with custom data - -_Available as of v1.5.2_ - -Instance Metadata Service provides the ability for the VM to have access to its custom data. The binary data must be less than 64 KB and is provided to the VM in base64 encoded form. -You can get more details from [here](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service#custom-data) - -For example, you can add custom data through [CLI](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage): - -``` -# list images from marketplace -az vm image list --location westus --publisher Rancher --offer rancheros --sku os --all --output table - -Offer Publisher Sku Urn Version ---------- ----------- ----- ----------------------------- --------- -rancheros rancher os rancher:rancheros:os:1.5.1 1.5.1 -rancheros rancher os152 rancher:rancheros:os152:1.5.2 1.5.2 -... 
- -# accept the terms -az vm image accept-terms --urn rancher:rancheros:os152:1.5.2 - -# create the vm -AZURE_ROS_SSH_PUBLIC_KEY="xxxxxx" -az vm create --resource-group mygroup \ - --name myvm \ - --image rancher:rancheros:os152:1.5.2 \ - --plan-name os152 \ - --plan-product rancheros \ - --plan-publisher rancher \ - --custom-data ./custom_data.txt \ - --admin-username rancher \ - --size Standard_A1 \ - --ssh-key-value "$AZURE_ROS_SSH_PUBLIC_KEY" -``` - -The `custom_data.txt` can be the cloud-config format or a shell script, such as: - -``` -#cloud-config -runcmd: -- [ touch, /home/rancher/test1 ] -- echo "test" > /home/rancher/test2 -``` - -``` -#!/bin/sh -echo "aaa" > /home/rancher/aaa.txt -``` diff --git a/content/os/v1.x/en/installation/cloud/do/_index.md b/content/os/v1.x/en/installation/cloud/do/_index.md deleted file mode 100644 index 1d04360118..0000000000 --- a/content/os/v1.x/en/installation/cloud/do/_index.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Digital Ocean -weight: 107 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/do ---- - -RancherOS is available in the Digital Ocean portal. RancherOS is a member of container distributions and you can find it easily. - ->**Note** ->Deploying to Digital Ocean will incur charges. - -To start a RancherOS Droplet on Digital Ocean: - -1. In the Digital Ocean portal, go to the project view. -1. Click **New Droplet.** -1. Click **Create Droplet.** -1. Click the **Container distributions** tab. -1. Click **RancherOS.** -1. Choose a plan. Make sure your Droplet has the [minimum hardware requirements for RancherOS]({{}}/os/v1.x/en/overview/#hardware-requirements). -1. Choose any options for backups, block storage, and datacenter region. -1. Optional: In the **Select additional options** section, you can check the **User data** box and enter a `cloud-config` file in the text box that appears. The `cloud-config` file is used to provide a script to be run on the first boot. An example is below. -1. Choose an SSH key that you have access to, or generate a new SSH key. -1. Choose your project. -1. Click **Create.** - - -You can access the host via SSH after the Droplet is booted. The default user is `rancher`. 
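-The same Droplet can also be created non-interactively with `doctl`; a rough sketch, assuming your SSH key is already registered and that the RancherOS image slug is available in your region (both are assumptions to verify):
-
-```
-$ doctl compute droplet create rancheros-01 \
-    --region nyc1 \
-    --size s-2vcpu-4gb \
-    --image rancheros \
-    --ssh-keys <key-fingerprint> \
-    --user-data-file ./cloud-config.yml
-```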
- -Below is an example `cloud-config` file that you can use to initialize the Droplet with user data, such as deploying Rancher: - -``` -#cloud-config - -write_files: - - path: /etc/rc.local - permissions: "0755" - owner: root - content: | - #!/bin/bash - wait-for-docker - - export curlimage=appropriate/curl - export jqimage=stedolan/jq - export rancher_version=v2.2.2 - - for image in $curlimage $jqimage "rancher/rancher:${rancher_version}"; do - until docker inspect $image > /dev/null 2>&1; do - docker pull $image - sleep 2 - done - done - - docker run -d --restart=unless-stopped -p 80:80 -p 443:443 -v /opt/rancher:/var/lib/rancher rancher/rancher:${rancher_version} -``` diff --git a/content/os/v1.x/en/installation/cloud/gce/_index.md b/content/os/v1.x/en/installation/cloud/gce/_index.md deleted file mode 100644 index 34159b09d1..0000000000 --- a/content/os/v1.x/en/installation/cloud/gce/_index.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Google Compute Engine (GCE) -weight: 106 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/gce ---- - -> **Note:** Due to the maximum transmission unit (MTU) of [1460 bytes on GCE](https://cloud.google.com/compute/docs/troubleshooting#packetfragmentation), you will need to configure your [network interfaces]({{< baseurl >}}/os/v1.x/en/networking/interfaces/) and both the [Docker and System Docker]({{< baseurl >}}/os/v1.x/en/configuration/docker/) to use a MTU of 1460 bytes or you will encounter weird networking related errors. - -### Adding the RancherOS Image into GCE - -RancherOS is available as an image in GCE, and can be easily run in Google Compute Engine (GCE). Let’s walk through how to upload GCE image. - -1. Download the most recent RancherOS image. The image can be found in the [release artifacts](https://github.com/rancher/os/releases). It is a `.tar.gz` file. -2. Follow Google's instructions on how to [upload the image](https://cloud.google.com/compute/docs/tutorials/building-images#publishingimage). The image must be uploaded into a Google Cloud Storage bucket before it can be added to a project. -3. Follow Google's instructions on how to [import a RAW image](https://cloud.google.com/compute/docs/images/import-existing-image#use_saved_image). -4. Once the image is added to your Google Compute Engine, we can start creating new instances! - -### Launching RancherOS using `gcloud compute` - -After the image is uploaded, we can use the `gcloud compute` [command-line tool](https://cloud.google.com/compute/docs/gcloud-compute/) to start a new instance. It automatically merges the SSH keys from the project and adds the keys to the **rancher** user. If you don't have any project level SSH keys, go to the _Adding SSH Keys_ section to learn more about adding SSH keys. - -Since the image is private, we need to follow Google's [instructions](https://cloud.google.com/compute/docs/creating-custom-image#start_an_instance_from_a_custom_image). - -``` -$ gcloud compute instances create --project --zone --image -``` - -### Using a Cloud Config File with GCE - -If you want to pass in your own cloud config file that will be processed by [cloud init]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config), you can pass it as metadata upon creation of the instance during the `gcloud compute` command. 
The file will need to be stored locally before running the command. The key of the metadata will be `user-data` and the value is the location of the file. If any SSH keys are added in the cloud config file, it will also be added to the **rancher** user. - -``` -$ gcloud compute instances create --project --zone --image --metadata-from-file user-data=/Directory/of/Cloud_Config.yml -``` - -**Adding your Cloud Config to Existing Instance** - -If you have already created the instance, you can still add the cloud config file after the instance is created. You will just need to reset the machine after you've added the metadata. - -``` -$ gcloud compute instances add-metadata --metadata-from-file user-data=/Directory/of/File --project --zone -Updated [https://www.googleapis.com/compute/v1/projects/PROJECT_ID/zones/ZONE_OF_INSTANCE/instances/INSTANCE_NAME]. -$ gcloud compute instances reset --project --zone -Updated [https://www.googleapis.com/compute/v1/projects/PROJECT_ID/zones/ZONE_OF_INSTANCE/instances/INSTANCE_NAME]. -``` - -**Reviewing your Cloud Config** - -If you want to review the cloud config file for your instance, review the **metadata** section: - -``` -$ gcloud compute instances describe --project --zone -``` - -**Removing your Cloud Config** - -If you want to remove your cloud config file, use the following command to remove the metadata. - -``` -$ gcloud compute instances remove-metadata --project --zone --keys user-data -Updated [https://www.googleapis.com/compute/v1/projects/PROJECT_ID/zones/ZONE_OF_INSTANCE/instances/INSTANCE_NAME]. -``` - -**Resetting your Instance** - -After any changes to the cloud config file, you'll need to reset the machine. You can reset either using the console or using this command: - -``` -$ gcloud compute instances reset --project --zone -Updated [https://www.googleapis.com/compute/v1/projects/PROJECT_ID/zones/ZONE_OF_INSTANCE/instances/INSTANCE_NAME]. -``` - -### Launching RancherOS using the Google Console - -After the image is uploaded, it's easy to use the console to create new instances. You will **not** be able to upload your own cloud config file when creating instances through the console. You can add it after the instance is created using `gcloud compute` commands and resetting the instance. - -1. Make sure you are in the project that the image was created in. - ![RancherOS on GCE 4]({{}}/img/os/Rancher_gce4.png) -2. In the navigation bar, click on the **VM instances**, which is located at Compute -> Compute Engine -> Metadata. Click on **Create instance**. - ![RancherOS on GCE 5]({{}}/img/os/Rancher_gce5.png) -2. Fill out the information for your instance. In the **Image** dropdown, your private image will be listed among the public images provided by Google. Select the private image for RancherOS. Click **Create**. - ![RancherOS on GCE 6]({{}}/img/os/Rancher_gce6.png) -3. Your instance is being created and will be up and running shortly! - -#### Adding SSH keys - -In order to SSH into the GCE instance, you will need to have SSH keys set up in either the project instance, add them to the instance after the instance is created, or add them using the `gcloud compute` commands to add meta-data to an instance. - -**Option 1: Project Level SSH Keys** - -In your project, click on **Metadata**, which is located within Compute -> Compute Engine -> Metadata. Click on **SSH Keys**. 
- -![RancherOS on GCE 7]({{}}/img/os/Rancher_gce7.png) - -Add the SSH keys that you want to have access to any instances within your project. - -Note: If you do this after any RancherOS instance is created, you will need to reset the instance so that the SSH keys are added to the **rancher** user. - -**Option 2: Instance Level SSH Keys** - -After your instance is created, click on the instance name. Scroll down to the **SSH Keys** section and click on **Add SSH key**. This key will only be applicable to the instance. - -![RancherOS on GCE 8]({{}}/img/os/Rancher_gce8.png) - -After the SSH keys have been added, you'll need to reset the machine, by clicking **Reset**. - -![RancherOS on GCE 9]({{}}/img/os/Rancher_gce9.png) - -After a little bit, you will be able to SSH into the box using the **rancher** user. - -**Option 3: Using the Cloud Config file** - -You can add SSH keys by adding them into the cloud config file. Follow the directions above that walk through how to pass the cloud config file to an instance. - -Example of cloud config file that has only SSH keys: - -```yaml -#cloud-config - -ssh_authorized_keys: - - ssh-rsa AAA... user@host -``` - -## Logging into RancherOS ----- - -Remember, the SSH keys are passed to the **rancher** user. The SSH keys can be passed from the project level, the instance level or through the cloud config file. If you add any of these SSH keys after the instance has been created, the instance will need to be reset before the SSH keys are passed through. - -``` -$ gcloud compute ssh rancher@ --project --zone -``` - -If you have issues logging into RancherOS, try using this command to help debug the instance. - -``` -$ gcloud compute instances get-serial-port-output --zone --project -``` diff --git a/content/os/v1.x/en/installation/cloud/openstack/_index.md b/content/os/v1.x/en/installation/cloud/openstack/_index.md deleted file mode 100644 index 9ab19b45d8..0000000000 --- a/content/os/v1.x/en/installation/cloud/openstack/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: OpenStack -weight: 109 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/openstack ---- - -As of v0.5.0, RancherOS releases include an OpenStack image that can be found on our [releases page](https://github.com/rancher/os/releases). The image format is [QCOW3](https://wiki.qemu.org/Features/Qcow3#Fully_QCOW2_backwards-compatible_feature_set) that is backward compatible with QCOW2. - -When launching an instance using the image, you must enable **Advanced Options** -> **Configuration Drive** and in order to use a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file. diff --git a/content/os/v1.x/en/installation/cloud/vmware-esxi/_index.md b/content/os/v1.x/en/installation/cloud/vmware-esxi/_index.md deleted file mode 100644 index 07913f18ae..0000000000 --- a/content/os/v1.x/en/installation/cloud/vmware-esxi/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: VMware ESXi -weight: 108 -aliases: - - /os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi ---- - -As of v1.1.0, RancherOS automatically detects that it is running on VMware ESXi, and automatically adds the `open-vm-tools` service to be downloaded and started, and uses `guestinfo` keys to set the cloud-init data. - -As of v1.5.0, RancherOS releases anything required for VMware, which includes initrd, a standard ISO for VMware, a `vmdk` image, and a specific ISO to be used with Docker Machine. 
The open-vm-tools is built in to RancherOS, there is no need to download it. - -| Description | Download URL | -|---|---| -| Booting from ISO | https://releases.rancher.com/os/latest/vmware/rancheros.iso | -| For docker-machine | https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso | -| VMDK | https://releases.rancher.com/os/latest/vmware/rancheros.vmdk | -| Initrd | https://releases.rancher.com/os/latest/vmware/initrd | - -### VMware Guest Info - -| VARIABLE | TYPE | -|---|---| -| `hostname` | hostname | -| `interface..name` | string | -| `interface..mac` | MAC address (is used to match the ethernet device's MAC address, not to set it) | -| `interface..dhcp` | {"yes", "no"} | -| `interface..role` | {"public", "private"} | -| `interface..ip..address` | CIDR IP address | -| `interface..route..gateway` | IP address | -| `interface..route..destination` | CIDR IP address (not available yet) | -| `dns.server.` | IP address | -| `dns.domain.` | DNS search domain | -| `cloud-init.config.data` | string | -| `cloud-init.data.encoding` | {"", "base64", "gzip+base64"} | -| `cloud-init.config.url` | URL | - - -> **Note:** "n", "m", "l", "x" and "y" are 0-indexed, incrementing integers. The identifier for an interface (``) is used in the generation of the default interface name in the form `eth`. diff --git a/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md deleted file mode 100644 index a54cf2f4f6..0000000000 --- a/content/os/v1.x/en/installation/custom-builds/custom-console/_index.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Custom Console -weight: 180 ---- - -When [booting from the ISO]({{< baseurl >}}/os/v1.x/en/installation/workstation//boot-from-iso/), RancherOS starts with the default console, which is based on busybox. - -You can select which console you want RancherOS to start with using the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -### Enabling Consoles using Cloud-Config - -When launching RancherOS with a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config) file, you can select which console you want to use. - -Currently, the list of available consoles are: - -* default -* alpine -* centos -* debian -* fedora -* ubuntu - -Here is an example cloud-config file that can be used to enable the debian console. - -```yaml -#cloud-config -rancher: - console: debian -``` - -### Listing Available Consoles - -You can easily list the available consoles in RancherOS and what their status is with `sudo ros console list`. - -``` -$ sudo ros console list -disabled alpine -disabled centos -disabled debian -current default -disabled fedora -disabled ubuntu -``` - -### Changing Consoles after RancherOS has started - -You can view which console is being used by RancherOS by checking which console container is running in System Docker. If you wanted to switch consoles, you just need to run a simple command and select your new console. - -For our example, we'll switch to the Ubuntu console. - -``` -$ sudo ros console switch ubuntu -Switching consoles will -1. destroy the current console container -2. log you out -3. restart Docker -Continue [y/N]:y -Pulling console (rancher/os-ubuntuconsole:v0.5.0-3)... 
-v0.5.0-3: Pulling from rancher/os-ubuntuconsole -6d3a6d998241: Pull complete -606b08bdd0f3: Pull complete -1d99b95ffc1c: Pull complete -a3ed95caeb02: Pull complete -3fc2f42db623: Pull complete -2fb84911e8d2: Pull complete -fff5d987b31c: Pull complete -e7849ae8f782: Pull complete -de375d40ae05: Pull complete -8939c16614d1: Pull complete -Digest: sha256:37224c3964801d633ea8b9629137bc9d4a8db9d37f47901111b119d3e597d15b -Status: Downloaded newer image for rancher/os-ubuntuconsole:v0.5.0-3 -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="Project [os]: Starting project " -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="[0/18] [console]: Starting " -switch-console_1 | time="2016-07-02T01:47:14Z" level=info msg="Recreating console" -Connection to 127.0.0.1 closed by remote host. -``` - -
-
-After logging back in, you'll be in the Ubuntu console.
-
-```
-$ sudo system-docker ps
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-6bf33541b2dc rancher/os-ubuntuconsole:v0.5.0-rc3 "/usr/sbin/entry.sh /" About a minute ago Up About a minute
-```
-
-
-> **Note:** When switching between consoles, the currently running console container is destroyed, Docker is restarted, and you will be logged out.
-
-### Console Persistence
-
-All consoles except the default (busybox) console are persistent. A persistent console means that the same console container is reused across reboots, so changes made to its filesystem are preserved. If the console container is deleted or rebuilt, state in the console is lost except for what is in the persisted directories:
-
-```
-/home
-/opt
-/var/lib/docker
-/var/lib/rancher
-```
-
-
-> **Note:** When using a persistent console, [rolling back]({{}}/os/v1.x/en/upgrading/#rolling-back-an-upgrade) to an earlier RancherOS version is not supported. For example, rolling back to v0.4.5 while using a v0.5.0 persistent console is not supported.
-
-### Enabling Consoles
-
-You can also enable a console so that it is switched to at the next reboot, rather than immediately.
-
-For our example, we'll enable the Debian console.
-
-```
-# Check the console running in System Docker
-$ sudo system-docker ps
-CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
-95d548689e82 rancher/os-docker:v0.5.0 "/usr/sbin/entry.sh /" About an hour ago Up About an hour docker
-# Enable the Debian console
-$ sudo ros console enable debian
-Pulling console (rancher/os-debianconsole:v0.5.0-3)...
-v0.5.0-3: Pulling from rancher/os-debianconsole
-7268d8f794c4: Pull complete
-a3ed95caeb02: Pull complete
-21cb8a645d75: Pull complete
-5ee1d288a088: Pull complete
-c09f41c2bd29: Pull complete
-02b48ce40553: Pull complete
-38a4150e7e9c: Pull complete
-Digest: sha256:5dbca5ba6c3b7ba6cd6ac75a1d054145db4b4ea140db732bfcbd06f17059c5d0
-Status: Downloaded newer image for rancher/os-debianconsole:v0.5.0-3
-```
-
- -At the next reboot, RancherOS will be using the Debian console. diff --git a/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md deleted file mode 100644 index b3d6d35baa..0000000000 --- a/content/os/v1.x/en/installation/custom-builds/custom-kernels/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Custom Kernels -weight: 181 ---- - -### Kernel version in RancherOS - -RancherOS basically uses the standard Linux kernel, but we maintain a kernel config ourselves. Due to various feature support and security fixes, we are constantly updating the kernel version. - -RancherOS | Kernel ---------- | ------ -<=v0.7.1 | 4.4.x -<=v1.3.0 | 4.9.x ->=v1.4.0 | 4.14.x - -### Building and Packaging a Kernel to be used in RancherOS - -We build the kernel for RancherOS at the [os-kernel repository](https://github.com/rancher/os-kernel). You can use this repository to help package your own custom kernel to be used in RancherOS. - -Create a clone of the [os-kernel](https://github.com/rancher/os-kernel) repository to your local machine using `git clone`. - -``` -$ git clone https://github.com/rancher/os-kernel.git -``` - -If you want to build kernel v4.14.53, you can refer to the following command. After the build is completed, a `./dist/kernel` directory will be created with the freshly built kernel tarball and headers. - -``` -$ git tag v4.14.53-rancher -$ KERNEL_TAG=4.14.53 make release -...snip... -./dist/kernel/extra-linux-4.14.53-rancher-x86.tar.gz -./dist/kernel/build-linux-4.14.53-rancher-x86.tar.gz -./dist/kernel/linux-4.14.53-rancher-x86.tar.gz -./dist/kernel/config -...snip... -Images ready to push: -rancher/os-extras:4.14.53-rancher -rancher/os-headers:4.14.53-rancher - ``` -For some users who need a custom kernel, the following information is very useful to you: - -1. The modules defined in `modules.list` will be packaged into the built-in modules. -2. The modules defined in `modules-extra.list` will be packaged into the extra modules. -3. You can modify `config/kernel-config` to build the kernel modules you need. -4. You can add your patches in the `patches` directory, and `os-kernel` will update these patches after downloading the kernel source. - -Now you need to either upload the `./dist/kernel/linux-4.14.53-rancher-x86.tar.gz` file to somewhere, or copy that file into your clone of the `rancher/os` repo, as `assets/kernel.tar.gz`. - -The `build-.tar.gz` and `extra-.tar.gz` files are used to build the `rancher/os-extras` and `rancher/os-headers` images for your RancherOS release - which you will need to tag them with a different organisation name, push them to a registry, and create custom service.yml files. - -Your kernel should be packaged and published as a set of files of the following format: - -1. `.tar.gz` is the one KERNEL_URL in `rancher/os` should point to. It contains the kernel binary, core modules and firmware. - -2. `build-.tar.gz` contains build headers to build additional modules: it is a subset of the kernel sources tarball. These files will be installed into `/usr/src/` using the `kernel-headers-system-docker` and `kernel-headers` services. - -3. `extra-.tar.gz` contains extra modules and firmware for your kernel and should be built into a `kernel-extras` service. - -### Building a RancherOS release using the Packaged kernel files. 
- -By default, RancherOS ships with the kernel provided by the [os-kernel repository](https://github.com/rancher/os-kernel). Swapping out the default kernel can by done by [building your own custom RancherOS ISO]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). - -Create a clone of the main [RancherOS repository](https://github.com/rancher/os) to your local machine with a `git clone`. - -``` -$ git clone https://github.com/rancher/os.git -``` - -In the root of the repository, the "General Configuration" section of `Dockerfile.dapper` will need to be updated. Using your favorite editor, replace the appropriate `KERNEL_URL` value with a URL of your compiled custom kernel tarball. Ideally, the URL will use `HTTPS`. - -``` -# Update the URL to your own custom kernel tarball -ARG KERNEL_VERSION_amd64=4.14.63-rancher -ARG KERNEL_URL_amd64=https://link/xxxx -``` - -After you've replaced the URL with your custom kernel, you can follow the steps in [building your own custom RancherOS ISO]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/). - -> **Note:** `KERNEL_URL` settings should point to a Linux kernel, compiled and packaged in a specific way. You can fork [os-kernel repository](https://github.com/rancher/os-kernel) to package your own kernel. diff --git a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md b/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md deleted file mode 100644 index 18f3ddafcb..0000000000 --- a/content/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/_index.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Custom RancherOS ISO -weight: 182 ---- - -It's easy to build your own RancherOS ISO. - -Create a clone of the main [RancherOS repository](https://github.com/rancher/os) to your local machine with a `git clone`. - -``` -$ git clone https://github.com/rancher/os.git -``` - -In the root of the repository, the "General Configuration" section of `Dockerfile.dapper` can be updated to use [custom kernels]({{}}/os/v1.x/en/installation/custom-builds/custom-kernels). -After you've saved your edits, run `make` in the root directory. After the build has completed, a `./dist/artifacts` directory will be created with the custom built RancherOS release files. -Build Requirements: `bash`, `make`, `docker` (Docker version >= 1.10.3) - -``` -$ make -$ cd dist/artifacts -$ ls -initrd rancheros.iso -iso-checksums.txt vmlinuz -``` - -If you need a compressed ISO, you can run this command: - -``` -$ make release -``` - -The `rancheros.iso` is ready to be used to [boot RancherOS from ISO]({{< baseurl >}}/os/v1.x/en/installation/workstation//boot-from-iso/) or [launch RancherOS using Docker Machine]({{< baseurl >}}/os/v1.x/en/installation/workstation//docker-machine). - -## Creating a GCE Image Archive - -Create a clone of the main [RancherOS repository](https://github.com/rancher/os) to your local machine with a `git clone`. - -``` -$ git clone https://github.com/rancher/os-packer.git -``` - -GCE supports KVM virtualization, and we use `packer` to build KVM images. Before building, you need to verify that the host can support KVM. 
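-A quick, generic way to check for KVM support on the build host (not specific to RancherOS):
-
-```
-# A non-zero count means the CPU exposes virtualization extensions
-$ grep -cE '(vmx|svm)' /proc/cpuinfo
-# The KVM device should exist once the kvm modules are loaded
-$ ls -l /dev/kvm
-```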
-If you want to build GCE image based on RancherOS v1.4.0, you can run this command: - -``` -RANCHEROS_VERSION=v1.4.0 make build-gce -``` - -## Custom Build Cases - -#### Reduce Memory Requirements - -With changes to the kernel and built Docker, RancherOS booting requires more memory. For details, please refer to the [memory requirements]({{}}/os/v1.x/en/#hardware-requirements). - -By customizing the ISO, you can reduce the memory usage on boot. The easiest way is to downgrade the built-in Docker version, because Docker takes up a lot of space. -This can effectively reduce the memory required to decompress the `initrd` on boot. Using docker 17.03 is a good choice: - -``` -# run make -$ USER_DOCKER_VERSION=17.03.2 make release -``` - -#### Building with a Different Console - -_Available as of v1.5.0_ - -When building RancherOS, you have the ability to automatically start in a supported console instead of booting into the default console and switching to your desired one. - -Here is an example of building RancherOS and having the `alpine` console enabled: - -``` -$ OS_CONSOLE=alpine make release -``` - -#### Building with Predefined Docker Images - -If you want to use a custom ISO file to address an offline scenario, you can use predefined images for `system-docker` and `user-docker`. - -RancherOS supports `APPEND_SYSTEM_IMAGES`. It can save images to the `initrd` file, and is loaded with `system-docker` when booting. - -You can build the ISO like this: - -``` -APPEND_SYSTEM_IMAGES="rancher/os-openvmtools:10.3.10-1" make release -``` - -RancherOS also supports `APPEND_USER_IMAGES`. It can save images to the `initrd` file, and is loaded with `user-docker` when booting. - -You can build the ISO like this: - -``` -APPEND_USER_IMAGES="alpine:3.9 ubuntu:bionic" make release -``` - -Please note that these will be packaged into the `initrd`, and the predefined images will affect the resource footprint at startup. diff --git a/content/os/v1.x/en/installation/running-rancheros/_index.md b/content/os/v1.x/en/installation/running-rancheros/_index.md deleted file mode 100644 index 17f070f363..0000000000 --- a/content/os/v1.x/en/installation/running-rancheros/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Running RancherOS -weight: 100 ---- - -RancherOS runs on virtualization platforms, cloud providers and bare metal servers. We also support running a local VM on your laptop. To start running RancherOS as quickly as possible, follow our [Quick Start Guide]({{}}/os/v1.x/en/quick-start-guide/). 
- -### Platforms - -#### Workstation - -[Docker Machine]({{}}/os/v1.x/en/installation/running-rancheros/workstation/docker-machine) - -[Boot from ISO]({{}}/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso) - - -#### Cloud - -[Amazon EC2]({{}}/os/v1.x/en/installation/running-rancheros/cloud/aws) - -[Google Compute Engine]({{}}/os/v1.x/en/installation/running-rancheros/cloud/gce) - -[DigitalOcean]({{}}/os/v1.x/en/installation/running-rancheros/cloud/do) - -[Azure]({{}}/os/v1.x/en/installation/running-rancheros/cloud/azure) - -[OpenStack]({{}}/os/v1.x/en/installation/running-rancheros/cloud/openstack) - -[VMware ESXi]({{}}/os/v1.x/en/installation/running-rancheros/cloud/vmware-esxi) - -[Aliyun]({{}}/os/v1.x/en/installation/running-rancheros/cloud/aliyun) - -#### Bare Metal & Virtual Servers - -[PXE]({{}}/os/v1.x/en/installation/running-rancheros/server/pxe) - -[Install to Hard Disk]({{}}/os/v1.x/en/installation/running-rancheros/server/install-to-disk) - -[Raspberry Pi]({{}}/os/v1.x/en/installation/running-rancheros/server/raspberry-pi) diff --git a/content/os/v1.x/en/installation/server/install-to-disk/_index.md b/content/os/v1.x/en/installation/server/install-to-disk/_index.md deleted file mode 100644 index 35f1010a6a..0000000000 --- a/content/os/v1.x/en/installation/server/install-to-disk/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Installing to Disk -weight: 111 -aliases: - - /os/v1.x/en/installation/running-rancheros/server/install-to-disk ---- - -RancherOS comes with a simple installer that will install RancherOS on a given target disk. To install RancherOS on a new disk, you can use the `ros install` command. Before installing, you'll need to have already [booted RancherOS from ISO]({{< baseurl >}}/os/v1.x/en/installation/workstation//boot-from-iso). Please be sure to pick the `rancheros.iso` from our release [page](https://github.com/rancher/os/releases). - -### Using `ros install` to Install RancherOS - -The `ros install` command orchestrates the installation from the `rancher/os` container. You will need to have already created a cloud-config file and found the target disk. - -#### Cloud-Config - -The easiest way to log in is to pass a `cloud-config.yml` file containing your public SSH keys. To learn more about what's supported in our cloud-config, please read our [documentation]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -The `ros install` command will process your `cloud-config.yml` file specified with the `-c` flag. This file will also be placed onto the disk and installed to `/var/lib/rancher/conf/`. It will be evaluated on every boot. - -Create a cloud-config file with a SSH key, this allows you to SSH into the box as the rancher user. The yml file would look like this: - -```yaml -#cloud-config -ssh_authorized_keys: - - ssh-rsa AAA... -``` - -
- -You can generate a new SSH key for `cloud-config.yml` file by following this [article](https://help.github.com/articles/generating-ssh-keys/). - -Copy the public SSH key into RancherOS before installing to disk. - -Now that our `cloud-config.yml` contains our public SSH key, we can move on to installing RancherOS to disk! - -``` -$ sudo ros install -c cloud-config.yml -d /dev/sda -INFO[0000] No install type specified...defaulting to generic -Installing from rancher/os:v0.5.0 -Continue [y/N]: -``` - -For the `cloud-config.yml` file, you can also specify a remote URL, but you need to make sure you can get it: - -``` -$ sudo ros install -c https://link/to/cloud-config.yml -``` - -You will be prompted to see if you want to continue. Type **y**. - -``` -Unable to find image 'rancher/os:v0.5.0' locally -v0.5.0: Pulling from rancher/os -... -... -... -Status: Downloaded newer image for rancher/os:v0.5.0 -+ DEVICE=/dev/sda -... -... -... -+ umount /mnt/new_img -Continue with reboot [y/N]: -``` - -After installing RancherOS to disk, you will no longer be automatically logged in as the `rancher` user. You'll need to have added in SSH keys within your [cloud-config file]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). - -#### Installing a Different Version - -By default, `ros install` uses the same installer image version as the ISO it is run from. The `-i` option specifies the particular image to install from. To keep the ISO as small as possible, the installer image is downloaded from DockerHub and used in System Docker. For example for RancherOS v0.5.0 the default installer image would be `rancher/os:v0.5.0`. - -You can use `ros os list` command to find the list of available RancherOS images/versions. - -``` -$ sudo ros os list -rancher/os:v0.4.0 remote -rancher/os:v0.4.1 remote -rancher/os:v0.4.2 remote -rancher/os:v0.4.3 remote -rancher/os:v0.4.4 remote -rancher/os:v0.4.5 remote -rancher/os:v0.5.0 remote -``` - -Alternatively, you can set the installer image to any image in System Docker to install RancherOS. This is particularly useful for machines that will not have direct access to the internet. - -#### Caching Images - -_Available as of v1.5.3_ - -Some configurations included in `cloud-config` require images to be downloaded from Docker to start. After installation, these images are downloaded automatically by RancherOS when booting. An example of these configurations are: - -- rancher.services_include -- rancher.console -- rancher.docker - -If you want to download and save these images to disk during installation, they will be cached and not need to be downloaded again upon each boot. You can cache these images by adding `-s` when using `ros install`: - -``` -$ ros install -d -c -s -``` - -### SSH into RancherOS - -After installing RancherOS, you can ssh into RancherOS using your private key and the **rancher** user. - -``` -$ ssh -i /path/to/private/key rancher@ -``` - -### Installing with no Internet Access - -If you'd like to install RancherOS onto a machine that has no internet access, it is assumed you either have your own private registry or other means of distributing docker images to System Docker of the machine. If you need help with creating a private registry, please refer to the [Docker documentation for private registries](https://docs.docker.com/registry/). - -In the installation command (i.e. 
`sudo ros install`), there is an option to pass in a specific image to install. As long as this image is available in System Docker, then RancherOS will use that image to install RancherOS. - -``` -$ sudo ros install -c cloud-config.yml -d /dev/sda -i -INFO[0000] No install type specified...defaulting to generic -Installing from -Continue [y/N]: -``` diff --git a/content/os/v1.x/en/installation/server/pxe/_index.md b/content/os/v1.x/en/installation/server/pxe/_index.md deleted file mode 100644 index c866a92c4e..0000000000 --- a/content/os/v1.x/en/installation/server/pxe/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: iPXE -weight: 112 -aliases: - - /os/v1.x/en/installation/running-rancheros/server/pxe ---- - -``` -#!ipxe -# Boot a persistent RancherOS to RAM - -# Location of Kernel/Initrd images -set base-url http://releases.rancher.com/os/latest - -kernel ${base-url}/vmlinuz rancher.state.dev=LABEL=RANCHER_STATE rancher.state.autoformat=[/dev/sda] rancher.state.wait rancher.cloud_init.datasources=[url:http://example.com/cloud-config] -initrd ${base-url}/initrd -boot -``` - -If you want to autoformat the disk when booting by iPXE, you should add the `rancher.state.autoformat` part to kernel cmdline. However, this does not install the bootloader to disk, so you cannot upgrade RancherOS. - -If you don't add `rancher.state.autoformat`, RancherOS will run completely in memory, you can execute `ros install` to install to disk. - -### Hiding sensitive kernel commandline parameters - -From RancherOS v0.9.0, secrets can be put on the `kernel` parameters line afer a `--` double dash, and they will be not be shown in any `/proc/cmdline`. These parameters -will be passed to the RancherOS init process and stored in the `root` accessible `/var/lib/rancher/conf/cloud-init.d/init.yml` file, and are available to the root user from the `ros config` commands. - -For example, the `kernel` line above could be written as: - -``` -kernel ${base-url}/vmlinuz rancher.state.dev=LABEL=RANCHER_STATE rancher.state.autoformat=[/dev/sda] -- rancher.cloud_init.datasources=[url:http://example.com/cloud-config] -``` - -The hidden part of the command line can be accessed with either `sudo ros config get rancher.environment.EXTRA_CMDLINE`, or by using a service file's environment array. - -An example service.yml file: - -``` -test: - image: alpine - command: echo "tell me a secret ${EXTRA_CMDLINE}" - labels: - io.rancher.os.scope: system - environment: - - EXTRA_CMDLINE -``` - -When this service is run, the `EXTRA_CMDLINE` will be set. - - -### cloud-init Datasources - -Valid cloud-init datasources for RancherOS. - -| type | default | -|---|---| -| ec2 | Default metadata address | -| digitalocean | Default metadata address | -| packet | Default metadata address | -| cloudstack | Default metadata address | -| aliyun | Default metadata address | -| gce | Default metadata address | -| file | Path | -| cmdline | Kernel command line: `cloud-config-url=http://link/user_data` | -| configdrive | /media/config-2 | -| url | URL address | -| vmware| Set `guestinfo` cloud-init or interface data as per [VMware ESXi]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi) | -| * | This will add ["configdrive", "vmware", "ec2", "digitalocean", "packet", "gce"] into the list of datasources to try | - -The vmware datasource was added as of v1.1. 
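Datasources are selected through the same `rancher.cloud_init.datasources` kernel parameter used in the iPXE script above. As a sketch (the specific combination and URL here are hypothetical), a boot line that tries the config drive first and then falls back to a remote cloud-config could look like:

```
kernel ${base-url}/vmlinuz rancher.state.dev=LABEL=RANCHER_STATE rancher.cloud_init.datasources=[configdrive,url:http://example.com/cloud-config]
```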
- -### Cloud-Config - -When booting via iPXE, RancherOS can be configured using a [cloud-config file]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). diff --git a/content/os/v1.x/en/installation/server/raspberry-pi/_index.md b/content/os/v1.x/en/installation/server/raspberry-pi/_index.md deleted file mode 100644 index a540afe8f8..0000000000 --- a/content/os/v1.x/en/installation/server/raspberry-pi/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Raspberry Pi -weight: 113 -aliases: - - /os/v1.x/en/installation/running-rancheros/server/raspberry-pi ---- - -As of v0.5.0, RancherOS releases include a Raspberry Pi image that can be found on our [releases page](https://github.com/rancher/os/releases). The official Raspberry Pi documentation contains instructions on how to [install operating system images](https://www.raspberrypi.org/documentation/installation/installing-images/). - -When installing, there is no ability to pass in a [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). You will need to boot up, change the configuration and then reboot to apply those changes. - -Currently, only Raspberry Pi 3 is tested and known to work. - -> **Note:** It is not necessary to run `ros install` after installing RancherOS to an SD card. - -### Using the entire SD Card - -RancherOS does not currently expand the root partition to fill the remainder of the SD card automatically. Instead, the following workaround can be used to store Docker containers on a larger partition that fills the remainder. - -1. `sudo fdisk /dev/mmcblk0` -2. Create a `n`ew partition -3. Press `[Enter]` four (4x) times to accept the defaults -4. Then `w`rite the table and exit -5. `sudo reboot` to reboot and reload the new partition table -6. `sudo mkdir /mnt/docker` to create the directory to be used as the new Docker root -7. `sudo ros config set rancher.docker.extra_args [-g,/mnt/docker]` to configure Docker to use the new root -8. `sudo mkfs.ext4 /dev/mmcblk0p3` to format the disk -9. `sudo ros config set mounts "[['/dev/mmcblk0p3','/mnt/docker','ext4','']]"` to preserve this mount after reboots -10. `sudo mount /dev/mmcblk0p3 /mnt/docker` to mount the Docker root -11. `sudo system-docker restart docker` to restart Docker using the new root -If this is not a new installation, you'll have to copy over your existing Docker root (`/var/lib/docker`) to the new root (`/mnt/docker`). -1. `sudo cp -R /var/lib/docker/* /mnt/docker` to recursively copy all files -2. `sudo system-docker restart docker` to restart Docker using the new root - -### Using Wi-Fi - -_Available as of v1.5.2_ - -Here are steps about how to enable Wi-Fi on a Raspberry Pi: - -``` -modprobe brcmfmac -wpa_passphrase > /etc/wpa_supplicant.conf -wpa_supplicant -iwlan0 -B -c /etc/wpa_supplicant.conf -# wait a few seconds, then -dhcpcd -MA4 wlan0 -``` - -You can also use cloud-config to enable Wi-Fi: - -``` -#cloud-config -rancher: - network: - interfaces: - wlan0: - wifi_network: network1 - wifi_networks: - network1: - ssid: "Your wifi ssid" - psk: "Your wifi password" - scan_ssid: 1 -``` - -Raspberry Pi will automatically drop Wi-Fi connection after a while, this is due to power management. 
To fix this problem, you can try this: - -``` -iwconfig wlan0 power off -``` diff --git a/content/os/v1.x/en/installation/workstation/boot-from-iso/_index.md b/content/os/v1.x/en/installation/workstation/boot-from-iso/_index.md deleted file mode 100644 index 28f3a8a7fc..0000000000 --- a/content/os/v1.x/en/installation/workstation/boot-from-iso/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Booting from ISO -weight: 102 -aliases: - - /os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso ---- - -The RancherOS ISO file can be used to create a fresh RancherOS install on KVM, VMware, VirtualBox, Hyper-V, Proxmox VE, or bare metal servers. You can download the `rancheros.iso` file from our [releases page](https://github.com/rancher/os/releases/). - -Some hypervisors may require a built-in agent to communicate with the guest, for this, RancherOS precompiles some ISO files. - -Hypervisor | ISO --------- | ---------------- -VMware | [rancheros-vmware.iso](https://releases.rancher.com/os/latest/vmware/rancheros.iso) -Hyper-V | [rancheros-hyperv.iso](https://releases.rancher.com/os/latest/hyperv/rancheros.iso) -Proxmox VE | [rancheros-proxmoxve.iso](https://releases.rancher.com/os/latest/proxmoxve/rancheros.iso) - -You must boot with enough memory which you can refer to [here]({{}}/os/v1.x/en/overview/#hardware-requirements). If you boot with the ISO, you will automatically be logged in as the `rancher` user. Only the ISO is set to use autologin by default. If you run from a cloud or install to disk, SSH keys or a password of your choice is expected to be used. - -### Install to Disk - -After you boot RancherOS from ISO, you can follow the instructions [here]({{< baseurl >}}/os/v1.x/en/installation/server/install-to-disk/) to install RancherOS to a hard disk. diff --git a/content/os/v1.x/en/installation/workstation/docker-machine/_index.md b/content/os/v1.x/en/installation/workstation/docker-machine/_index.md deleted file mode 100644 index 1595b66838..0000000000 --- a/content/os/v1.x/en/installation/workstation/docker-machine/_index.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: Using Docker Machine -weight: 101 -aliases: - - /os/v1.x/en/installation/running-rancheros/workstation/docker-machine ---- - -Before we get started, you'll need to make sure that you have docker machine installed. Download it directly from the docker machine [releases](https://github.com/docker/machine/releases). -You also need to know the [memory requirements]({{}}/os/v1.x/en/#hardware-requirements). - -> **Note:** If you create a RancherOS instance using Docker Machine, you will not be able to upgrade your version of RancherOS. - -### Downloading RancherOS - -Get the latest ISO artifact from the RancherOS [releases](https://github.com/rancher/os). 
- -Machine Driver | Recommended RancherOS version | ISO File --------------- | ----------------------------- | ------------------------------------------------------------- -VirtualBox | >=v1.0.0 | [rancheros.iso](https://releases.rancher.com/os/latest/rancheros.iso) -VMWare VSphere | >=v1.4.0 | [rancheros-autoformat.iso](https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso) -VMWare Fusion | >=v1.4.0 | [rancheros-autoformat.iso](https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso) -Hyper-V | >=v1.5.0 | [rancheros.iso](https://releases.rancher.com/os/latest/hyperv/rancheros.iso) -Proxmox VE | >=v1.5.1 | [rancheros-autoformat.iso](https://releases.rancher.com/os/latest/proxmoxve/rancheros-autoformat.iso) - -### Using Docker Machine - -You can use Docker Machine to launch VMs for various providers. Currently VirtualBox and VMWare(VMWare VSphere, VMWare Fusion) and AWS are supported. - -#### Using Docker Machine with VirtualBox - -Before moving forward, you'll need to have VirtualBox installed. Download it directly from [VirtualBox](https://www.virtualbox.org/wiki/Downloads). Once you have VirtualBox and Docker Machine installed, it's just one command to get RancherOS running. - -Here is an example about using the RancherOS latest link: - -``` -$ docker-machine create -d virtualbox \ - --virtualbox-boot2docker-url https://releases.rancher.com/os/latest/rancheros.iso \ - --virtualbox-memory \ - -``` - -> **Note:** Instead of downloading the ISO, you can directly use the URL for the `rancheros.iso`. - -That's it! You should now have a RancherOS host running on VirtualBox. You can verify that you have a VirtualBox VM running on your host. - -> **Note:** After the machine is created, Docker Machine may display some errors regarding creation, but if the VirtualBox VM is running, you should be able to [log in](#logging-into-rancheros). - -``` -$ VBoxManage list runningvms | grep -``` - -This command will print out the newly created machine. If not, something went wrong with the provisioning step. - -#### Using Docker Machine with VMWare VSphere - -_Available as of v1.4_ - -Before moving forward, you’ll need to have VMWare VSphere installed. Once you have VMWare VSphere and Docker Machine installed, it’s just one command to get RancherOS running. - -Here is an example about using the RancherOS latest link: - -``` -$ docker-machine create -d vmwarevsphere \ - --vmwarevsphere-username \ - --vmwarevsphere-password \ - --vmwarevsphere-memory-size \ - --vmwarevsphere-boot2docker-url https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso \ - --vmwarevsphere-vcenter \ - --vmwarevsphere-vcenter-port \ - --vmwarevsphere-disk-size \ - -``` - -That’s it! You should now have a RancherOS host running on VMWare VSphere. You can verify that you have a VMWare(ESXi) VM running on your host. - -#### Using Docker Machine with VMWare Fusion - -_Available as of v1.4_ - -Before moving forward, you’ll need to have VMWare Fusion installed. Once you have VMWare Fusion and Docker Machine installed, it’s just one command to get RancherOS running. 
- -Here is an example about using the RancherOS latest link: - -``` -$ docker-machine create -d vmwarefusion \ - --vmwarefusion-no-share \ - --vmwarefusion-memory-size \ - --vmwarefusion-boot2docker-url https://releases.rancher.com/os/latest/vmware/rancheros-autoformat.iso \ - -``` - -That’s it! You should now have a RancherOS host running on VMWare Fusion. You can verify that you have a VMWare Fusion VM running on your host. - -#### Using Docker Machine with Hyper-V - -_Available as of v1.5_ - -You should refer to the documentation of [Hyper-V driver](https://docs.docker.com/machine/drivers/hyper-v/), here is an example of using the latest RancherOS URL. We recommend using a specific version so you know which version of RancherOS that you are installing. - -``` -$ docker-machine.exe create -d hyperv \ - --hyperv-memory 2048 \ - --hyperv-boot2docker-url https://releases.rancher.com/os/latest/hyperv/rancheros.iso - --hyperv-virtual-switch \ - -``` -#### Using Docker Machine with Proxmox VE - -_Available as of v1.5.1_ - -There is currently no official Proxmox VE driver, but there is a [choice](https://github.com/lnxbil/docker-machine-driver-proxmox-ve) that you can refer to. - -### Logging into RancherOS - -Logging into RancherOS follows the standard Docker Machine commands. To login into your newly provisioned RancherOS VM. - -``` -$ docker-machine ssh -``` - -You'll be logged into RancherOS and can start exploring the OS, This will log you into the RancherOS VM. You'll then be able to explore the OS by [adding system services]({{< baseurl >}}/os/v1.x/en/system-services/), [customizing the configuration]({{< baseurl >}}/os/v1.x/en/configuration/), and launching containers. - -If you want to exit out of RancherOS, you can exit by pressing `Ctrl+D`. - -### Docker Machine Benefits - -With Docker Machine, you can point the docker client on your host to the docker daemon running inside of the VM. This allows you to run your docker commands as if you had installed docker on your host. - -To point your docker client to the docker daemon inside the VM, use the following command: - -``` -$ eval $(docker-machine env ) -``` - -After setting this up, you can run any docker command in your host, and it will execute the command in your RancherOS VM. - -``` -$ docker run -p 80:80 -p 443:443 -d nginx -``` - -In your VM, a nginx container will start on your VM. To access the container, you will need the IP address of the VM. - -``` -$ docker-machine ip -``` - -Once you obtain the IP address, paste it in a browser and a _Welcome Page_ for nginx will be displayed. diff --git a/content/os/v1.x/en/networking/dns/_index.md b/content/os/v1.x/en/networking/dns/_index.md deleted file mode 100644 index 725a4f109f..0000000000 --- a/content/os/v1.x/en/networking/dns/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Configuring DNS -weight: 171 -aliases: - - /os/v1.x/en/installation/networking/dns ---- - -If you wanted to configure the DNS through the cloud config file, you'll need to place DNS configurations within the `rancher` key. - -```yaml -#cloud-config - -#Remember, any changes for rancher will be within the rancher key -rancher: - network: - dns: - search: - - mydomain.com - - example.com -``` - -Using `ros config`, you can set the `nameservers`, and `search`, which directly map to the fields of the same name in `/etc/resolv.conf`. 
- -``` -$ sudo ros config set rancher.network.dns.search "['mydomain.com','example.com']" -$ sudo ros config get rancher.network.dns.search -- mydomain.com -- example.com -``` diff --git a/content/os/v1.x/en/networking/interfaces/_index.md b/content/os/v1.x/en/networking/interfaces/_index.md deleted file mode 100644 index cdbc82eaa7..0000000000 --- a/content/os/v1.x/en/networking/interfaces/_index.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Configuring Network Interfaces -weight: 170 -aliases: - - /os/v1.x/en/installation/networking/interfaces ---- - -Using `ros config`, you can configure specific interfaces. Wildcard globbing is supported so `eth*` will match `eth1` and `eth2`. The available options you can configure are `address`, `gateway`, `mtu`, and `dhcp`. - -``` -$ sudo ros config set rancher.network.interfaces.eth1.address 172.68.1.100/24 -$ sudo ros config set rancher.network.interfaces.eth1.gateway 172.68.1.1 -$ sudo ros config set rancher.network.interfaces.eth1.mtu 1500 -$ sudo ros config set rancher.network.interfaces.eth1.dhcp false -``` - -If you wanted to configure the interfaces through the cloud config file, you'll need to place interface configurations within the `rancher` key. - -```yaml -#cloud-config -rancher: - network: - interfaces: - eth1: - address: 172.68.1.100/24 - gateway: 172.68.1.1 - mtu: 1500 - dhcp: false -``` - -> **Note:** The `address` item should be the CIDR format. - -### Multiple NICs - -If you want to configure one of multiple network interfaces, you can specify the MAC address of the interface you want to configure. - -Using `ros config`, you can specify the MAC address of the NIC you want to configure as follows: - -``` -$ sudo ros config set rancher.network.interfaces.”mac=ea:34:71:66:90:12:01”.dhcp true -``` - -Alternatively, you can place the MAC address selection in your cloud config file as follows: - -```yaml -#cloud-config -rancher: - network: - interfaces: - "mac=ea:34:71:66:90:12:01": - dhcp: true -``` - -### NIC bonding - -You can aggregate several network links into one virtual link for redundancy and increased throughput. For example: - -```yaml -#cloud-config -rancher: - network: - interfaces: - bond0: - addresses: - - 192.168.101.33/31 - - 10.88.23.129/31 - gateway: 192.168.101.32 - bond_opts: - downdelay: "200" - lacp_rate: "1" - miimon: "100" - mode: "4" - updelay: "200" - xmit_hash_policy: layer3+4 - post_up: - - ip route add 10.0.0.0/8 via 10.88.23.128 - mac=0c:c4:d7:b2:14:d2: - bond: bond0 - mac=0c:c4:d7:b2:14:d3: - bond: bond0 -``` - -In this example two physical NICs (with MACs `0c:c4:d7:b2:14:d2` and `0c:c4:d7:b2:14:d3`) are aggregated into a virtual one `bond0`. - -During the bootup process, RancherOS runs cloud-init. It automatically detects the data sources of cloud-init, but sometimes a data source requires a network connection. By default, in cloud-init, we open `rancher.network.interfaces.eth*.dhcp=true`, which may affect the bonding NIC. If you do not require the network connection for your data-source, use `rancher.network.interfaces.eth*.dhcp=false` in the kernel cmdline to disable DHCP for all NICs. - -### VLANS - -In this example, you can create an interface `eth0.100` which is tied to VLAN 100 and an interface `foobar` that will be tied to VLAN 200. - -``` -#cloud-config -rancher: - network: - interfaces: - eth0: - vlans: 100,200:foobar -``` - -### Bridging - -In this example, you can create a bridge interface. 
- -``` -#cloud-config -rancher: - network: - interfaces: - br0: - bridge: true - dhcp: true - eth0: - bridge: br0 -``` - -### Run custom network configuration commands - -_Available as of v1.1_ - -You can configure `pre` and `post` network configuration commands to run in the `network` service container by adding `pre_cmds` and `post_cmds` array keys to `rancher.network`, or `pre_up` and`post_up` keys for specific `rancher.network.interfaces`. - -For example: - -``` -#cloud-config -write_files: - - container: network - path: /var/lib/iptables/rules.sh - permissions: "0755" - owner: root:root - content: | - #!/bin/bash - set -ex - echo $@ >> /var/log/net.log - # the last line of the file needs to be a blank line or a comment -rancher: - network: - dns: - nameservers: - - 8.8.4.4 - - 4.2.2.3 - pre_cmds: - - /var/lib/iptables/rules.sh pre_cmds - post_cmds: - - /var/lib/iptables/rules.sh post_cmds - interfaces: - lo: - pre_up: - - /var/lib/iptables/rules.sh pre_up lo - post_up: - - /var/lib/iptables/rules.sh post_up lo - eth0: - pre_up: - - /var/lib/iptables/rules.sh pre_up eth0 - post_up: - - /var/lib/iptables/rules.sh post_up eth0 - eth1: - dhcp: true - pre_up: - - /var/lib/iptables/rules.sh pre_up eth1 - post_up: - - /var/lib/iptables/rules.sh post_up eth1 - eth2: - address: 192.168.3.13/16 - mtu: 1450 - pre_up: - - /var/lib/iptables/rules.sh pre_up eth2 - post_up: - - /var/lib/iptables/rules.sh post_up eth2 -``` - -### WiFi - -_Available as of v1.5_ - -In order to enable WiFi access, update the `cloud-config` with the WiFi network information. You can use `DHCP` or `STATIC` mode. - -#### Example of a wireless adapter using DHCP - -```yaml -#cloud-config -rancher: - network: - interfaces: - wlan0: - wifi_network: network1 - wifi_networks: - network1: - ssid: "Your wifi ssid" - psk: "Your wifi password" - scan_ssid: 1 -``` - -#### Example of a wireless adapter using STATIC - - -```yaml -rancher: - network: - dns: - nameservers: - - 8.8.8.8 - - 8.8.4.4 - interfaces: - wlan0: - wifi_network: network1 - wifi_networks: - network1: - ssid: "Your wifi ssid" - psk: "Your wifi password" - scan_ssid: 1 - address: 192.168.1.78/24 - gateway: 192.168.1.1 -``` - -#### Example using two wireless adapters with DHCP - -```yaml -rancher: - network: - interfaces: - wlan0: - wifi_network: network1 - wlan1: - wifi_network: network2 - wifi_networks: - network1: - ssid: "Your wifi ssid" - psk: "Your wifi password" - scan_ssid: 1 - network2: - ssid: "Your wifi ssid" - psk: "Your wifi password" - scan_ssid: 1 -``` - -When adding in WiFi access, you do not need a system reboot, you only need to restart the `network` service in System Docker. - -``` -$ sudo system-docker restart network -``` - -> **Note:** For Intel wireless adapters, there are some built-in firmware and modules, which prevents requiring to install any new modules or firmware. For other adapters, you may need to install additional os kernel-extras. - -### 4G-LTE - -_Available as of v1.5_ - -In order to support 4G-LTE, 4G-LTE module will need to be connected to the motherboard and to get a good signal, an external antenna will need to be added. You can assemble such a device, which supports USB interface and SIM cards slot: - -![](https://ws1.sinaimg.cn/bmiddle/006tNc79ly1fzcuvhu6zpj30k80qwag1.jpg) - -In order to use RancherOS, you will need to use the ISO built for 4G-LTE support. This ISO has a built-in `modem-manager` service and is available with each release. 
- -After booting the ISO, there will be a 4G NIC, such as `wwan0`. Use the following `cloud-config` to set the APN parameter. - -```yaml -rancher: - network: - modem_networks: - wwan0: - apn: xxx -``` - -After any configuration changes, restart the `modem-manager` service to apply these changes. - -``` -$ sudo system-docker restart modem-manager -``` - -> **Note:** Currently, RancherOS has some built-in rules in `udev` rules to allow RancherOS to recognize specific 4G devices, but there are additional vendors that may be missing. If you need to add these in, please file an issue. diff --git a/content/os/v1.x/en/networking/proxy-settings/_index.md b/content/os/v1.x/en/networking/proxy-settings/_index.md deleted file mode 100644 index 09698194c9..0000000000 --- a/content/os/v1.x/en/networking/proxy-settings/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Configuring Proxy Settings -weight: 172 -aliases: - - /os/v1.x/en/installation/networking/proxy-settings ---- - -HTTP proxy settings can be set directly under the `network` key. This will automatically configure proxy settings for both Docker and System Docker. - -```yaml -#cloud-config -rancher: - network: - http_proxy: https://myproxy.example.com - https_proxy: https://myproxy.example.com - no_proxy: localhost,127.0.0.1 -``` - -
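The same keys can also be set on a running system with `ros config`, mirroring the cloud-config above (a sketch; the proxy URL is a placeholder):

```
$ sudo ros config set rancher.network.http_proxy https://myproxy.example.com
$ sudo ros config set rancher.network.https_proxy https://myproxy.example.com
$ sudo ros config set rancher.network.no_proxy "localhost,127.0.0.1"
```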
- -> **Note:** System Docker proxy settings will not be applied until after a reboot. - -To add the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables to a system service, specify each under the `environment` key for the service. - -```yaml -#cloud-config -rancher: - services: - myservice: - ... - environment: - - HTTP_PROXY - - HTTPS_PROXY - - NO_PROXY -``` diff --git a/content/os/v1.x/en/overview/_index.md b/content/os/v1.x/en/overview/_index.md deleted file mode 100644 index a2936d617c..0000000000 --- a/content/os/v1.x/en/overview/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Overview of RancherOS -shortTitle: RancherOS -description: RancherOS is a simplified Linux distribution built from containers, for containers. These documents describe how to install and use RancherOS. -weight: 1 ---- - -RancherOS is the smallest, easiest way to run Docker in production. Every process in RancherOS is a container managed by Docker. This includes system services such as `udev` and `syslog`. Because it only includes the services necessary to run Docker, RancherOS is significantly smaller than most traditional operating systems. By removing unnecessary libraries and services, requirements for security patches and other maintenance are also reduced. This is possible because, with Docker, users typically package all necessary libraries into their containers. - -Another way in which RancherOS is designed specifically for running Docker is that it always runs the latest version of Docker. This allows users to take advantage of the latest Docker capabilities and bug fixes. - -Like other minimalist Linux distributions, RancherOS boots incredibly quickly. Starting Docker containers is nearly instant, similar to starting any other process. This speed is ideal for organizations adopting microservices and autoscaling. - -Docker is an open-source platform designed for developers, system admins, and DevOps. It is used to build, ship, and run containers, using a simple and powerful command line interface (CLI). To get started with Docker, please visit the [Docker user guide](https://docs.docker.com/config/daemon/). - -### Hardware Requirements - -* Memory Requirements - -Platform | RAM requirement(>=v1.5.x) | RAM requirement(v1.4.x) --------- | ------------------------ | --------------------------- -Baremetal | 1GB | 1280MB -VirtualBox | 1GB | 1280MB -VMWare | 1GB | 1280MB (rancheros.iso)
2048MB (rancheros-vmware.iso) -GCE | 1GB | 1280MB -AWS | 1GB | 1.7GB - -You can adjust memory requirements by custom building RancherOS, please refer to [reduce-memory-requirements]({{}}/os/v1.x/en/installation/custom-builds/custom-rancheros-iso/#reduce-memory-requirements) - -### How RancherOS Works - -Everything in RancherOS is a Docker container. We accomplish this by launching two instances of Docker. One is what we call **System Docker** and is the first process on the system. All other system services, like `ntpd`, `syslog`, and `console`, are running in Docker containers. System Docker replaces traditional init systems like `systemd` and is used to launch [additional system services]({{< baseurl >}}/os/v1.x/en/system-services/). - -System Docker runs a special container called **Docker**, which is another Docker daemon responsible for managing all of the user’s containers. Any containers that you launch as a user from the console will run inside this Docker. This creates isolation from the System Docker containers and ensures that normal user commands don’t impact system services. - - We created this separation not only for the security benefits, but also to make sure that commands like `docker rm -f $(docker ps -qa)` don't delete the entire OS. - -{{< img "/img/os/rancheroshowitworks.png" "How it works">}} - -### Running RancherOS - -To get started with RancherOS, head over to our [Quick Start Guide]({{}}/os/v1.x/en/quick-start-guide/). - -### Latest Release - -Please check our repository for the latest release in our [README](https://github.com/rancher/os/blob/master/README.md). - -
-
diff --git a/content/os/v1.x/en/quick-start-guide/_index.md b/content/os/v1.x/en/quick-start-guide/_index.md deleted file mode 100644 index cbd93c68ef..0000000000 --- a/content/os/v1.x/en/quick-start-guide/_index.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Quick Start -weight: 1 ---- - -If you have a specific RanchersOS machine requirements, please check out our [guides on running RancherOS]({{< baseurl >}}/os/v1.x/en/installation/). With the rest of this guide, we'll start up a RancherOS using [Docker machine]({{< baseurl >}}/os/v1.x/en/installation/workstation//docker-machine/) and show you some of what RancherOS can do. - -### Launching RancherOS using Docker Machine - -Before moving forward, you'll need to have [Docker Machine](https://docs.docker.com/machine/) and [VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Once you have VirtualBox and Docker Machine installed, it's just one command to get RancherOS running. - -``` -$ docker-machine create -d virtualbox \ - --virtualbox-boot2docker-url https://releases.rancher.com/os/latest/rancheros.iso \ - --virtualbox-memory 2048 \ - -``` - -That's it! You're up and running a RancherOS instance. - -To log into the instance, just use the `docker-machine` command. - -``` -$ docker-machine ssh -``` - -### A First Look At RancherOS - -There are two Docker daemons running in RancherOS. The first is called **System Docker**, which is where RancherOS runs system services like ntpd and syslog. You can use the `system-docker` command to control the **System Docker** daemon. - -The other Docker daemon running on the system is **Docker**, which can be accessed by using the normal `docker` command. - -When you first launch RancherOS, there are no containers running in the Docker daemon. However, if you run the same command against the System Docker, you’ll see a number of system services that are shipped with RancherOS. - -> **Note:** `system-docker` can only be used by root, so it is necessary to use the `sudo` command whenever you want to interact with System Docker. - -``` -$ sudo system-docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -6f56057cf5ba rancher/os-base:v0.5.0 "/usr/sbin/entry.sh /" 16 seconds ago Up 15 seconds docker -bd5376830237 rancher/os-console:v0.5.0 "/usr/sbin/entry.sh /" 16 seconds ago Up 15 seconds console -ede8ce39fff5 rancher/os-base:v0.5.0 "/usr/sbin/entry.sh n" 16 seconds ago Up 15 seconds network -9e5d18bca391 rancher/os-base:v0.5.0 "/usr/sbin/entry.sh n" 17 seconds ago Up 16 seconds ntp -393b9fb7e30a rancher/os-udev:v0.5.0 "/usr/sbin/entry.sh /" 18 seconds ago Up 16 seconds udev -dc2cafca3c69 rancher/os-syslog:v0.5.0 "/usr/sbin/entry.sh /" 18 seconds ago Up 17 seconds syslog -439d5535fbfa rancher/os-base:v0.5.0 "/usr/sbin/entry.sh /" 18 seconds ago Up 17 seconds acpid -``` - -Some containers are run at boot time, and others, such as the `console`, `docker`, etc. containers are always running. - -## Using RancherOS - -### Deploying a Docker Container - -Let's try to deploy a normal Docker container on the Docker daemon. The RancherOS Docker daemon is identical to any other Docker environment, so all normal Docker commands work. 
- -``` -$ docker run -d nginx -``` - -You can see that the nginx container is up and running: - -``` -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -e99c2c4b8b30 nginx "nginx -g 'daemon off" 12 seconds ago Up 11 seconds 80/tcp, 443/tcp drunk_ptolemy -``` - -### Deploying A System Service Container - -The following is a simple Docker container to set up Linux-dash, which is a minimal low-overhead web dashboard for monitoring Linux servers. The Dockerfile will be like this: - -``` -FROM hwestphal/nodebox -MAINTAINER hussein.galal.ahmed.11@gmail.com - -RUN opkg-install unzip -RUN curl -k -L -o master.zip https://github.com/afaqurk/linux-dash/archive/master.zip -RUN unzip master.zip -WORKDIR linux-dash-master -RUN npm install - -ENTRYPOINT ["node","server"] -``` - -Using the `hwestphal/nodebox` image, which uses a Busybox image and installs `node.js` and `npm`. We downloaded the source code of Linux-dash, and then ran the server. Linux-dash will run on port 80 by default. - -To run this container in System Docker use the following command: - -``` -$ sudo system-docker run -d --net=host --name busydash husseingalal/busydash -``` -In the command, we used `--net=host` to tell System Docker not to containerize the container's networking, and use the host’s networking instead. After running the container, you can see the monitoring server by accessing `http://`. - -{{< img "/img/os/Rancher_busydash.png" "System Docker Container">}} - -To make the container survive during the reboots, you can create the `/opt/rancher/bin/start.sh` script, and add the Docker start line to launch the Docker at each startup. - -``` -$ sudo mkdir -p /opt/rancher/bin -$ echo "sudo system-docker start busydash" | sudo tee -a /opt/rancher/bin/start.sh -$ sudo chmod 755 /opt/rancher/bin/start.sh -``` - -### Using ROS - -Another useful command that can be used with RancherOS is `ros` which can be used to control and configure the system. - -``` -$ sudo ros -v -ros version 0.0.1 -``` - -RancherOS state is controlled by a cloud config file. `ros` is used to edit the configuration of the system, to see for example the dns configuration of the system: - -``` -$ sudo ros config get rancher.network.dns.nameservers -- 8.8.8.8 -- 8.8.4.4 -``` - - -When using the native Busybox console, any changes to the console will be lost after reboots, only changes to `/home` or `/opt` will be persistent. You can use the `ros console switch` command to switch to a [persistent console]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and replace the native Busybox console. For example, to switch to the Ubuntu console: - -``` -$ sudo ros console switch ubuntu -``` - -### Conclusion - -RancherOS is a simple Linux distribution ideal for running Docker. By embracing containerization of system services and leveraging Docker for management, RancherOS hopes to provide a very reliable, and easy to manage OS for running containers. diff --git a/content/os/v1.x/en/storage/additional-mounts/_index.md b/content/os/v1.x/en/storage/additional-mounts/_index.md deleted file mode 100644 index cdbd75fa63..0000000000 --- a/content/os/v1.x/en/storage/additional-mounts/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Additional Mounts -weight: 161 -aliases: - - /os/v1.x/en/installation/storage/additional-mounts ---- - -Additional mounts can be specified as part of your [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config). 
These mounts are applied within the console container. Here's a simple example that mounts `/dev/vdb` to `/mnt/s`.

```yaml
#cloud-config
mounts:
- ["/dev/vdb", "/mnt/s", "ext4", ""]
```

**Important**: The fourth parameter (the mount options) is mandatory and cannot be omitted; leaving it out will crash the server. It also cannot yet be set to `defaults`.

If you are configuring this with the `ros` CLI rather than cloud-config, the equivalent command is:

```
ros config set mounts '[["/dev/vdb","/mnt/s","ext4",""]]'
```

**Hint**: You need to pre-format the disk yourself; RancherOS will not do this for you. The mount will silently fail until the disk has been formatted, e.g. using:

```
mkfs.ext4 /dev/vdb
```
- -The four arguments for each mount are the same as those given for [cloud-init](https://cloudinit.readthedocs.io/en/latest/topics/examples.html#adjust-mount-points-mounted). Only the first four arguments are currently supported. The `mount_default_fields` key is not yet implemented. - -RancherOS uses the mount syscall rather than the `mount` command behind the scenes. This means that `auto` cannot be used as the filesystem type (third argument) and `defaults` cannot be used for the options (forth argument). - -With rancher 1.1.1+ you do no longer need to create the mount-point folder, it will be created automatically. - -### Shared Mounts - -By default, `/media` and `/mnt` are mounted as shared in the console container. This means that mounts within these directories will propagate to the host as well as other system services that mount these folders as shared. - -See [here](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) for a more detailed overview of shared mounts and their properties. diff --git a/content/os/v1.x/en/storage/state-partition/_index.md b/content/os/v1.x/en/storage/state-partition/_index.md deleted file mode 100644 index f5ae065cd1..0000000000 --- a/content/os/v1.x/en/storage/state-partition/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Persistent State Partition -weight: 160 -aliases: - - /os/v1.x/en/installation/storage/state-partition ---- - -RancherOS will store its state in a single partition specified by the `dev` field. The field can be a device such as `/dev/sda1` or a logical name such `LABEL=state` or `UUID=123124`. The default value is `LABEL=RANCHER_STATE`. The file system type of that partition can be set to `auto` or a specific file system type such as `ext4`. - -```yaml -#cloud-config -rancher: - state: - fstype: auto - dev: LABEL=RANCHER_STATE -``` - -For other labels such as `RANCHER_BOOT` and `RANCHER_OEM` and `RANCHER_SWAP`, please refer to [Custom partition layout]({{}}/os/v1.x/en/about/custom-partition-layout/). - -### Autoformat - -You can specify a list of devices to check to format on boot. If the state partition is already found, RancherOS will not try to auto format a partition. By default, auto-formatting is off. - -RancherOS will autoformat the partition to `ext4` (_not_ what is set in `fstype`) if the device specified in `autoformat`: - -* Contains a boot2docker magic string -* Starts with 1 megabyte of zeros and `rancher.state.formatzero` is true - - -```yaml -#cloud-config -rancher: - state: - autoformat: - - /dev/sda - - /dev/vda -``` diff --git a/content/os/v1.x/en/storage/using-zfs/_index.md b/content/os/v1.x/en/storage/using-zfs/_index.md deleted file mode 100644 index 1247accff8..0000000000 --- a/content/os/v1.x/en/storage/using-zfs/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Using ZFS -weight: 162 -aliases: - - /os/v1.x/en/installation/storage/using-zfs ---- - -#### Installing the ZFS service - -The `zfs` service will install the kernel-headers for your kernel (if you build your own kernel, you'll need to replicate this service), and then download the [ZFS on Linux](https://zfsonlinux.org/) source, and build and install it. Then it will build a `zfs-tools` image that will be used to give you access to the zfs tools. 
- -The only restriction is that you must mount your zpool into `/mnt`, as this is the only shared mount directory that will be accessible throughout the system-docker managed containers (including the console). - - -``` -$ sudo ros service enable zfs -$ sudo ros service up zfs -# you can follow the progress of the build by running the following command in another ssh session: -$ sudo ros service logs --follow zfs -# wait until the build is finished. -$ lsmod | grep zfs -``` - -> *Note:* if you switch consoles, you may need to re-run `sudo ros service up zfs`. - -#### Creating ZFS pools - -After it's installed, it should be ready to use. Make a zpool named `zpool1` using a device that you haven't yet partitioned (you can use `sudo fdisk -l` to list all the disks and their partitions). - -> *Note:* You need to mount the zpool in `/mnt` to make it available to your host and in containers. - - -``` -$ sudo zpool list -$ sudo zpool create zpool1 -m /mnt/zpool1 /dev/ -$ sudo zpool list -$ sudo zfs list -$ sudo cp /etc/* /mnt/zpool1 -$ docker run --rm -it -v /mnt/zpool1/:/data alpine ls -la /data -``` - -
- -To experiment with ZFS, you can create zpool backed by just ordinary files, not necessarily real block devices. In fact, you can mix storage devices in your ZFS pools; it's perfectly fine to create a zpool backed by real devices **and** ordinary files. - -#### Using the ZFS debugger utility - -The `zdb` command may be used to display information about ZFS pools useful to diagnose failures and gather statistics. By default the utility tries to load pool configurations from `/etc/zfs/zpool.cache`. Since the RancherOS ZFS service does not make use of the ZFS cache file and instead detects pools by inspecting devices, the `zdb` utility has to be invoked with the `-e` flag. - -E.g. to show the configuration for the pool `zpool1` you may run the following command: - -> $ sudo zdb -e -C zpool1 - -## ZFS storage for Docker on RancherOS - -First, you need to stop the`docker` system service and wipe out `/var/lib/docker` folder: - -``` -$ sudo system-docker stop docker -$ sudo rm -rf /var/lib/docker/* -``` - -To enable ZFS as the storage driver for Docker, you'll need to create a ZFS filesystem for Docker and make sure it's mounted. - -``` -$ sudo zfs create zpool1/docker -$ sudo zfs list -o name,mountpoint,mounted -``` - -At this point you'll have a ZFS filesystem created and mounted at `/zpool1/docker`. According to [Docker ZFS storage docs](https://docs.docker.com/engine/userguide/storagedriver/zfs-driver/), if the Docker root dir is a ZFS filesystem, the Docker daemon will automatically use `zfs` as its storage driver. - -Now you'll need to remove `-s overlay` (or any other storage driver) from the Docker daemon args to allow docker to automatically detect `zfs`. - -``` -$ sudo ros config set rancher.docker.storage_driver 'zfs' -$ sudo ros config set rancher.docker.graph /mnt/zpool1/docker -# Now that you've changed the Docker daemon args, you'll need to start Docker -$ sudo system-docker start docker -``` - -After customizing the Docker daemon arguments and restarting `docker` system service, ZFS will be used as Docker storage driver: - -``` -$ docker info -Containers: 0 - Running: 0 - Paused: 0 - Stopped: 0 -Images: 0 -Server Version: 1.12.6 -Storage Driver: zfs - Zpool: error while getting pool information strconv.ParseUint: parsing "": invalid syntax - Zpool Health: not available - Parent Dataset: zpool1/docker - Space Used By Parent: 19456 - Space Available: 8256371200 - Parent Quota: no - Compression: off -Logging Driver: json-file -Cgroup Driver: cgroupfs -Plugins: - Volume: local - Network: host bridge null overlay -Swarm: inactive -Runtimes: runc -Default Runtime: runc -Security Options: seccomp -Kernel Version: 4.9.6-rancher -Operating System: RancherOS v0.8.0-rc8 -OSType: linux -Architecture: x86_64 -CPUs: 1 -Total Memory: 1.953 GiB -Name: ip-172-31-24-201.us-west-1.compute.internal -ID: IEE7:YTUL:Y3F5:L6LF:5WI7:LECX:YDB5:LGWZ:QRPN:4KDI:LD66:KYTC -Docker Root Dir: /mnt/zpool1/docker -Debug Mode (client): false -Debug Mode (server): false -Registry: https://index.docker.io/v1/ -Insecure Registries: - 127.0.0.0/8 - -``` diff --git a/content/os/v1.x/en/support/_index.md b/content/os/v1.x/en/support/_index.md deleted file mode 100644 index 26cccb68d2..0000000000 --- a/content/os/v1.x/en/support/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Support -weight: 170 ---- - -## Development and Maintenance Status - - -RancherOS 1.x is no longer being actively maintained. 
There are two significant reasons behind this product decision:

**1. Docker:** The current industry requirements for a container runtime are very much evolving. Container runtimes like containerd and CRI-O are now being actively considered as the default choices. RancherOS 1.x, which was specifically designed around using the Docker engine only, unfortunately does not lend itself, in its current design, to this new evolving requirement.

**2. ISV Support:** RancherOS was specifically designed as a minimalistic OS to support purpose-built containerized applications. It was not designed to be used as a general-purpose OS (such as CentOS or Ubuntu). As such, most ISVs have not certified their software to run on RancherOS, nor does RancherOS even contain the necessary components for many of these applications to run.

We're working on a replacement. Stay tuned!
\ No newline at end of file
diff --git a/content/os/v1.x/en/system-services/_index.md b/content/os/v1.x/en/system-services/_index.md
deleted file mode 100644
index b3d0ebd605..0000000000
--- a/content/os/v1.x/en/system-services/_index.md
+++ /dev/null
@@ -1,51 +0,0 @@
---
title: System Services
weight: 140
aliases:
  - /os/v1.x/en/installation/system-services/adding-system-services
---

A system service is a container that can be run in either System Docker or Docker. Rancher provides services that are already available in RancherOS by adding them to the [os-services repo](https://github.com/rancher/os-services). Anything in the `index.yml` file of the repository for the tagged release is available as a system service through the `ros service list` command.

### Enabling and Starting System Services

Any service listed by `ros service list` can be enabled with a single command. After enabling a service, you will need to start it.

```
# List out available system services
$ sudo ros service list
disabled amazon-ecs-agent
disabled kernel-headers
disabled kernel-headers-system-docker
disabled open-vm-tools
# Enable a system service
$ sudo ros service enable kernel-headers
# Start a system service
$ sudo ros service up kernel-headers
```

### Disabling and Removing System Services

In order to stop a system service from running, you will need to stop and disable it.

```
# List out available system services
$ sudo ros service list
disabled amazon-ecs-agent
enabled kernel-headers
disabled kernel-headers-system-docker
disabled open-vm-tools
# Disable a system service
$ sudo ros service disable kernel-headers
# Stop a system service
$ sudo ros service stop kernel-headers
# Remove the containers associated with the system service
$ sudo ros service down kernel-headers
```
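Depending on a service's `io.rancher.os.scope` label, its containers run in either Docker or System Docker, so if you want to confirm that `ros service down` removed everything, you can check both daemons (a sketch using the service from the example above):

```
$ docker ps -a | grep kernel-headers
$ sudo system-docker ps -a | grep kernel-headers
```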
-If you want to remove a system service from the list of service, just delete the service. - -``` -$ sudo ros service delete -``` diff --git a/content/os/v1.x/en/system-services/custom-system-services/_index.md b/content/os/v1.x/en/system-services/custom-system-services/_index.md deleted file mode 100644 index 0fe5665401..0000000000 --- a/content/os/v1.x/en/system-services/custom-system-services/_index.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Custom System Services -weight: 141 -aliases: - - /os/v1.x/en/installation/system-services/custom-system-services ---- - -You can also create your own system service in [Docker Compose](https://docs.docker.com/compose/) format. After creating your own custom service, you can launch it in RancherOS in a couple of methods. The service could be directly added to the [cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/#cloud-config), or a `docker-compose.yml` file could be saved at a http(s) url location or in a specific directory of RancherOS. - -### Launching Services through Cloud-Config - -If you want to boot RancherOS with a system service running, you can add the service to the cloud-config that is passed to RancherOS. When RancherOS starts, this service will automatically be started. - -```yaml -#cloud-config -rancher: - services: - nginxapp: - image: nginx - restart: always -``` - -### Launching Services using local files - -If you already have RancherOS running, you can start a system service by saving a `docker-compose.yml` file at `/var/lib/rancher/conf/`. - -```yaml -nginxapp: - image: nginx - restart: always -``` - -To enable a custom system service from the file location, the command must indicate the file location if saved in RancherOS. If the file is saved at a http(s) url, just use the http(s) url when enabling/disabling. - -``` -# Enable the system service saved in /var/lib/rancher/conf -$ sudo ros service enable /var/lib/rancher/conf/example.yml -# Enable a system service saved at a http(s) url -$ sudo ros service enable https://mydomain.com/example.yml -``` - -
- -After the custom system service is enabled, you can start the service using `sudo ros service up `. The `` will be the names of the services inside the `docker-compose.yml`. - -``` -$ sudo ros service up nginxapp -# If you have more than 1 service in your docker-compose.yml, add all service names to the command -$ sudo ros service up service1 service2 service3 -``` - -### Launching Services from a web repository - -The https://github.com/rancher/os-services repository is used for the built-in services, but you can create your own, and configure RancherOS to use it in addition (or to replace) it. - -The config settings to set the url in which `ros` should look for an `index.yml` file is: `rancher.repositories..url`. The `core` repository url is set when a release is made, and any other `` url you add will be listed together when running `ros console list`, `ros service list` or `ros engine list` - -For example, in RancherOS v0.7.0, the `core` repository is set to `https://raw.githubusercontent.com/rancher/os-services/v0.7.0`. - -### Service development and testing - -If you're building your own services in a branch on GitHub, you can push to it, and then load your service from there. - -For example, when developing the zfs service: - -``` -rancher@zfs:~$ sudo ros config set rancher.repositories.zfs.url https://raw.githubusercontent.com/SvenDowideit/os-services/zfs-service -rancher@zfs:~$ sudo ros service list -disabled amazon-ecs-agent -disabled kernel-extras -enabled kernel-headers -disabled kernel-headers-system-docker -disabled open-vm-tools -disabled amazon-ecs-agent -disabled kernel-extras -disabled kernel-headers -disabled kernel-headers-system-docker -disabled open-vm-tools -disabled zfs -[rancher@zfs ~]$ sudo ros service enable zfs -Pulling zfs (zombie/zfs)... -latest: Pulling from zombie/zfs -b3e1c725a85f: Pull complete -4daad8bdde31: Pull complete -63fe8c0068a8: Pull complete -4a70713c436f: Pull complete -bd842a2105a8: Pull complete -d1a8c0826fbb: Pull complete -5f1c5ffdf34c: Pull complete -66c2263f2388: Pull complete -Digest: sha256:eab7b8c21fbefb55f7ee311dd236acee215cb6a5d22942844178b8c6d4e02cd9 -Status: Downloaded newer image for zombie/zfs:latest -[rancher@zfs ~]$ sudo ros service up zfs -WARN[0000] The KERNEL_VERSION variable is not set. Substituting a blank string. -INFO[0000] Project [os]: Starting project -INFO[0000] [0/21] [zfs]: Starting -INFO[0000] [1/21] [zfs]: Started -INFO[0000] Project [os]: Project started - -``` - -Beware that there is an overly aggressive caching of yml files - so when you push a new yml file to your repo, you need to -delete the files in `/var/lib/rancher/cache`. - -The image that you specify in the service yml file needs to be pullable - either from a private registry, or on the Docker Hub. - -### Service cron - -_Available as of v1.1_ - -RancherOS has a system cron service based on [Container Crontab](https://github.com/rancher/container-crontab). This can be used to start, restart or stop system containers. - -To use this on your service, add a `cron.schedule` label to your service's description: - -``` -my-service: - image: namespace/my-service:v1.0.0 - command: my-command - labels: - io.rancher.os.scope: "system" - cron.schedule: "0 * * * * ?" -``` - -For a cron service that can be used with user Docker containers, see the `crontab` system service. 
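Once a service carrying a `cron.schedule` label is up, one way to confirm the schedule was registered is to inspect the `system-cron` container in System Docker (a sketch; the exact log output varies by version):

```
$ sudo system-docker logs system-cron
```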
- -### Service log rotation - -RancherOS provides a built in `logrotate` container that makes use of logrotate(8) to rotate system logs. This is called on an hourly basis by the `system-cron` container. - -If you would like to make use of system log rotation for your system service, do the following. - -Add `system-volumes` to your service description's `volumes_from` section. You could also use a volume group containing `system-volumes` e.g. `all-volumes`. - -``` -my-service: - image: namespace/my-service:v1.0.0 - command: my-command - labels: - io.rancher.os.scope: "system" - volumes_from: - - system-volumes -``` - -Next, add an entry point script to your image and copy your logrotate configs to `/etc/logrotate.d/` on startup. - -Example Dockerfile: -``` -FROM alpine:latest -COPY logrotate-myservice.conf entrypoint.sh / -ENTRYPOINT ["/entrypoint.sh"] -``` - -Example entrypoint.sh (Ensure that this script has the execute bit set). -``` -#!/bin/sh - -cp logrotate-myservice.conf /etc/logrotate.d/myservice - -exec "$@" -``` - -Your service's log rotation config will now be included when the system logrotate runs. You can view logrotate output with `system-docker logs logrotate`. - -### Creating your own Console - -Once you have your own Services repository, you can add a new service to its index.yml, and then add a `.yml` file to the directory starting with the first letter. - -To create your own console images, you need to: - -1. install some basic tools, including an ssh daemon, sudo, and kernel module tools -2. create `rancher` and `docker` users and groups with UID and GID's of `1100` and `1101` respectively -3. add both users to the `docker` and `sudo` groups -4. add both groups into the `/etc/sudoers` file to allow password-less sudo -5. configure sshd to accept logins from users in the `docker` group, and deny `root`. -6. set `ENTRYPOINT ["/usr/bin/ros", "entrypoint"]` - -the `ros` binary, and other host specific configuration files will be bind mounted into the running console container when its launched. - -For examples of existing images, see https://github.com/rancher/os-images. - -## Labels - -We use labels to determine how to handle the service containers. - -Key | Value |Description -----|-----|--- -`io.rancher.os.detach` | Default: `true` | Equivalent of `docker run -d`. If set to `false`, equivalent of `docker run --detach=false` -`io.rancher.os.scope` | `system` | Use this label to have the container deployed in System Docker instead of Docker. -`io.rancher.os.before`/`io.rancher.os.after` | Service Names (Comma separated list is accepted) | Used to determine order of when containers should be started. -`io.rancher.os.createonly` | Default: `false` | When set to `true`, only a `docker create` will be performed and not a `docker start`. -`io.rancher.os.reloadconfig` | Default: `false`| When set to `true`, it reloads the configuration. - - -RancherOS uses labels to determine if the container should be deployed in System Docker. By default without the label, the container will be deployed in User Docker. 
- -```yaml -labels: - - io.rancher.os.scope=system -``` - - -### Example of how to order container deployment - -```yaml -foo: - labels: - # Start foo before bar is launched - io.rancher.os.before: bar - # Start foo after baz has been launched - io.rancher.os.after: baz -``` diff --git a/content/os/v1.x/en/system-services/environment/_index.md b/content/os/v1.x/en/system-services/environment/_index.md deleted file mode 100644 index f2a5d07fcc..0000000000 --- a/content/os/v1.x/en/system-services/environment/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Environment -weight: 143 -aliases: - - /os/v1.x/en/installation/system-services/environment ---- - -The [environment key](https://docs.docker.com/compose/compose-file/#environment) can be used to customize system services. When a value is not assigned, RancherOS looks up the value from the `rancher.environment` key. - -In the example below, `ETCD_DISCOVERY` will be set to `https://discovery.etcd.io/d1cd18f5ee1c1e2223aed6a1734719f7` for the `etcd` service. - -```yaml -rancher: - environment: - ETCD_DISCOVERY: https://discovery.etcd.io/d1cd18f5ee1c1e2223aed6a1734719f7 - services: - etcd: - ... - environment: - - ETCD_DISCOVERY -``` - -Wildcard globbing is also supported. In the example below, `ETCD_DISCOVERY` will be set as in the previous example, along with any other environment variables beginning with `ETCD_`. - -```yaml -rancher: - environment: - ETCD_DISCOVERY: https://discovery.etcd.io/d1cd18f5ee1c1e2223aed6a1734719f7 - services: - etcd: - ... - environment: - - ETCD_* -``` - -_Available as of v1.2_ - -There is also a way to extend PATH environment variable, `PATH` or `path` can be set, and multiple values can be comma-separated. Note that need to reboot before taking effect. - -```yaml -rancher: - environment: - path: /opt/bin,/home/rancher/bin -``` diff --git a/content/os/v1.x/en/system-services/system-docker-volumes/_index.md b/content/os/v1.x/en/system-services/system-docker-volumes/_index.md deleted file mode 100644 index 1ec9fb1baa..0000000000 --- a/content/os/v1.x/en/system-services/system-docker-volumes/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: System Docker Volumes -weight: 142 -aliases: - - /os/v1.x/en/installation/system-services/system-docker-volumes ---- - -A few services are containers in `created` state. Their purpose is to provide volumes for other services. - -### user-volumes - -Provides user accessible persistent storage directories, used by console service: - -``` -/home -/opt -/var/lib/kubelet - Added as of v1.2 -``` - -_Available as of v1.2_ - -If you want to change user-volumes, for example, add `/etc/kubernetes` directory: - -``` -$ sudo ros config set rancher.services.user-volumes.volumes [/home:/home,/opt:/opt,/var/lib/kubelet:/var/lib/kubelet,/etc/kubernetes:/etc/kubernetes] -$ sudo reboot -``` - -Please note that after the restart, the new persistence directory can take effect. 
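You can read the value back at any time to confirm the change was recorded (the output below assumes the `/etc/kubernetes` example above):

```
$ sudo ros config get rancher.services.user-volumes.volumes
- /home:/home
- /opt:/opt
- /var/lib/kubelet:/var/lib/kubelet
- /etc/kubernetes:/etc/kubernetes
```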
- -### container-data-volumes - -Provides docker storage directory, used by console service (and, indirectly, by docker) - -``` -/var/lib/docker -``` - -### command-volumes - -Provides necessary command binaries (read-only), used by system services: - -``` -/usr/bin/docker-containerd.dist -/usr/bin/docker-containerd-shim.dist -/usr/bin/docker-runc.dist -/usr/bin/docker.dist -/usr/bin/dockerlaunch -/usr/bin/system-docker -/sbin/poweroff -/sbin/reboot -/sbin/halt -/sbin/shutdown -/usr/bin/respawn -/usr/bin/ros -/usr/bin/cloud-init -/usr/sbin/netconf -/usr/sbin/wait-for-docker -/usr/bin/switch-console -``` - -### system-volumes - -Provides necessary persistent directories, used by system services: - -``` -/host/dev -/etc/docker -/etc/hosts -/etc/resolv.conf -/etc/ssl/certs/ca-certificates.crt.rancher -/etc/selinux -/lib/firmware -/lib/modules -/run -/usr/share/ros -/var/lib/rancher/cache -/var/lib/rancher/conf -/var/lib/rancher -/var/log -/var/run -``` - -### all-volumes - -Combines all of the above, used by the console service. diff --git a/content/os/v1.x/en/upgrading/_index.md b/content/os/v1.x/en/upgrading/_index.md deleted file mode 100644 index a1de8d3929..0000000000 --- a/content/os/v1.x/en/upgrading/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Upgrading -weight: 3 ---- - -If RancherOS has released a new version and you want to learn how to upgrade your OS, we make it easy using the `ros os` command. - -Since RancherOS is a kernel and initrd, the upgrade process is downloading a new kernel and initrd, and updating the boot loader to point to it. The old kernel and initrd are not removed. If there is a problem with your upgrade, you can select the old kernel from the Syslinux bootloader. - -Before upgrading to any version, please review the release notes on our [releases page](https://github.com/rancher/os/releases) in GitHub to review any updates in the release. - -> **Note:** If you are using [`docker-machine`]({{< baseurl >}}/os/v1.x/en/installation/workstation//docker-machine/) then you will not be able to upgrade your RancherOS version. You need to delete and re-create the machine. - - -### Version Control - -First, let's check what version you have running on your system. - -``` -$ sudo ros os version -v0.4.5 -``` - -If you just want to find out the available releases from the command line, it's a simple command. - -``` -# List all available releases -$ sudo ros os list -rancher/os:v0.4.0 remote -rancher/os:v0.4.1 remote -rancher/os:v0.4.2 remote -rancher/os:v0.4.3 remote -rancher/os:v0.4.4 remote -rancher/os:v0.4.5 remote -rancher/os:v0.5.0 local -``` - -The `local`/`remote` label shows which images are available to System Docker locally versus which need to be pulled from Docker Hub. If you choose to upgrade to a version that is remote, we will automatically pull that image during the upgrade. - -### Upgrading - -Let's walk through upgrading! The `ros os upgrade` command will automatically upgrade to the current release of RancherOS. The current release is designated as the most recent release of RancherOS. - -``` -$ sudo ros os upgrade -Upgrading to rancher/os:v0.5.0 -``` - -Confirm that you want to continue and the final step will be to confirm that you want to reboot. - -``` -Continue [y/N]: y -... -... -... -Continue with reboot [y/N]: y -INFO[0037] Rebooting -``` - -After rebooting, you can check that your version has been updated. 
- -``` -$ sudo ros -v -ros version v0.5.0 -``` - -> **Note:** If you are booting from ISO and have not installed to disk, your upgrade will not be saved. You can view our guide to [installing to disk]({{< baseurl >}}/os/v1.x/en/installation/server/install-to-disk/). - -#### Upgrading to a Specific Version - -If you are a couple of versions behind the current version, use the `-i` option to pick the version that you want to upgrade to. - -``` -$ sudo ros os upgrade -i rancher/os:v0.5.0 -Upgrading to rancher/os:v0.5.0 -Continue [y/N]: y -... -... -... -Continue with reboot [y/N]: y -INFO[0082] Rebooting -``` - -#### Bypassing The Prompts - -We have added the ability to bypass the prompts. Use the `-f` or `--force` option when upgrading. Your machine will automatically be rebooted and you'll just need to log back in when it's done. - -If you want to bypass the prompts, but you don't want to immediately reboot, you can add `--no-reboot` to avoid rebooting immediately. - -### Rolling back an Upgrade - -If you've upgraded your RancherOS and something's not working anymore, you can easily rollback your upgrade. - -The `ros os upgrade` command works for rolling back. We'll use the `-i` option to "upgrade" to a specific version. All you need to do is pick the previous version! Same as before, you will be prompted to confirm your upgrade version as well as confirm your reboot. - -``` -$ sudo ros -v -ros version v0.4.5 -$ sudo ros os upgrade -i rancher/os:v0.4.4 -Upgrading to rancher/os:v0.4.4 -Continue [y/N]: y -... -... -... -Continue with reboot [y/N]: y -INFO[0082] Rebooting -``` -After rebooting, the rollback will be complete. - -``` -$ sudo ros -v -ros version 0.4.4 -``` - -
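Putting the options above together, a rollback (or upgrade) can also be run without any prompts, which is useful when scripting maintenance. This is only a sketch based on the `-i`, `--force`, and `--no-reboot` flags described earlier; the image tag is an example:

```
# Roll back to a specific version non-interactively and defer the reboot
$ sudo ros os upgrade -i rancher/os:v0.4.4 --force --no-reboot

# Reboot later, at a convenient time, to complete the rollback
$ sudo reboot
```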
- -> **Note:** If you are using a [persistent console]({{}}/os/v1.x/en/installation/custom-builds/custom-console/#console-persistence) and in the current version's console, rolling back is not supported. For example, rolling back to v0.4.5 when using a v0.5.0 persistent console is not supported. - -### Staging an Upgrade - -During an upgrade, the template of the upgrade is downloaded from the rancher/os repository. You can download this template ahead of time so that it's saved locally. This will decrease the time it takes to upgrade. We'll use the `-s` option to stage the specific template. You will need to specify the image name with the `-i` option, otherwise it will automatically stage the current version. - -``` -$ sudo ros os upgrade -s -i rancher/os:v0.5.0 -``` - -### Custom Upgrade Sources - -In the `upgrade` key, the `url` is used to find the list of available and current versions of RancherOS. This can be modified to track custom builds and releases. - -```yaml -#cloud-config -rancher: - upgrade: - url: https://releases.rancher.com/os/releases.yml - image: rancher/os -``` - -### Upgrade Notes for v1.4.0+ - -If you are upgrading to v1.4.0+, please review these notes that could alter your RancherOS settings. - -Due to changes in the location of user-docker's data-root, after upgrading to v1.4.0+, you must move or copy the files of user-docker's data-root. If you do not do this, your data will *NOT* be available. - -``` -#!/bin/bash - -old_docker_root="/proc/1/root/var/lib/docker" -new_docker_root="/proc/1/root/var/lib/user-docker" - -system-docker stop docker -cp -a $old_docker_root/* $new_docker_root -system-docker start docker -``` - -If you had another bridge IP set for system-docker, you may need to explicitly set it again depending on your upgrade path. Before re-setting it, you can confirm if it's set. - -``` -# Check to see if docker bridge IP is set -$ sudo ros config get rancher.system_docker.bip - -# If it is no longer set, re-set the setting -$ sudo ros config set rancher.system_docker.bip 10.0.0.1/16 -``` diff --git a/content/rancher/_index.md b/content/rancher/_index.md deleted file mode 100644 index 85fc726d04..0000000000 --- a/content/rancher/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Rancher -weight: 1 -showBreadcrumb: false ---- diff --git a/content/rancher/v2.0-v2.4/_index.md b/content/rancher/v2.0-v2.4/_index.md deleted file mode 100644 index 4d23120e33..0000000000 --- a/content/rancher/v2.0-v2.4/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Rancher 2.0-2.4 -weight: 3 -showBreadcrumb: false ---- diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/_index.md deleted file mode 100644 index fb0557c974..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Authentication, Permissions and Global Configuration -weight: 6 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/ - - /rancher/v2.0-v2.4/en/concepts/global-configuration/server-url/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/server-url/ - - /rancher/v2.0-v2.4/en/admin-settings/log-in/ ---- - -After installation, the [system administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. 
- -## First Log In - -After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**. You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front of a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single-node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. - ->**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. - -## Authentication - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows you to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's users and groups. - -For more information on how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/). - -## Authorization - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. - -For more information on how authorization works and how to customize roles, see [Role Based Access Control (RBAC)]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). - -## Pod Security Policies - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of the pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. - -For more information on how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). - -## Provisioning Drivers - -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -For more information, see [Provisioning Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/). - -## Adding Kubernetes Versions into Rancher - -_Available as of v2.3.0_ - -With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but it is not intended for upgrading Kubernetes minor versions (i.e. `v1.X.0`), as Kubernetes tends to deprecate or add APIs between minor versions. - -The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata.
For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/) - -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/). - -## Enabling Experimental Features - -_Available as of v2.3.0_ - -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/_index.md deleted file mode 100644 index a53da74f6c..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/_index.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: Authentication -weight: 1115 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/authentication/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/ ---- - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. - -This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. - -## External vs. Local Authentication - -The Rancher authentication proxy integrates with the following external authentication services. The following table lists the first version of Rancher each service debuted. - -| Auth Service | Available as of | -| ------------------------------------------------------------------------------------------------ | ---------------- | -| [Microsoft Active Directory]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/) | v2.0.0 | -| [GitHub]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/github/) | v2.0.0 | -| [Microsoft Azure AD]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/azure-ad/) | v2.0.3 | -| [FreeIPA]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/freeipa/) | v2.0.5 | -| [OpenLDAP]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/) | v2.0.5 | -| [Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/) | v2.0.7 | -| [PingIdentity]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ping-federate/) | v2.0.7 | -| [Keycloak]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/keycloak/) | v2.1.0 | -| [Okta]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/okta/) | v2.2.0 | -| [Google OAuth]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/google/) | v2.3.0 | -| [Shibboleth]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth) | v2.4.0 | - -
-However, Rancher also provides [local authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/local/). - -In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. - -## Users and Groups - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). - -> **Note:** Local authentication does not support creating or managing groups. - -For more information, see [Users and Groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) - -## Scope of Rancher Authorization - -After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: - -| Access Level | Description | -|----------------------------------------------|-------------| -| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | -| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | -| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | - -To set the Rancher access level for users in the authorization service, follow these steps: - -1. From the **Global** view, click **Security > Authentication.** - -1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. - -1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. - -1. Click **Save.** - -**Result:** The Rancher access configuration settings are applied. - -{{< saml_caveats >}} - -## External Authentication Configuration and Principal Users - -Configuration of external authentication requires: - -- A local user assigned the administrator role, called hereafter the _local principal_. -- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. - -Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. - -1. 
Sign into Rancher as the local principal and complete configuration of external authentication. - - ![Sign In]({{}}/img/rancher/sign-in.png) - -2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. - - ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) - -3. After you complete configuration, Rancher automatically signs out the local principal. - - ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) - -4. Then, Rancher automatically signs you back in as the external principal. - - ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) - -5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - - ![Sign In External Principal]({{}}/img/rancher/users-page.png) - -6. The external principal and the local principal share the same access rights. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/_index.md deleted file mode 100644 index c0361a86e7..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/_index.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Configuring Active Directory (AD) -weight: 1112 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/active-directory/ ---- - -If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. - -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap) integration. - -> **Note:** -> -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -## Prerequisites - -You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. - -Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. - -Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. - -> **Using TLS?** -> -> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. 
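> For illustration, such a bundle can be assembled by concatenating the PEM files before pasting the result into the Rancher configuration. The file names below are placeholders, not values from your environment:
>
> ```
> # Combine the CA certificate and any intermediate certificates
> # into a single PEM bundle (hypothetical file names)
> cat root-ca.pem intermediate-ca.pem > ca-chain.pem
> ```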
- -## Configuration Steps -### Open Active Directory Configuration - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **Active Directory**. The **Configure an AD server** form will be displayed. - -### Configure Active Directory Server Settings - -In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. - -> **Note:** -> -> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). - -**Table 1: AD Server parameters** - -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the AD server | -| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | -| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | -| Service Account Password | The password for the service account. | -| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. | -| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| - ---- - -### Configure User/Group Schema - -In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. - -Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. - -> **Note:** -> -> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. - -#### User Schema - -The table below details the parameters for the user schema section configuration. 
- -**Table 2: User schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | -| User Member Attribute | The attribute containing the groups that a user is a member of. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | -| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | -| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | -| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | - ---- - -#### Group Schema - -The table below details the parameters for the group schema configuration. - -**Table 3: Group schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | -| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. 
For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing a the user's memberships. See `User Member Attribute`. | -| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organisation makes use of these nested memberships (ie. you have groups that contain other groups as members. We advise avoiding nested groups when possible). | - ---- - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly with the account you test with set as admin. - -> **Note:** -> -> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. - -1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. -2. Click **Authenticate with Active Directory** to finalise the setup. - -**Result:** - -- Active Directory authentication has been enabled. -- You have been signed into Rancher as administrator using the provided AD credentials. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Identify Search Base and Schema using ldapsearch - -In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. - -The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. - -For the purpose of the example commands provided below we will assume: - -- The Active Directory server has a hostname of `ad.acme.com` -- The server is listening for unencrypted connections on port `389` -- The Active Directory domain is `acme` -- You have a valid AD account with the username `jdoe` and password `secret` - -### Identify Search Base - -First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" -``` - -This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountNam=jdoe`), returning the attributes for said user: - -{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} - -Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. - -Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, ie. 
`OU=Groups,DC=acme,DC=com`. - -### Identify User Schema - -The output of the above `ldapsearch` query also allows to determine the correct values to use in the user schema configuration: - -- `Object Class`: **person** [1] -- `Username Attribute`: **name** [2] -- `Login Attribute`: **sAMAccountName** [3] -- `User Member Attribute`: **memberOf** [4] - -> **Note:** -> -> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. - -We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name. - -### Identify Group Schema - -Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ --s sub "CN=examplegroup" -``` - -This command will inform us on the attributes used for group objects: - -{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} - -Again, this allows us to determine the correct values to enter in the group schema configuration: - -- `Object Class`: **group** [1] -- `Name Attribute`: **name** [2] -- `Group Member Mapping Attribute`: **member** [3] -- `Search Attribute`: **sAMAccountName** [4] - -Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly will have to set the value of the `Group Member User Attribute` parameter to this attribute. - -In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/azure-ad/_index.md deleted file mode 100644 index d4c9e8c1eb..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/azure-ad/_index.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: Configuring Azure AD -weight: 1115 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/azure-ad/ ---- - -_Available as of v2.0.3_ - -If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. - ->**Note:** Azure AD integration only supports Service Provider initiated logins. - ->**Prerequisite:** Have an instance of Azure AD configured. 
- ->**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). - -## Azure Active Directory Configuration Outline - -Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. - - - ->**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. - - - -- [1. Register Rancher with Azure](#1-register-rancher-with-azure) -- [2. Create a new client secret](#2-create-a-new-client-secret) -- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) -- [4. Add a Reply URL](#4-add-a-reply-url) -- [5. Copy Azure Application Data](#5-copy-azure-application-data) -- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) - - - -### 1. Register Rancher with Azure - -Before enabling Azure AD within Rancher, you must register Rancher with Azure. - -1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. - -1. Use search to open the **App registrations** service. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - -1. Click **New registrations** and complete the **Create** form. - - ![New App Registration]({{}}/img/rancher/new-app-registration.png) - - 1. Enter a **Name** (something like `Rancher`). - - 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. - - 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - - 1. Click **Register**. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 2. Create a new client secret - -From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. - -1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. - - ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) - -1. From the navigation pane on left, click **Certificates and Secrets**. - -1. Click **New client secret**. - - ![Create new client secret]({{< baseurl >}}/img/rancher/select-client-secret.png) - - 1. Enter a **Description** (something like `Rancher`). - - 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. - - 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). - - -1. Copy the key value and save it to an [empty text file](#tip). - - You'll enter this key into the Rancher UI later as your **Application Secret**. - - You won't be able to access the key value again within the Azure UI. - -### 3. 
Set Required Permissions for Rancher - -Next, set API permissions for Rancher within Azure. - -1. From the navigation pane on left, select **API permissions**. - - ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) - -1. Click **Add a permission**. - -1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: - - ![Select API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-2.png) - -
-
- - **Access the directory as the signed-in user** - - **Read directory data** - - **Read all groups** - - **Read all users' full profiles** - - **Read all users' basic profiles** - - **Sign in and read user profile** - -1. Click **Add permissions**. - -1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. - - >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. - - -### 4. Add a Reply URL - -To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. - - -1. From the **Setting** blade, select **Reply URLs**. - - ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) - -1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - -1. Click **Save**. - -**Result:** Your reply URL is saved. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 5. Copy Azure Application Data - -As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. - -1. Obtain your Rancher **Tenant ID**. - - 1. Use search to open the **Azure Active Directory** service. - - ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) - - 1. From the left navigation pane, open **Overview**. - - 2. Copy the **Directory ID** and paste it into your [text file](#tip). - - You'll paste this value into Rancher as your **Tenant ID**. - -1. Obtain your Rancher **Application ID**. - - 1. Use search to open **App registrations**. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - - 1. Find the entry you created for Rancher. - - 1. Copy the **Application ID** and paste it to your [text file](#tip). - -1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. - - 1. From **App registrations**, click **Endpoints**. - - ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) - - 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). - - - **Microsoft Graph API endpoint** (Graph Endpoint) - - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) - - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) - ->**Note:** Copy the v1 version of the endpoints - -### 6. Configure Azure AD in Rancher - -From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. - -Enter the values that you copied to your [text file](#tip). - -1. Log into Rancher. From the **Global** view, select **Security > Authentication**. - -1. Select **Azure AD**. - -1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). - - >**Important:** When entering your Graph Endpoint, remove the tenant ID from the URL, like below. - > - >https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c - - The following table maps the values you copied in the Azure portal to the fields in Rancher. 
- - | Rancher Field | Azure Value | - | ------------------ | ------------------------------------- | - | Tenant ID | Directory ID | - | Application ID | Application ID | - | Application Secret | Key Value | - | Endpoint | https://login.microsoftonline.com/ | - | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | - | Token Endpoint | OAuth 2.0 Token Endpoint | - | Auth Endpoint | OAuth 2.0 Authorization Endpoint | - -1. Click **Authenticate with Azure**. - -**Result:** Azure Active Directory authentication is configured. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/freeipa/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/freeipa/_index.md deleted file mode 100644 index b788ba9d06..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/freeipa/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Configuring FreeIPA -weight: 1114 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/freeipa/ ---- - -_Available as of v2.0.5_ - -If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. - ->**Prerequisites:** -> ->- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. ->- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **FreeIPA**. - -4. Complete the **Configure an FreeIPA server** form. - - You may need to log in to your domain controller to find the information requested in the form. - - >**Using TLS?** - >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. -
-
- >**User Search Base vs. Group Search Base** - > - >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. - > - >* If your users and groups are in the same search base, complete only the User Search Base. - >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. - -5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. - - >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. - > - >The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. - > - > * `uid`: User ID - > * `sn`: Last Name - > * `givenName`: First Name - > - > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. - -6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. - -**Result:** - -- FreeIPA authentication is configured. -- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/github/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/github/_index.md deleted file mode 100644 index a9667696ea..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/github/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Configuring GitHub -weight: 1116 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/github/ ---- - -In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. - ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **GitHub**. - -4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. - - >**What's an Authorization Callback URL?** - > - >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). - - >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. - -5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. 
- - >**Where do I find the Client ID and Client Secret?** - > - >From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. - -6. Click **Authenticate with GitHub**. - -7. Use the **Site Access** options to configure the scope of user authorization. - - - **Allow any valid Users** - - _Any_ GitHub user can access Rancher. We generally discourage use of this setting! - - - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** - - Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. - - - **Restrict access to only Authorized Users and Organizations** - - Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. -
-8. Click **Save**. - -**Result:** - -- GitHub authentication is configured. -- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/google/_index.md deleted file mode 100644 index 564b3920fc..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/google/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Configuring Google OAuth ---- -_Available as of v2.3.0_ - -If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. - -Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. - -Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) can configure authentication. - -# Prerequisites -- You must have a [G Suite admin account](https://admin.google.com) configured. -- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. -- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) - -After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: -![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) - -# Setting up G Suite for OAuth with Rancher -Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: - -1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) -1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) -1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) -1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) - -### 1. Adding Rancher as an Authorized Domain -1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. -1. Select your project and click **OAuth consent screen.** -![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) -1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) -1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. - -**Result:** Rancher has been added as an authorized domain for the Admin SDK API. - -### 2. 
Creating OAuth2 Credentials for the Rancher Server -1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) -![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) -1. On the **Create Credentials** dropdown, select **OAuth client ID.** -1. Click **Web application.** -1. Provide a name. -1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. - - Under **Authorized JavaScript origins,** enter your Rancher server URL. - - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. -1. Click on **Create.** -1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. - -**Result:** Your OAuth credentials have been successfully created. - -### 3. Creating Service Account Credentials -Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. - -Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. - -As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. - -This section describes how to: - -- Create a service account -- Create a key for the service account and download the credentials as JSON - -1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. -1. Click on **Create Service Account.** -1. Enter a name and click **Create.** -![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) -1. Don't provide any roles on the **Service account permissions** page and click **Continue** -![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) -1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. -![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) - -**Result:** Your service account is created. - -### 4. Register the Service Account Key as an OAuth Client - -You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. - -Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: - -1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. 
To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** - - ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) -1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) -1. Add the Unique ID obtained in the previous step in the **Client Name** field. -1. In the **One or More API Scopes** field, add the following scopes: - ``` - openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly - ``` -1. Click **Authorize.** - -**Result:** The service account is registered as an OAuth client in your G Suite account. - -# Configuring Google OAuth in Rancher -1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. -1. From the **Global** view, click **Security > Authentication** from the main menu. -1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. - 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. - 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. - 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. - - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) - - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. - - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. -1. Click **Authenticate with Google**. -1. Click **Save**. - -**Result:** Google authentication is successfully configured. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/keycloak/_index.md deleted file mode 100644 index be49ae9ad1..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/keycloak/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Configuring Keycloak (SAML) -description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. 
By the end your users will be able to sign into Rancher using their Keycloak logins -weight: 1200 ---- -_Available as of v2.1.0_ - -If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -## Prerequisites - -- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. -- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - - Setting | Value - ------------|------------ - `Sign Documents` | `ON` 1 - `Sign Assertions` | `ON` 1 - All other `ON/OFF` Settings | `OFF` - `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 - `Client Name` | (e.g. `rancher`) - `Client Protocol` | `SAML` - `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` - - >1: Optionally, you can enable either one or both of these settings. - >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. - - {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} - -- In the new SAML client, create Mappers to expose the users fields - - Add all "Builtin Protocol Mappers" - {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} - - Create a new "Group list" mapper to map the member attribute to a user's groups - {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} -- Export a `metadata.xml` file from your Keycloak client: - From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. - - >**Note** - > Keycloak versions 6.0.0 and up no longer provide the IDP metadata under the `Installation` tab. - > You can still get the XML from the following url: - > - > `https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor` - > - > The XML obtained from this URL contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: - > - > * Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. - > * Remove the `` tag from the beginning. - > * Remove the `` from the end of the xml. - > - > You are left with something similar as the example below: - > - > ``` - > - > .... - > - > ``` - -## Configuring Keycloak in Rancher - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Keycloak**. - -1. Complete the **Configure Keycloak Account** form. - - - | Field | Description | - | ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | - | Display Name Field | The attribute that contains the display name of users.

Example: `givenName` |
- | User Name Field | The attribute that contains the user name/given name. Example: `email` |
- | UID Field | An attribute that is unique to every user. Example: `email` |
- | Groups Field | Make entries for managing group memberships. Example: `member` |
- | Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.
Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | - | Rancher API Host | The URL for your Rancher Server. | - | Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | - | IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | - - >**Tip:** You can generate a key/certificate pair using an openssl command. For example: - > - > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert - - -1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. - -{{< saml_caveats >}} - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. - -### You are not redirected to Keycloak - -When you click on **Authenticate with Keycloak**, your are not redirected to your IdP. - - * Verify your Keycloak client configuration. - * Make sure `Force Post Binding` set to `OFF`. - - -### Forbidden message displayed after IdP login - -You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. - - * Check the Rancher debug log. - * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. - -### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata - -This is usually due to the metadata not being created until a SAML provider is configured. -Try configuring and saving keycloak as your SAML provider and then accessing the metadata. - -### Keycloak Error: "We're sorry, failed to process response" - - * Check your Keycloak log. - * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. - -### Keycloak Error: "We're sorry, invalid requester" - - * Check your Keycloak log. - * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. 
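When troubleshooting metadata issues like the ones above, it can also help to fetch the IdP descriptor directly and confirm its root element before pasting it into Rancher. The commands below are only a sketch, assuming a realm named `myrealm` and a Keycloak server reachable at `https://keycloak.example.com`; substitute your own values.

```
# Fetch the realm's SAML descriptor (Keycloak 6.0.0+ no longer exposes it on the Installation tab)
curl -s "https://keycloak.example.com/auth/realms/myrealm/protocol/saml/descriptor" -o idp-metadata.xml

# Inspect the beginning of the file: Rancher expects a root element of EntityDescriptor,
# not EntitiesDescriptor, so adjust the XML as described earlier if needed
head -c 400 idp-metadata.xml
```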
diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/_index.md deleted file mode 100644 index ea9a810d8c..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Configuring Microsoft Active Directory Federation Service (SAML) -weight: 1205 ---- -_Available as of v2.0.7_ - -If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. - -## Prerequisites - -You must have Rancher installed. - -- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. -- You must have a global administrator account on your Rancher installation. - -You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. - -- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. -- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. - -## Setup Outline - -Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. - -- [1. Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2. Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) - -{{< saml_caveats >}} - - -### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md deleted file mode 100644 index 0c2979fcb6..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: 1. Configuring Microsoft AD FS for Rancher -weight: 1205 ---- - -Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. - -1. Log into your AD server as an administrative user. - -1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - - {{< img "/img/rancher/adfs/adfs-overview.png" "">}} - -1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - - {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} - -1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - - {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} - -1. 
Select **AD FS profile** as the configuration profile for your relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} - -1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - - {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} - -1. Select **Enable support for the SAML 2.0 WebSSO protocol** - and enter `https:///v1-saml/adfs/saml/acs` for the service URL. - - {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} - -1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} - -1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. - - {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}} - -1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}} - -1. After reviewing your settings, select **Next** to add the relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}} - - -1. Select **Open the Edit Claim Rules...** and click **Close**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}} - -1. On the **Issuance Transform Rules** tab, click **Add Rule...**. - - {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}} - -1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. - - {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}} - -1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: - - | LDAP Attribute | Outgoing Claim Type | - | -------------------------------------------- | ------------------- | - | Given-Name | Given Name | - | User-Principal-Name | UPN | - | Token-Groups - Qualified by Long Domain Name | Group | - | SAM-Account-Name | Name | -
- {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} - -1. Download the `federationmetadata.xml` from your AD server at: -``` -https:///federationmetadata/2007-06/federationmetadata.xml -``` - -**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. - -### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md deleted file mode 100644 index be585ae2f0..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: 2. Configuring Rancher for Microsoft AD FS -weight: 1205 ---- -_Available as of v2.0.7_ - -After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. - ->**Important Notes For Configuring Your AD FS Server:** -> ->- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` ->- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` ->- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Microsoft Active Directory Federation Services**. - -1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. - - - - - - - - -1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. - - Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. - - >**Note:** You may have to disable your popup blocker to see the AD FS login page. - -**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. - -# Configuration - -| Field | Description | -|---------------------------|-----------------| -| Display Name Field | The AD attribute that contains the display name of users.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` |
-| User Name Field | The AD attribute that contains the user name/given name. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` |
-| UID Field | An AD attribute that is unique to every user. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
-| Groups Field | Make entries for managing group memberships. Example: `http://schemas.xmlsoap.org/claims/Group` |
-| Rancher API Host | The URL for your Rancher Server. |
-| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL. [Certificate creation command](#cert-command) |
-| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.
You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | - - - - -**Tip:** You can generate a certificate using an openssl command. For example: - -``` -openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/_index.md deleted file mode 100644 index 93d1145f86..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Configuring OpenLDAP -weight: 1113 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/openldap/ ---- - -_Available as of v2.0.5_ - -If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. - -## Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](./openldap-config) - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. - -> **Note:** -> -> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. - -1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. -2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. - -**Result:** - -- OpenLDAP authentication is configured. 
-- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/openldap-config/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/openldap-config/_index.md deleted file mode 100644 index 74be173fe1..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/openldap-config/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: OpenLDAP Configuration Reference -weight: 2 ---- - -This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. - -For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) - -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) -- [OpenLDAP server configuration](#openldap-server-configuration) -- [User/group schema configuration](#user-group-schema-configuration) - - [User schema configuration](#user-schema-configuration) - - [Group schema configuration](#group-schema-configuration) - -## Background: OpenLDAP Authentication Flow - -1. When a user attempts to login with his LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. -2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. -3. Once the user has been found, he is authenticated with another LDAP bind request using the user's DN and provided password. -4. Once authentication succeeded, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. - -# OpenLDAP Server Configuration - -You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. 
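If you need to assemble that PEM bundle, the commands below are a minimal sketch, assuming your intermediate and root CA certificates are saved as `intermediate-ca.pem` and `root-ca.pem` and that the OpenLDAP server is reachable at `ldap.example.com`; adjust the names to match your environment.

```
# Concatenate the CA chain into a single PEM file to paste into the Rancher TLS certificate field
cat intermediate-ca.pem root-ca.pem > openldap-ca-chain.pem

# Optionally inspect the certificate chain that the LDAPS endpoint actually presents
openssl s_client -connect ldap.example.com:636 -showcerts </dev/null
```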
- -If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -
OpenLDAP Server Parameters
- -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the OpenLDAP server | -| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | -| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. | -| Service Account Password | The password for the service account. | -| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| - -# User/Group Schema Configuration - -If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. - -Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. - -If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -### User Schema Configuration - -The table below details the parameters for the user schema configuration. - -
User Schema Configuration Parameters
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | -| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | -| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | -| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | - -### Group Schema Configuration - -The table below details the parameters for the group schema configuration. - -
Group Schema Configuration Parameters
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | -| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/_index.md deleted file mode 100644 index 511f930b9d..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Configuring Shibboleth (SAML) -weight: 1210 ---- - -_Available as of v2.4.0_ - -If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. - -In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. - -> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](./about) - -This section covers the following topics: - -- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) - - [Shibboleth Prerequisites](#shibboleth-prerequisites) - - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) - - [SAML Provider Caveats](#saml-provider-caveats) -- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) - - [OpenLDAP Prerequisites](#openldap-prerequisites) - - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) - - [Troubleshooting](#troubleshooting) - -# Setting up Shibboleth in Rancher - -### Shibboleth Prerequisites -> ->- You must have a Shibboleth IdP Server configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` ->- Export a `metadata.xml` file from your IdP Server. 
For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) - -### Configure Shibboleth in Rancher -If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Shibboleth**. - -1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). - - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. - - -1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. - -### SAML Provider Caveats - -If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. - -- There is no validation on users or groups when assigning permissions to them in Rancher. -- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. -- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. -- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. - -To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. - -# Setting up OpenLDAP in Rancher - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. 
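Before wiring OpenLDAP into Rancher, it can help to verify that the read-only service account described in the prerequisites below can actually look up users and their group memberships. The following is only an illustrative sketch, assuming the server listens on `ldaps://ldap.example.com:636`, a service account of `cn=rancher-bind,dc=acme,dc=com`, a search base of `ou=people,dc=acme,dc=com`, and a test user `jdoe`; replace these with the values from your directory.

```
# Bind with the read-only service account and look up one user's display name and group memberships
ldapsearch -x -H ldaps://ldap.example.com:636 \
  -D "cn=rancher-bind,dc=acme,dc=com" -W \
  -b "ou=people,dc=acme,dc=com" "(uid=jdoe)" cn memberOf
```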
- -### OpenLDAP Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -### Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/openldap/openldap-config) Note that nested group membership is not available for Shibboleth. - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -# Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/about/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/about/_index.md deleted file mode 100644 index 6a057b2104..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/shibboleth/about/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Group Permissions with Shibboleth and OpenLDAP -weight: 1 ---- - -_Available as of Rancher v2.4_ - -This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. - -Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. - -One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. - -### Terminology - -- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships. 
-- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. -- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. -- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. - -### Adding OpenLDAP Group Permissions to Rancher Resources - -The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. - -For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions view most cluster level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. - -In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning persmissions. Without OpenLDAP, the functionality to search for groups would not be supported. - -When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. - -Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. - -![Adding OpenLDAP Group Permissions to Rancher Resources]({{}}/img/rancher/shibboleth-with-openldap-groups.svg) - \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/_index.md deleted file mode 100644 index d22d705bef..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Users and Groups -weight: 1 ---- - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. - -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/). - -## Managing Members - -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. 
For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. - -All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. - -{{< saml_caveats >}} - -## User Information - -Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. - -Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. - -### Automatically Refreshing User Information - -_Available as of v2.2.0_ - -Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: - -- **`auth-user-info-max-age-seconds`** - - This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. - -- **`auth-user-info-resync-cron`** - - This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. - - -> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. - -### Manually Refreshing User Information - -If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. - -1. From the **Global** view, click on **Users** in the navigation bar. - -1. Click on **Refresh Group Memberships**. - -**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. - ->**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. - - -## Session Length - -_Available as of v2.3.0_ - -The default length (TTL) of each user session is adjustable. The default session length is 16 hours. - -1. From the **Global** view, click on **Settings**. -1. In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** -1. 
Enter the amount of time in minutes a session length should last and click **Save.** - -**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/config-private-registry/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/config-private-registry/_index.md deleted file mode 100644 index 09779408bb..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/config-private-registry/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Configuring a Global Default Private Registry -weight: 400 -aliases: ---- - -You might want to use a private container registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the container images that are used in your clusters. - -There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. - -For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability) instructions. - -If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. - -# Setting a Private Registry with No Credentials as the Default Registry - -1. Log into Rancher and configure the default administrator password. - -1. Go into the **Settings** view. - - {{< img "/img/rancher/airgap/settings.png" "Settings" >}} - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - - {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} - -**Result:** Rancher will use your private registry to pull system images. - -# Setting a Private Registry with Credentials when Deploying a Cluster - -You can follow these steps to configure a private registry when you provision a cluster with Rancher: - -1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** -1. In the Enable Private Registries section, click **Enabled.** -1. Enter the registry URL and credentials. -1. 
Click **Save.** - -**Result:** The new cluster will be able to pull images from the private registry. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/drivers/_index.md deleted file mode 100644 index 2ae3ad4945..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Provisioning Drivers -weight: 1140 ---- - -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -### Rancher Drivers - -With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. - -There are two types of drivers within Rancher: - -* [Cluster Drivers](#cluster-drivers) -* [Node Drivers](#node-drivers) - -### Cluster Drivers - -_Available as of v2.2.0_ - -Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. - -By default, Rancher has activated several hosted Kubernetes cloud providers including: - -* [Amazon EKS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) - -There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: - -* [Alibaba ACK]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) - -### Node Drivers - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. 
- -Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: - -* [Amazon EC2]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/_index.md deleted file mode 100644 index 1684b16730..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Cluster Drivers -weight: 1 ---- - -_Available as of v2.2.0_ - -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. - -If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. - -### Managing Cluster Drivers - ->**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. - -## Activating/Deactivating Cluster Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page, select the **Cluster Drivers** tab. - -3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Cluster Drivers - -If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page select the **Cluster Drivers** tab. - -3. Click **Add Cluster Driver**. - -4. Complete the **Add Cluster Driver** form. Then click **Create**. - - -### Developing your own Cluster Driver - -In order to develop cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). 
diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/_index.md deleted file mode 100644 index b2b7368f31..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Node Drivers -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/node-drivers/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/node-drivers/ ---- - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -#### Managing Node Drivers - ->**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. - -## Activating/Deactivating Node Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Node Drivers - -If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. - -2. Click **Add Node Driver**. - -3. Complete the **Add Node Driver** form. Then click **Create**. - -### Developing your own node driver - -Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). 
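Since a node driver is just a Docker Machine driver, you can prototype the equivalent provisioning call with the `docker-machine` CLI before capturing the same options in a node template. A rough sketch with the built-in `amazonec2` driver; the credentials, region, and instance type are placeholders:

```bash
# Provision a single host with the amazonec2 Docker Machine driver. The same
# driver options show up as fields when creating an EC2 node template in Rancher.
docker-machine create \
  --driver amazonec2 \
  --amazonec2-access-key "$AWS_ACCESS_KEY_ID" \
  --amazonec2-secret-key "$AWS_SECRET_ACCESS_KEY" \
  --amazonec2-region us-west-2 \
  --amazonec2-instance-type t3.medium \
  example-node

# Confirm the machine was created and is reachable.
docker-machine ls
```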
diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/_index.md deleted file mode 100644 index 40233b3567..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata/_index.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Upgrading Kubernetes without Upgrading Rancher -weight: 1120 ---- - -_Available as of v2.3.0_ - -The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. - -> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. - -Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. - -This table below describes the CRDs that are affected by the periodic data sync. - -> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. - -| Resource | Description | Rancher API URL | -|----------|-------------|-----------------| -| System Images | List of system images used to deploy Kubernetes through RKE. | `/v3/rkek8ssystemimages` | -| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | -| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | - -Administrators might configure the RKE metadata settings to do the following: - -- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher -- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub -- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher - -### Refresh Kubernetes Metadata - -The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) - -To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. 
- -You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. - -### Configuring the Metadata Synchronization - -> Only administrators can change these settings. - -The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. - -The way that the metadata is configured depends on the Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. - -If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -{{% /tab %}} -{{% tab "Rancher v2.3" %}} -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. - - `branch`: This refers to the Git branch name if the URL is a Git URL. - -If you don't have an air gap setup, you don't need to specify the URL or Git branch where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata.git) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. -{{% /tab %}} -{{% /tabs %}} - -### Air Gap Setups - -Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. 
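For example, pointing Rancher at a mirrored copy of the metadata comes down to editing the `rke-metadata-config` setting described above, either in the UI or through the API endpoint mentioned earlier. The sketch below assumes the setting stores its configuration as a JSON string in the setting object's `value` field; the server URL, API token, and mirror URL are placeholders, so inspect the setting with a `GET` first to confirm the payload shape for your version.

```bash
RANCHER_URL="https://rancher.example.com"   # placeholder
TOKEN="token-xxxxx:yyyyyyyyyyyy"             # placeholder Rancher API token

# Inspect the current metadata configuration.
curl -sk -u "$TOKEN" "$RANCHER_URL/v3/settings/rke-metadata-config"

# Point Rancher at a local mirror of data.json and refresh once per day.
curl -sk -u "$TOKEN" -X PUT \
  -H 'Content-Type: application/json' \
  "$RANCHER_URL/v3/settings/rke-metadata-config" \
  -d '{"value":"{\"refresh-interval-minutes\":\"1440\",\"url\":\"https://mirror.example.com/kontainer-driver-metadata/data.json\"}"}'
```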
For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) - -If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. - -To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) - -After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. - -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. -1. Download the OS specific image lists for Linux or Windows. -1. Download `rancher-images.txt`. -1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. - -**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/_index.md deleted file mode 100644 index fedcf15d17..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Pod Security Policies -weight: 1135 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/pod-security-policies/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/pod-security-policies/ - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-a-pod-security-policy/ ---- - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). - -If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. - -- [How PSPs Work](#how-psps-work) -- [Default PSPs](#default-psps) - - [Restricted](#restricted) - - [Unrestricted](#unrestricted) -- [Creating PSPs](#creating-psps) - - [Requirements](#requirements) - - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) -- [Configuration](#configuration) - -# How PSPs Work - -You can assign PSPs at the cluster or project level. - -PSPs work through inheritance: - -- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. 
-- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. -- You can override the default PSP by assigning a different PSP directly to the project. - -Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. - -Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). - -# Default PSPs - -_Available as of v2.0.7_ - -Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. - -### Restricted - -This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: - -- Prevents pods from running as a privileged user and prevents escalation of privileges. -- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added. - -### Unrestricted - -This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. - -# Creating PSPs - -Using Rancher, you can create a Pod Security Policy using our GUI rather than creating a YAML file. - -### Requirements - -Rancher can only assign PSPs for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) - -You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) - -It is a best practice to set PSP at the cluster level. - -We recommend adding PSPs during cluster and project creation instead of adding it to an existing one. - -### Creating PSPs in the Rancher UI - -1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. - - **Step Result:** The **Add Policy** form opens. - -2. Name the policy. - -3. Complete each section of the form. Refer to the [Kubernetes documentation]((https://kubernetes.io/docs/concepts/policy/pod-security-policy/)) for more information on what each policy does. 
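For reference, a restricted-style policy of this kind is expressed as a `PodSecurityPolicy` object. The following is a trimmed-down approximation of the upstream example linked above, shown only to illustrate the fields involved; it is not the exact policy that Rancher ships:

```bash
# Apply an illustrative restricted-style PodSecurityPolicy (not Rancher's built-in one).
kubectl apply -f - <<'EOF'
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example
spec:
  privileged: false                  # no privileged containers
  allowPrivilegeEscalation: false    # block privilege escalation
  runAsUser:
    rule: MustRunAsNonRoot           # pods may not run as root
  supplementalGroups:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  seLinux:
    rule: RunAsAny
  volumes:                           # core volume types only
    - configMap
    - emptyDir
    - projected
    - secret
    - downwardAPI
    - persistentVolumeClaim
EOF
```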
- - -# Configuration - -The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) - - - - - -[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems -[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces -[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rbac/_index.md deleted file mode 100644 index f563c61492..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Role-Based Access Control (RBAC) -weight: 1120 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/users-permissions-roles/ ---- - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/), users can either be local or external. - -After you configure external authentication, the users that display on the **Users** page changes. - -- If you are logged in as a local user, only local users display. - -- If you are logged in as an external user, both external and local users display. - -## Users and Roles - -Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. - -- [Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/): - - Define user authorization outside the scope of any particular cluster. - -- [Cluster and Project Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/): - - Define user authorization inside the specific cluster or project where they are assigned the role. - -Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/_index.md deleted file mode 100644 index 6e155df703..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/_index.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Cluster and Project Roles -weight: 1127 ---- - -Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. - -### Membership and Role Assignment - -The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. - -When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. 
- -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. - -### Cluster Roles - -_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. - -- **Cluster Owner:** - - These users have full control over the cluster and all resources in it. - -- **Cluster Member:** - - These users can view most cluster level resources and create new projects. - -#### Custom Cluster Roles - -Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. - -#### Cluster Role Reference - -The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. - -| Built-in Cluster Role | Owner | Member | -| ---------------------------------- | ------------- | --------------------------------- | -| Create Projects | ✓ | ✓ | -| Manage Cluster Backups             | ✓ | | -| Manage Cluster Catalogs | ✓ | | -| Manage Cluster Members | ✓ | | -| Manage Nodes | ✓ | | -| Manage Storage | ✓ | | -| View All Projects | ✓ | | -| View Cluster Catalogs | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | -| View Nodes | ✓ | ✓ | - -For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Note:** ->When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Giving a Custom Cluster Role to a Cluster Member - -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/) cluster owners and admins can then assign those roles to cluster members. - -To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. - -To assign the role to a new cluster member, - -1. Go to the **Cluster** view, then go to the **Members** tab. -1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create.** - -**Result:** The member has the assigned role. - -To assign any custom role to an existing cluster member, - -1. Go to the member you want to give the role to. Click the **⋮ > View in API.** -1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** - -**Result:** The member has the assigned role. 
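Under the hood, the **View in API** flow edits the member's role binding object. A hedged `curl` sketch of the same change, assuming the binding is exposed under `/v3/clusterRoleTemplateBindings/<binding-id>`; copy the exact URL and binding ID from the **View in API** screen rather than constructing them by hand, and confirm the field names with a `GET` first:

```bash
RANCHER_URL="https://rancher.example.com"   # placeholder
TOKEN="token-xxxxx:yyyyyyyyyyyy"             # placeholder Rancher API token
BINDING_ID="<binding-id>"                    # placeholder, taken from "View in API"

# Inspect the current binding to confirm its shape for your Rancher version.
curl -sk -u "$TOKEN" "$RANCHER_URL/v3/clusterRoleTemplateBindings/$BINDING_ID"

# Change the member's role by updating roleTemplateId (the API UI sends the
# full object back; depending on your version you may need to do the same).
curl -sk -u "$TOKEN" -X PUT \
  -H 'Content-Type: application/json' \
  "$RANCHER_URL/v3/clusterRoleTemplateBindings/$BINDING_ID" \
  -d '{"roleTemplateId": "my-custom-cluster-role"}'
```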
- -### Project Roles - -_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. - -- **Project Owner:** - - These users have full control over the project and all resources in it. - -- **Project Member:** - - These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. - - >**Note:** - > - >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. - -- **Read Only:** - - These users can view everything in the project but cannot create, update, or delete anything. - - >**Caveat:** - > - >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - - -#### Custom Project Roles - -Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. - -#### Project Role Reference - -The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. - -| Built-in Project Role | Owner | Member | Read Only | -| ---------------------------------- | ------------- | ----------------------------- | ------------- | -| Manage Project Members | ✓ | | | -| Create Namespaces | ✓ | ✓ | | -| Manage Config Maps | ✓ | ✓ | | -| Manage Ingress | ✓ | ✓ | | -| Manage Project Catalogs | ✓ | | | -| Manage Secrets | ✓ | ✓ | | -| Manage Service Accounts | ✓ | ✓ | | -| Manage Services | ✓ | ✓ | | -| Manage Volumes | ✓ | ✓ | | -| Manage Workloads | ✓ | ✓ | | -| View Secrets | ✓ | ✓ | | -| View Config Maps | ✓ | ✓ | ✓ | -| View Ingress | ✓ | ✓ | ✓ | -| View Project Members | ✓ | ✓ | ✓ | -| View Project Catalogs | ✓ | ✓ | ✓ | -| View Service Accounts | ✓ | ✓ | ✓ | -| View Services | ✓ | ✓ | ✓ | -| View Volumes | ✓ | ✓ | ✓ | -| View Workloads | ✓ | ✓ | ✓ | - -> **Notes:** -> ->- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. 
->- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. - -### Defining Custom Roles -As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. - -When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. - -### Default Cluster and Project Roles - -By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. - -There are two methods for changing default cluster/project roles: - -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - -- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. - - For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). - ->**Note:** -> ->- Although you can [lock]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. ->- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. - -### Configuring Default Roles for Cluster and Project Creators - -You can change the cluster or project role(s) that are automatically assigned to the creating user. - -1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. - -1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. - -1. Enable the role as default. -{{% accordion id="cluster" label="For Clusters" %}} -1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. -1. Click **Save**. -{{% /accordion %}} -{{% accordion id="project" label="For Projects" %}} -1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. -1. Click **Save**. -{{% /accordion %}} - -1. If you want to remove a default role, edit the permission and select **No** from the default roles option. - -**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. 
- -### Cluster Membership Revocation Behavior - -When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - -- Access the projects they hold membership in. -- Exercise any [individual project roles](#project-role-reference) they are assigned. - -If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/_index.md deleted file mode 100644 index 3496070b02..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Custom Roles -weight: 1128 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/roles/ ---- - -Within Rancher, _roles_ determine what actions a user can make within a cluster or project. - -Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) -- [Creating a custom global role](#creating-a-custom-global-role) -- [Deleting a custom global role](#deleting-a-custom-global-role) -- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) - -## Prerequisites - -To complete the tasks on this page, one of the following permissions are required: - - - [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/). - - [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. - -## Creating A Custom Role for a Cluster or Project - -While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. - -The steps to add custom roles differ depending on the version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.0.7+" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Select a tab to determine the scope of the roles you're adding. The tabs are: - - - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. - - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. - -1. Click **Add Cluster/Project Role.** - -1. **Name** the role. - -1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. - - > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. - -1. 
Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. - -{{% /tab %}} -{{% tab "Rancher before v2.0.7" %}} - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Click **Add Role**. - -1. **Name** the role. - -1. Choose whether to set the role to a status of [locked]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/). - - > **Note:** Locked roles cannot be assigned to users. - -1. In the **Context** dropdown menu, choose the scope of the role assigned to the user. The contexts are: - - - **All:** The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. - - - **Cluster:** This role is valid for assignment when adding/managing members to _only_ clusters. - - - **Project:** This role is valid for assignment when adding/managing members to _only_ projects. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. - -{{% /tab %}} -{{% /tabs %}} - -## Creating a Custom Global Role - -_Available as of v2.4.0_ - -### Creating a Custom Global Role that Copies Rules from an Existing Role - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. 
This allows you to only configure the variations between the existing role and the new role. - -The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. - -To create a custom global role based on an existing role, - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. Click **Save.** - -### Creating a Custom Global Role that Does Not Copy Rules from Another Role - -Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, click **Add Global Role.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. Click **Save.** - -## Deleting a Custom Global Role - -_Available as of v2.4.0_ - -When deleting a custom global role, all global role bindings with this custom role are deleted. - -If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. - -Custom global roles can be deleted, but built-in roles cannot be deleted. - -To delete a custom global role, - -1. Go to the **Global** view and click **Security > Roles.** -2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** -3. Click **Delete.** - -## Assigning a Custom Global Role to a Group - -_Available as of v2.4.0_ - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. - -When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. 
- -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Custom** section, choose any custom global role that will be assigned to the group. -1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/_index.md deleted file mode 100644 index 125c2cbe69..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/_index.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: Global Permissions -weight: 1126 ---- - -_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. - -Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. - -- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. - -- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. - -- **User-Base:** User-Base users have login-access only. - -You cannot update or delete the built-in Global Permissions. - -This section covers the following topics: - -- [Global permission assignment](#global-permission-assignment) - - [Global permissions for new local users](#global-permissions-for-new-local-users) - - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) -- [Custom global permissions](#custom-global-permissions) - - [Custom global permissions reference](#custom-global-permissions-reference) - - [Configuring default global permissions for new users](#configuring-default-global-permissions) - - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) - - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - - [Refreshing group memberships](#refreshing-group-memberships) - -# Global Permission Assignment - -Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. - -### Global Permissions for New Local Users - -When you create a new local user, you assign them a global permission as you complete the **Add User** form. 
- -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) - -### Global Permissions for Users with External Authentication - -When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) - -Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) - -As of Rancher v2.4.0, you can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. - -# Custom Global Permissions - -Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. - -When a user from an [external authentication source]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. - -However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. - -The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. - -Administrators can enforce custom global permissions in multiple ways: - -- [Changing the default permissions for new users](#configuring-default-global-permissions) -- [Configuring global permissions for individual users](#configuring-global-permissions-for-individual-users) -- [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - -### Custom Global Permissions Reference - -The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. 
- -| Custom Global Permission | Administrator | Standard User | User-Base | -| ---------------------------------- | ------------- | ------------- |-----------| -| Create Clusters | ✓ | ✓ | | -| Create RKE Templates | ✓ | ✓ | | -| Manage Authentication | ✓ | | | -| Manage Catalogs | ✓ | | | -| Manage Cluster Drivers | ✓ | | | -| Manage Node Drivers | ✓ | | | -| Manage PodSecurityPolicy Templates | ✓ | | | -| Manage Roles | ✓ | | | -| Manage Settings | ✓ | | | -| Manage Users | ✓ | | | -| Use Catalog Templates | ✓ | ✓ | | -| User Base\* (Basic log-in access) | ✓ | ✓ | | - -> \*This role has two names: -> -> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. -> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. - -For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Notes:** -> -> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. -> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Configuring Default Global Permissions - -If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. - -> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. - -To change the default global permissions that are assigned to external users upon their first log in, follow these steps: - -1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. - -1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. - -1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. - -1. If you want to remove a default permission, edit the permission and select **No** from **New User Default**. - -**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. - -### Configuring Global Permissions for Individual Users - -To configure permission for a user, - -1. Go to the **Users** tab. - -1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** - -1. In the **Global Permissions** section, click **Custom.** - -1. Check the boxes for each subset of permissions you want the user to have access to. - -1. 
Click **Save.** - -> **Result:** The user's global permissions have been updated. - -### Configuring Global Permissions for Groups - -_Available as of v2.4.0_ - -If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. - -After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. - -For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) - -For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. - -### Refreshing Group Memberships - -When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. - -To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. - -An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. - -To refresh group memberships, - -1. From the **Global** view, click **Security > Users.** -1. Click **Refresh Group Memberships.** - -**Result:** Any changes to the group members' permissions will take effect. 
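As noted earlier, the full rule set behind each global permission can be read from the API at `/v3/globalRoles`. A quick sketch for pulling that list with `curl`; the server URL and token are placeholders, and the individual role ID must be taken from the list output rather than guessed:

```bash
RANCHER_URL="https://rancher.example.com"   # placeholder
TOKEN="token-xxxxx:yyyyyyyyyyyy"             # placeholder Rancher API token

# List every global role, including the built-in Administrator, Standard User,
# and User-Base roles, together with the Kubernetes rules that compose them.
curl -sk -u "$TOKEN" "$RANCHER_URL/v3/globalRoles"

# Drill into a single role once you know its ID from the list above.
curl -sk -u "$TOKEN" "$RANCHER_URL/v3/globalRoles/<role-id>"
```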
diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/_index.md deleted file mode 100644 index 7c787167e9..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Locked Roles -weight: 1129 ---- - -You can set roles to a status of `locked`. Locking roles prevent them from being assigned users in the future. - -Locked roles: - -- Cannot be assigned to users that don't already have it assigned. -- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. -- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. - - **Example:** let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. - - To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects. Then, you use this new custom role when adding users to a cluster. - -Roles can be locked by the following users: - -- Any user assigned the `Administrator` global permission. -- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. - - -## Locking/Unlocking Roles - -If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. - -You can lock roles in two contexts: - -- When you're [adding a custom role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). -- When you editing an existing role (see below). - -1. From the **Global** view, select **Security** > **Roles**. - -2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. - -3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/_index.md deleted file mode 100644 index d137d68954..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/_index.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: RKE Templates -weight: 7010 ---- - -_Available as of Rancher v2.3.0_ - -RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. - -RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. - -With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. - -RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. - -Admins control which cluster options can be changed by end users. 
RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. - -If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. - -The core features of RKE templates allow DevOps and security teams to: - -- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices -- Prevent less technical users from making uninformed choices when provisioning clusters -- Share different templates with different sets of users and groups -- Delegate ownership of templates to users who are trusted to make changes to them -- Control which users can create templates -- Require users to create clusters from a template - -# Configurable Settings - -RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: - -- Cloud provider options -- Pod security options -- Network providers -- Ingress controllers -- Network security configuration -- Network plugins -- Private registry URL and credentials -- Add-ons -- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services - -The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. - -# Scope of RKE Templates - -RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. - -RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware). - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. - -As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. 
- - -# Example Scenarios -When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. - -These [example scenarios]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. - -Some of the example scenarios include the following: - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) - -# Template Management - -When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. - -Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. - -RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. - -In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. - -For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. 
- -The documents in this section explain the details of RKE template management: - -- [Getting permission to create templates]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/) -- [Creating and revising templates]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/) -- [Enforcing template settings](./enforcement/#requiring-new-clusters-to-use-an-rke-template) -- [Overriding template settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/overrides/) -- [Sharing templates with cluster creators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) -- [Sharing ownership of a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -An [example YAML configuration file for a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml) is provided for reference. - -# Applying Templates - -You can [create a cluster from a template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) - -If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -# Standardizing Hardware - -RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware). - -# YAML Customization - -If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. - -The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. - -For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) - -### Add-ons - -The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). - -The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. 
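As a rough sketch of that nesting (the file name, namespace, and manifest URL below are purely illustrative, not part of any real setup), a template fragment drafted locally before pasting it into the **Edit as YAML** view might look like this:

```
# Draft an RKE template fragment; the add-ons are nested under the
# rancher_kubernetes_engine_config directive, as described above.
cat > rke-template-snippet.yaml <<'EOF'
rancher_kubernetes_engine_config:
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: example-apps
  addons_include:
    - https://example.com/manifests/example-addon.yaml
EOF
```

Here `addons` holds manifests inline, while `addons_include` points at manifests to pull down, matching the two options described above.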
If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. - -Some things you could do with add-ons include: - -- Install applications on the Kubernetes cluster after it starts -- Install plugins on nodes that are deployed with a Kubernetes daemonset -- Automatically set up namespaces, service accounts, or role binding - -The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/_index.md deleted file mode 100644 index 2b6263637b..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Applying Templates -weight: 50 ---- - -You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) - -RKE templates can be applied to new clusters. - -As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -This section covers the following topics: - -- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) -- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) -- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) - -### Creating a Cluster from an RKE Template - -To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: - -1. From the **Global** view, go to the **Clusters** tab. -1. Click **Add Cluster** and choose the infrastructure provider. -1. Provide the cluster name and node template details as usual. -1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** -1. Choose an existing template and revision from the dropdown menu. -1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. -1. Click **Save** to launch the cluster. - -### Updating a Cluster Created with an RKE Template - -When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. 
- -- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) -- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. - -If a cluster was created from an RKE template, you can edit the cluster to update it to a new revision of the template. - -As of Rancher v2.3.3, an existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update it to a new revision of the template. - -> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -### Converting an Existing Cluster to Use an RKE Template - -_Available as of v2.3.3_ - -This section describes how to create an RKE template from an existing cluster. - -RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. -- New clusters can be [created from the new template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/_index.md deleted file mode 100644 index 94f7022a7a..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/_index.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: Creating and Revising Templates -weight: 32 ---- - -This section describes how to manage RKE templates and revisions. You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** - -Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. - -Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. 
The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. - -The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a template](#creating-a-template) -- [Updating a template](#updating-a-template) -- [Deleting a template](#deleting-a-template) -- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) -- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) -- [Disabling a template revision](#disabling-a-template-revision) -- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) -- [Setting a template revision as default](#setting-a-template-revision-as-default) -- [Deleting a template revision](#deleting-a-template-revision) -- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) -- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) - -### Prerequisites - -You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions) - -You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -### Creating a Template - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Click **Add Template.** -1. Provide a name for the template. An auto-generated name is already provided for the template' first version, which is created along with this template. -1. Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. -1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. - -**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. - -### Updating a Template - -When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. - -You can't edit individual revisions. 
Since you can't edit individual revisions of a template, in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) - -When new template revisions are created, clusters using an older revision of the template are unaffected. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to edit and click the **⋮ > Edit.** -1. Edit the required information and click **Save.** -1. Optional: You can change the default revision of this template and also change who it is shared with. - -**Result:** The template is updated. To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) - -### Deleting a Template - -When you no longer use an RKE template for any of your clusters, you can delete it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** -1. Confirm the deletion when prompted. - -**Result:** The template is deleted. - -### Creating a Revision Based on the Default Revision - -You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** -1. Complete the rest of the form to create a new revision. - -**Result:** The RKE template revision is cloned and configured. - -### Creating a Revision Based on a Cloned Revision - -When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** -1. Complete the rest of the form. - -**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. - -### Disabling a Template Revision - -When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. - -You can disable the revision if it is not being used by any cluster. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to disable. Then select **⋮ > Disable.** - -**Result:** The RKE template revision cannot be used to create a new cluster. - -### Re-enabling a Disabled Template Revision - -If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** - -**Result:** The RKE template revision can be used to create a new cluster. - -### Setting a Template Revision as Default - -When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. 
You can configure which revision is used by default. - -To set an RKE template revision as default, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** - -**Result:** The RKE template revision will be used as the default option when clusters are created with the template. - -### Deleting a Template Revision - -You can delete all revisions of a template except for the default revision. - -To permanently delete a revision, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** - -**Result:** The RKE template revision is deleted. - -### Upgrading a Cluster to Use a New Template Revision - -> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates) -> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. - -To upgrade a cluster to use a new template revision, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** -1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. -1. Click **Save.** - -**Result:** The cluster is upgraded to use the settings defined in the new template revision. - -### Exporting a Running Cluster to a New RKE Template and Revision - -You can save an existing cluster's settings as an RKE template. - -This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. -- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/_index.md deleted file mode 100644 index 8823bb1de1..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Template Creator Permissions -weight: 10 ---- - -Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. - -For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/). 
- -# Giving Users Permission to Create Templates - -Templates can only be created by users who have the global permission **Create RKE Templates.** - -Administrators have the global permission to create templates, and only administrators can give that permission to other users. - -For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing) - -Administrators can give users permission to create RKE templates in two ways: - -- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) -- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) - -### Allowing a User to Create Templates - -An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** - -**Result:** The user has permission to create RKE templates. - -### Allowing New Users to Create Templates by Default - -Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. - -1. From the **Global** view, click **Security > Roles.** -1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. -1. Select the option **Yes: Default role for new users** and click **Save.** - -**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. - -### Revoking Permission to Create Templates - -Administrators can remove a user's permission to create templates with the following steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. -1. Click **Save.** - -**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/enforcement/_index.md deleted file mode 100644 index 7c949d48da..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/enforcement/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Template Enforcement -weight: 32 ---- - -This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. - -By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, - -- Only an administrator has the ability to create clusters without a template. -- All standard users must use an RKE template to create a new cluster. -- Standard users cannot create a cluster without using a template. 
- -Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) - -After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) - -# Requiring New Clusters to Use an RKE Template - -You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. - -To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **True** and click **Save.** - -**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. - -# Disabling RKE Template Enforcement - -To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **False** and click **Save.** - -**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/_index.md deleted file mode 100644 index e40f654f74..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-scenarios/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Example Scenarios -weight: 5 ---- - -These example scenarios describe how an organization could use templates to standardize cluster creation. - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. 
If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) - - -# Enforcing a Template Setting for Everyone - -Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. - -1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. -1. The administrator makes the template public. -1. The administrator turns on template enforcement. - -**Results:** - -- All Rancher users in the organization have access to the template. -- All new clusters created by [standard users]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. -- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. - -In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. - -# Templates for Basic and Advanced Users - -Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. - -1. First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. -1. The administrator then creates two templates: - - - One template for basic users, with almost every option specified except for access keys - - One template for advanced users, which has most or all options has **Allow User Override** turned on - -1. The administrator shares the advanced template with only the advanced users. -1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. - -**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. - -# Updating Templates and Clusters Created with Them - -Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. 
- -In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. - -The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: - -- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. -- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. -- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but major version upgrades will not be allowed. - -# Allowing Other Users to Control and Share a Template - -Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. - -Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. - -To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: - -- [Revise the template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change -- [Disable outdated revisions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it -- [Delete the whole template]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction -- [Set a certain revision as default]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. 
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/overrides/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/overrides/_index.md deleted file mode 100644 index 3542d45b34..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/overrides/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Overriding Template Settings -weight: 33 ---- - -When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** - -After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. - -When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. - -The **Allow User Override** model of the RKE template is useful for situations such as: - -- Administrators know that some settings will need the flexibility to be frequently updated over time -- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md deleted file mode 100644 index 71c982ca22..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: RKE Templates and Infrastructure -weight: 90 ---- - -In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. - -Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. - -If you want to standardize the hardware in your clusters, use RKE templates conjunction with node templates or with a server provisioning tool such as Terraform. - -### Node Templates - -[Node templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. - -### Terraform - -Terraform is a server provisioning tool. It uses infrastructure-as-code that lets you create almost every aspect of your infrastructure with Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. 
- -This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. - -Terraform allows you to: - -- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates -- Leverage catalog apps and multi-cluster apps -- Codify infrastructure across many platforms, including Rancher and major cloud providers -- Commit infrastructure-as-code to version control -- Easily repeat configuration and setup of infrastructure -- Incorporate infrastructure changes into standard development practices -- Prevent configuration drift, in which some servers become configured differently than others - -# How Does Terraform Work? - -Terraform is written in files with the extension `.tf`. It is written in HashiCorp Configuration Language, which is a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. - -To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. - -Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. - -When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. Then when you run `terraform apply`, the changes would be deployed. - -# Tips for Working with Terraform - -- There are examples of how to provision most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) - -- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. - -- You can also modify auth in the Terraform provider. - -- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. - -- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. 
In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. - -# Tip for Creating CIS Benchmark Compliant Clusters - -This section describes one way that you can make security and compliance-related config files standard in your clusters. - -When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.0-v2.4/en/security/) you have an encryption config file and an audit log config file. - -Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. - -Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. - -In this way, you can create flags that comply with the CIS benchmark. - -# Resources - -- [Terraform documentation](https://www.terraform.io/docs/) -- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) -- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/_index.md deleted file mode 100644 index 0ab942a12a..0000000000 --- a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/template-access-and-sharing/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Access and Sharing -weight: 31 ---- - -If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. - -Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. - -When you share a template, each user can have one of two access levels: - -- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. -- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. - -If you create a template, you automatically become an owner of that template. - -If you want to delegate responsibility for updating the template, you can share ownership of the template. 
For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rke-templates/creating-and-revising) - -There are several ways to share templates: - -- Add users to a new RKE template during template creation -- Add users to an existing RKE template -- Make the RKE template public, sharing it with all users in the Rancher setup -- Share template ownership with users who are trusted to modify the template - -### Sharing Templates with Specific Users or Groups - -To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. In the **Share Template** section, click on **Add Member**. -1. Search in the **Name** field for the user or group you want to share the template with. -1. Choose the **User** access type. -1. Click **Save.** - -**Result:** The user or group can create clusters using the template. - -### Sharing Templates with All Users - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** - -**Result:** All users in the Rancher setup can create clusters using the template. - -### Sharing Ownership of Templates - -If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. - -In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. - -To give Owner access to a user or group, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. -1. In the **Access Type** field, click **Owner.** -1. Click **Save.** - -**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/api/_index.md b/content/rancher/v2.0-v2.4/en/api/_index.md deleted file mode 100644 index ac4d369d0d..0000000000 --- a/content/rancher/v2.0-v2.4/en/api/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: API -weight: 24 ---- - -## How to use the API - -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). - -## Authentication - -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. 
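For instance, a minimal authenticated request might look like the following sketch, where the server URL and the API key (access key and secret key joined by a colon) are placeholders for values from your own Rancher setup:

```
# List the clusters this API key can see, using HTTP basic authentication.
# Replace the URL and the token-abc12:examplesecretkey pair with your own values.
curl -s -u "token-abc12:examplesecretkey" \
  https://rancher.example.com/v3/clusters
```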
- -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.0-v2.4/en/api/api-tokens). - -## Making requests - -The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). - -- Every type has a Schema which describes: - - The URL to get to the collection of this type of resources - - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. - - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). - - Every field that filtering is allowed on - - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. - - -- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works; it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as an `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. - -- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/<type>`) or get a specific resource (`/v3/<type>/<id>`). Anything deeper than that is subject to change in future releases. - -- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. - -- Most resources have actions, which do something or change the state of the resource. To use these, send an HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output; see the individual documentation for each type or the schemas for specific information. - -- To edit a resource, send an HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. - -- To delete a resource, send an HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. - -- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/<type>`). - -## Filtering - -Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to set up filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". 
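As a rough sketch (the collection, field names, and values here are only illustrative, and the server URL and API key are placeholders), filtering from the command line might look like this:

```
# Simple "equals" match on a field of a collection.
curl -s -u "token-abc12:examplesecretkey" \
  "https://rancher.example.com/v3/clusters?name=production"

# Modifier suffix appended to the field name, as in field_gt=42.
curl -s -u "token-abc12:examplesecretkey" \
  "https://rancher.example.com/v3/clusters?nodeCount_gt=3"
```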
See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. - -## Sorting - -Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. - -## Pagination - -API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. diff --git a/content/rancher/v2.0-v2.4/en/backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/_index.md deleted file mode 100644 index 13b9122b7d..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Backups and Disaster Recovery -weight: 5 ---- - -This section is devoted to protecting your data in a disaster scenario. - -To protect yourself from a disaster scenario, you should create backups on a regular basis. - -- [Backup](./backup) -- [Restore](./restore) - diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/_index.md deleted file mode 100644 index d74a41ca00..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/backup/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Backup -weight: 50 -aliases: - - /rancher/v2.0-v2.4/en/installation/after-installation/ - - /rancher/v2.0-v2.4/en/backups/ - - /rancher/v2.0-v2.4/en/backups/backups - - /rancher/v2.0-v2.4/en/backups/legacy/backup - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/ - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/ - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/ ---- -This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario. - - - Rancher server backups: - - [Rancher installed on a K3s Kubernetes cluster](./k3s-backups) - - [Rancher installed on an RKE Kubernetes cluster](./rke-backups) - - [Rancher installed with Docker](./docker-backups) - -For information on backing up Rancher launched Kubernetes clusters, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/) - -If you are looking to back up your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), please refer [here]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/). 
diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md deleted file mode 100644 index e20ea42087..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/backup/docker-backups/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -shortTitle: Docker Installs -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/backups/backups/single-node-backups/ - - /rancher/v2.0-v2.4/en/backups/legacy/backup/single-node-backups/ - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/docker-backups - - /rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/ - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/ ---- - - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. - -### How to Read Placeholders - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run \ - --volumes-from rancher-data- \ - -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -### Obtaining Placeholder Data - -Get the placeholder data by running: - -``` -docker ps -``` - -Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
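As a convenience, the running container names and image tags can be listed together using Docker's built-in Go-template formatting (a sketch; this is plain Docker CLI behavior, not a Rancher-specific command):

```bash
# Show only the container names and the images (including tags) they run
docker ps --format "table {{.Names}}\t{{.Image}}"
```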
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -### Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#how-to-read-placeholders). - - ``` - docker stop - ``` -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each placeholder. - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md b/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md deleted file mode 100644 index 65fd599b0b..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/backup/k3s-backups/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Backing up Rancher Installed on a K3s Kubernetes Cluster -shortTitle: K3s Installs -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/backups/backups/k3s-backups - - /rancher/v2.0-v2.4/en/backups/backups/k8s-backups/k3s-backups - - /rancher/v2.0-v2.4/en/backups/legacy/backup/k8s-backups/k3s-backups/ - - /rancher/v2.0-v2.4/en/backups/legacy/backups/k3s-backups - - /rancher/v2.0-v2.4/en/backups/legacy/backup/k3s-backups - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/k3s-backups - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/ ---- - -When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data. - -The database administrator will need to back up the external database, or restore it from a snapshot or dump. - -We recommend configuring the database to take recurring snapshots. - -### K3s Kubernetes Cluster Data - -One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### Creating Snapshots and Restoring Databases from Snapshots - -For details on taking database snapshots and restoring your database from them, refer to the official database documentation: - -- [Official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html) -- [Official PostgreSQL documentation](https://www.postgresql.org/docs/8.3/backup-dump.html) -- [Official etcd documentation](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md deleted file mode 100644 index a85625de79..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/backup/rke-backups/_index.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Backing up Rancher Installed on an RKE Kubernetes Cluster -shortTitle: RKE Installs -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/after-installation/k8s-install-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/backups/backups/ha-backups - - /rancher/v2.0-v2.4/en/backups/backups/k8s-backups/ha-backups - - /rancher/v2.0-v2.4/en/backups/legacy/backup/k8s-backups/ha-backups/ - - /rancher/v2.0-v2.4/en/backups/legacy/backups/ha-backups - - /rancher/v2.0-v2.4/en/backups/legacy/backup/ha-backups - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/ ---- -This section describes how to create backups of your high-availability Rancher install. - -In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Cluster Data within an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Requirements - -### RKE Version - -The commands for taking `etcd` snapshots are only available in RKE v0.1.7 and later. - -### RKE Config File - -You'll need the RKE config file that you used for Rancher install, `rancher-cluster.yml`. You created this file during your initial install. Place this file in same directory as the RKE binary. - - -# Backup Outline - - -Backing up your high-availability Rancher cluster is process that involves completing multiple tasks. - -1. [Take Snapshots of the `etcd` Database](#1-take-snapshots-of-the-etcd-database) - - Take snapshots of your current `etcd` database using Rancher Kubernetes Engine (RKE). - -1. [Store Snapshot(s) Externally](#2-back-up-local-snapshots-to-a-safe-location) - - After taking your snapshots, export them to a safe location that won't be affected if your cluster encounters issues. - - -# 1. Take Snapshots of the `etcd` Database - -Take snapshots of your `etcd` database. You can use these snapshots later to recover from a disaster scenario. There are two ways to take snapshots: recurringly, or as a one-off. Each option is better suited to a specific use case. Read the short description below each link to know when to use each option. - -- [Option A: Recurring Snapshots](#option-a-recurring-snapshots) - - After you stand up a high-availability Rancher install, we recommend configuring RKE to automatically take recurring snapshots so that you always have a safe restore point available. - -- [Option B: One-Time Snapshots](#option-b-one-time-snapshots) - - We advise taking one-time snapshots before events like upgrades or restore of another snapshot. - -### Option A: Recurring Snapshots - -For all high-availability Rancher installs, we recommend taking recurring snapshots so that you always have a safe restore point available. - -To take recurring snapshots, enable the `etcd-snapshot` service, which is a service that's included with RKE. This service runs in a service container alongside the `etcd` container. You can enable this service by adding some code to `rancher-cluster.yml`. - -**To Enable Recurring Snapshots:** - -The steps to enable recurring snapshots differ based on the version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+" %}} - -1. Open `rancher-cluster.yml` with your favorite text editor. -2. Edit the code for the `etcd` service to enable recurring snapshots. Snapshots can be saved in a S3 compatible backend. - - ``` - services: - etcd: - backup_config: - enabled: true # enables recurring etcd snapshots - interval_hours: 6 # time increment between snapshots - retention: 60 # time in days before snapshot purge - # Optional S3 - s3backupconfig: - access_key: "myaccesskey" - secret_key: "myaccesssecret" - bucket_name: "my-backup-bucket" - folder: "folder-name" # Available as of v2.3.0 - endpoint: "s3.eu-west-1.amazonaws.com" - region: "eu-west-1" - custom_ca: |- - -----BEGIN CERTIFICATE----- - $CERTIFICATE - -----END CERTIFICATE----- - ``` -4. Save and close `rancher-cluster.yml`. -5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. -6. Run the following command: - ``` - rke up --config rancher-cluster.yml - ``` - -**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. 
If configured, the snapshots are also uploaded to your S3 compatible backend. -{{% /tab %}} -{{% tab "RKE v0.1.x" %}} - -1. Open `rancher-cluster.yml` with your favorite text editor. -2. Edit the code for the `etcd` service to enable recurring snapshots. - - ``` - services: - etcd: - snapshot: true # enables recurring etcd snapshots - creation: 6h0s # time increment between snapshots - retention: 24h # time increment before snapshot purge - ``` -4. Save and close `rancher-cluster.yml`. -5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. -6. Run the following command: - ``` - rke up --config rancher-cluster.yml - ``` - -**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. -{{% /tab %}} -{{% /tabs %}} - - -### Option B: One-Time Snapshots - -When you're about to upgrade Rancher or restore it to a previous snapshot, you should snapshot your live image so that you have a backup of `etcd` in its last known state. - -**To Take a One-Time Local Snapshot:** - -1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. - -2. Enter the following command. Replace `` with any name that you want to use for the snapshot (e.g. `upgrade.db`). - - ``` - rke etcd snapshot-save \ - --name \ - --config rancher-cluster.yml - ``` - -**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. - -**To Take a One-Time S3 Snapshot:** - -_Available as of RKE v0.2.0_ - -1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. - -2. Enter the following command. Replace `` with any name that you want to use for the snapshot (e.g. `upgrade.db`). - - ```shell - rke etcd snapshot-save \ - --config rancher-cluster.yml \ - --name snapshot-name \ - --s3 \ - --access-key S3_ACCESS_KEY \ - --secret-key S3_SECRET_KEY \ - --bucket-name s3-bucket-name \ - --s3-endpoint s3.amazonaws.com \ - --folder folder-name # Available as of v2.3.0 - ``` - -**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. It is also uploaded to the S3 compatible backend. - -# 2. Back up Local Snapshots to a Safe Location - -> **Note:** If you are using RKE v0.2.0, you can enable saving the backups to a S3 compatible backend directly and skip this step. - -After taking the `etcd` snapshots, save them to a safe location so that they're unaffected if your cluster experiences a disaster scenario. This location should be persistent. - -In this documentation, as an example, we're using Amazon S3 as our safe location, and [S3cmd](http://s3tools.org/s3cmd) as our tool to create the backups. The backup location and tool that you use are ultimately your decision. 
- -**Example:** - -``` -root@node:~# s3cmd mb s3://rke-etcd-snapshots -root@node:~# s3cmd put /opt/rke/etcd-snapshots/snapshot.db s3://rke-etcd-snapshots/ -``` diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/_index.md deleted file mode 100644 index bb6569b5e8..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/restore/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Restore -weight: 1010 -aliases: - - /rancher/v2.0-v2.4/en/backups/restorations - - /rancher/v2.0-v2.4/en/backups/legacy/restore - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/ ---- -If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. - -- [Restoring backups for Rancher installed with Docker](./docker-restores) -- [Restoring backups for Rancher installed on an RKE Kubernetes cluster](./rke-restore) -- [Restoring backups for Rancher installed on a K3s Kubernetes cluster](./k3s-restore) - -If you are looking to restore your [Rancher launched Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), please refer to [this section]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md deleted file mode 100644 index 1dd88ae4ee..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/restore/docker-restores/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Restoring Backups—Docker Installs -shortTitle: Docker Installs -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/backups/restorations/single-node-restoration - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/docker-restores - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/ ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup--" -``` - -In this command, `` and `-` are environment variables for your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version number for your Rancher backup. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.0-v2.4/en/backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup--.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. - - ``` - docker start - ``` - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md deleted file mode 100644 index c6de8e35c6..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/_index.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Restoring Backups—Kubernetes installs -shortTitle: RKE Installs -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/after-installation/ha-backup-and-restoration/ - - /rancher/v2.0-v2.4/en/backups/restorations/ha-restoration - - /rancher/v2.0-v2.4/en/backups/restorations/k8s-restore/rke-restore - - /rancher/v2.0-v2.4/en/backups/legacy/restore/k8s-restore/rke-restore/ - - /rancher/v2.0-v2.4/en/backups/legacy/restore/rke-restore - - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/rke-restore - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/ ---- - -This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster. -This will restore the Kubernetes configuration and the Rancher database and state. - -> **Note:** This document covers clusters set up with RKE >= v0.2.x, for older RKE versions refer to the [RKE Documentation]({{}}/rke/latest/en/etcd-snapshots/restoring-from-backup). - -## Restore Outline - - - -- [1. Preparation](#1-preparation) -- [2. Place Snapshot](#2-place-snapshot) -- [3. Configure RKE](#3-configure-rke) -- [4. 
Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster) - - - -### 1. Preparation - -It is advised that you run the restore from your local host or a jump box/bastion where your cluster yaml, rke statefile, and kubeconfig are stored. You will need [RKE]({{}}/rke/latest/en/installation/) and [kubectl]({{}}/rancher/v2.0-v2.4/en/faq/kubectl/) CLI utilities installed locally. - -Prepare by creating 3 new nodes to be the target for the restored Rancher instance. We recommend that you start with fresh nodes and a clean state. For clarification on the requirements, review the [Installation Requirements](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation/requirements/). - -Alternatively you can re-use the existing nodes after clearing Kubernetes and Rancher configurations. This will destroy the data on these nodes. See [Node Cleanup]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) for the procedure. - -You must restore each of your etcd nodes to the same snapshot. Copy the snapshot you're using from one of your nodes to the others before running the `etcd snapshot-restore` command. - -> **IMPORTANT:** Before starting the restore make sure all the Kubernetes services on the old cluster nodes are stopped. We recommend powering off the nodes to be sure. - -### 2. Place Snapshot - -As of RKE v0.2.0, snapshots could be saved in an S3 compatible backend. To restore your cluster from the snapshot stored in S3 compatible backend, you can skip this step and retrieve the snapshot in [4. Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster). Otherwise, you will need to place the snapshot directly on one of the etcd nodes. - -Pick one of the clean nodes that will have the etcd role assigned and place the zip-compressed snapshot file in `/opt/rke/etcd-snapshots` on that node. - -> **Note:** Because of a current limitation in RKE, the restore process does not work correctly if `/opt/rke/etcd-snapshots` is a NFS share that is mounted on all nodes with the etcd role. The easiest options are to either keep `/opt/rke/etcd-snapshots` as a local folder during the restore process and only mount the NFS share there after it has been completed, or to only mount the NFS share to one node with an etcd role in the beginning. - -### 3. Configure RKE - -Use your original `rancher-cluster.yml` and `rancher-cluster.rkestate` files. If they are not stored in a version control system, it is a good idea to back them up before making any changes. - -``` -cp rancher-cluster.yml rancher-cluster.yml.bak -cp rancher-cluster.rkestate rancher-cluster.rkestate.bak -``` - -If the replaced or cleaned nodes have been configured with new IP addresses, modify the `rancher-cluster.yml` file to ensure the address and optional internal_address fields reflect the new addresses. - -> **IMPORTANT:** You should not rename the `rancher-cluster.yml` or `rancher-cluster.rkestate` files. It is important that the filenames match each other. - -### 4. Restore the Database and bring up the Cluster - -You will now use the RKE command-line tool with the `rancher-cluster.yml` and the `rancher-cluster.rkestate` configuration files to restore the etcd database and bring up the cluster on the new nodes. - -> **Note:** Ensure your `rancher-cluster.rkestate` is present in the same directory as the `rancher-cluster.yml` file before starting the restore, as this file contains the certificate data for the cluster. 
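A quick way to confirm both files are in place before kicking off the restore (a trivial sketch, assuming the filenames used throughout this page):

```bash
# Both files must sit in the same directory as the rke binary
ls -l rancher-cluster.yml rancher-cluster.rkestate
```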
- -#### Restoring from a Local Snapshot - -When restoring etcd from a local snapshot, the snapshot is assumed to be located on the target node in the directory `/opt/rke/etcd-snapshots`. - -``` -rke etcd snapshot-restore --name snapshot-name --config ./rancher-cluster.yml -``` - -> **Note:** The --name parameter expects the filename of the snapshot without the extension. - -#### Restoring from a Snapshot in S3 - -_Available as of RKE v0.2.0_ - -When restoring etcd from a snapshot located in an S3 compatible backend, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. - -``` -$ rke etcd snapshot-restore --config ./rancher-cluster.yml --name snapshot-name \ ---s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com \ ---folder folder-name # Available as of v2.3.0 -``` - -#### Options for `rke etcd snapshot-restore` - -S3 specific options are only available for RKE v0.2.0+. - -| Option | Description | S3 Specific | -| --- | --- | ---| -| `--name` value | Specify snapshot name | | -| `--config` value | Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] | | -| `--s3` | Enabled backup to s3 |* | -| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | -| `--access-key` value | Specify s3 accessKey | *| -| `--secret-key` value | Specify s3 secretKey | *| -| `--bucket-name` value | Specify s3 bucket name | *| -| `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| -| `--region` value | Specify the s3 bucket location (optional) | *| -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | - -#### Testing the Cluster - -Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl]({{}}/rancher/v2.0-v2.4/en/faq/kubectl/#configuration) for details. - -#### Check Kubernetes Pods - -Wait for the pods running in `kube-system`, `ingress-nginx` and the `rancher` pod in `cattle-system` to return to the `Running` state. - -> **Note:** `cattle-cluster-agent` and `cattle-node-agent` pods will be in an `Error` or `CrashLoopBackOff` state until Rancher server is up and the DNS/Load Balancer have been pointed at the new cluster. - -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -cattle-system cattle-cluster-agent-766585f6b-kj88m 0/1 Error 6 4m -cattle-system cattle-node-agent-wvhqm 0/1 Error 8 8m -cattle-system rancher-78947c8548-jzlsr 0/1 Running 1 4m -ingress-nginx default-http-backend-797c5bc547-f5ztd 1/1 Running 1 4m -ingress-nginx nginx-ingress-controller-ljvkf 1/1 Running 1 8m -kube-system canal-4pf9v 3/3 Running 3 8m -kube-system cert-manager-6b47fc5fc-jnrl5 1/1 Running 1 4m -kube-system kube-dns-7588d5b5f5-kgskt 3/3 Running 3 4m -kube-system kube-dns-autoscaler-5db9bbb766-s698d 1/1 Running 1 4m -kube-system metrics-server-97bc649d5-6w7zc 1/1 Running 1 4m -kube-system tiller-deploy-56c4cf647b-j4whh 1/1 Running 1 4m -``` - -#### Finishing Up - -Rancher should now be running and available to manage your Kubernetes clusters. 
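As a final check, you can point `kubectl` at the restored cluster and confirm the Rancher deployment has rolled out (a sketch, assuming the kubeconfig filename used on this page and that the deployment is named `rancher` in the `cattle-system` namespace, as the pod listing above suggests):

```bash
export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml
kubectl get nodes
kubectl -n cattle-system rollout status deploy/rancher
```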
-> **IMPORTANT:** Remember to save your updated RKE config (`rancher-cluster.yml`) state file (`rancher-cluster.rkestate`) and `kubectl` credentials (`kube_config_rancher-cluster.yml`) files in a safe place for future maintenance for example in a version control system. diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md b/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md deleted file mode 100644 index bfc20ae2b4..0000000000 --- a/content/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Rolling back to v2.0.0-v2.1.5" -weight: 1 -aliases: - - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/ ---- - -> Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved here and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. - -If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. - -- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. -- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. - -Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: - -1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. - - **Rancher Installed with Docker** - ``` - docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - - **Rancher Installed on a Kubernetes Cluster** - ``` - kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - -2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** - -3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). - -4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. - -5. Apply the backed up tokens based on how you installed Rancher. - - **Rancher Installed with Docker** - - Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. 
- ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - docker exec $1 kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from Unavailable back to Available. - - **Rancher Installed on a Kubernetes Cluster** - - Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. - ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from `Unavailable` back to `Available`. - -6. Continue using Rancher as normal. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/best-practices/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/_index.md deleted file mode 100644 index 6ea4f98b95..0000000000 --- a/content/rancher/v2.0-v2.4/en/best-practices/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Best Practices Guide -weight: 4 -aliases: - - /rancher/v2.x/en/best-practices/v2.0-v2.4/ ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. - -If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. - -Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. 
- -For more guidance on best practices, you can consult these resources: - -- [Security]({{}}/rancher/v2.0-v2.4/en/security/) -- [Rancher Blog](https://rancher.com/blog/) - - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) -- [Rancher Forum](https://forums.rancher.com/) -- [Rancher Users Slack](https://slack.rancher.io/) -- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md deleted file mode 100644 index 6a11761bfc..0000000000 --- a/content/rancher/v2.0-v2.4/en/best-practices/deployment-strategies/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Rancher Deployment Strategies -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/best-practices/deployment-strategies - - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-strategies - - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/ ---- - -There are two recommended deployment strategies. Each one has its own pros and cons. Read more about which one would fit best for your use case: - -* [Hub and Spoke](#hub-and-spoke-strategy) -* [Regional](#regional-strategy) - -# Hub and Spoke Strategy ---- - -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. - -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} - -### Pros - -* Environments could have nodes and network connectivity across regions. -* Single control plane interface to view/see all regions and environments. -* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. - -### Cons - -* Subject to network latencies. -* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. - -# Regional Strategy ---- -In the regional deployment model a control plane is deployed in close proximity to the compute nodes. - -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} - -### Pros - -* Rancher functionality in regions stay operational if a control plane in another region goes down. -* Network latency is greatly reduced, improving the performance of functionality in Rancher. -* Upgrades of the Rancher control plane can be done independently per region. - -### Cons - -* Overhead of managing multiple Rancher installations. -* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. -* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. 
diff --git a/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md deleted file mode 100644 index 34f1f0c173..0000000000 --- a/content/rancher/v2.0-v2.4/en/best-practices/deployment-types/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Tips for Running Rancher -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/best-practices/deployment-types - - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-types - - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/ ---- - -A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. - -When you set up your high-availability Rancher installation, consider the following: - -### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. - -### Don't Run Rancher on a Hosted Kubernetes Environment -When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE]({{}}/rke/latest/en/etcd-snapshots/) or [Rancher]({{}}/rancher/v2.0-v2.4/en/backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. - -### Make sure nodes are configured correctly for Kubernetes ### -It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) - -### When using RKE: Backup the Statefile -RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. - -### Run All Nodes in the Cluster in the Same Datacenter -For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. 
For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. - -### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. - -### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. - -However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. - -After you [enable monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) and [cluster alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. - diff --git a/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md b/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md deleted file mode 100644 index 5e23755f72..0000000000 --- a/content/rancher/v2.0-v2.4/en/best-practices/management/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Tips for Scaling, Security and Reliability -weight: 101 -aliases: - - /rancher/v2.0-v2.4/en/best-practices/management - - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/management - - /rancher/v2.x/en/best-practices/management/ - - /rancher/v2.x/en/best-practices/v2.0-v2.4/management/ ---- - -Rancher allows you to set up numerous combinations of configurations. Some configurations are more appropriate for development and testing, while there are other best practices for production environments for maximum availability and fault tolerance. The following best practices should be followed for production. - -- [Tips for Preventing and Handling Problems](#tips-for-preventing-and-handling-problems) -- [Network Topology](#network-topology) -- [Tips for Scaling and Reliability](#tips-for-scaling-and-reliability) -- [Tips for Security](#tips-for-security) -- [Tips for Multi-Tenant Clusters](#tips-for-multi-tenant-clusters) -- [Class of Service and Kubernetes Clusters](#class-of-service-and-kubernetes-clusters) -- [Network Security](#network-security) - -# Tips for Preventing and Handling Problems - -These tips can help you solve problems before they happen. - -### Run Rancher on a Supported OS and Supported Docker Version -Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) should be used for running Rancher, along with a supported version of Docker. 
These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. - -### Upgrade Your Kubernetes Version -Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). - -Rancher’s SLAs are not community dependent, but as Kubernetes is a community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. - -### Kill Pods Randomly During Testing -Run chaoskube or a similar mechanism to randomly kill pods in your test environment. This will test the resiliency of your infrastructure and the ability of Kubernetes to self-heal. It's not recommended to run this in your production environment. - -### Deploy Complicated Clusters with Terraform -Rancher's "Add Cluster" UI is preferable for getting started with Kubernetes cluster orchestration or for simple use cases. However, for more complex or demanding use cases, it is recommended to use a CLI/API driven approach. [Terraform](https://www.terraform.io/) is recommended as the tooling to implement this. When you use Terraform with version control and a CI/CD environment, you can have high assurances of consistency and reliability when deploying Kubernetes clusters. This approach also gives you the most customization options. - -Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2-terraform-provider/) for working with Rancher 2.0 Kubernetes. It is called the [Rancher2 Provider.](https://www.terraform.io/docs/providers/rancher2/index.html) - -### Upgrade Rancher in a Staging Environment -All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. - -### Renew Certificates Before they Expire -Multiple people in your organization should set up calendar reminders for certificate renewal. Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) to track certificate expiration. - -Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. - -Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/). - -### Enable Recurring Snapshots for Backing up and Restoring the Cluster -Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. 
For details about configuring snapshots, refer to the [RKE documentation]({{}}/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups]({{}}/rancher/v2.0-v2.4/en/backups/). - -### Provision Clusters with Rancher -When possible, use Rancher to provision your Kubernetes cluster rather than importing a cluster. This will ensure the best compatibility and supportability. - -### Use Stable and Supported Rancher Versions for Production -Do not upgrade production environments to alpha, beta, release candidate (rc), or "latest" versions. These early releases are often not stable and may not have a future upgrade path. - -When installing or upgrading a non-production environment to an early release, anticipate problems such as features not working, data loss, outages, and inability to upgrade without a reinstall. - -Make sure the feature version you are upgrading to is considered "stable" as determined by Rancher. Use the beta, release candidate, and "latest" versions in a testing, development, or demo environment to try out new features. Feature version upgrades, for example 2.1.x to 2.2.x, should be considered as and when they are released. Some bug fixes and most features are not back ported into older versions. - -Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. - -For more detail on what happens during the Rancher product lifecycle, refer to the [Support Maintenance Terms](https://rancher.com/support-maintenance-terms/). - -# Network Topology -These tips can help Rancher work more smoothly with your network. - -### Use Low-latency Networks for Communication Within Clusters -Kubernetes clusters are best served by low-latency networks. This is especially true for the control plane components and etcd, where lots of coordination and leader election traffic occurs. Networking between Rancher server and the Kubernetes clusters it manages are more tolerant of latency. - -### Allow Rancher to Communicate Directly with Clusters -Limit the use of proxies or load balancers between Rancher server and Kubernetes clusters. As Rancher is maintaining a long-lived web sockets connection, these intermediaries can interfere with the connection lifecycle as they often weren't configured with this use case in mind. - - -# Tips for Scaling and Reliability -These tips can help you scale your cluster more easily. - -### Use One Kubernetes Role Per Host -Separate the etcd, control plane, and worker roles onto different hosts. Don't assign multiple roles to the same host, such as a worker and control plane. This will give you maximum scalability. - -### Run the Control Plane and etcd on Virtual Machines -Run your etcd and control plane nodes on virtual machines where you can scale vCPU and memory easily if needed in the future. - -### Use at Least Three etcd Nodes -Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the majority of nodes, therefore it is not recommended to have clusters of even numbers. Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Use at Least Three Control Plane Nodes -Provision three or more control plane nodes. Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. 
Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. - -### Monitor Your Cluster -Closely monitor and scale your nodes as needed. You should [enable cluster monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. - - -# Tips for Security -Below are some basic tips for increasing security in Rancher. For more detailed information about securing your cluster, you can refer to these resources: - -- Rancher's [security documentation and Kubernetes cluster hardening guide]({{}}/rancher/v2.0-v2.4/en/security/) -- [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) - -### Update Rancher with Security Patches -Keep your Rancher installation up to date with the latest patches. Patch updates have important software fixes and sometimes have security fixes. When patches with security fixes are released, customers with Rancher licenses are notified by e-mail. These updates are also posted on Rancher's [forum](https://forums.rancher.com/). - -### Report Security Issues Directly to Rancher -If you believe you have uncovered a security-related problem in Rancher, please communicate this immediately and discretely to the Rancher team (security@rancher.com). Posting security issues on public forums such as Twitter, Rancher Slack, GitHub, etc. can potentially compromise security for all Rancher customers. Reporting security issues discretely allows Rancher to assess and mitigate the problem. Security patches are typically given high priority and released as quickly as possible. - -### Only Upgrade One Component at a Time -In addition to Rancher software updates, closely monitor security fixes for related software, such as Docker, Linux, and any libraries used by your workloads. For production environments, try to avoid upgrading too many entities during a single maintenance window. Upgrading multiple components can make it difficult to root cause an issue in the event of a failure. As business requirements allow, upgrade one component at a time. - -# Tips for Multi-Tenant Clusters - -### Namespaces -Each tenant should have their own unique namespaces within the cluster. This avoids naming conflicts and allows resources to be only visible to their owner through use of RBAC policy - -### Project Isolation -Use Rancher's Project Isolation to automatically generate Network Policy between Projects (sets of Namespaces). This further protects workloads from interference - -### Resource Limits -Enforce use of sane resource limit definitions for every deployment in your cluster. This not only protects the owners of the deployment, but the neighboring resources from other tenants as well. Remember, namespaces do not isolate at the node level, so over-consumption of resources on a node affects other namespace deployments. Admission controllers can be written to require resource limit definitions - -### Resource Requirements -Enforce use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to appropriately schedule workloads. Otherwise you will eventually end up with over-provisioned nodes. 
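For instance, requests and limits can be applied to an existing workload imperatively (a sketch; the namespace, deployment name, and values are hypothetical and should be tuned per workload):

```bash
# Set CPU/memory requests (used for scheduling) and limits (used to cap usage)
kubectl -n tenant-a set resources deployment web-api \
  --requests=cpu=250m,memory=256Mi \
  --limits=cpu=500m,memory=512Mi
```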
- -# Class of Service and Kubernetes Clusters -A class of service describes the expectations around cluster uptime, durability, and duration of maintenance windows. Typically, organizations group these characteristics into labels such as "dev" or "prod". - -### Consider fault domains -Kubernetes clusters can span multiple classes of service; however, it is important to consider the ability of one workload to affect another. Without proper deployment practices, such as resource limits and requirements, a misbehaving deployment has the potential to impact the health of the cluster. In a "dev" environment it is common for end users to exercise less caution with deployments, increasing the chance of such behavior. Sharing this environment with your production workload increases risk. - -### Upgrade risks -Kubernetes upgrades are not without risk. The best way to predict the outcome of an upgrade is to try it on a cluster with a load and use case similar to your production cluster. This is where having non-production class of service clusters can be advantageous. - -### Resource Efficiency -Clusters can be built with varying degrees of redundancy. In a class of service with low expectations for uptime, resources and cost can be conserved by building clusters without redundant Kubernetes control components. This approach may also free up budget and resources to increase redundancy at the production level. - -# Network Security -In general, you can use network security best practices in your Rancher and Kubernetes clusters. Consider the following: - -### Use a Firewall Between your Hosts and the Internet -Firewalls should be used between your hosts and the Internet (or corporate intranet). These could be enterprise firewall appliances in a datacenter or SDN constructs in the cloud, such as VPCs, security groups, and ingress and egress rules. Try to limit inbound access to only the ports and IP addresses that require it. Outbound access can be shut off (air gap) if the environment contains sensitive information that requires this restriction. If available, use firewalls with intrusion detection and DDoS prevention. - -### Run Periodic Security Scans -Run security and penetration scans on your environment periodically. Even with well-designed infrastructure, a poorly designed microservice could compromise the entire environment. diff --git a/content/rancher/v2.0-v2.4/en/cli/_index.md b/content/rancher/v2.0-v2.4/en/cli/_index.md deleted file mode 100644 index fe6e865db4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cli/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Using the Rancher Command Line Interface -description: The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI -metaTitle: "Using the Rancher Command Line Interface " -metaDescription: "The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI" -weight: 21 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cli - - /rancher/v2.x/en/cli/ --- - -The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. - -### Download Rancher CLI - -The binary can be downloaded directly from the UI. The link can be found on the right-hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux.
You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -### Requirements - -After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - -- Your Rancher Server URL, which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). - -### CLI Authentication - -Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): - -```bash -$ ./rancher login https:// --token -``` - -If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. - -### Project Selection - -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. - -**Example: `./rancher context switch` Output** -``` -User:rancher-cli-directory user$ ./rancher context switch -NUMBER CLUSTER NAME PROJECT ID PROJECT NAME -1 cluster-2 c-7q96s:p-h4tmb project-2 -2 cluster-2 c-7q96s:project-j6z6d Default -3 cluster-1 c-lchzv:p-xbpdt project-1 -4 cluster-1 c-lchzv:project-s2mch Default -Select a Project: -``` - -After you enter a number, the console displays a message that you've changed projects. - -``` -INFO[0005] Setting new context to project project-1 -INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json -``` - -### Commands - -The following commands are available for use in Rancher CLI. - -| Command | Result | -|---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or Rancher charts. | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/). | -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | -| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | -| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | -| `namespaces, [namespace]` |Performs operations on namespaces. | -| `nodes, [node]` |Performs operations on nodes. | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/). 
| -| `ps` | Displays [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads) in a project. | -| `settings, [setting]` | Shows the current settings for your Rancher Server. | -| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | -| `help, [h]` | Shows a list of commands or help for one command. | - - -### Rancher CLI Help - -Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. - -All commands accept the `--help` flag, which documents each command's usage. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/_index.md deleted file mode 100644 index 29946116c5..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Cluster Administration -weight: 8 ---- - -After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. - -This page covers the following topics: - -- [Switching between clusters](#switching-between-clusters) -- [Managing clusters in Rancher](#managing-clusters-in-rancher) -- [Configuring tools](#configuring-tools) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. - -## Switching between Clusters - -To switch between clusters, use the drop-down available in the navigation bar. - -Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. - -## Managing Clusters in Rancher - -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. - -{{% include file="/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Configuring Tools - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - -- Alerts -- Notifiers -- Logging -- Monitoring -- Istio Service Mesh -- OPA Gatekeeper - -For more information, see [Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/_index.md deleted file mode 100644 index 5cac3d1bab..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/_index.md +++ /dev/null @@ -1,220 +0,0 @@ ---- -title: Backing up a Cluster -weight: 2045 ---- - -_Available as of v2.2.0_ - -In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) can be easily performed. - -Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. 
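As an illustration of a one-time snapshot taken outside the UI (this assumes direct access to the cluster's `cluster.yml` and the RKE CLI, which is not required when working through Rancher), the RKE CLI can save a named snapshot on the etcd nodes:

```
# creates a one-time snapshot on each etcd node under /opt/rke/etcd-snapshots
rke etcd snapshot-save --config cluster.yml --name before-k8s-upgrade
```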
- -Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -This section covers the following topics: - -- [How snapshots work](#how-snapshots-work) -- [Configuring recurring snapshots](#configuring-recurring-snapshots) -- [One-time snapshots](#one-time-snapshots) -- [Snapshot backup targets](#snapshot-backup-targets) - - [Local backup target](#local-backup-target) - - [S3 backup target](#s3-backup-target) - - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) - - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) -- [Viewing available snapshots](#viewing-available-snapshots) -- [Safe timestamps](#safe-timestamps) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -# How Snapshots Work - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} - -### Snapshot Components - -When Rancher creates a snapshot, it includes three components: - -- The cluster data in etcd -- The Kubernetes version -- The cluster configuration in the form of the `cluster.yml` - -Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. - -The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -It's always recommended to take a new snapshot before any upgrades. - -### Generating the Snapshot from etcd Nodes - -For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -### Snapshot Naming Conventions - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). 
The naming convention is as follows: - -- `m` stands for manual -- `r` stands for recurring -- `l` stands for local -- `s` stands for S3 - -Some example snapshot names are: - -- c-9dmxz-rl-8b2cx -- c-9dmxz-ml-kr56m -- c-9dmxz-ms-t6bjb -- c-9dmxz-rs-8gxc8 - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. The snapshot is retrieved from S3, if S3 is configured. -2. The snapshot is unzipped (if zipped). -3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -4. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -5. The cluster is restored and post-restore actions will be done in the cluster. - -{{% /tab %}} -{{% tab "Rancher before v2.4.0" %}} -When Rancher creates a snapshot, only the etcd data is included in the snapshot. - -Because the Kubernetes version is not included in the snapshot, there is no option to restore a cluster to a different Kubernetes version. - -It's always recommended to take a new snapshot before any upgrades. - -### Generating the Snapshot from etcd Nodes - -For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -### Snapshot Naming Conventions - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: - -- `m` stands for manual -- `r` stands for recurring -- `l` stands for local -- `s` stands for S3 - -Some example snapshot names are: - -- c-9dmxz-rl-8b2cx -- c-9dmxz-ml-kr56m -- c-9dmxz-ms-t6bjb -- c-9dmxz-rs-8gxc8 - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. The snapshot is retrieved from S3, if S3 is configured. -2. The snapshot is unzipped (if zipped). -3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -4. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -5. The cluster is restored and post-restore actions will be done in the cluster. - -{{% /tab %}} -{{% /tabs %}} - -# Configuring Recurring Snapshots - -Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. - -By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. 
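As a quick sanity check (assuming SSH access to an etcd node), the snapshots written by the recurring schedule can be listed directly on disk:

```
# on an etcd node: recurring and one-time snapshots are stored here by default
ls -lh /opt/rke/etcd-snapshots
```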
- -During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. - -In the **Advanced Cluster Options** section, there are several options available to configure: - -| Option | Description | Default Value| -| --- | ---| --- | -| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. Options are either local or in S3 | local| -|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| -| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | -| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | - -# One-Time Snapshots - -In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. - -1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot. - -2. Click the **⋮ > Snapshot Now**. - -**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. - -# Snapshot Backup Targets - -Rancher supports two different backup targets: - -* [Local Target](#local-backup-target) -* [S3 Target](#s3-backup-target) - -### Local Backup Target - -By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. - -### S3 Backup Target - -The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. - -| Option | Description | Required| -|---|---|---| -|S3 Bucket Name| S3 bucket name where backups will be stored| *| -|S3 Region|S3 region for the backup bucket| | -|S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | -|S3 Access Key|S3 access key with permission to access the backup bucket|*| -|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| -| Custom CA Certificate | A custom certificate used to access private S3 backends _Available as of v2.2.5_ || - -### Using a custom CA certificate for S3 - -_Available as of v2.2.5_ - -The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. - -### IAM Support for Storing Snapshots in S3 - -The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. 
To use IAM authentication, the following requirements must be met: - - - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. - - The cluster etcd nodes must have network access to the specified S3 endpoint. - - The Rancher Server worker node(s) must have an instance role that has read/write to the designated backup bucket. - - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. - - To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -# Viewing Available Snapshots - -The list of all available snapshots for the cluster is available in the Rancher UI. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -# Safe Timestamps - -_Available as of v2.3.0_ - -As of v2.2.6, snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. As of Rancher v2.3.0, the option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. - -This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cleaning-cluster-nodes/_index.md deleted file mode 100644 index 3fe9ad5797..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Removing Kubernetes Components from Nodes -description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually -weight: 2055 ---- - -This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. - -When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. - -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. 
When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. - -## What Gets Removed? - -When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. - -| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Imported Nodes][4] | -| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | -| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | -| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | -| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | -| Rancher Deployment | ✓ | ✓ | ✓ | | -| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | -| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | -| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | - -[1]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/ - -## Removing a Node from a Cluster by Rancher UI - -When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -## Removing Rancher Components from a Cluster Manually - -When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. - ->**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. - -### Removing Rancher Components from Imported Clusters - -For imported clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. - -After the imported cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. - -{{% tabs %}} -{{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. - -After you initiate the removal of an imported cluster using the Rancher UI (or API), the following events occur. - -1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. 
This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. - -1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. - -1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. - -**Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% tab "By Script" %}} -Rather than cleaning imported cluster nodes using the Rancher UI, you can run a script instead. This functionality is available since `v2.1.0`. - ->**Prerequisite:** -> ->Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. - -1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: - - ``` - chmod +x user-cluster.sh - ``` - -1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. - - If you don't have an air gap environment, skip this step. - -1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): - - >**Tip:** - > - >Add the `-dry-run` flag to preview the script's outcome without making changes. - ``` - ./user-cluster.sh rancher/rancher-agent: - ``` - -**Result:** The script runs. All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% /tabs %}} - -### Windows Nodes - -To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. - -To run the script, you can use this command in the PowerShell: - -``` -pushd c:\etc\rancher -.\cleanup.ps1 -popd -``` - -**Result:** The node is reset and can be re-added to a Kubernetes cluster. - -### Docker Containers, Images, and Volumes - -Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) - -**To clean all Docker containers, images and volumes:** - -``` -docker rm -f $(docker ps -qa) -docker rmi -f $(docker images -q) -docker volume rm $(docker volume ls -q) -``` - -### Mounts - -Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. 
- -Mounts | ---------| -`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | -`/var/lib/kubelet` | -`/var/lib/rancher` | - -**To unmount all mounts:** - -``` -for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done -``` - -### Directories and Files - -The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. - ->**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. - -Directories | ---------| -`/etc/ceph` | -`/etc/cni` | -`/etc/kubernetes` | -`/opt/cni` | -`/opt/rke` | -`/run/secrets/kubernetes.io` | -`/run/calico` | -`/run/flannel` | -`/var/lib/calico` | -`/var/lib/etcd` | -`/var/lib/cni` | -`/var/lib/kubelet` | -`/var/lib/rancher/rke/log` | -`/var/log/containers` | -`/var/log/kube-audit` | -`/var/log/pods` | -`/var/run/calico` | - -**To clean the directories:** - -``` -rm -rf /etc/ceph \ - /etc/cni \ - /etc/kubernetes \ - /opt/cni \ - /opt/rke \ - /run/secrets/kubernetes.io \ - /run/calico \ - /run/flannel \ - /var/lib/calico \ - /var/lib/etcd \ - /var/lib/cni \ - /var/lib/kubelet \ - /var/lib/rancher/rke/log \ - /var/log/containers \ - /var/log/kube-audit \ - /var/log/pods \ - /var/run/calico -``` - -### Network Interfaces and Iptables - -The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. - -### Network Interfaces - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. - -Interfaces | ---------| -`flannel.1` | -`cni0` | -`tunl0` | -`caliXXXXXXXXXXX` (random interface names) | -`vethXXXXXXXX` (random interface names) | - -**To list all interfaces:** - -``` -# Using ip -ip address show - -# Using ifconfig -ifconfig -a -``` - -**To remove an interface:** - -``` -ip link delete interface_name -``` - -### Iptables - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. - -Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
- -Chains | ---------| -`cali-failsafe-in` | -`cali-failsafe-out` | -`cali-fip-dnat` | -`cali-fip-snat` | -`cali-from-hep-forward` | -`cali-from-host-endpoint` | -`cali-from-wl-dispatch` | -`cali-fw-caliXXXXXXXXXXX` (random chain names) | -`cali-nat-outgoing` | -`cali-pri-kns.NAMESPACE` (chain per namespace) | -`cali-pro-kns.NAMESPACE` (chain per namespace) | -`cali-to-hep-forward` | -`cali-to-host-endpoint` | -`cali-to-wl-dispatch` | -`cali-tw-caliXXXXXXXXXXX` (random chain names) | -`cali-wl-to-host` | -`KUBE-EXTERNAL-SERVICES` | -`KUBE-FIREWALL` | -`KUBE-MARK-DROP` | -`KUBE-MARK-MASQ` | -`KUBE-NODEPORTS` | -`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | -`KUBE-SERVICES` | -`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | - -**To list all iptables rules:** - -``` -iptables -L -t nat -iptables -L -t mangle -iptables -L -``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cloning-clusters/_index.md deleted file mode 100644 index 11c9c443ef..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cloning-clusters/_index.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Cloning Clusters -weight: 2035 -aliases: - - /rancher/v2.0-v2.4/en/cluster-provisioning/cloning-clusters/ ---- - -If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. - -Duplication of imported clusters is not supported. - -| Cluster Type | Cloneable? | -|----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) | ✓ | -| [Custom Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) | ✓ | -| [Imported Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) | | - -> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. - -## Prerequisites - -Download and install [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys) if necessary. - - -## 1. Export Cluster Config - -Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. - -1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. - -1. Enter the following command to list the clusters managed by Rancher. - - - ./rancher cluster ls - - -1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. - -1. Enter the following command to export the configuration for your cluster. - - - ./rancher clusters export - - - **Step Result:** The YAML for a cloned cluster prints to Terminal. - -1. Copy the YAML to your clipboard and paste it in a new file. 
Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). - -## 2. Modify Cluster Config - -Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. - -> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. - - >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. - - -1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. - - ```yml - Version: v3 - clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - rancherKubernetesEngineConfig: - addonJobTimeout: 30 - authentication: - strategy: x509 - authorization: {} - bastionHost: {} - cloudProvider: {} - ignoreDockerVersion: true - ``` - -1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. - - ```yml - nodePools: - : - clusterId: do - controlPlane: true - etcd: true - hostnamePrefix: mark-do - nodeTemplateId: do - quantity: 1 - worker: true - ``` - -1. When you're done, save and close the configuration. - -## 3. Launch Cloned Cluster - -Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: - - ./rancher up --file cluster-template.yml - -**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/_index.md deleted file mode 100644 index 1b979a5af4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Cluster Access -weight: 1 ---- - -This section is about what tools can be used to access clusters managed by Rancher. - -For information on how to give users permission to access a cluster, see the section on [adding users to clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/) - -For more information on roles-based access control, see [this section.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/) - -For information on how to set up an authentication system, see [this section.]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) - - -### Rancher UI - -Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. 
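For example (a rough sketch; the server URL and bearer token are placeholders you would substitute with your own), the same clusters shown in the UI can be listed with the Rancher CLI or queried from the API directly:

```
# Rancher CLI (after `rancher login`)
./rancher cluster ls

# Rancher API: list clusters using an API bearer token
curl -s -H "Authorization: Bearer <BEARER_TOKEN>" "https://<RANCHER_SERVER>/v3/clusters"
```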
- -### kubectl - -You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - -- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/). -- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](./kubectl/). - -### Rancher CLI - -You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - -### Rancher API - -Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/ace/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/ace/_index.md deleted file mode 100644 index 866126c459..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/ace/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: How the Authorized Cluster Endpoint Works -weight: 2015 ---- - -This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -### About the kubeconfig File - -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). - -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. - -After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. - -_Available as of v2.4.6_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](../cli) to be present in your PATH. 
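As a minimal example (the file path is illustrative), pointing kubectl at the downloaded kubeconfig is enough to start issuing commands against that cluster:

```
# use the kubeconfig downloaded from the cluster view in Rancher
export KUBECONFIG=$HOME/.kube/my-cluster.yaml
kubectl get nodes
```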
- - -### Two Authentication Methods for RKE Clusters - -If the cluster is not an [RKE cluster,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. - -For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: - -- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. -- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. - -This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. - -To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. - -### About the kube-api-auth Authentication Webhook - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. - -The scheduling rules for `kube-api-auth` are listed below: - -_Applies to v2.3.0 and higher_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/_index.md deleted file mode 100644 index 4f4ebe8559..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Adding Users to Clusters -weight: 2020 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-managing-cluster-members/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/cluster-members/ - - /rancher/v2.0-v2.4/en/cluster-admin/cluster-members ---- - -If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. - ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members/) instead. - -There are two contexts where you can add cluster members: - -- Adding Members to a New Cluster - - You can add members to a cluster as you create it (recommended if possible). - -- [Adding Members to an Existing Cluster](#editing-cluster-membership) - - You can always add members to a cluster after a cluster is provisioned. - -## Editing Cluster Membership - -Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. - -1. From the **Global** view, open the cluster that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the cluster. - - If external authentication is configured: - - - Rancher returns users from your [external authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) source as you type. - - >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/ad/). - - - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -4. Assign the user or group **Cluster** roles. - - [What are Cluster Roles?]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) - - >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles). - -**Result:** The chosen users are added to the cluster. - -- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. 
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/_index.md deleted file mode 100644 index cb74848ec1..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Access a Cluster with Kubectl and kubeconfig" -description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." -weight: 2010 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/ - - /rancher/v2.0-v2.4/en/cluster-admin/kubectl - - /rancher/v2.0-v2.4/en/concepts/clusters/kubeconfig-files/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/kubeconfig/ - - /rancher/2.x/en/cluster-admin/kubeconfig ---- - -This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. - -For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). - -- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) -- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) -- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) -- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) - - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) - - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) - - -### Accessing Clusters with kubectl Shell in the Rancher UI - -You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. - -1. From the **Global** view, open the cluster that you want to access with kubectl. - -2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. - -### Accessing Clusters with kubectl from Your Workstation - -This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. - -This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. - -> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. -1. Click **Kubeconfig File**. -1. Copy the contents displayed to your clipboard. -1. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. 
Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: - ``` - kubectl --kubeconfig /custom/path/kube.config get pods - ``` -1. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. - - -### Note on Resources Created Using kubectl - -Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. - -# Authenticating Directly with a Downstream Cluster - -This section intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) - -This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) - -We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. - -> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) - -To find the name of the context(s) in your downloaded kubeconfig file, run: - -``` -kubectl config get-contexts --kubeconfig /custom/path/kube.config -CURRENT NAME CLUSTER AUTHINFO NAMESPACE -* my-cluster my-cluster user-46tmn - my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn -``` - -In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. - -With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with an downstream RKE cluster directly. - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. - -When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. - -### Connecting Directly to Clusters with FQDN Defined - -If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. 
When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: - -``` -kubectl --context -fqdn get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods -``` - -### Connecting Directly to Clusters without FQDN Defined - -If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: -``` -kubectl --context - get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context - get pods -``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/_index.md deleted file mode 100644 index 5de09b65ba..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Cluster Autoscaler -weight: 1 ---- - -In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. - -To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. - -Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. - -It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. - -# Cloud Providers - -Cluster Autoscaler provides support to distinct cloud providers. 
For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) - -### Setting up Cluster Autoscaler on Amazon Cloud Provider - -For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/amazon) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/amazon/_index.md deleted file mode 100644 index bd8dfee2fd..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/cluster-autoscaler/amazon/_index.md +++ /dev/null @@ -1,580 +0,0 @@ ---- -title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups -weight: 1 ---- - -This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. - -- [Prerequisites](#prerequisites) -- [1. Create a Custom Cluster](#1-create-a-custom-cluster) -- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) -- [3. Deploy Nodes](#3-deploy-nodes) -- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) - - [Parameters](#parameters) - - [Deployment](#deployment) -- [Testing](#testing) - - [Generating Load](#generating-load) - - [Checking Scale](#checking-scale) - -# Prerequisites - -These elements are required to follow this guide: - -* The Rancher server is up and running -* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles - -### 1. Create a Custom Cluster - -On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: - -* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag -* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag -* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster - - ```sh - sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum - ``` - -### 2. Configure the Cloud Provider - -On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. - -1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. - * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. 
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:DescribeTags", - "autoscaling:DescribeLaunchConfigurations", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. - * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - - * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: 
`K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--etcd --controlplane" - - sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. - * IAM profile: Provides cloud_provider worker integration. - This profile is called `K8sWorkerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] - } - ``` - - * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--worker" - - 
sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -More info is at [RKE clusters on AWS]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - -### 3. Deploy Nodes - -Once we've configured AWS, let's create VMs to bootstrap our cluster: - -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/) - * IAM role: `K8sMasterRole` - * Security group: `K8sMasterSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` - -* worker: Define an ASG on EC2 with the following settings: - * Name: `K8sWorkerAsg` - * IAM role: `K8sWorkerRole` - * Security group: `K8sWorkerSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` - * Instances: - * minimum: 2 - * desired: 2 - * maximum: 10 - -Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. - -### 4. Install Cluster-autoscaler - -At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. - -#### Parameters - -This table shows cluster-autoscaler parameters for fine tuning: - -| Parameter | Default | Description | -|---|---|---| -|cluster-name|-|Autoscaled cluster name, if available| -|address|:8085|The address to expose Prometheus metrics| -|kubernetes|-|Kubernetes master location. Leave blank for default| -|kubeconfig|-|Path to kubeconfig file with authorization and master location information| -|cloud-config|-|The path to the cloud provider configuration file. 
Empty string for no configuration file| -|namespace|"kube-system"|Namespace in which cluster-autoscaler run| -|scale-down-enabled|true|Should CA scale down the cluster| -|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| -|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| -|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| -|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| -|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| -|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| -|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| -|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| -|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| -|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| -|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| -|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -cloud-provider|-|Cloud provider type| -|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| -|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| -|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| -|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| -|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| -|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| -|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| -|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| -|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: ::| -|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. 
A definition is expressed `:[[=]]`| -|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| -|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| -|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| -|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| -|write-status-configmap|true|Should CA write status information to a configmap| -|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| -|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| -|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| -|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| -|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| -|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| -|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| -|regional|false|Cluster is regional| -|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| -|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| -|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| -|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| -|profiling|false|Is debug/pprof endpoint enabled| - -#### Deployment - -Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: - - -```yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler 
-subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler -spec: - replicas: 1 - selector: - matchLabels: - app: cluster-autoscaler - template: - metadata: - labels: - app: cluster-autoscaler - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8085' - spec: - serviceAccountName: cluster-autoscaler - tolerations: - - effect: NoSchedule - operator: "Equal" - value: "true" - key: node-role.kubernetes.io/controlplane - nodeSelector: - node-role.kubernetes.io/controlplane: "true" - containers: - - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --v=4 - - --stderrthreshold=info - - --cloud-provider=aws - - --skip-nodes-with-local-storage=false - - --expander=least-waste - - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs/ca-certificates.crt - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - -``` - -Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): - -```sh -kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml -``` - -**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) - -# Testing - -At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. - -### Generating Load - -We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 3 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 1000m - memory: 1024Mi - requests: - cpu: 1000m - memory: 1024Mi -``` - -Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): - -``` -kubectl -n default apply -f test-deployment.yaml -``` - -### Checking Scale - -Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. - -Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/_index.md deleted file mode 100644 index 904417b914..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Cluster Configuration -weight: 2025 ---- - -After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. - -For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members) - -- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) -- [Editing Clusters in the Rancher UI](#editing-clusters-in-the-rancher-ui) -- [Editing Clusters with YAML](#editing-clusters-with-yaml) -- [Updating ingress-nginx](#updating-ingress-nginx) - -### Cluster Management Capabilities by Cluster Type - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table" %}} - -### Editing Clusters in the Rancher UI - -To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. - -In [clusters launched by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - -Note that these options are not available for imported clusters or hosted Kubernetes clusters. - -Option | Description | ----------|----------| - Kubernetes Version | The version of Kubernetes installed on each cluster node. 
For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes). | - Network Provider | The container networking interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | - Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | - Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | - Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | - Pod Security Policy Support | Enables [pod security policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | - Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version]({{}}/rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | - Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | - Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | - Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | - -### Editing Clusters with YAML - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from File**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the [cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - - - - -### Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. - -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/nodes/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/nodes/_index.md deleted file mode 100644 index 7311650ae0..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/nodes/_index.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -title: Nodes and Node Pools -weight: 2030 ---- - -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) to provision the cluster, there are different node options available. - -> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml). - -This section covers the following topics: - -- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) - - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) - - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) - - [Imported nodes](#imported-nodes) -- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) -- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) -- [Deleting a node](#deleting-a-node) -- [Scaling nodes](#scaling-nodes) -- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) -- [Cordoning a node](#cordoning-a-node) -- [Draining a node](#draining-a-node) - - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) - - [Grace period](#grace-period) - - [Timeout](#timeout) - - [Drained and cordoned state](#drained-and-cordoned-state) -- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) - -# Node Options Available for Each Cluster Creation Option - -The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. - -| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | -| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | -| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable. | -| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable _and_ evicts all pods. | -| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | | Enter a custom name, description, label, or taints for a node. 
| -| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | | View API data. | -| [Delete](#deleting-a-node) | ✓ | ✓ | | | Deletes defective nodes from the cluster. | -| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. | -| [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. | - -[1]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/ - -### Nodes Hosted by an Infrastructure Provider - -Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) - -Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) can be scaled up or down if the node pool is edited. - -A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. - -Rancher uses [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. - -### Nodes Provisioned by Hosted Kubernetes Providers - -Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. - -### Imported Nodes - -Although you can deploy workloads to an [imported cluster]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. - -# Managing and Editing Individual Nodes - -Editing a node lets you: - -* Change its name -* Change its description -* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) - -To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). - -# Viewing a Node in the Rancher API - -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.0-v2.4/en/api/). - -# Deleting a Node - -Use **Delete** to remove defective nodes from the cloud provider. 
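If you want to double-check which nodes are unhealthy before deleting them, you can also list node status with `kubectl`. The node names and output below are purely illustrative:

```
kubectl get nodes
NAME             STATUS     ROLES    AGE   VERSION
worker-pool1-1   Ready      worker   30d   v1.17.6
worker-pool1-2   NotReady   worker   30d   v1.17.6
```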
- -When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) - ->**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. - -# Scaling Nodes - -For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) by using the scale controls. This option isn't available for other cluster types. - -# SSH into a Node Hosted by an Infrastructure Provider - -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. - -1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. - -1. Find the node that you want to remote into. Select **⋮ > Download Keys**. - - **Step Result:** A ZIP file containing files used for SSH is downloaded. - -1. Extract the ZIP file to any location. - -1. Open Terminal. Change your location to the extracted ZIP file. - -1. Enter the following command: - - ``` - ssh -i id_rsa root@ - ``` - -# Cordoning a Node - -_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. - -# Draining a Node - -_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. - -- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. - -- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. - -You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. - -However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. - -### Aggressive and Safe Draining Options - -The node draining options are different based on your version of Rancher. - -{{% tabs %}} -{{% tab "Rancher v2.2.x+" %}} -There are two drain modes: aggressive and safe. - -- **Aggressive Mode** - - In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. - - Kubernetes also expects the implementation to decide what to do with pods using emptyDir. 
If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. - -- **Safe Mode** - - If a node has standalone pods or ephemeral data it will be cordoned but not drained. -{{% /tab %}} -{{% tab "Rancher before v2.2.x" %}} - -The following list describes each drain option: - -- **Even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet** - - These types of pods won't get rescheduled to a new node, since they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. Kubernetes forces you to choose this option (which will delete/evict these pods) or drain won't proceed. - -- **Even if there are DaemonSet-managed pods** - - Similar to above, if you have any daemonsets, drain would proceed only if this option is selected. Even when this option is on, pods won't be deleted since they'll immediately be replaced. On startup, Rancher currently has a few daemonsets running by default in the system, so this option is turned on by default. - -- **Even if there are pods using emptyDir** - - If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Similar to the first option, Kubernetes expects the implementation to decide what to do with these pods. Choosing this option will delete these pods. -{{% /tab %}} -{{% /tabs %}} - -### Grace Period - -The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. - -### Timeout - -The amount of time drain should continue to wait before giving up. - ->**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12. - -### Drained and Cordoned State - -If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. - -If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. - -Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. - ->**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). - -# Labeling a Node to be Ignored by Rancher - -_Available as of 2.3.3_ - -Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. - -Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. - -In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. 
- -You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. - -> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. - -### Labeling Nodes to be Ignored with the Rancher UI - -To add a node that is ignored by Rancher, - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `ignore-node-name` setting and click **⋮ > Edit.** -1. Enter a name that Rancher will use to ignore nodes. All nodes with this name will be ignored. -1. Click **Save.** - -**Result:** Rancher will not wait to register nodes with this name. In the UI, the node will displayed with a grayed-out status. The node is still part of the cluster and can be listed with `kubectl`. - -If the setting is changed afterward, the ignored nodes will continue to be hidden. - -### Labeling Nodes to be Ignored with kubectl - -To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: - -``` -cattle.rancher.io/node-status: ignore -``` - -**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. - -If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. - -If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. - -If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings under `ignore-node-name`. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy/_index.md deleted file mode 100644 index ef2ec5ccc1..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Adding a Pod Security Policy -weight: 80 ---- - -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) - -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. - -You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. - -1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**. - -2. Expand **Cluster Options**. - -3. From **Pod Security Policy Support**, select **Enabled**. - - >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. 
- - Rancher ships with [policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. - -5. Click **Save**. - -**Result:** The pod security policy is applied to the cluster and any projects within the cluster. - ->**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. -> ->To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/_index.md deleted file mode 100644 index 545de58bf8..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/_index.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Projects and Kubernetes Namespaces with Rancher -description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces -weight: 2032 -aliases: - - /rancher/v2.0-v2.4/en/concepts/projects/ - - /rancher/v2.0-v2.4/en/tasks/projects/ - - /rancher/v2.0-v2.4/en/tasks/projects/create-project/ - - /rancher/v2.0-v2.4/en/tasks/projects/create-project/ ---- - -A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. - -A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -This section describes how projects and namespaces work with Rancher. It covers the following topics: - -- [About namespaces](#about-namespaces) -- [About projects](#about-projects) - - [The cluster's default project](#the-cluster-s-default-project) - - [The system project](#the-system-project) -- [Project authorization](#project-authorization) -- [Pod security policies](#pod-security-policies) -- [Creating projects](#creating-projects) -- [Switching between clusters and projects](#switching-between-clusters-and-projects) - -# About Namespaces - -A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) - -> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. - -Namespaces provide the following functionality: - -- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. 
Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. -- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. - -You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. - -You can assign the following resources directly to namespaces: - -- [Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/). - -### Role-based access control issues with namespaces and kubectl - -Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. - -This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. - -If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/) to ensure that you will have permission to access the namespace. - -If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. - -# About Projects - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. - -You can use projects to perform actions such as: - -- Assign users to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/). -- Assign resources to the project. -- Assign Pod Security Policies. 
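For reference, Rancher records which project owns a namespace with an annotation on the namespace object itself. The snippet below is only a hedged illustration (the namespace name and the `c-xxxxx:p-xxxxx` IDs are placeholders); in practice you would normally assign namespaces to projects through the Rancher UI or API:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: team-frontend          # placeholder namespace name
  annotations:
    # Illustrative only: Rancher associates a namespace with a project using
    # a projectId annotation in the form <cluster ID>:<project ID>.
    field.cattle.io/projectId: c-xxxxx:p-xxxxx
```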
- -When you create a cluster, two projects are automatically created within it: - -- [Default Project](#the-cluster-s-default-project) -- [System Project](#the-system-project) - -### The Cluster's Default Project - -When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. - -If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. - -If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. - -### The System Project - -_Available as of v2.0.7_ - -When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. - -To open it, open the **Global** menu, and then select the `system` project for your cluster. - -The `system` project: - -- Is automatically created when you provision a cluster. -- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. -- Allows you to add more namespaces or move its namespaces to other projects. -- Cannot be deleted because it's required for cluster operations. - ->**Note:** In clusters where both: -> -> - The Canal network plug-in is in use. -> - The Project Network Isolation option is enabled. -> ->The `system` project overrides the Project Network Isolation option so that it can communicate with other projects, collect logs, and check health. - -# Project Authorization - -Standard users are only authorized for project access in two situations: - -- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. -- Standard users can access projects that they create themselves. - -# Pod Security Policies - -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. - -# Creating Projects - -This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. - -1. [Name a new project.](#1-name-a-new-project) -2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) -3. [Recommended: Add project members.](#3-recommended-add-project-members) -4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) - -### 1. Name a New Project - -1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. - -1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. - -1. Enter a **Project Name**. - -### 2. Optional: Select a Pod Security Policy - -This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). 
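If you want to check which pod security policies already exist in the cluster before making a selection, you can list them with `kubectl`. This is a quick sketch that assumes you have cluster-level permissions; the policy name in the second command is a placeholder.

```
# List the pod security policies defined in the cluster
kubectl get podsecuritypolicies

# Review what a specific policy allows (replace the name with one from the list above)
kubectl describe podsecuritypolicy restricted-psp
```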
- -Assigning a PSP to a project will: - -- Override the cluster's default PSP. -- Apply the PSP to the project. -- Apply the PSP to any namespaces you add to the project later. - -### 3. Recommended: Add Project Members - -Use the **Members** section to provide other users with project access and roles. - -By default, your user is added as the project `Owner`. - ->**Notes on Permissions:** -> ->- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. -> ->- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. -> ->- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). - -To add members: - -1. Click **Add Member**. -1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. -1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) - -### 4. Optional: Add Resource Quotas - -_Available as of v2.1.0_ - -Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas). - -To add a resource quota, - -1. Click **Add Quota**. -1. Select a Resource Type. For more information, see [Resource Quotas.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.0-v2.4/en/project-admin/resource-quotas/) Note: This option is available as of v2.2.0. -1. Click **Create**. - -**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. - -| Field | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------- | -| Project Limit | The overall resource limit for the project. | -| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. 
| - -# Switching between Clusters and Projects - -To switch between clusters and projects, use the **Global** drop-down available in the main menu. - -![Global Menu]({{}}/img/rancher/global-menu.png) - -Alternatively, you can switch between projects and clusters using the main menu. - -- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. -- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/_index.md deleted file mode 100644 index 2d795e8bb0..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Restoring a Cluster from Backup -weight: 2050 ---- - -_Available as of v2.2.0_ - -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots), but [one-time snapshots]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). - -As of Rancher v2.4.0, clusters can also be restored to a prior Kubernetes version and cluster configuration. - -This section covers the following topics: - -- [Viewing Available Snapshots](#viewing-available-snapshots) -- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) -- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -## Viewing Available Snapshots - -The list of all available snapshots for the cluster is available. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -## Restoring a Cluster from a Snapshot - -If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. - -Restores changed in Rancher v2.4.0. - -{{% tabs %}} -{{% tab "Rancher v2.4.0+" %}} - -Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. 
-- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -When rolling back to a prior Kubernetes version, the [upgrade strategy options]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. - -> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. In the **Restoration Type** field, choose one of the restore options described above. - -5. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% tab "Rancher before v2.4.0" %}} - -> **Prerequisites:** -> -> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/), new etcd nodes will automatically be created. For [custom clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/), please ensure that you add new etcd nodes to the cluster. -> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -{{% /tab %}} -{{% /tabs %}} - -## Recovering etcd without a Snapshot - -If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: - -1. Keep only one etcd node in the cluster by removing all other etcd nodes. - -2. On the single remaining etcd node, run the following command: - - ``` - $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd - ``` - - This command outputs the running command for etcd, save this command to use later. - -3. Stop the etcd container that you launched in the previous step and rename it to `etcd-old`. - - ``` - $ docker stop etcd - $ docker rename etcd etcd-old - ``` - -4. 
Take the saved command from Step 2 and revise it: - - - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. - - Add `--force-new-cluster` to the end of the command. - -5. Run the revised command. - -6. After the single node is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/_index.md deleted file mode 100644 index 8318bf11b3..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/_index.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Tools for Logging, Monitoring, and More -weight: 2033 -aliases: - - /rancher/v2.0-v2.4/en/tools/notifiers-and-alerts/ ---- - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories: - - - -- [Logging](#logging) -- [Monitoring](#monitoring) -- [Alerts](#alerts) -- [Notifiers](#notifiers) -- [Istio](#istio) -- [OPA Gatekeeper](#opa-gatekeeper) -- [CIS Scans](#cis-scans) - - - - -# Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher can integrate with Elasticsearch, Splunk, Kafka, Syslog, and Fluentd. - -Refer to the logging documentation [here.](./cluster-logging) - -# Monitoring - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -For details, refer to [Monitoring.](./cluster-monitoring) -# Alerts - -After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. - -Alerts are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level. - -For details, refer to [Alerts.](./cluster-alerts) -# Notifiers - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action.
Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. - -For details, refer to [Notifiers.](./notifiers) -# Istio - -_Available as of v2.3_ - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - -Refer to the Istio documentation [here.](./istio) - -# OPA Gatekeeper - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](./opa-gatekeeper) - - -# CIS Scans - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. - -Refer to the CIS scan documentation [here.](./cis-scans) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md deleted file mode 100644 index b9c354f493..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/_index.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: CIS Scans -weight: 18 -aliases: - - /rancher/v2.0-v2.4/en/cis-scans/legacy - - /rancher/v2.0-v2.4/en/cis-scans - - /rancher/v2.x/en/cis-scans/v2.4/ ---- - -_Available as of v2.4.0_ - -- [Prerequisites](#prerequisites) -- [Running a scan](#running-a-scan) -- [Scheduling recurring scans](#scheduling-recurring-scans) -- [Skipping tests](#skipping-tests) -- [Setting alerts](#setting-alerts) -- [Deleting a report](#deleting-a-report) -- [Downloading a report](#downloading-a-report) -- [List of skipped and not applicable tests](#list-of-skipped-and-not-applicable-tests) - -# Prerequisites - -To run security scans on a cluster and access the generated reports, you must be an [Administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [Cluster Owner.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) - -Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher. - -The security scan cannot run in a cluster that has Windows nodes. - -You will only be able to see the CIS scan reports for clusters that you have access to. - -# Running a Scan - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Click **Run Scan.** -1. Choose a CIS scan profile. - -**Result:** A report is generated and displayed in the **CIS Scans** page. To see details of the report, click the report's name. - -# Scheduling Recurring Scans - -Recurring scans can be scheduled to run on any RKE Kubernetes cluster. - -To enable recurring scans, edit the advanced options in the cluster configuration during cluster creation or after the cluster has been created. - -To schedule scans for an existing cluster: - -1. Go to the cluster view in Rancher. -1. Click **Tools > CIS Scans.** -1. Click **Add Schedule.** This takes you to the section of the cluster editing page that is applicable to configuring a schedule for CIS scans. 
(This section can also be reached by going to the cluster view, clicking **⋮ > Edit,** and going to the **Advanced Options.**) -1. In the **CIS Scan Enabled** field, click **Yes.** -1. In the **CIS Scan Profile** field, choose a **Permissive** or **Hardened** profile. The corresponding CIS Benchmark version is included in the profile name. Note: Any skipped tests [defined in a separate ConfigMap](#skipping-tests) will be skipped regardless of whether a **Permissive** or **Hardened** profile is selected. When selecting the the permissive profile, you should see which tests were skipped by Rancher (tests that are skipped by default for RKE clusters) and which tests were skipped by a Rancher user. In the hardened test profile, the only skipped tests will be skipped by users. -1. In the **CIS Scan Interval (cron)** job, enter a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to define how often the cluster will be scanned. -1. In the **CIS Scan Report Retention** field, enter the number of past reports that should be kept. - -**Result:** The security scan will run and generate reports at the scheduled intervals. - -The test schedule can be configured in the `cluster.yml`: - -```yaml -scheduled_cluster_scan: -    enabled: true -    scan_config: -        cis_scan_config: -            override_benchmark_version: rke-cis-1.4 -            profile: permissive -    schedule_config: -        cron_schedule: 0 0 * * * -        retention: 24 -``` - - -# Skipping Tests - -You can define a set of tests that will be skipped by the CIS scan when the next report is generated. - -These tests will be skipped for subsequent CIS scans, including both manually triggered and scheduled scans, and the tests will be skipped with any profile. - -The skipped tests will be listed alongside the test profile name in the cluster configuration options when a test profile is selected for a recurring cluster scan. The skipped tests will also be shown every time a scan is triggered manually from the Rancher UI by clicking **Run Scan.** The display of skipped tests allows you to know ahead of time which tests will be run in each scan. - -To skip tests, you will need to define them in a Kubernetes ConfigMap resource. Each skipped CIS scan test is listed in the ConfigMap alongside the version of the CIS benchmark that the test belongs to. - -To skip tests by editing a ConfigMap resource, - -1. Create a `security-scan` namespace. -1. Create a ConfigMap named `security-scan-cfg`. -1. Enter the skip information under the key `config.json` in the following format: - - ```json - { - "skip": { - "rke-cis-1.4": [ - "1.1.1", - "1.2.2" - ] - } - } - ``` - - In the example above, the CIS benchmark version is specified alongside the tests to be skipped for that version. - -**Result:** These tests will be skipped on subsequent scans that use the defined CIS Benchmark version. - -# Setting Alerts - -Rancher provides a set of alerts for cluster scans. which are not configured to have notifiers by default: - -- A manual cluster scan was completed -- A manual cluster scan has failures -- A scheduled cluster scan was completed -- A scheduled cluster scan has failures - -> **Prerequisite:** You need to configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) before configuring, sending, or receiving alerts. - -To activate an existing alert for a CIS scan result, - -1. From the cluster view in Rancher, click **Tools > Alerts.** -1. 
Go to the section called **A set of alerts for cluster scans.** -1. Go to the alert you want to activate and click **⋮ > Activate.** -1. Go to the alert rule group **A set of alerts for cluster scans** and click **⋮ > Edit.** -1. Scroll down to the **Alert** section. In the **To** field, select the notifier that you would like to use for sending alert notifications. -1. Optional: To limit the frequency of the notifications, click on **Show advanced options** and configure the time interval of the alerts. -1. Click **Save.** - -**Result:** The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. - -To create a new alert, - -1. Go to the cluster view and click **Tools > CIS Scans.** -1. Click **Add Alert.** -1. Fill out the form. -1. Enter a name for the alert. -1. In the **Is** field, set the alert to be triggered when a scan is completed or when a scan has a failure. -1. In the **Send a** field, set the alert as a **Critical,** **Warning,** or **Info** alert level. -1. Choose a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) for the alert. - -**Result:** The alert is created and activated. The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. - -For more information about alerts, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) - -# Deleting a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that should be deleted. -1. Click the **⋮ > Delete.** -1. Click **Delete.** - -# Downloading a Report - -1. From the cluster view in Rancher, click **Tools > CIS Scans.** -1. Go to the report that you want to download. Click **⋮ > Download.** - -**Result:** The report is downloaded in CSV format. - -# List of Skipped and Not Applicable Tests - -For a list of skipped and not applicable tests, refer to this page. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md deleted file mode 100644 index 67430e68c2..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/_index.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -title: Cluster Alerts -shortTitle: Alerts -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/cluster-alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/ ---- - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. 
- -This section covers the following topics: - -- [About Alerts](#about-alerts) - - [Alert Event Examples](#alert-event-examples) - - [Alerts Triggered by Prometheus Queries](#alerts-triggered-by-prometheus-queries) - - [Urgency Levels](#urgency-levels) - - [Scope of Alerts](#scope-of-alerts) - - [Managing Cluster Alerts](#managing-cluster-alerts) -- [Adding Cluster Alerts](#adding-cluster-alerts) -- [Cluster Alert Configuration](#cluster-alert-configuration) - - [System Service Alerts](#system-service-alerts) - - [Resource Event Alerts](#resource-event-alerts) - - [Node Alerts](#node-alerts) - - [Node Selector Alerts](#node-selector-alerts) - - [CIS Scan Alerts](#cis-scan-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - -# About Alerts - -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, you must configure one or more notifier in Rancher. - -When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them. - -For details about what triggers the predefined alerts, refer to the [documentation on default alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts) - -### Alert Event Examples - -Some examples of alert events are: - -- A Kubernetes master component entering an unhealthy state. -- A node or workload error occurring. -- A scheduled deployment taking place as planned. -- A node's hardware resources becoming overstressed. - -### Alerts Triggered by Prometheus Queries - -When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. For examples of expressions, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/) - -Monitoring must be [enabled]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/) before you can trigger alerts with custom Prometheus queries or expressions. - -### Urgency Levels - -You can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. - -### Scope of Alerts - -The scope for alerts can be set at either the cluster level or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/). - -At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: - -- The state of your nodes. -- The system services that manage your Kubernetes cluster. -- The resource events from specific system services. -- The Prometheus expression cross the thresholds - -### Managing Cluster Alerts - -After you set up cluster alerts, you can manage each alert object. 
To manage alerts, browse to the cluster containing the alerts that you want to manage, and then select **Tools > Alerts**. You can: - -- Deactivate/Reactivate alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts - -# Adding Cluster Alerts - -As a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send you alerts for cluster events. - ->**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/). - -1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. -1. Enter a **Name** that describes the purpose of the alert group. You can use a group to collect alert rules that serve a common purpose. -1. Based on the type of alert you want to create, refer to the [cluster alert configuration section.](#cluster-alert-configuration) -1. Continue adding more **Alert Rules** to the group. -1. Finally, choose the [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) to send the alerts to. - - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. -1. Click **Create.** - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - - -# Cluster Alert Configuration - - - [System Service Alerts](#system-service-alerts) - - [Resource Event Alerts](#resource-event-alerts) - - [Node Alerts](#node-alerts) - - [Node Selector Alerts](#node-selector-alerts) - - [CIS Scan Alerts](#cis-scan-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - -# System Service Alerts - -This alert type monitors for events that affect one of the Kubernetes master components, regardless of the node it occurs on. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **System Services** option, and then select an option from the dropdown: - -- [controller-manager](https://kubernetes.io/docs/concepts/overview/components/#kube-controller-manager) -- [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) -- [scheduler](https://kubernetes.io/docs/concepts/overview/components/#kube-scheduler) - -### Is - -The alert will be triggered when the selected Kubernetes master component is unhealthy. - -### Send a - -Select the urgency level of the alert. The options are: - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - - Select the urgency level based on the importance of the service and how many nodes fill the role within your cluster. For example, if you're making an alert for the `etcd` service, select **Critical**. If you're making an alert for redundant schedulers, **Warning** is more appropriate. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds.
-- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour. - -# Resource Event Alerts - -This alert type monitors for specific events that are thrown from a resource type. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Choose the type of resource event that triggers an alert. The options are: - -- **Normal**: triggers an alert when any standard resource event occurs. -- **Warning**: triggers an alert when unexpected resource events occur. - -From the **Choose a Resource** drop-down, select the resource type that you want to trigger an alert. - -- [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) -- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) -- [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) -- [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) -- [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert by considering factors such as how often the event occurs or its importance. For example: - -- If you set a normal alert for pods, you're likely to receive alerts often, and individual pods usually self-heal, so select an urgency of **Info**. -- If you set a warning alert for StatefulSets, it's very likely to impact operations, so select an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour. - -# Node Alerts - -This alert type monitors for events that occur on a specific node. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Node** option, and then make a selection from the **Choose a Node** drop-down. - -### Is - -Choose an event to trigger the alert. - -- **Not Ready**: Sends you an alert when the node is unresponsive. -- **CPU usage over**: Sends you an alert when the node rises above an entered percentage of its processing allocation. -- **Mem usage over**: Sends you an alert when the node rises above an entered percentage of its memory allocation. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU rises above 60% warrants an urgency of **Info**, but a node that is **Not Ready** warrants an urgency of **Critical**.
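To sanity-check the node conditions and usage that these alerts are based on, you can look at the same signals directly with `kubectl`. This is a rough sketch; `kubectl top` assumes that metrics are available in the cluster, and `<node-name>` is a placeholder.

```
# Readiness status that the "Not Ready" alert watches
kubectl get nodes

# Current CPU and memory usage per node (requires cluster metrics to be available)
kubectl top nodes

# Detailed conditions and resource allocation for a single node
kubectl describe node <node-name>
```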
- -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour. - -# Node Selector Alerts - -This alert type monitors for events that occur on any node marked with a label. For more information, see the Kubernetes documentation for [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Node Selector** option, and then click **Add Selector** to enter a key-value pair for a label. This label should be applied to one or more of your nodes. Add as many selectors as you'd like. - -### Is - -Choose an event to trigger the alert. - -- **Not Ready**: Sends you an alert when selected nodes are unresponsive. -- **CPU usage over**: Sends you an alert when selected nodes rise above an entered percentage of processing allocation. -- **Mem usage over**: Sends you an alert when selected nodes rise above an entered percentage of memory allocation. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU rises above 60% warrants an urgency of **Info**, but a node that is **Not Ready** warrants an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour. - -# CIS Scan Alerts -_Available as of v2.4.0_ - -This alert type is triggered based on the results of a CIS scan. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select **CIS Scan.** - -### Is - -Choose an event to trigger the alert: - -- Completed Scan -- Has Failure - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, a routine completed scan may only warrant an urgency of **Info**, while a scan with failures may warrant **Warning** or **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule.
- -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour. - -# Metric Expression Alerts - -This alert type is triggered when a Prometheus expression query crosses a configured threshold. It is only available after you enable monitoring. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Input or select an **Expression**. The dropdown shows the original metrics from Prometheus, including: - -- [**Node**](https://github.com/prometheus/node_exporter) -- [**Container**](https://github.com/google/cadvisor) -- [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) -- [**Kubernetes Components**](https://github.com/kubernetes/metrics) -- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) -- [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging)) -- [**Cluster Level Grafana**](http://docs.grafana.org/administration/metrics/) -- **Cluster Level Prometheus** - -### Is - -Choose a comparison: - -- **Equal**: Trigger alert when the expression value is equal to the threshold. -- **Not Equal**: Trigger alert when the expression value is not equal to the threshold. -- **Greater Than**: Trigger alert when the expression value is greater than the threshold. -- **Less Than**: Trigger alert when the expression value is less than the threshold. -- **Greater or Equal**: Trigger alert when the expression value is greater than or equal to the threshold. -- **Less or Equal**: Trigger alert when the expression value is less than or equal to the threshold. - -If applicable, choose a comparison value or a threshold for the alert to be triggered. - -### For - -Select a duration. The alert is triggered when the expression value stays past the threshold for longer than the configured duration. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when the node load expression ```sum(node_load5) / count(node_cpu_seconds_total{mode="system"})``` rises above 0.6 warrants an urgency of **Info**, but a value above 1 warrants an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, defaults to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, defaults to 30 seconds. -- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, defaults to 1 hour.
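Before wiring an expression into an alert rule, it can help to evaluate it by hand and see what value it currently returns. The sketch below queries the Prometheus HTTP API directly; the namespace, service name, and port used for the port-forward are assumptions about the monitoring deployment, so adjust them to match your cluster. Only the `/api/v1/query` endpoint itself is standard Prometheus API.

```
# Port-forward to the cluster monitoring Prometheus (namespace, service, and port
# are assumptions; adjust them for your deployment)
kubectl -n cattle-prometheus port-forward svc/access-prometheus 9090:80 &

# Evaluate the node load expression from the example above
curl -s 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=sum(node_load5) / count(node_cpu_seconds_total{mode="system"})'
```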
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/default-alerts/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/default-alerts/_index.md deleted file mode 100644 index 0ae49c9a73..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-alerts/default-alerts/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Default Alerts for Cluster Monitoring -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/cluster-alerts/default-alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts ---- - -When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them. - -Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). - -# Alerts for etcd -Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. - -A leader is the node that handles all client requests that need cluster consensus. For more information, you can refer to this [explanation of how etcd works.](https://rancher.com/blog/2019/2019-01-29-what-is-etcd/#how-does-etcd-work) - -The leader of the cluster can change in response to certain events. It is normal for the leader to change, but too many changes can indicate a problem with the network or a high CPU load. With longer latencies, the default etcd configuration may cause frequent heartbeat timeouts, which trigger a new leader election. - -| Alert | Explanation | -|-------|-------------| -| A high number of leader changes within the etcd cluster are happening | A warning alert is triggered when the leader changes more than three times in one hour. | -| Database usage close to the quota 500M | A warning alert is triggered when the size of etcd exceeds 500M.| -| Etcd is unavailable | A critical alert is triggered when etcd becomes unavailable. | -| Etcd member has no leader | A critical alert is triggered when the etcd cluster does not have a leader for at least three minutes. | - - -# Alerts for Kubernetes Components -Rancher provides alerts when core Kubernetes system components become unhealthy. - -Controllers update Kubernetes resources based on changes in etcd. The [controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. - -The [scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is a core component of Kubernetes. 
It is responsible for scheduling cluster workloads to nodes, based on various configurations, metrics, resource requirements and workload-specific requirements. - -| Alert | Explanation | -|-------|-------------| -| Controller Manager is unavailable | A critical warning is triggered when the cluster’s controller-manager becomes unavailable. | -| Scheduler is unavailable | A critical warning is triggered when the cluster’s scheduler becomes unavailable. | - - -# Alerts for Events -Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. In the Rancher UI, from the project view, you can see events for each workload. - -| Alert | Explanation | -|-------|-------------| -| Get warning deployment event | A warning alert is triggered when a warning event happens on a deployment. | - - -# Alerts for Nodes -Alerts can be triggered based on node metrics. Each computing resource in a Kubernetes cluster is called a node. Nodes can be either bare-metal servers or virtual machines. - -| Alert | Explanation | -|-------|-------------| -| High CPU load | A warning alert is triggered if the node uses more than 100 percent of the node’s available CPU seconds for at least three minutes. | -| High node memory utilization | A warning alert is triggered if the node uses more than 80 percent of its available memory for at least three minutes. | -| Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | - -# Project-level Alerts -When you enable monitoring for the project, some project-level alerts are provided. For details, refer to the [section on project-level alerts.]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md deleted file mode 100644 index 60af88703f..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/_index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Cluster Logging -shortTitle: Logging -description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. -metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." 
-weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/tasks/logging/ - - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging - - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging - - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/ - - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/ - - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/ ---- - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher supports integration with the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - -This section covers the following topics: - -- [How logging integrations work](#how-logging-integrations-work) -- [Requirements](#requirements) -- [Logging scope](#logging-scope) -- [Enabling cluster logging](#enabling-cluster-logging) - -# How Logging Integrations Work - -Rancher can integrate with popular external services used for event streams, telemetry, or search. These services can log errors and warnings in your Kubernetes infrastructure to a stream. - -These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. - -When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. - -Additionally, you'll have the opportunity to enter key-value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key-value pairs. - ->**Note:** You can only configure one logging service per cluster or per project. - -# Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -# Logging Scope - -You can configure logging at either cluster level or project level. - -- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. -- [Project logging]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/logging/) writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - Kubernetes system components logs stored at `/var/lib/rancher/rke/log/`. - -# Enabling Cluster Logging - -As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. - -1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. - -1. Select **Tools > Logging** in the navigation bar. - -1. Select a logging service and enter the configuration. 
Refer to the specific service for detailed configuration. Rancher supports integration with the following services: - - - [Elasticsearch]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd/) - -1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. - - - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. - - - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) - - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) - - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) - - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) - - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) - - - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. - 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. 
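If logs don't show up in the logging service right away, it can help to confirm that the log collectors were rolled out and are not reporting errors. The commands below are a sketch based on the legacy logging deployment; the `cattle-logging` namespace and the pod name are assumptions, so adjust them to what you see in your cluster.

```
# The logging agents run as a DaemonSet, one pod per node
kubectl -n cattle-logging get daemonsets,pods -o wide

# Tail a collector pod for connection or configuration errors
kubectl -n cattle-logging logs <fluentd-pod-name> --tail=50
```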
- -## Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md deleted file mode 100644 index f5510ee822..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/splunk/_index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Splunk -weight: 300 -aliases: - - /rancher/v2.0-v2.4/en/tasks/logging/splunk/ - - /rancher/v2.0-v2.4/en/tools/logging/splunk/ - - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk - - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/splunk - - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk - - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/ - - /rancher/v2.x/en/cluster-admin/tools/logging/splunk ---- - -If your organization uses [Splunk](https://www.splunk.com/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Splunk server to view logs. - ->**Prerequisites:** -> ->- Configure HTTP event collection for your Splunk Server (Splunk Enterprise or Splunk Cloud). ->- Either create a new token or copy an existing token. -> ->For more information, see [Splunk Documentation](http://docs.splunk.com/Documentation/Splunk/7.1.2/Data/UsetheHTTPEventCollector#About_Event_Collector_tokens). - -## Splunk Configuration - -1. In the **Endpoint** field, enter the IP address and port for you Splunk instance (i.e. `http://splunk-server:8088`) - - * Splunk usually uses port `8088`. If you're using Splunk Cloud, you'll need to work with [Splunk support](https://www.splunk.com/en_us/support-and-services.html) to get an endpoint URL. - -1. Enter the **Token** you obtained while completing the prerequisites (i.e., when you created a token in Splunk). - -1. In the **Source** field, enter the name of the token as entered in Splunk. - -1. **Optional:** Provide one or more [index](http://docs.splunk.com/Documentation/Splunk/7.1.2/Indexer/Aboutindexesandindexers) that's allowed for your token. - -## SSL Configuration - -If your instance of Splunk uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. - -1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - -1. Enter your **Client Key Password**. - -1. Select whether or not you want to verify your SSL. - - * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate**, provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. - * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. 
You do not need to provide a **CA Certificate PEM**. - -## Viewing Logs - -1. Log into your Splunk server. - -1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. - -1. Click on Data Summary and select the Sources tab. - ![View Logs]({{}}/img/rancher/splunk/splunk4.jpg) - -1. To view the actual logs, click on the source that you declared earlier. - ![View Logs]({{}}/img/rancher/splunk/splunk5.jpg) - -## Troubleshooting - -You can use curl to see if **HEC** is listening for HTTP event data. - -``` -$ curl http://splunk-server:8088/services/collector/event \ - -H 'Authorization: Splunk 8da70994-b1b0-4a79-b154-bfaae8f93432' \ - -d '{"event": "hello world"}' -``` - -If Splunk is configured correctly, you should receive **json** data returning `success code 0`. You should be able -to send logging data to HEC. - -If you received an error, check your configuration in Splunk and Rancher. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md deleted file mode 100644 index d21612e27f..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/_index.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: Integrating Rancher and Prometheus for Cluster Monitoring -shortTitle: Monitoring -description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/cluster-monitoring - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/ - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/ ---- - -_Available as of v2.2.0_ - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -This section covers the following topics: - -- [About Prometheus](#about-prometheus) -- [Monitoring scope](#monitoring-scope) -- [Enabling cluster monitoring](#enabling-cluster-monitoring) -- [Resource consumption](#resource-consumption) - - [Resource consumption of Prometheus pods](#resource-consumption-of-prometheus-pods) - - [Resource consumption of other pods](#resource-consumption-of-other-pods) - -# About Prometheus - -Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): - -You can configure these services to collect logs at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/). - ->A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. 
- -In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. - -By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. - -Multi-tenancy support in terms of cluster-only and project-only Prometheus instances are also supported. - -# Monitoring Scope - -Using Prometheus, you can monitor Rancher at both the cluster level and [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - -- Cluster monitoring allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - - - Kubernetes control plane - - etcd database - - All nodes (including workers) - -- [Project monitoring]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. - -# Enabling Cluster Monitoring - -As an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. - -> **Prerequisites:** The following TCP ports need to be opened for metrics scraping: -> -> | Port | Node type | Component | -> | --- | --- | --- | -> | 9796 | Worker | Node exporter | -> | 10254 | Worker | Nginx Ingress Controller | -> | 10250 | Worker/Controlplane | Kubelet | -> | 10251 | Controlplane | Kube scheduler | -> | 10252 | Controlplane | Kube controller manager | -> | 2379 | Etcd | Etcd server | - -> Monitoring V1 requires a Kubernetes verison less than or equal to v1.20.x. To install monitoring on Kubernetes v1.21+, you will need to [migrate to Monitoring V2.]({{}}/rancher/v2.5/en/monitoring-alerting/migrating/) - -1. From the **Global** view, navigate to the cluster that you want to configure cluster monitoring. - -1. Select **Tools > Monitoring** in the navigation bar. - -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. - -1. Click **Save**. - -**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) to the cluster's `system` project. 
After the applications are `active`, you can start viewing [cluster metrics]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/) through the Rancher dashboard or directly from Grafana. - -> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. - -# Resource Consumption - -When enabling cluster monitoring, you need to ensure your worker nodes and Prometheus pod have enough resources. The tables below provides a guide of how much resource consumption will be used. In larger deployments, it is strongly advised that the monitoring infrastructure be placed on dedicated nodes in the cluster. - -### Resource Consumption of Prometheus Pods - -This table is the resource consumption of the Prometheus pod, which is based on the number of all the nodes in the cluster. The count of nodes includes the worker, control plane and etcd nodes. Total disk space allocation should be approximated by the `rate * retention` period set at the cluster level. When enabling cluster level monitoring, you should adjust the CPU and Memory limits and reservation. - -Number of Cluster Nodes | CPU (milli CPU) | Memory | Disk -------------------------|-----|--------|------ -5 | 500 | 650 MB | ~1 GB/Day -50| 2000 | 2 GB | ~5 GB/Day -256| 4000 | 6 GB | ~18 GB/Day - -Additional pod resource requirements for cluster level monitoring. - -| Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable | -|---------------------|---------------------------------|---------------|---------------|-------------|-------------|--------------| -| Prometheus | prometheus | 750m | 750Mi | 1000m | 1000Mi | Y | -| | prometheus-proxy | 50m | 50Mi | 100m | 100Mi | Y | -| | prometheus-auth | 100m | 100Mi | 500m | 200Mi | Y | -| | prometheus-config-reloader | - | - | 50m | 50Mi | N | -| | rules-configmap-reloader | - | - | 100m | 25Mi | N | -| Grafana | grafana-init-plugin-json-copy | 50m | 50Mi | 50m | 50Mi | Y | -| | grafana-init-plugin-json-modify | 50m | 50Mi | 50m | 50Mi | Y | -| | grafana | 100m | 100Mi | 200m | 200Mi | Y | -| | grafana-proxy | 50m | 50Mi | 100m | 100Mi | Y | -| Kube-State Exporter | kube-state | 100m | 130Mi | 100m | 200Mi | Y | -| Node Exporter | exporter-node | 200m | 200Mi | 200m | 200Mi | Y | -| Operator | prometheus-operator | 100m | 50Mi | 200m | 100Mi | Y | - - -### Resource Consumption of Other Pods - -Besides the Prometheus pod, there are components that are deployed that require additional resources on the worker nodes. 
- -Pod | CPU (milli CPU) | Memory (MB) -----|-----------------|------------ -Node Exporter (Per Node) | 100 | 30 -Kube State Cluster Monitor | 100 | 130 -Grafana | 100 | 150 -Prometheus Cluster Monitoring Nginx | 50 | 50 diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md deleted file mode 100644 index c6ea196ada..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/cluster-metrics/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Cluster Metrics -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/cluster-metrics - - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/cluster-metrics - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/ ---- - -_Available as of v2.2.0_ - -Cluster metrics display the hardware utilization for all nodes in your cluster, regardless of its role. They give you a global monitoring insight into the cluster. - -Some of the biggest metrics to look out for: - -- **CPU Utilization** - - High load either indicates that your cluster is running efficiently or that you're running out of CPU resources. - -- **Disk Utilization** - - Be on the lookout for increased read and write rates on nodes nearing their disk capacity. This advice is especially true for etcd nodes, as running out of storage on an etcd node leads to cluster failure. - -- **Memory Utilization** - - Deltas in memory utilization usually indicate a memory leak. - -- **Load Average** - - Generally, you want your load average to match your number of logical CPUs for the cluster. For example, if your cluster has 8 logical CPUs, the ideal load average would be 8 as well. If you load average is well under the number of logical CPUs for the cluster, you may want to reduce cluster resources. On the other hand, if your average is over 8, your cluster may need more resources. - -## Finding Node Metrics - -1. From the **Global** view, navigate to the cluster that you want to view metrics. - -1. Select **Nodes** in the navigation bar. - -1. Select a specific node and click on its name. - -1. Click on **Node Metrics**. - -[_Get expressions for Cluster Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#cluster-metrics) - -### Etcd Metrics - ->**Note:** Only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! - -You should also pay attention to the text at the top of the etcd metrics, which displays leader election statistics. This text indicates if etcd currently has a leader, which is the etcd instance that coordinates the other etcd instances in your cluster. A large increase in leader changes implies etcd is unstable. If you notice a change in leader election statistics, you should investigate them for issues. 
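If the leader election statistics look suspicious, you can cross-check etcd directly from one of the etcd nodes. The sketch below assumes an RKE-provisioned node where etcd runs in a Docker container named `etcd` with its client credentials already set in the container environment; adjust the container name and flags for your setup.

```bash
# Show each etcd endpoint, including which member is currently the leader
# (the IS LEADER column in the table output).
docker exec etcd etcdctl endpoint status --write-out=table

# Quick health check of the local etcd endpoint.
docker exec etcd etcdctl endpoint health
```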
- -Some of the biggest metrics to look out for: - -- **Etcd has a leader** - - etcd is usually deployed on multiple nodes and elects a leader to coordinate its operations. If etcd does not have a leader, its operations are not being coordinated. - -- **Number of leader changes** - - If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. - -[_Get expressions for Etcd Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#etcd-metrics) - -### Kubernetes Components Metrics - -Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. - ->**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. - -Some of the more important component metrics to monitor are: - -- **API Server Request Latency** - - Increasing API response times indicate there's a generalized problem that requires investigation. - -- **API Server Request Rate** - - Rising API request rates usually coincide with increased API response times. Increased request rates also indicate a generalized problem requiring investigation. - -- **Scheduler Preemption Attempts** - - If you see a spike in scheduler preemptions, it's an indication that you're running out of hardware resources, as Kubernetes is recognizing it doesn't have enough resources to run all your pods and is prioritizing the more important ones. - -- **Scheduling Failed Pods** - - Failed pods can have a variety of causes, such as unbound persistent volume claims, exhausted hardware resources, non-responsive nodes, etc. - -- **Ingress Controller Request Process Time** - - How fast ingress is routing connections to your cluster services. - -[_Get expressions for Kubernetes Component Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression/#kubernetes-components-metrics) - -## Rancher Logging Metrics - -Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/). - -[_Get expressions for Rancher Logging Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#rancher-logging-metrics) - -## Finding Workload Metrics - -Workload metrics display the hardware utilization for a Kubernetes workload. 
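One way to record such a baseline is to query the cluster-monitoring Prometheus instance directly and save the results. The sketch below is only an example under assumptions: it port-forwards the `prometheus-operated` service in the `cattle-prometheus` namespace on port 9090 and runs one of the expressions listed later on this page.

```bash
# Forward the Prometheus API to localhost (service name and port are assumptions).
kubectl --namespace cattle-prometheus port-forward svc/prometheus-operated 9090:9090 &

# Record the current API server request rate per instance as a baseline.
curl -G http://localhost:9090/api/v1/query \
  --data-urlencode 'query=sum(rate(apiserver_request_count[5m])) by (instance)'
```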
You can also view metrics for [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [stateful sets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) and so on. - -1. From the **Global** view, navigate to the project that you want to view workload metrics. - -1. From the main navigation bar, choose **Resources > Workloads.** In versions before v2.3.0, choose **Workloads** on the main navigation bar. - -1. Select a specific workload and click on its name. - -1. In the **Pods** section, select a specific pod and click on its name. - - - **View the Pod Metrics:** Click on **Pod Metrics**. - - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. - -[_Get expressions for Workload Metrics_]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md deleted file mode 100644 index f710ae39b7..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/custom-metrics/_index.md +++ /dev/null @@ -1,493 +0,0 @@ ---- -title: Prometheus Custom Metrics Adapter -weight: 5 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/custom-metrics - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/custom-metrics - - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/custom-metrics/ - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/ ---- - -After you've enabled [cluster level monitoring]({{< baseurl >}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/), You can view the metrics data from Rancher. You can also deploy the Prometheus custom metrics adapter then you can use the HPA with metrics stored in cluster monitoring. - -## Deploy Prometheus Custom Metrics Adapter - -We are going to use the [Prometheus custom metrics adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/releases/tag/v0.5.0), version v0.5.0. This is a great example for the [custom metrics server](https://github.com/kubernetes-incubator/custom-metrics-apiserver). And you must be the *cluster owner* to execute following steps. - -- Get the service account of the cluster monitoring is using. It should be configured in the workload ID: `statefulset:cattle-prometheus:prometheus-cluster-monitoring`. And if you didn't customize anything, the service account name should be `cluster-monitoring`. - -- Grant permission to that service account. You will need two kinds of permission. -One role is `extension-apiserver-authentication-reader` in `kube-system`, so you will need to create a `Rolebinding` to in `kube-system`. This permission is to get api aggregation configuration from config map in `kube-system`. 
- -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: custom-metrics-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -The other one is cluster role `system:auth-delegator`, so you will need to create a `ClusterRoleBinding`. This permission is to have subject access review permission. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: custom-metrics:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: cluster-monitoring - namespace: cattle-prometheus -``` - -- Create configuration for custom metrics adapter. Following is an example configuration. There will be a configuration details in next session. - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: adapter-config - namespace: cattle-prometheus -data: - config.yaml: | - rules: - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: [] - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_seconds_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_seconds_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - seriesFilters: - - isNot: ^container_.*_total$ - resources: - overrides: - namespace: - resource: namespace - pod_name: - resource: pod - name: - matches: ^container_(.*)$ - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_total$ - resources: - template: <<.Resource>> - name: - matches: "" - as: "" - metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: - - isNot: .*_seconds_total - resources: - template: <<.Resource>> - name: - matches: ^(.*)_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' - seriesFilters: [] - resources: - template: <<.Resource>> - name: - matches: ^(.*)_seconds_total$ - as: "" - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - resourceRules: - cpu: - containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) - nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - memory: - containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) - nodeQuery: 
sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) - resources: - overrides: - instance: - resource: node - namespace: - resource: namespace - pod_name: - resource: pod - containerLabel: container_name - window: 1m -``` - -- Create HTTPS TLS certs for your api server. You can use following command to create a self-signed cert. - -```bash -openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out serving.crt -keyout serving.key -subj "/C=CN/CN=custom-metrics-apiserver.cattle-prometheus.svc.cluster.local" -# And you will find serving.crt and serving.key in your path. And then you are going to create a secret in cattle-prometheus namespace. -kubectl create secret generic -n cattle-prometheus cm-adapter-serving-certs --from-file=serving.key=./serving.key --from-file=serving.crt=./serving.crt -``` - -- Then you can create the prometheus custom metrics adapter. And you will need a service for this deployment too. Creating it via Import YAML or Rancher would do. Please create those resources in `cattle-prometheus` namespaces. - -Here is the prometheus custom metrics adapter deployment. -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: custom-metrics-apiserver - template: - metadata: - labels: - app: custom-metrics-apiserver - name: custom-metrics-apiserver - spec: - serviceAccountName: cluster-monitoring - containers: - - name: custom-metrics-apiserver - image: directxman12/k8s-prometheus-adapter-amd64:v0.5.0 - args: - - --secure-port=6443 - - --tls-cert-file=/var/run/serving-cert/serving.crt - - --tls-private-key-file=/var/run/serving-cert/serving.key - - --logtostderr=true - - --prometheus-url=http://prometheus-operated/ - - --metrics-relist-interval=1m - - --v=10 - - --config=/etc/adapter/config.yaml - ports: - - containerPort: 6443 - volumeMounts: - - mountPath: /var/run/serving-cert - name: volume-serving-cert - readOnly: true - - mountPath: /etc/adapter/ - name: config - readOnly: true - - mountPath: /tmp - name: tmp-vol - volumes: - - name: volume-serving-cert - secret: - secretName: cm-adapter-serving-certs - - name: config - configMap: - name: adapter-config - - name: tmp-vol - emptyDir: {} - -``` - -Here is the service of the deployment. -```yaml -apiVersion: v1 -kind: Service -metadata: - name: custom-metrics-apiserver - namespace: cattle-prometheus -spec: - ports: - - port: 443 - targetPort: 6443 - selector: - app: custom-metrics-apiserver -``` - -- Create API service for your custom metric server. - -```yaml -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.custom.metrics.k8s.io -spec: - service: - name: custom-metrics-apiserver - namespace: cattle-prometheus - group: custom.metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 - -``` - -- Then you can verify your custom metrics server by `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1`. If you see the return datas from the api, it means that the metrics server has been successfully set up. - -- You create HPA with custom metrics now. Here is an example of HPA. You will need to create a nginx deployment in your namespace first. 
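For example, you could create that nginx deployment with `kubectl` before applying the HPA manifest shown below. This is a minimal sketch; the namespace is a placeholder, and the deployment name must match `scaleTargetRef.name` in the HPA.

```bash
# Create a single-replica nginx deployment for the HPA to target
# (replace <your-namespace> with the namespace you are working in).
kubectl --namespace <your-namespace> create deployment nginx --image=nginx

# After applying the HPA manifest below, watch it react to the custom metric.
kubectl --namespace <your-namespace> get hpa nginx --watch
```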
- -```yaml -kind: HorizontalPodAutoscaler -apiVersion: autoscaling/v2beta1 -metadata: - name: nginx -spec: - scaleTargetRef: - # point the HPA at the nginx deployment you just created - apiVersion: apps/v1 - kind: Deployment - name: nginx - # autoscale between 1 and 10 replicas - minReplicas: 1 - maxReplicas: 10 - metrics: - # use a "Pods" metric, which takes the average of the - # given metric across all pods controlled by the autoscaling target - - type: Pods - pods: - metricName: memory_usage_bytes - targetAverageValue: 5000000 -``` - -And then, you should see your nginx is scaling up. HPA with custom metrics works. - -## Configuration of prometheus custom metrics adapter - -> Refer to https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md - -The adapter determines which metrics to expose, and how to expose them, -through a set of "discovery" rules. Each rule is executed independently -(so make sure that your rules are mutually exclusive), and specifies each -of the steps the adapter needs to take to expose a metric in the API. - -Each rule can be broken down into roughly four parts: - -- *Discovery*, which specifies how the adapter should find all Prometheus - metrics for this rule. - -- *Association*, which specifies how the adapter should determine which - Kubernetes resources a particular metric is associated with. - -- *Naming*, which specifies how the adapter should expose the metric in - the custom metrics API. - -- *Querying*, which specifies how a request for a particular metric on one - or more Kubernetes objects should be turned into a query to Prometheus. - -A basic config with one rule might look like: - -```yaml -rules: -# this rule matches cumulative cAdvisor metrics measured in seconds -- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' - resources: - # skip specifying generic resource<->label mappings, and just - # attach only pod and namespace resources by mapping label names to group-resources - overrides: - namespace: {resource: "namespace"}, - pod_name: {resource: "pod"}, - # specify that the `container_` and `_seconds_total` suffixes should be removed. - # this also introduces an implicit filter on metric family names - name: - # we use the value of the capture group implicitly as the API name - # we could also explicitly write `as: "$1"` - matches: "^container_(.*)_seconds_total$" - # specify how to construct a query to fetch samples for a given series - # This is a Go template where the `.Series` and `.LabelMatchers` string values - # are available, and the delimiters are `<<` and `>>` to avoid conflicts with - # the prometheus query language - metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)" -``` - -### Discovery - -Discovery governs the process of finding the metrics that you want to -expose in the custom metrics API. There are two fields that factor into -discovery: `seriesQuery` and `seriesFilters`. - -`seriesQuery` specifies Prometheus series query (as passed to the -`/api/v1/series` endpoint in Prometheus) to use to find some set of -Prometheus series. The adapter will strip the label values from this -series, and then use the resulting metric-name-label-names combinations -later on. - -In many cases, `seriesQuery` will be sufficient to narrow down the list of -Prometheus series. 
However, sometimes (especially if two rules might -otherwise overlap), it's useful to do additional filtering on metric -names. In this case, `seriesFilters` can be used. After the list of -series is returned from `seriesQuery`, each series has its metric name -filtered through any specified filters. - -Filters may be either: - -- `is: `, which matches any series whose name matches the specified - regex. - -- `isNot: `, which matches any series whose name does not match the - specified regex. - -For example: - -```yaml -# match all cAdvisor metrics that aren't measured in seconds -seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}' -seriesFilters: - isNot: "^container_.*_seconds_total" -``` - -### Association - -Association governs the process of figuring out which Kubernetes resources -a particular metric could be attached to. The `resources` field controls -this process. - -There are two ways to associate resources with a particular metric. In -both cases, the value of the label becomes the name of the particular -object. - -One way is to specify that any label name that matches some particular -pattern refers to some group-resource based on the label name. This can -be done using the `template` field. The pattern is specified as a Go -template, with the `Group` and `Resource` fields representing group and -resource. You don't necessarily have to use the `Group` field (in which -case the group is guessed by the system). For instance: - -```yaml -# any label `kube__` becomes . in Kubernetes -resources: - template: "kube_<<.Group>>_<<.Resource>>" -``` - -The other way is to specify that some particular label represents some -particular Kubernetes resource. This can be done using the `overrides` -field. Each override maps a Prometheus label to a Kubernetes -group-resource. For instance: - -```yaml -# the microservice label corresponds to the apps.deployment resource -resource: - overrides: - microservice: {group: "apps", resource: "deployment"} -``` - -These two can be combined, so you can specify both a template and some -individual overrides. - -The resources mentioned can be any resource available in your kubernetes -cluster, as long as you've got a corresponding label. - -### Naming - -Naming governs the process of converting a Prometheus metric name into -a metric in the custom metrics API, and vice versa. It's controlled by -the `name` field. - -Naming is controlled by specifying a pattern to extract an API name from -a Prometheus name, and potentially a transformation on that extracted -value. - -The pattern is specified in the `matches` field, and is just a regular -expression. If not specified, it defaults to `.*`. - -The transformation is specified by the `as` field. You can use any -capture groups defined in the `matches` field. If the `matches` field -doesn't contain capture groups, the `as` field defaults to `$0`. If it -contains a single capture group, the `as` field defautls to `$1`. -Otherwise, it's an error not to specify the as field. - -For example: - -```yaml -# match turn any name _total to _per_second -# e.g. http_requests_total becomes http_requests_per_second -name: - matches: "^(.*)_total$" - as: "${1}_per_second" -``` - -### Querying - -Querying governs the process of actually fetching values for a particular -metric. It's controlled by the `metricsQuery` field. - -The `metricsQuery` field is a Go template that gets turned into -a Prometheus query, using input from a particular call to the custom -metrics API. 
A given call to the custom metrics API is distilled down to -a metric name, a group-resource, and one or more objects of that -group-resource. These get turned into the following fields in the -template: - -- `Series`: the metric name -- `LabelMatchers`: a comma-separated list of label matchers matching the - given objects. Currently, this is the label for the particular - group-resource, plus the label for namespace, if the group-resource is - namespaced. -- `GroupBy`: a comma-separated list of labels to group by. Currently, - this contains the group-resource label used in `LabelMatchers`. - -For instance, suppose we had a series `http_requests_total` (exposed as -`http_requests_per_second` in the API) with labels `service`, `pod`, -`ingress`, `namespace`, and `verb`. The first four correspond to -Kubernetes resources. Then, if someone requested the metric -`pods/http_request_per_second` for the pods `pod1` and `pod2` in the -`somens` namespace, we'd have: - -- `Series: "http_requests_total"` -- `LabelMatchers: "pod=~\"pod1|pod2",namespace="somens"` -- `GroupBy`: `pod` - -Additionally, there are two advanced fields that are "raw" forms of other -fields: - -- `LabelValuesByName`: a map mapping the labels and values from the - `LabelMatchers` field. The values are pre-joined by `|` - (for used with the `=~` matcher in Prometheus). -- `GroupBySlice`: the slice form of `GroupBy`. - -In general, you'll probably want to use the `Series`, `LabelMatchers`, and -`GroupBy` fields. The other two are for advanced usage. - -The query is expected to return one value for each object requested. The -adapter will use the labels on the returned series to associate a given -series back to its corresponding object. - -For example: - -```yaml -# convert cumulative cAdvisor metrics into rates calculated over 2 minutes -metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)" -``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md deleted file mode 100644 index 9109666fa0..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/expression/_index.md +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: Prometheus Expressions -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/expression - - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/expression - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/ ---- - -The PromQL expressions in this doc can be used to configure [alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) - -> Before expressions can be used in alerts, monitoring must be enabled. 
For more information, refer to the documentation on enabling monitoring [at the cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [at the project level.]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/) - -For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) - - - -- [Cluster Metrics](#cluster-metrics) - - [Cluster CPU Utilization](#cluster-cpu-utilization) - - [Cluster Load Average](#cluster-load-average) - - [Cluster Memory Utilization](#cluster-memory-utilization) - - [Cluster Disk Utilization](#cluster-disk-utilization) - - [Cluster Disk I/O](#cluster-disk-i-o) - - [Cluster Network Packets](#cluster-network-packets) - - [Cluster Network I/O](#cluster-network-i-o) -- [Node Metrics](#node-metrics) - - [Node CPU Utilization](#node-cpu-utilization) - - [Node Load Average](#node-load-average) - - [Node Memory Utilization](#node-memory-utilization) - - [Node Disk Utilization](#node-disk-utilization) - - [Node Disk I/O](#node-disk-i-o) - - [Node Network Packets](#node-network-packets) - - [Node Network I/O](#node-network-i-o) -- [Etcd Metrics](#etcd-metrics) - - [Etcd Has a Leader](#etcd-has-a-leader) - - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) - - [Number of Failed Proposals](#number-of-failed-proposals) - - [GRPC Client Traffic](#grpc-client-traffic) - - [Peer Traffic](#peer-traffic) - - [DB Size](#db-size) - - [Active Streams](#active-streams) - - [Raft Proposals](#raft-proposals) - - [RPC Rate](#rpc-rate) - - [Disk Operations](#disk-operations) - - [Disk Sync Duration](#disk-sync-duration) -- [Kubernetes Components Metrics](#kubernetes-components-metrics) - - [API Server Request Latency](#api-server-request-latency) - - [API Server Request Rate](#api-server-request-rate) - - [Scheduling Failed Pods](#scheduling-failed-pods) - - [Controller Manager Queue Depth](#controller-manager-queue-depth) - - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) - - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) - - [Ingress Controller Connections](#ingress-controller-connections) - - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) -- [Rancher Logging Metrics](#rancher-logging-metrics) - - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) - - [Fluentd Input Rate](#fluentd-input-rate) - - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) - - [Fluentd Output Rate](#fluentd-output-rate) -- [Workload Metrics](#workload-metrics) - - [Workload CPU Utilization](#workload-cpu-utilization) - - [Workload Memory Utilization](#workload-memory-utilization) - - [Workload Network Packets](#workload-network-packets) - - [Workload Network I/O](#workload-network-i-o) - - [Workload Disk I/O](#workload-disk-i-o) -- [Pod Metrics](#pod-metrics) - - [Pod CPU Utilization](#pod-cpu-utilization) - - [Pod Memory Utilization](#pod-memory-utilization) - - [Pod Network Packets](#pod-network-packets) - - [Pod Network I/O](#pod-network-i-o) - - [Pod Disk I/O](#pod-disk-i-o) -- [Container Metrics](#container-metrics) - - [Container CPU Utilization](#container-cpu-utilization) - - [Container Memory Utilization](#container-memory-utilization) - - [Container Disk I/O](#container-disk-i-o) - - - -# Cluster Metrics - -### Cluster CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - 
(avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` |
-| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` |
-
-### Cluster Load Average
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | <br>load1 `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)` <br>load5 `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)` <br>load15 `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)` |
-| Summary | <br>load1 `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})` <br>load5 `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})` <br>load15 `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})` |
-
-### Cluster Memory Utilization
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` |
-| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` |
-
-### Cluster Disk Utilization
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` |
-| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` |
-
-### Cluster Disk I/O
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | <br>read `sum(rate(node_disk_read_bytes_total[5m])) by (instance)` <br>written `sum(rate(node_disk_written_bytes_total[5m])) by (instance)` |
-| Summary | <br>read `sum(rate(node_disk_read_bytes_total[5m]))` <br>written `sum(rate(node_disk_written_bytes_total[5m]))` |
-
-### Cluster Network Packets
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | <br>receive-dropped `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>receive-errs `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>receive-packets `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>transmit-dropped `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>transmit-errs `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>transmit-packets `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
-| Summary | <br>receive-dropped `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>receive-errs `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>receive-packets `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>transmit-dropped `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>transmit-errs `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>transmit-packets `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` |
-
-### Cluster Network I/O
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | <br>receive `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` <br>transmit `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
-| Summary | <br>receive `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` <br>transmit `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))` |
-
-# Node Metrics
-
-### Node CPU Utilization
-
-| Catalog | Expression |
-| --- | --- |
-| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` |
-| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` |
-
-### Node Load Average
-
-| Catalog | Expression |
-| --- | --- |
-| Detail |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| -| Summary |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| - -### Node Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | - -### Node Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | - -### Node Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| - -### Node Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
| -| Summary |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
| - -### Node Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
| -| Summary |
receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
| - -# Etcd Metrics - -### Etcd Has a Leader - -`max(etcd_server_has_leader)` - -### Number of Times the Leader Changes - -`max(etcd_server_leader_changes_seen_total)` - -### Number of Failed Proposals - -`sum(etcd_server_proposals_failed_total)` - -### GRPC Client Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| - -### Peer Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| - -### DB Size - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | -| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | - -### Active Streams - -| Catalog | Expression | -| --- | --- | -| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| -| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| - -### Raft Proposals - -| Catalog | Expression | -| --- | --- | -| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| -| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| - -### RPC Rate - -| Catalog | Expression | -| --- | --- | -| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| -| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| - -### Disk Operations - -| Catalog | Expression | -| --- | --- | -| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| -| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| - -### Disk Sync Duration - -| Catalog | Expression | -| --- | --- | -| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| -| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| - -# Kubernetes Components Metrics - -### API Server Request Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | -| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | - -### API Server Request Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | -| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | - -### Scheduling Failed Pods - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | -| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | - -### Controller Manager Queue Depth - -| Catalog | Expression | -| --- | --- | -| Detail |
volumes`sum(volumes_depth) by (instance)`
deployment`sum(deployment_depth) by (instance)`
replicaset`sum(replicaset_depth) by (instance)`
service`sum(service_depth) by (instance)`
serviceaccount`sum(serviceaccount_depth) by (instance)`
endpoint`sum(endpoint_depth) by (instance)`
daemonset`sum(daemonset_depth) by (instance)`
statefulset`sum(statefulset_depth) by (instance)`
replicationmanager`sum(replicationmanager_depth) by (instance)`
| -| Summary |
volumes`sum(volumes_depth)`
deployment`sum(deployment_depth)`
replicaset`sum(replicaset_depth)`
service`sum(service_depth)`
serviceaccount`sum(serviceaccount_depth)`
endpoint`sum(endpoint_depth)`
daemonset`sum(daemonset_depth)`
statefulset`sum(statefulset_depth)`
replicationmanager`sum(replicationmanager_depth)`
| - -### Scheduler E2E Scheduling Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | -| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | - -### Scheduler Preemption Attempts - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | -| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | - -### Ingress Controller Connections - -| Catalog | Expression | -| --- | --- | -| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| -| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - -### Ingress Controller Request Process Time - -| Catalog | Expression | -| --- | --- | -| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | -| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | - -# Rancher Logging Metrics - - -### Fluentd Buffer Queue Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | - -### Fluentd Input Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | - -### Fluentd Output Errors Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | -| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | - -### Fluentd Output Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | - -# Workload Metrics - -### Workload CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | -| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | - -### Workload Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -# Pod Metrics - -### Pod CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| - -### Pod Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | -| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | - -### Pod Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -# Container Metrics - -### Container CPU Utilization - -| Catalog | Expression | -| --- | --- | -| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | - -### Container Memory Utilization - -`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` - -### Container Disk I/O - -| Catalog | Expression | -| --- | --- | -| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/project-monitoring/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/project-monitoring/_index.md deleted file mode 100644 index 94e8203c58..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/project-monitoring/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Project Monitoring -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/project-monitoring - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring ---- - -_Available as of v2.2.4_ - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -This section covers the following topics: - -- [Monitoring scope](#monitoring-scope) -- [Permissions to configure project monitoring](#permissions-to-configure-project-monitoring) -- [Enabling project monitoring](#enabling-project-monitoring) -- [Project-level monitoring resource requirements](#project-level-monitoring-resource-requirements) -- [Project metrics](#project-metrics) - -### Monitoring Scope - -Using Prometheus, you can monitor Rancher at both the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. - -- [Cluster monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. - - - Kubernetes control plane - - etcd database - - All nodes (including workers) - -- Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. 
- -### Permissions to Configure Project Monitoring - -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. - -### Enabling Project Monitoring - -> **Prerequisite:** Cluster monitoring must be [enabled.]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) - -1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. - -1. Select **Tools > Monitoring** in the navigation bar. - -1. Select **Enable** to show the [Prometheus configuration options]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Enter in your desired configuration options. - -1. Click **Save**. - -### Project-Level Monitoring Resource Requirements - -Container| CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable ----------|---------------|---------------|-------------|-------------|------------- -Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes -Grafana | 100m | 100Mi | 200m | 200Mi | No - - -**Result:** A single application,`project-monitoring`, is added as an [application]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) to the project. After the application is `active`, you can start viewing project metrics through the [Rancher dashboard]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or directly from Grafana. - -> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. - -### Project Metrics -[Workload metrics]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/#workload-metrics) are available for the project if monitoring is enabled at the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and at the [project level.](#enabling-project-monitoring) - -You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. - -> **Example:** -> A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). After enabling project monitoring, you can edit the application to configure the Advanced Options -> Custom Metrics section. Enter the `Container Port` and `Path` and select the `Protocol`. - -To access a project-level Grafana instance, - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to a project that has monitoring enabled. - -1. From the project view, click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `project-monitoring` application. - -1. 
In the `project-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You will be logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md deleted file mode 100644 index 01c490eb02..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/prometheus/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Prometheus Configuration -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/prometheus - - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/prometheus/ - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/ ---- - -_Available as of v2.2.0_ - -While configuring monitoring at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), there are multiple options that can be configured. - -- [Basic Configuration](#basic-configuration) -- [Advanced Options](#advanced-options) -- [Node Exporter](#node-exporter) -- [Persistent Storage](#persistent-storage) -- [Remote Storage](#remote-storage) - -# Basic Configuration - -Option | Description --------|------------- -Data Retention | How long your Prometheus instance retains monitoring data scraped from Rancher objects before it's purged. -[Enable Node Exporter](#node-exporter) | Whether or not to deploy the node exporter. -Node Exporter Host Port | The host port on which data is exposed, i.e. data that Prometheus collects from your node hardware. Required if you have enabled the node exporter. -[Enable Persistent Storage](#persistent-storage) for Prometheus | Whether or not to configure storage for Prometheus so that metrics can be retained even if the Prometheus pod fails. -[Enable Persistent Storage](#persistent-storage) for Grafana | Whether or not to configure storage for Grafana so that the Grafana dashboards and configuration can be retained even if the Grafana pod fails. -Prometheus [CPU Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU resource limit for the Prometheus pod. 
-Prometheus [CPU Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU reservation for the Prometheus pod. -Prometheus [Memory Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource limit for the Prometheus pod. -Prometheus [Memory Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource requests for the Prometheus pod. -Selector | Ability to select the nodes in which Prometheus and Grafana pods are deployed to. To use this option, the nodes must have labels. - -# Advanced Options - -Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog]({{}}/rancher/v2.0-v2.4/en/catalog/), it can be configured like any other catalog application, by passing in values to Helm. - -> **Warning:** Any modification to the application without understanding the entire application can lead to catastrophic errors. - -### Prometheus RemoteRead and RemoteWrite - -_Available as of v2.4.0_ - -Prometheus RemoteRead and RemoteWrite can be configured as custom answers in the **Advanced Options** section. - -For more information on remote endpoints and storage, refer to the [Prometheus documentation.](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) - -The Prometheus operator documentation contains the full [RemoteReadSpec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) - -An example configuration would be: - -| Variable | Value | -|--------------|------------| -| `prometheus.remoteWrite[0].url` | `http://mytarget.com` | - -### LivenessProbe and ReadinessProbe - -_Available as of v2.4.0_ - -Prometheus LivenessProbe and ReadinessProbe can be configured as custom answers in the **Advanced Options** section. - -The Kubernetes probe spec is [here.](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#probe-v1-core) - -Some example key-value pairs are: - -| Variable | Value | -|--------------|------------| -| `prometheus.livenessProbe.timeoutSeconds` | 60 | -| `prometheus.readinessProbe.timeoutSeconds` | 60 | - -# Node Exporter - -The [node exporter](https://github.com/prometheus/node_exporter/blob/master/README.md) is a popular open source exporter, which exposes the metrics for hardware and \*NIX kernels OS. It is designed to monitor the host system. However, there are still issues with namespaces when running it in a container, mostly around filesystem mount spaces. In order to monitor actual network metrics for the container network, the node exporter must be deployed with the `hostNetwork` mode. - -When configuring Prometheus and enabling the node exporter, enter a host port in the **Node Exporter Host Port** that will not produce port conflicts with existing applications. 
The host port chosen must be open to allow internal traffic between Prometheus and the Node Exporter. - ->**Warning:** In order for Prometheus to collect the metrics of the node exporter, after enabling cluster monitoring, you must open the Node Exporter Host Port in the host firewall rules to allow intranet access. By default, `9796` is used as that host port. - -# Persistent Storage - ->**Prerequisite:** Configure one or more StorageClasses to use as [persistent storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) for your Prometheus or Grafana pod. - -By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring an external persistent storage to the cluster. With the external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. - -When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the StorageClass. - -# Remote Storage - ->**Prerequisite:** Need a remote storage endpoint to be available. The possible list of integrations is available [here](https://prometheus.io/docs/operating/integrations/) - -Using advanced options, remote storage integration for the Prometheus installation can be configured as follows: - -``` -prometheus.remoteWrite[0].url = http://remote1/push -prometheus.remoteWrite[0].remoteTimeout = 33s - -prometheus.remoteWrite[1].url = http://remote2/push - - -prometheus.remoteRead[0].url = http://remote1/read -prometheus.remoteRead[0].proxyUrl = http://proxy.url -prometheus.remoteRead[0].bearerToken = token-value - -prometheus.remoteRead[1].url = http://remote2/read -prometheus.remoteRead[1].remoteTimeout = 33s -prometheus.remoteRead[1].readRecent = true -``` - -Additional fields can be set up based on the [ReadSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md deleted file mode 100644 index f3748f37e3..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring/viewing-metrics/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Viewing Metrics -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/viewing-metrics - - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/viewing-metrics - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/viewing-metrics - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/ ---- - -_Available as of v2.2.0_ - -After you've enabled monitoring at either the [cluster 
level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), you will want to be start viewing the data being collected. There are multiple ways to view this data. - -## Rancher Dashboard - ->**Note:** This is only available if you've enabled monitoring at the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/). Project specific analytics must be viewed using the project's Grafana instance. - -Rancher's dashboards are available at multiple locations: - -- **Cluster Dashboard**: From the **Global** view, navigate to the cluster. -- **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** -- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** -- **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** -- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** - -Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. - -Within each Prometheus metrics widget, there are several ways to customize your view. - -- Toggle between two views: - - **Detail**: Displays graphs and charts that let you view each event in a Prometheus time series - - **Summary** Displays events in a Prometheus time series that are outside the norm. -- Change the range of the time series that you're viewing to see a more refined or expansive data sample. -- Customize the data sample to display data between specific dates and times. - -When analyzing these metrics, don't be concerned about any single standalone metric in the charts and graphs. Rather, you should establish a baseline for your metrics over the course of time, e.g. the range of values that your components usually operate within and are considered normal. After you establish the baseline, be on the lookout for any large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. - -## Grafana - -If you've enabled monitoring at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/monitoring/), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. - -Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). 
- -### Authentication - -Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/). In other words, a user's access in Grafana mirrors their access in Rancher. - -When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. - -### Accessing the Cluster-level Grafana Instance - -1. From the **Global** view, navigate to a cluster that has monitoring enabled. - -1. Go to the **System** project view. This project is where the cluster-level Grafana instance runs. - -1. Click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. - -1. Go to the `cluster-monitoring` application. - -1. In the `cluster-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. - -1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. - -**Results:** You are logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md deleted file mode 100644 index 4cbdfdd96b..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/_index.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Istio -weight: 15 -aliases: - - /rancher/v2.0-v2.4/en/dashboard/istio - - /rancher/v2.0-v2.4/en/project-admin/istio/configuring-resource-allocations/ - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/ - - /rancher/v2.0-v2.4/en/project-admin/istio - - /rancher/v2.0-v2.4/en/istio/legacy/cluster-istio - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/ ---- -_Available as of v2.3.0_ - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - -As a network of microservices changes and grows, the interactions between them can become more difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. 
- -Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -This service mesh provides features that include but are not limited to the following: - -- Traffic management features -- Enhanced monitoring and tracing -- Service discovery and routing -- Secure connections and service-to-service authentication with mutual TLS -- Load balancing -- Automatic retries, backoff, and circuit breaking - -After Istio is enabled in a cluster, you can leverage Istio's control plane functionality with `kubectl`. - -Rancher's Istio integration comes with comprehensive visualization aids: - -- **Trace the root cause of errors with Jaeger.** [Jaeger](https://www.jaegertracing.io/) is an open-source tool that provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. Distributed tracing allows you to view an entire chain of calls, which might originate with a user request and traverse dozens of microservices. -- **Get the full picture of your microservice architecture with Kiali.** [Kiali](https://www.kiali.io/) provides a diagram that shows the services within a service mesh and how they are connected, including the traffic rates and latencies between them. You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. -- **Gain insights from time series analytics with Grafana dashboards.** [Grafana](https://grafana.com/) is an analytics platform that allows you to query, visualize, alert on and understand the data gathered by Prometheus. -- **Write custom queries for time series data with the Prometheus UI.** [Prometheus](https://prometheus.io/) is a systems monitoring and alerting toolkit. Prometheus scrapes data from your cluster, which is then used by Grafana. A Prometheus UI is also integrated into Rancher, and lets you write custom queries for time series data and see the results in the UI. - - -Istio needs to be set up by a Rancher administrator or cluster administrator before it can be used in a project. - -# Prerequisites - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources) to run all of the components of Istio. - -# Setup Guide - -Refer to the [setup guide]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup) for instructions on how to set up Istio and use it in a project. - -# Disabling Istio - -To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio) - -# Accessing Visualizations - -> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/) - -After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. - -Your access to the visualizations depend on your role. 
Grafana and Prometheus are only available for cluster owners. The Kiali and Jaeger UIs are available only to cluster owners by default, but cluster owners can allow project members to access them by editing the Istio settings. When you go to your project and click **Resources > Istio,** you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. - -To see the visualizations, go to the cluster where Istio is set up and click **Tools > Istio.** You should see links to each UI at the top of the page. - -You can also get to the visualization tools from the project view. - -# Viewing the Kiali Traffic Graph - -1. From the project view in Rancher, click **Resources > Istio.** -1. If you are a cluster owner, you can go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. - -# Viewing Traffic Metrics - -Istio’s monitoring features provide visibility into the performance of all your services. - -1. From the project view in Rancher, click **Resources > Istio.** -1. Go to the **Traffic Metrics** tab. After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** Cluster owners can see all of the metrics, while project members can see a subset of the metrics. - -# Architecture - -Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. - -Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. - -Enabling Istio in Rancher enables monitoring in the cluster, and enables Istio in all new namespaces that are created in a cluster. You need to manually enable Istio in preexisting namespaces. - -When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. - -For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). - -### Two Ingresses - -By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. 
- -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md deleted file mode 100644 index 3f44270f3a..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Setup Guide -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup - - /rancher/v2.0-v2.4/en/istio/legacy/setup - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/ ---- - -This section describes how to enable Istio and start using it in your projects. - -This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. - -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. - -> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management) - -1. [Enable Istio in the cluster.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster) -1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) -1. [Select the nodes where the main Istio components will be deployed.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors) -1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads) -1. [Set up the Istio gateway. ]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) -1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management) -1. [Generate traffic and see Istio in action.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/#view-traffic) - diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md deleted file mode 100644 index fa88cd2852..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads/_index.md +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: 4. Add Deployments and Services with the Istio Sidecar -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads - - /rancher/v2.0-v2.4/en/istio/legacy/setup/deploy-workloads - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/ ---- - -> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled. - -Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. 
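For reference, a namespace that has sidecar auto injection turned on simply carries the `istio-injection=enabled` label described in this guide. Below is a minimal sketch of such a namespace manifest; the namespace name is a placeholder:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: bookinfo          # placeholder namespace name
  labels:
    # label that marks the namespace for automatic Envoy sidecar injection;
    # Rancher applies it when Istio is enabled for the namespace
    istio-injection: enabled
```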
- -To inject the Istio sidecar on an existing workload in the namespace, go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. - -Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see istio-init and istio-proxy alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. - -### 3. Add Deployments and Services - -Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. - -1. Go to the project inside the cluster you want to deploy the workload on. -1. In Workloads, click **Import YAML.** -1. Copy the below resources into the form. -1. Click **Import.** - -This will set up the following sample resources from Istio's example BookInfo app: - -Details service and deployment: - -- A `details` Service -- A ServiceAccount for `bookinfo-details` -- A `details-v1` Deployment - -Ratings service and deployment: - -- A `ratings` Service -- A ServiceAccount for `bookinfo-ratings` -- A `ratings-v1` Deployment - -Reviews service and deployments (three versions): - -- A `reviews` Service -- A ServiceAccount for `bookinfo-reviews` -- A `reviews-v1` Deployment -- A `reviews-v2` Deployment -- A `reviews-v3` Deployment - -Productpage service and deployment: - -This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. - -- A `productpage` service -- A ServiceAccount for `bookinfo-productpage` -- A `productpage-v1` Deployment - -### Resource YAML - -```yaml -# Copyright 2017 Istio Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -################################################################################################## -# Details service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: details - labels: - app: details - service: details -spec: - ports: - - port: 9080 - name: http - selector: - app: details ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-details ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: details-v1 - labels: - app: details - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: details - version: v1 - template: - metadata: - labels: - app: details - version: v1 - spec: - serviceAccountName: bookinfo-details - containers: - - name: details - image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Ratings service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: ratings - labels: - app: ratings - service: ratings -spec: - ports: - - port: 9080 - name: http - selector: - app: ratings ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-ratings ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ratings-v1 - labels: - app: ratings - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: ratings - version: v1 - template: - metadata: - labels: - app: ratings - version: v1 - spec: - serviceAccountName: bookinfo-ratings - containers: - - name: ratings - image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Reviews service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: reviews - labels: - app: reviews - service: reviews -spec: - ports: - - port: 9080 - name: http - selector: - app: reviews ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-reviews ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v1 - labels: - app: reviews - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v1 - template: - metadata: - labels: - app: reviews - version: v1 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v2 - labels: - app: reviews - version: v2 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v2 - template: - metadata: - labels: - app: reviews - version: v2 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v3 - labels: - app: reviews - version: v3 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v3 - template: - metadata: - labels: - app: reviews - version: v3 - spec: - serviceAccountName: 
bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Productpage services -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: productpage - labels: - app: productpage - service: productpage -spec: - ports: - - port: 9080 - name: http - selector: - app: productpage ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-productpage ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: productpage-v1 - labels: - app: productpage - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: productpage - version: v1 - template: - metadata: - labels: - app: productpage - version: v1 - spec: - serviceAccountName: bookinfo-productpage - containers: - - name: productpage - image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -``` - -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md deleted file mode 100644 index c7a06c44a5..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: 1. Enable Istio in the Cluster -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster - - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/ ---- - -This cluster uses the default Nginx controller to allow traffic into the cluster. - -A Rancher [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. - -# Prerequisites - -This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.0-v2.4/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning) on which you will install Istio. - -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/) - -The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) - -> If the cluster has a Pod Security Policy enabled there are [additional prerequisites steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/) - -# Enable Istio in the Cluster - -1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. -1. Click **Tools > Istio.** -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/) for the Istio components. 
Ensure you have enough resources on your worker nodes to enable Istio. -1. Click **Enable**. -1. Click **Save**. - -**Result:** Istio is enabled at the cluster level. - -The Istio application, `cluster-istio`, is added as an application to the cluster's `system` project. - -When Istio is enabled in the cluster, the label for Istio sidecar auto injection,`istio-injection=enabled`, will be automatically added to each new namespace in this cluster. This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. - -### [Next: Enable Istio in a Namespace]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md deleted file mode 100644 index d3a8130ac2..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Enable Istio with Pod Security Policies -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/ ---- - - >**Note:** The following guide is only for RKE provisioned clusters. - -If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. - -The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). - -- 1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) -- 2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) -- 3. [Install Istio.](#3-install-istio) - -### 1. Configure the System Project Policy to allow Istio install - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Find the **Project: System** project and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click Save. - - -### 2. Install the CNI Plugin in the System Project - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Select the **Project: System** project. -1. Choose **Tools > Catalogs** in the navigation bar. -1. Add a catalog with the following: - 1. Name: istio-cni - 1. Catalog URL: https://github.com/istio/cni - 1. Branch: The branch that matches your current release, for example: `release-1.4`. -1. From the main menu select **Apps** -1. 
Click Launch and select istio-cni -1. Update the namespace to be "kube-system" -1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: - -``` ---- - logLevel: "info" - excludeNamespaces: - - "istio-system" - - "kube-system" -``` - -### 3. Install Istio - -Follow the [primary instructions]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. - -After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md deleted file mode 100644 index 97a725a79d..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: 2. Enable Istio in a Namespace -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace - - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-namespace - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/ ---- - -You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. - -This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. - -> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio enabled. - -1. In the Rancher UI, go to the cluster view. Click the **Projects/Namespaces** tab. -1. Go to the namespace where you want to enable the Istio sidecar auto injection and click the **⋮.** -1. Click **Edit.** -1. In the **Istio sidecar auto injection** section, click **Enable.** -1. Click **Save.** - -**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. - -### Verifying that Automatic Istio Sidecar Injection is Enabled - -To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. - -### Excluding Workloads from Being Injected with the Istio Sidecar - -If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: - -``` -sidecar.istio.io/inject: “false” -``` - -To add the annotation to a workload, - -1. From the **Global** view, open the project that has the workload that should not have the sidecar. -1. Click **Resources > Workloads.** -1. Go to the workload that should not have the sidecar and click **⋮ > Edit.** -1. Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. In the **Key** field, enter `sidecar.istio.io/inject`. -1. In the **Value** field, enter `false`. -1. Click **Save.** - -**Result:** The Istio sidecar will not be injected into the workload. 
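If you manage your workloads as YAML instead of through the UI, the same exclusion is a single annotation on the pod template. The sketch below is illustrative only; the Job name, image, and command are placeholders:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: sleep-job   # placeholder name
spec:
  template:
    metadata:
      annotations:
        # Tells Istio not to inject the Envoy sidecar into this pod.
        sidecar.istio.io/inject: "false"
    spec:
      restartPolicy: Never
      containers:
        - name: sleep
          image: busybox   # placeholder image
          command: ["sleep", "10"]
```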
- -> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. - - -### [Next: Select the Nodes ]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md deleted file mode 100644 index 7bd777e235..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway/_index.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: 5. Set up the Istio Gateway -weight: 5 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway - - /rancher/v2.0-v2.4/en/istio/legacy/setup/gateway - - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/gateway - - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/ ---- - -The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. - -You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. - -To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two ingresses. - -You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. - -You can route traffic into the service mesh with a load balancer or just Istio's NodePort gateway. This section describes how to set up the NodePort gateway. - -For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) - -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - -# Enable the Istio Gateway - -The ingress gateway is a Kubernetes service that will be deployed in your cluster. There is only one Istio gateway per cluster. - -1. Go to the cluster where you want to allow outside traffic into Istio. -1. Click **Tools > Istio.** -1. Expand the **Ingress Gateway** section. -1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/) -1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. -1. Click **Save.** - -**Result:** The gateway is deployed, which allows Istio to receive traffic from outside the cluster. 
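To confirm the gateway service from the command line, you can list it with `kubectl`. This is a quick sketch that assumes the service keeps the upstream Istio name `istio-ingressgateway` in the `istio-system` namespace:

```
# The TYPE column should show NodePort (or LoadBalancer, if you configured one),
# and the PORT(S) column shows the node ports assigned to the gateway.
kubectl -n istio-system get service istio-ingressgateway
```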
- -# Add a Kubernetes Gateway that Points to the Istio Gateway - -To allow traffic to reach Ingress, you will also need to provide a Kubernetes gateway resource in your YAML that points to Istio's implementation of the ingress gateway to the cluster. - -1. Go to the namespace where you want to deploy the Kubernetes gateway and click **Import YAML.** -1. Upload the gateway YAML as a file or paste it into the form. An example gateway YAML is provided below. -1. Click **Import.** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: bookinfo-gateway -spec: - selector: - istio: ingressgateway # use istio default controller - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "*" ---- -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: bookinfo -spec: - hosts: - - "*" - gateways: - - bookinfo-gateway - http: - - match: - - uri: - exact: /productpage - - uri: - prefix: /static - - uri: - exact: /login - - uri: - exact: /logout - - uri: - prefix: /api/v1/products - route: - - destination: - host: productpage - port: - number: 9080 -``` - -**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. - -Confirm that the resource exists by running: -``` -kubectl get gateway -A -``` - -The result should be something like this: -``` -NAME AGE -bookinfo-gateway 64m -``` - -### Access the ProductPage Service from a Web Browser - -To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: - -`http://:/productpage` - -To get the ingress gateway URL and port, - -1. Go to the `System` project in your cluster. -1. Within the `System` project, go to `Resources` > `Workloads` then scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. -1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. - -**Result:** You should see the BookInfo app in the web browser. - -For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) - -# Troubleshooting - -The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. - -### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller - -You can try the steps in this section to make sure the Kubernetes gateway is configured properly. - -In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: - -1. Go to the `System` project in your cluster. -1. Within the `System` project, go to the namespace `istio-system`. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. -1. Click the name of this workload and go to the **Labels and Annotations** section. 
You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller.
-
-### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management)
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md
deleted file mode 100644
index cae0c5936f..0000000000
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors/_index.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 3. Select the Nodes Where Istio Components Will be Deployed
-weight: 3
-aliases:
-  - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors
-  - /rancher/v2.0-v2.4/en/istio/legacy/setup/node-selectors
-  - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/node-selectors
-  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/
----
-
-> **Prerequisite:** Your cluster needs a worker node that can be designated for Istio. The worker node should meet the [resource requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources)
-
-This section describes how to use node selectors to configure Istio components to be deployed on a designated node.
-
-In larger deployments, it is strongly advised that Istio's infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component.
-
-# Adding a Label to the Istio Node
-
-First, add a label to the node where Istio components should be deployed. This label can have any key-value pair. For this example, we will use the key `istio` and the value `enabled`.
-
-1. From the cluster view, go to the **Nodes** tab.
-1. Go to a worker node that will host the Istio components and click **⋮ > Edit.**
-1. Expand the **Labels & Annotations** section.
-1. Click **Add Label.**
-1. In the fields that appear, enter `istio` for the key and `enabled` for the value.
-1. Click **Save.**
-
-**Result:** A worker node has the label that will allow you to designate it for Istio components.
-
-# Configuring Istio Components to Use the Labeled Node
-
-Configure each Istio component to be deployed to the node with the Istio label. Each Istio component can be configured individually, but in this tutorial, we will configure all of the components to be scheduled on the same node for the sake of simplicity.
-
-For larger deployments, it is recommended to schedule each component of Istio onto separate nodes.
-
-1. From the cluster view, click **Tools > Istio.**
-1. Expand the **Pilot** section and click **Add Selector** in the form that appears. Enter the node selector label that you added to the Istio node. In our case, we are using the key `istio` and the value `enabled`.
-1. Repeat the previous step for the **Mixer** and **Tracing** sections.
-1. Click **Save.**
-
-**Result:** The Istio components will be deployed on the Istio node.
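If you prefer to label the node with `kubectl` instead of the UI, a minimal sketch looks like this, where `worker-node-1` is a placeholder for your node's name:

```
# Add the label that the Istio components' node selectors will match.
kubectl label node worker-node-1 istio=enabled

# Verify which nodes carry the label.
kubectl get nodes -l istio=enabled
```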
-
-### [Next: Add Deployments and Services]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads)
\ No newline at end of file
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md
deleted file mode 100644
index 003ec7c710..0000000000
--- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management/_index.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title: 6. Set up Istio's Components for Traffic Management
-weight: 6
-aliases:
-  - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management
-  - /rancher/v2.0-v2.4/en/istio/legacy/setup/set-up-traffic-management
-  - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management
-  - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/
----
-
-A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*.
-
-- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed.
-- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred.
-
-This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service.
-
-In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v3`.
-
-After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service.
-
-To deploy the virtual service and destination rules for the `reviews` service,
-
-1. Go to the project view and click **Import YAML.**
-1. Copy the resources below into the form.
-1. Click **Import.**
-
-```
-apiVersion: networking.istio.io/v1alpha3
-kind: VirtualService
-metadata:
-  name: reviews
-spec:
-  hosts:
-  - reviews
-  http:
-  - route:
-    - destination:
-        host: reviews
-        subset: v1
-      weight: 50
-    - destination:
-        host: reviews
-        subset: v3
-      weight: 50
----
-apiVersion: networking.istio.io/v1alpha3
-kind: DestinationRule
-metadata:
-  name: reviews
-spec:
-  host: reviews
-  subsets:
-  - name: v1
-    labels:
-      version: v1
-  - name: v2
-    labels:
-      version: v2
-  - name: v3
-    labels:
-      version: v3
-```
-**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`.
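To sanity-check the split from a terminal, you can confirm that both resources exist and then send repeated requests through the ingress gateway. The namespace, host, and port below are placeholders for your own BookInfo project and `istio-ingressgateway` service:

```
kubectl -n <bookinfo-namespace> get virtualservice reviews
kubectl -n <bookinfo-namespace> get destinationrule reviews

# Generate a burst of traffic so the routing split shows up in Kiali.
for i in $(seq 1 100); do
  curl -s -o /dev/null http://<gateway-host>:<gateway-port>/productpage
done
```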
- -### [Next: Generate and View Traffic]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md deleted file mode 100644 index 1d4887810e..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/_index.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: Notifiers -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/notifiers - - /rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/ - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/ ---- - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. - -Rancher integrates with a variety of popular IT services, including: - -- **Slack**: Send alert notifications to your Slack channels. -- **Email**: Choose email recipients for alert notifications. -- **PagerDuty**: Route notifications to staff by phone, SMS, or personal email. -- **WebHooks**: Update a webpage with alert notifications. -- **WeChat**: (Available as of v2.2.0) Send alert notifications to your Enterprise WeChat contacts. -- **DingTalk**: (Available as of v2.4.6) Send alert notifications to DingTalk using a webhook. -- **Microsoft Teams**: (Available as of v2.4.6) Send alert notifications to Teams using a webhook. - -This section covers the following topics: - -- [Roles-based access control for notifiers](#roles-based-access-control-for-notifiers) -- [Adding notifiers](#adding-notifiers) -- [Configuration](#configuration) -- [Managing notifiers](#managing-notifiers) -- [Example payload for a webhook alert notifier](#example-payload-for-a-webhook-alert-notifier) - -# Roles-based Access Control for Notifiers - -Notifiers are configured at the cluster level. This model ensures that only cluster owners need to configure notifiers, leaving project owners to simply configure alerts in the scope of their projects. You don't need to dispense privileges like SMTP server access or cloud account access. - -# Adding Notifiers - -Set up a notifier so that you can begin configuring and sending alerts. - -1. From the **Global View**, open the cluster that you want to add a notifier. -1. From the main menu, select **Tools > Notifiers**. Then click **Add Notifier**. -1. Select the service you want to use as your notifier, and then fill out the form. For help filling out the form, refer to the configuration section below. -1. Click **Test.** You should receive a notification confirming that the notifier is configured correctly. -1. Click **Add** to complete adding the notifier. - -**Result:** Your notifier is added to Rancher. - -# Configuration - -- [Slack](#slack) -- [Email](#email) -- [PagerDuty](#pagerduty) -- [Webhook](#webhook) -- [WeChat](#wechat) -- [DingTalk](#dingtalk) -- [Microsoft Teams](#microsoft-teams) - -### Slack - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| URL | From Slack, create a webhook. For instructions, see the [Slack Documentation](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack). 
Then enter the Slack webhook URL. | -| Default Channel | Enter the name of the channel that you want to send alert notifications in the following format: `#`. Both public and private channels are supported. | -| Proxy URL | Proxy for the Slack webhook. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, the Slack channel you're configuring for the notifier outputs **Slack setting validated.** - -### Email - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Default Recipient Address | Enter the email address that you want to receive the notification. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -SMTP Server Configuration: - -| Field | Explanation | -|----------|----------------------| -| Sender | Enter an email address available on your mail server that you want to send the notification. | -| Host | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com` | -| Port | In the **Port** field, enter the port used for email. Typically, TLS uses `587` and SSL uses `465`. | -| Use TLS | If you're using TLS, make sure **Use TLS** is selected. | -| Username | Username to authenticate with the SMTP server. | -| Password | Password to authenticate with the SMTP server. | - -**Validation:** Click **Test**. If the test is successful, Rancher prints **settings validated** and you receive a test notification email. - -### PagerDuty - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Default Integration Key | From PagerDuty, create a Prometheus integration. For instructions, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. -| Service Key | The same as the integration key. For instructions on creating a Prometheus integration, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, your PagerDuty endpoint outputs **PagerDuty setting validated.** - -### Webhook - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| URL | Using the app of your choice, create a webhook URL. | -| Proxy URL | Proxy for the webhook. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test**. If the test is successful, the URL you're configuring as a notifier outputs **Webhook setting validated.** - -### WeChat - -_Available as of v2.2.0_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Corporation ID | Enter the "EnterpriseID" of your corporation. You can get it fro the [Profile page](https://work.weixin.qq.com/wework_admin/frame#profile). 
| -| Application Agent ID | From Enterprise WeChat, create an application in the [Application page](https://work.weixin.qq.com/wework_admin/frame#apps), and then enter the "AgentId" of this application. You will also need to enter the application secret. | -| Application Secret | The secret that corresponds to the Application Agent ID. | -| Recipient Type | Party, tag, or user. | -| Default Recipient | The default recipient ID should correspond to the recipient type. It should be the party ID, tag ID or user account that you want to receive the notification. You could get contact information from [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). | -| Proxy URL | If you are using a proxy, enter the proxy URL. | -| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, you should receive an alert message. - -### DingTalk - -_Available as of v2.4.6_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Webhook URL | Enter the DingTalk webhook URL. For help setting up the webhook, refer to the [DingTalk documentation.](https://www.alibabacloud.com/help/doc-detail/52872.htm) | -| Secret | Optional: Enter a secret for the DingTalk webhook. | -| Proxy URL | Optional: Enter a proxy for the DingTalk webhook. | -| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, the DingTalk notifier output is **DingTalk setting validated.** - -### Microsoft Teams - -_Available as of v2.4.6_ - -| Field | Explanation | -|----------|----------------------| -| Name | Enter a **Name** for the notifier. | -| Webhook URL | Enter the Microsoft Teams webhook URL. For help setting up the webhook, refer to the [Teams Documentation.](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook) | -| Proxy URL | Optional: Enter a proxy for the Teams webhook. | -| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | - -**Validation:** Click **Test.** If the test is successful, the Teams notifier output is **MicrosoftTeams setting validated.** - -# Managing Notifiers - -After you set up notifiers, you can manage them. From the **Global** view, open the cluster that you want to manage your notifiers. Select **Tools > Notifiers**. You can: - -- **Edit** their settings that you configured during their initial setup. -- **Clone** them, to quickly setup slightly different notifiers. -- **Delete** them when they're no longer necessary. 
- -# Example Payload for a Webhook Alert Notifier - -```json -{ - "receiver": "c-2a3bc:kube-components-alert", - "status": "firing", - "alerts": [ - { - "status": "firing", - "labels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)", - "component_name": "scheduler", - "group_id": "c-2a3bc:kube-components-alert", - "logs": "Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service", - "severity": "critical" - }, - "annotations": {}, - "startsAt": "2020-01-30T19:18:13.321684733Z", - "endsAt": "0001-01-01T00:00:00Z", - "generatorURL": "" - } - ], - "groupLabels": { - "component_name": "scheduler", - "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service" - }, - "commonLabels": { - "alert_name": "Scheduler is unavailable", - "alert_type": "systemService", - "cluster_name": "mycluster (ID: c-2a3bc)" - } -} -``` -# What's Next? - -After creating a notifier, set up alerts to receive notifications of Rancher system events. - -- [Cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) can set up alerts at the [cluster level]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/). -- [Project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can set up alerts at the [project level]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/alerts/). diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/opa-gatekeeper/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/tools/opa-gatekeeper/_index.md deleted file mode 100644 index 4161da3c3c..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/opa-gatekeeper/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: OPA Gatekeeper -weight: 17 -aliases: - - /rancher/v2.0-v2.4/en/cluster-admin/tools/opa-gatekeeper - - /rancher/v2.0-v2.4/en/opa-gatekeper/Open%20Policy%20Agent - - /rancher/v2.0-v2.4/en/opa-gatekeper ---- -_Available as of v2.4.0_ - -To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. [OPA (Open Policy Agent)](https://www.openpolicyagent.org/) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. - -OPA provides a high-level declarative language that lets you specify policy as code and ability to extend simple APIs to offload policy decision-making. - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a project that provides integration between OPA and Kubernetes. OPA Gatekeeper provides: - -- An extensible, parameterized policy library. -- Native Kubernetes CRDs for instantiating the policy library, also called “constraints." -- Native Kubernetes CRDs for extending the policy library, also called "constraint templates." -- Audit functionality. 
- -To read more about OPA, please refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/) - -# How the OPA Gatekeeper Integration Works - -Kubernetes provides the ability to extend API server functionality via admission controller webhooks, which are invoked whenever a resource is created, updated or deleted. Gatekeeper is installed as a validating webhook and enforces policies defined by Kubernetes custom resource definitions. In addition to the admission control usage, Gatekeeper provides the capability to audit existing resources in Kubernetes clusters and mark current violations of enabled policies. - -OPA Gatekeeper is made available via Rancher's Helm system chart, and it is installed in a namespace named `gatekeeper-system.` - -# Enabling OPA Gatekeeper in a Cluster - -> **Prerequisites:** -> -> - Only administrators and cluster owners can enable OPA Gatekeeper. -> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) - -1. Navigate to the cluster's **Dashboard** view. -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. To install Gatekeeper with the default configuration, click on **Enable Gatekeeper (v0.1.0) with defaults.** -1. To change any default configuration, click on **Customize Gatekeeper yaml configuration.** - -# Constraint Templates - -[Constraint templates](https://github.com/open-policy-agent/gatekeeper#constraint-templates) are Kubernetes custom resources that define the schema and Rego logic of the OPA policy to be applied by Gatekeeper. For more information on the Rego policy language, refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/policy-language/) - -When OPA Gatekeeper is enabled, Rancher installs some templates by default. - -To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates.** - -Rancher also provides the ability to create your own constraint templates by importing YAML definitions. - -# Creating and Configuring Constraints - -[Constraints](https://github.com/open-policy-agent/gatekeeper#constraints) are Kubernetes custom resources that define the scope of objects to which a specific constraint template applies to. The complete policy is defined by constraint templates and constraints together. - -> **Prerequisites:** OPA Gatekeeper must be enabled in the cluster. - -To list the constraints installed, go to the left side menu under OPA Gatekeeper, and click on **Constraints.** - -New constraints can be created from a constraint template. - -Rancher provides the ability to create a constraint by using a convenient form that lets you input the various constraint fields. - -The **Edit as yaml** option is also available to configure the the constraint's yaml definition. - -### Exempting Rancher's System Namespaces from Constraints - -When a constraint is created, ensure that it does not apply to any Rancher or Kubernetes system namespaces. If the system namespaces are not excluded, then it is possible to see many resources under them marked as violations of the constraint. 
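As a rough illustration, the constraint below scopes itself to two placeholder user namespaces through its match section and only records violations rather than denying requests. It assumes a `K8sRequiredLabels` constraint template (as shipped with the upstream Gatekeeper demo policies); the `parameters` schema depends on the template you actually install:

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: deployments-must-have-owner
spec:
  enforcementAction: dryrun   # switch to "deny" to enforce the policy
  match:
    kinds:
      - apiGroups: ["apps"]
        kinds: ["Deployment"]
    namespaces:               # placeholder user namespaces; system namespaces are left out
      - app-team-a
      - app-team-b
  parameters:
    labels: ["owner"]
```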
- -To limit the scope of the constraint only to user namespaces, always specify these namespaces under the **Match** field of the constraint. - -Also, the constraint may interfere with other Rancher functionality and deny system workloads from being deployed. To avoid this, exclude all Rancher-specific namespaces from your constraints. - -# Enforcing Constraints in your Cluster - -When the **Enforcement Action** is **Deny,** the constraint is immediately enabled and will deny any requests that violate the policy defined. By default, the enforcement value is **Deny.** - -When the **Enforcement Action** is **Dryrun,** then any resources that violate the policy are only recorded under the constraint's status field. - -To enforce constraints, create a constraint using the form. In the **Enforcement Action** field, choose **Deny.** - -# Audit and Violations in your Cluster - -OPA Gatekeeper runs a periodic audit to check if any existing resource violates any enforced constraint. The audit-interval (default 300s) can be configured while installing Gatekeeper. - -On the Gatekeeper page, any violations of the defined constraints are listed. - -Also under **Constraints,** the number of violations of the constraint can be found. - -The detail view of each constraint lists information about the resource that violated the constraint. - -# Disabling Gatekeeper - -1. Navigate to the cluster's Dashboard view -1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** -1. Click the **⋮ > Disable**. - -**Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. - diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/_index.md deleted file mode 100644 index 51e7fa4b58..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/_index.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Upgrading and Rolling Back Kubernetes -weight: 70 ---- - -Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. - -Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation]({{}}/rke/latest/en/). 
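As a preview of what that RKE-level configuration looks like, the following is a rough sketch of the `upgrade_strategy` block in a `cluster.yml`; the values shown are illustrative, and the RKE documentation remains the authoritative reference for the schema:

```yaml
upgrade_strategy:
  max_unavailable_worker: 10%       # worker nodes upgraded per batch
  max_unavailable_controlplane: "1"
  drain: false                      # set to true to cordon and drain nodes before upgrading them
  node_drain_input:
    ignore_daemonsets: true
    delete_local_data: false
    grace_period: -1                # use each pod's own termination grace period
    timeout: 120
```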
- -This section covers the following topics: - -- [New Features](#new-features) -- [Tested Kubernetes Versions](#tested-kubernetes-versions) -- [How Upgrades Work](#how-upgrades-work) -- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) -- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) -- [Rolling Back](#rolling-back) -- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) - - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) - - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) - - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) - - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) -- [Troubleshooting](#troubleshooting) - -# New Features - -As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.]({{}}/rancher/v2.0-v2.4/en/admin-settings/k8s-metadata) - -As of Rancher v2.4.0, - -- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. For details, refer to the [section on imported clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters) -- New advanced options are exposed in the Rancher UI for configuring the upgrade strategy of an RKE cluster: **Maximum Worker Nodes Unavailable** and **Drain nodes.** These options leverage the new cluster upgrade process of RKE v1.1.0, in which worker nodes are upgraded in batches, so that applications can remain available during cluster upgrades, under [certain conditions.](#maintaining-availability-for-applications-during-upgrades) - -# Tested Kubernetes Versions - -Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.4.17/) - -# How Upgrades Work - -RKE v1.1.0 changed the way that clusters are upgraded. - -In this section of the [RKE documentation,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. - - -# Recommended Best Practice for Upgrades - -{{% tabs %}} -{{% tab "Rancher v2.4+" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade kubernetes version before restoring the etcd snapshot. - -The restore operation will work on a cluster that is not in a healthy or active state. -{{% /tab %}} -{{% tab "Rancher before v2.4" %}} -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, restore the cluster from the etcd snapshot. 
- -The cluster cannot be downgraded to a previous Kubernetes version. -{{% /tab %}} -{{% /tabs %}} - -# Upgrading the Kubernetes Version - -> **Prerequisites:** -> -> - The options below are available only for [Rancher-launched RKE Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) and imported/registered K3s Kubernetes clusters. -> - Before upgrading Kubernetes, [back up your cluster.]({{}}/rancher/v2.0-v2.4/en/backups) - -1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. - -1. Expand **Cluster Options**. - -1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. - -1. Click **Save**. - -**Result:** Kubernetes begins upgrading for the cluster. - -# Rolling Back - -_Available as of v2.4_ - -A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: - -- [Backing up a cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/#how-snapshots-work) -- [Restoring a cluster from backup]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/#restoring-a-cluster-from-a-snapshot) - -# Configuring the Upgrade Strategy - -As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements]({{}}/rke/latest/en/upgrades/maintaining-availability) are met. - -The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. - -### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI - -From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. - -By default, the maximum number of unavailable worker is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. - -To change the default number or percentage of worker nodes, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Maxiumum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -### Enabling Draining Nodes During Upgrades from the Rancher UI - -By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. - -To enable draining each node during a cluster upgrade, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** -1. 
Choose a safe or aggressive drain option. For more information about each option, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes/#aggressive-and-safe-draining-options) -1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. -1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show state of etcd and controlplane as drained, even though they are being drained. - -### Maintaining Availability for Applications During Upgrades - -_Available as of RKE v1.1.0_ - -In [this section of the RKE documentation,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. - -### Configuring the Upgrade Strategy in the cluster.yml - -More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. - -For details, refer to [Configuring the Upgrade Strategy]({{}}/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. - -# Troubleshooting - -If a node doesn't come up after an upgrade, the `rke up` command errors out. - -No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. - -If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. - -A failed node could be in many different states: - -- Powered off -- Unavailable -- User drains a node while upgrade is in process, so there are no kubelets on the node -- The upgrade itself failed - -If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/_index.md deleted file mode 100644 index 2be0a8a58d..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: "Kubernetes Persistent Storage: Volumes and Storage Classes" -description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" -weight: 2031 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/ - - /rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ ---- -When deploying an application that needs to retain data, you'll need to create persistent storage. 
Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. - -The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](./how-storage-works) - -### Prerequisites - -To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. - -If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) - -For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. - -### Setting up Existing Storage - -The overall workflow for setting up existing storage is as follows: - -1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. -2. Add a persistent volume (PV) that refers to the persistent storage. -3. Add a persistent volume claim (PVC) that refers to the PV. -4. Mount the PVC as a volume in your workload. - -For details and prerequisites, refer to [this page.](./attaching-existing-storage) - -### Dynamically Provisioning New Storage in Rancher - -The overall workflow for provisioning new storage is as follows: - -1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. -2. Add a persistent volume claim (PVC) that refers to the storage class. -3. Mount the PVC as a volume for your workload. - -For details and prerequisites, refer to [this page.](./provisioning-new-storage) - -### Longhorn Storage - -[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. - -Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. - -If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/1.0.2/what-is-longhorn/) - -### Provisioning Storage Examples - -We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) - -### GlusterFS Volumes - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. 
For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) - -### iSCSI Volumes - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](./iscsi-volumes) - -### hostPath Volumes -Before you create a hostPath volume, you need to set up an [extra_bind]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. - -### Related Links - -- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md deleted file mode 100644 index 407782a7fd..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Setting up Existing Storage -weight: 1 ---- - -This section describes how to set up existing persistent storage for workloads in Rancher. - -> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -To set up storage, follow these steps: - -1. [Set up persistent storage.](#1-set-up-persistent-storage) -2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) -3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) -4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-volume-claim-as-a-volume-in-your-workload) - -### Prerequisites - -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -### 1. Set up persistent storage - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. - -### 2. Add a persistent volume that refers to the persistent storage - -These steps describe how to set up a persistent volume at the cluster level in Kubernetes. - -1. From the cluster view, select **Storage > Persistent Volumes**. - -1. Click **Add Volume**. - -1. Enter a **Name** for the persistent volume. - -1. 
Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plugin for cloud storage. For example, if you have an Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. - -1. Enter the **Capacity** of your volume in gigabytes. - -1. Complete the **Plugin Configuration** form. Each plugin requires information specific to the disk type or service vendor. For help with each plugin's form and the information that's required, refer to the plugin vendor's documentation. - -1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This option sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the available plugins. - -1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the available mount options. - -1. Click **Save**. - -**Result:** Your new persistent volume is created. - -### 3. Add a persistent volume claim that refers to the persistent volume - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the project containing a workload that you want to add a persistent volume claim to. - -1. Then click the **Volumes** tab and click **Add Volume**. (In versions before v2.3.0, click **Workloads** on the main navigation bar, then **Volumes.**) - -1. Enter a **Name** for the volume claim. - -1. Select the namespace of the workload that you want to add the persistent storage to. - -1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. - -1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 4. Mount the persistent volume claim as a volume in your workload - -Mount PVCs to stateful workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or after the workload has been created. - -The following steps describe how to assign existing storage to a new stateful set workload: - -1. From the **Project** view, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a name for the workload. -1. Next to the **Workload Type** field, click **More Options.** -1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. -1. Choose the namespace where the workload will be deployed. -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).** -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. 
In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -The following steps describe how to assign persistent storage to an existing workload: - -1. From the **Project** view, go to the **Workloads** tab. -1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/_index.md deleted file mode 100644 index 491d3728cf..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Provisioning Storage Examples -weight: 3053 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/examples/ ---- - -Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. - -For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: - -- [NFS](./nfs) -- [vSphere](./vsphere) -- [EBS](./ebs) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md deleted file mode 100644 index 5bb23411b7..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Creating Persistent Storage in Amazon's EBS -weight: 3053 ---- - -This section describes how to set up Amazon's Elastic Block Store in EC2. - -1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** -1. Click **Create Volume.** -1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. -1. Click **Create Volume.** -1. Click **Close.** - -**Result:** Persistent storage has been created. 
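For reference, the Kubernetes object that eventually represents this volume is a PersistentVolume backed by `awsElasticBlockStore`. The following is only a minimal sketch, assuming the in-tree EBS volume plugin; the name, volume ID, capacity, and filesystem type are placeholders that must match the volume created above:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ebs-pv                        # hypothetical name
spec:
  capacity:
    storage: 10Gi                     # placeholder: match the size of the EBS volume
  accessModes:
    - ReadWriteOnce                   # an EBS volume can only be attached to one node at a time
  persistentVolumeReclaimPolicy: Retain
  awsElasticBlockStore:
    volumeID: vol-0123456789abcdef0   # placeholder: the volume ID shown in the EC2 console
    fsType: ext4
```

In Rancher, an equivalent object is created for you when you add a persistent volume with the `Amazon EBS Disk` volume plugin.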
- -For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/attaching-existing-storage/) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md deleted file mode 100644 index 579fbcd30a..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: NFS Storage -weight: 3054 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ ---- - -Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. - ->**Note:** -> ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/). -> ->- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux). For official instructions on how to create an NFS server using another Linux distro, consult the distro's documentation. - ->**Recommended:** To simplify the process of managing firewall rules, use NFSv4. - -1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. - -1. Enter the following command: - - ``` - sudo apt-get install nfs-kernel-server - ``` - -1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. - - ``` - mkdir -p /nfs && chown nobody:nogroup /nfs - ``` - - The `mkdir -p /nfs` command creates a directory named `nfs` at the root of the file system. - - The `chown nobody:nogroup /nfs` command allows all access to the storage directory. - -1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. - - 1. Open `/etc/exports` using your text editor of choice. - 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster, separating each entry from the next with a single space as a delimiter. - - ``` - /nfs <IP_ADDRESS_1>(rw,sync,no_subtree_check) <IP_ADDRESS_2>(rw,sync,no_subtree_check) <IP_ADDRESS_3>(rw,sync,no_subtree_check) - ``` - - **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` - - 1. Update the NFS table by entering the following command: - - ``` - exportfs -ra - ``` - -1. Open the ports used by NFS. - - 1. To find out what ports NFS is using, enter the following command: - - ``` - rpcinfo -p | grep nfs - ``` - 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: - - ``` - sudo ufw allow 2049 - ``` - -**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. - -## What's Next? - -Within Rancher, add the NFS server as a storage volume and/or storage class. After adding the server, you can use it for storage for your deployments. 
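If you prefer to define the NFS-backed volume declaratively rather than through the Rancher UI, a minimal PersistentVolume sketch might look like the following; the name, server address, export path, and capacity are placeholders to replace with your own values:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv                 # hypothetical name
spec:
  capacity:
    storage: 10Gi              # placeholder capacity
  accessModes:
    - ReadWriteMany            # NFS allows the volume to be mounted by many nodes
  nfs:
    server: 10.212.50.12       # placeholder: the IP address of your NFS server
    path: /nfs                 # the directory exported in the steps above
```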
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md deleted file mode 100644 index c52508aec7..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: vSphere Storage -weight: 3055 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ ---- - -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). - -In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -- [Prerequisites](#prerequisites) -- [Creating a StorageClass](#creating-a-storageclass) -- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) -- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) -- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) - -### Prerequisites - -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/). - -### Creating a StorageClass - -> **Note:** -> -> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. - -1. From the Global view, open the cluster where you want to provide vSphere storage. -2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. -3. Enter a **Name** for the class. -4. Under **Provisioner**, select **VMWare vSphere Volume**. - - {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} - -5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. -5. Click **Save**. - -### Creating a Workload with a vSphere Volume - -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). -2. For **Workload Type**, select **Stateful set of 1 pod**. -3. Expand the **Volumes** section and click **Add Volume**. -4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. -5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. -6. Enter the required **Capacity** for the volume. Then click **Define**. - - {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} - -7. 
Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. -8. Click **Launch** to create the workload. - -### Verifying Persistence of the Volume - -1. From the context menu of the workload you just created, click **Execute Shell**. -2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). -3. Create a file in the volume by executing the command `touch /<volume_mount_point>/data.txt`. -4. **Close** the shell window. -5. Click on the name of the workload to reveal detailed information. -6. Open the context menu next to the Pod in the *Running* state. -7. Delete the Pod by selecting **Delete**. -8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. -9. Once the replacement pod is running, click **Execute Shell**. -10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /<volume_mount_point>`. Note that the file you created earlier is still present. - - ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) - -### Why to Use StatefulSets Instead of Deployments - -You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. - -Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. - -Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. - -### Related Links - -- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md deleted file mode 100644 index dbb81dcbac..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: GlusterFS Volumes -weight: 5000 ---- - -> This section only applies to [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The kubelet log will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. 
There are two requirements before you can change the cluster configuration: - -- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) -- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) - -``` -docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version -``` - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/usr/bin/systemd-run:/usr/bin/systemd-run" -``` - -After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: - -``` -Detected OS with systemd -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md deleted file mode 100644 index 86c858dc41..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: How Persistent Storage Works -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/tasks/workloads/add-persistent-volume-claim ---- - -A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. - -There are two ways to use persistent storage in Kubernetes: - -- Use an existing persistent volume -- Dynamically provision new persistent volumes - -To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. - -For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. - -![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) - -For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) - -This section covers the following topics: - -- [About persistent volume claims](#about-persistent-volume-claims) - - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) -- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) - - [Binding PVs to PVCs](#binding-pvs-to-pvcs) -- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) - -# About Persistent Volume Claims - -Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. 
A PVC is mounted into a workload as a volume so that the workload can claim its specified share of the persistent storage. - -To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. - -Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions before v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future. - -### PVCs are Required for Both New and Existing Persistent Storage - -A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. - -If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. - -If a workload should request new storage, the workload mounts a PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. - -Rancher lets you create as many PVCs within a project as you'd like. - -You can mount PVCs to a deployment as you create it, or later, after the deployment is running. - -# Setting up Existing Storage with a PVC and PV - -Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. - -PVs can represent a physical disk or file system that you host on premises, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. - -### Binding PVs to PVCs - -When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) in the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - -> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. 
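As a hypothetical illustration of that binding rule, the PV and PVC sketched below would bind because the volume's capacity satisfies the claim's request; the names, sizes, and hostPath backing store are placeholders used only for this example:

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-100
spec:
  capacity:
    storage: 100Gi             # a 50Gi PV would leave the claim below unbound
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/pv-100         # placeholder backing store, for illustration only
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-claim
  namespace: default
spec:
  storageClassName: ""         # empty string: bind to an existing PV instead of provisioning one
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi           # must be satisfied by an available PV's capacity
```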
- -In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a PV that has at least the amount of disk space required by the PVC. - -To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. - -# Provisioning New Storage with a PVC and Storage Class - -Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. - -For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. - -The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. - diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md deleted file mode 100644 index 14c5fc50c4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: iSCSI Volumes -weight: 6000 ---- - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. - -Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. - -If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: - -| Platform | Package Name | Install Command | -| ------------- | ----------------------- | -------------------------------------- | -| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | -| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | - - -After installing the initiator tool on your nodes, edit the YAML for your cluster, updating the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. 
- -``` -services: - kubelet: - extra_binds: - - "/etc/iscsi:/etc/iscsi" - - "/sbin/iscsiadm:/sbin/iscsiadm" -``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md deleted file mode 100644 index d819c28d2f..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Dynamically Provisioning New Storage in Rancher -weight: 2 ---- - -This section describes how to provision new persistent storage for workloads in Rancher. - -This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. - -To provision new storage for your workloads, follow these steps: - -1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) -2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) -3. [Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) - -### Prerequisites - -- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- Make sure your storage provisioner is available to be enabled. - -The following storage provisioners are enabled by default: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers/) - -### 1. Add a storage class and configure it to use your storage - -These steps describe how to set up a storage class at the cluster level. - -1. Go to the cluster for which you want to dynamically provision persistent storage volumes. - -1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. - -1. Enter a `Name` for your storage class. - -1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. - -1. 
From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. - -1. Click `Save`. - -**Result:** The storage class is available to be consumed by a PVC. - -For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). - -### 2. Add a persistent volume claim that refers to the storage class - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the project containing a workload that you want to add a PVC to. - -1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. - -1. Enter a **Name** for the volume claim. - -1. Select the namespace of the volume claim. - -1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** - -1. Go to the **Storage Class** drop-down and select the storage class that you created. - -1. Enter a volume **Capacity**. - -1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 3. Mount the persistent volume claim as a volume for your workload - -Mount PVCs to workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -To attach the PVC to a new workload, - -1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). -1. For **Workload Type**, select **Stateful set of 1 pod**. -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -To attach the PVC to an existing workload, - -1. Go to the project that has the workload that will have the PVC attached. -1. Go to the workload that will have persistent storage and click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. 
If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/_index.md deleted file mode 100644 index 7e5a757a27..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Setting up Kubernetes Clusters in Rancher -description: Provisioning Kubernetes Clusters -weight: 7 -aliases: - - /rancher/v2.0-v2.4/en/concepts/clusters/ - - /rancher/v2.0-v2.4/en/concepts/clusters/cluster-providers/ - - /rancher/v2.0-v2.4/en/tasks/clusters/ ---- - -Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. - -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.0-v2.4/en/overview/architecture/) page. - -This section covers the following topics: - - - -- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) -- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) - - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) - - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) -- [Importing Existing Clusters](#importing-existing-clusters) - - - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table" %}} - -# Setting up Clusters in a Hosted Kubernetes Provider - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters) - -# Launching Kubernetes with Rancher - -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. - -In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. - -These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. 
- -If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. - -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) - -### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider - -Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. - -One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. - -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. - -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. - -You can bring any nodes you want to Rancher and use them to create a cluster. - -These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. - -# Importing Existing Clusters - -_Available from Rancher v2.0.x-v2.4.x_ - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. Other Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -In Rancher v2.4, it became possible to import a K3s cluster and upgrade Kubernetes by editing the cluster in the Rancher UI. - -For more information, refer to the section on [importing existing clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) - -### Importing and Editing K3s Clusters - -_Available as of Rancher v2.4.0_ - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. 
- -For more information, refer to the section on [imported K3s clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table/index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table/index.md deleted file mode 100644 index a1c3bb4f8e..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table/index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -headless: true ---- -| Action | [Rancher launched Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) | [Hosted Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) | [Imported Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters) | -| --- | --- | ---| ---| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) | ✓ | ✓ | * | -| [Managing Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/) | ✓ | ✓ | ✓ | -| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) | ✓ | ✓ | ✓ | -| [Cloning Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cloning-clusters/)| ✓ | ✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/) | ✓ | | | -| [Ability to back up your Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-admin/backing-up-etcd/) | ✓ | | | -| [Ability to recover and restore etcd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/restoring-etcd/) | ✓ | | | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy/) | ✓ | | | -| [Running Security Scans]({{}}/rancher/v2.0-v2.4/en/security/security-scan/) | ✓ | | | -| [Authorized Cluster Endpoint]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | - -\* Cluster configuration options can't be edited for imported clusters, except for K3s clusters. diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md deleted file mode 100644 index c4346db11f..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Setting up Clusters from Hosted Kubernetes Providers -weight: 3 ---- - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. 
- -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider. - -Rancher supports the following Kubernetes providers: - -Kubernetes Providers | Available as of | - --- | --- | -[Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) | v2.0.0 | -[Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) | v2.0.0 | -[Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) | v2.0.0 | -[Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) | v2.2.0 | -[Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) | v2.2.0 | -[Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) | v2.2.0 | - -## Hosted Kubernetes Provider Authentication - -When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: - -- [Creating a GKE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md deleted file mode 100644 index c6daf4d0cd..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Creating an Aliyun ACK Cluster -shortTitle: Alibaba Cloud Container Service for Kubernetes -weight: 2120 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. 
- -## Prerequisites - ->**Note** ->Deploying to ACK will incur charges. - -1. In Aliyun, activate the following services in their respective consoles. - - - [Container Service](https://cs.console.aliyun.com) - - [Resource Orchestration Service](https://ros.console.aliyun.com) - - [RAM](https://ram.console.aliyun.com) - -2. Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Referring to the official Alibaba Cloud documentation about [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. - -3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). - -4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. - -## Create an ACK Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Alibaba ACK**. - -1. Enter a **Cluster Name**. - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. - -1. Click **Next: Configure Cluster**, then choose cluster type, the version of Kubernetes and the availability zone. - -1. If you choose **Kubernetes** as the cluster type, Click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. - -1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. - -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md deleted file mode 100644 index ce9f35e236..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Creating a Huawei CCE Cluster -shortTitle: Huawei Cloud Kubernetes Service -weight: 2130 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. 
In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. - -## Prerequisites in Huawei - ->**Note** ->Deploying to CCE will incur charges. - -1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). - -2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). - -## Limitations - -Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. - -## Create the CCE Cluster - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Huawei CCE**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key **Secret Key**. Then Click **Next: Configure cluster**. Fill in the cluster configuration. For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) -1. Fill the following node configuration of the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) -1. Click **Create** to create the CCE cluster. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -# Huawei CCE Configuration - -|Settings|Description| -|---|---| -| Cluster Type | Which type or node you want to include into the cluster, `VirtualMachine` or `BareMetal`. | -| Description | The description of the cluster. | -| Master Version | The Kubernetes version. | -| Management Scale Count | The max node count of the cluster. The options are 50, 200 and 1000. The larger of the scale count, the more the cost. | -| High Availability | Enable master node high availability. The cluster with high availability enabled will have more cost. | -| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` is supported in `VirtualMachine` type and `underlay_ipvlan` is supported in `BareMetal` type | -| Container Network CIDR | Network CIDR for the cluster. | -| VPC Name | The VPC name which the cluster is going to deploy into. Rancher will create one if it is blank. | -| Subnet Name | The Subnet name which the cluster is going to deploy into. Rancher will create one if it is blank. | -| External Server | This option is reserved for the future we can enable CCE cluster public access via API. For now, it is always disabled. | -| Cluster Label | The labels for the cluster. | -| Highway Subnet | This option is only supported in `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. 
| - -**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -# Node Configuration - -|Settings|Description| -|---|---| -| Zone | The available zone at where the node(s) of the cluster is deployed. | -| Billing Mode | The bill mode for the cluster node(s). In `VirtualMachine` type, only `Pay-per-use` is supported. in `BareMetal`, you can choose `Pay-per-use` or `Yearly/Monthly`. | -| Validity Period | This option only shows in `Yearly/Monthly` bill mode. It means how long you want to pay for the cluster node(s). | -| Auto Renew | This option only shows in `Yearly/Monthly` bill mode. It means that the cluster node(s) will renew the `Yearly/Monthly` payment automatically or not. | -| Data Volume Type | Data volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | -| Data Volume Size | Data volume size for the cluster node(s) | -| Root Volume Type | Root volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | -| Root Volume Size | Root volume size for the cluster node(s) | -| Node Flavor | The node flavor of the cluster node(s). The flavor list in Rancher UI is fetched from Huawei Cloud. It includes all the supported node flavors. | -| Node Count | The node count of the cluster | -| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | -| SSH Key Name | The ssh key for the cluster node(s) | -| EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) are not going to bind a public IP. `Create EIP` means that the cluster node(s) will bind one or many newly created Eips after provisioned and more options will be shown in the UI to set the to-create EIP parameters. And `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | -| EIP Count | This option will only be shown when `Create EIP` is selected. It means how many EIPs you want to create for the node(s). | -| EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | -| EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | -| EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | -| EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The BandWidth of the EIPs. | -| Authentication Mode | It means enabling `RBAC` or also enabling `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate which is used for authenticating proxy will be also required. | -| Node Label | The labels for the cluster node(s). 
| \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md deleted file mode 100644 index 4bbe6983d3..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ /dev/null @@ -1,427 +0,0 @@ ---- -title: Creating an EKS Cluster -shortTitle: Amazon EKS -weight: 2110 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ ---- - -Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). - -- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) - - [Amazon VPC](#amazon-vpc) - - [IAM Policies](#iam-policies) -- [Architecture](#architecture) -- [Create the EKS Cluster](#create-the-eks-cluster) -- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) -- [Troubleshooting](#troubleshooting) -- [AWS Service Events](#aws-service-events) -- [Security and Compliance](#security-and-compliance) -- [Tutorial](#tutorial) -- [Minimum EKS Permissions](#minimum-eks-permissions) - - [Service Role Permissions](#service-role-permissions) - - [VPC Permissions](#vpc-permissions) -- [Syncing](#syncing) - -# Prerequisites in Amazon Web Services - ->**Note** ->Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). - -To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). - -### Amazon VPC - -You need to set up an Amazon VPC to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). - -### IAM Policies - -Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. - -1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - -2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. 
The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. - -3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. - -> **Note:** It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. - -For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). - -# Architecture - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -# Create the EKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EKS**. - -1. Enter a **Cluster Name.** - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) - -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -# EKS Cluster Configuration Reference - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Access Key | Enter the access key that you created for your IAM policy. | -| Secret Key | Enter the secret key that you created for your IAM policy. | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Public IP for Worker Nodes - - - -Your selection for this option determines what options are available for **VPC & Subnet**. - -Option | Description --------|------------ -Yes | When your cluster nodes are provisioned, they're assigned a both a private and public IP address. -No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. - -### VPC & Subnet - - - -The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) - -Option | Description - -------|------------ - Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. - Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - - -If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. - -{{% accordion id="yes" label="Click to expand" %}} - -If you're using **Custom: Choose from your existing VPC and Subnets**: - -(If you're using **Standard**, skip to the [instance options.)](#select-instance-options-2-4) - -1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - -1. Click **Next: Select Security Group**. -{{% /accordion %}} - -If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. -{{% accordion id="no" label="Click to expand" %}} -Follow the steps below. - ->**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. 
- -{{% /accordion %}} - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Instance Options - - - -Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. - -Option | Description --------|------------ -Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. -Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. -Desired ASG Size | The number of instances that your cluster will provision. -User Data | Custom commands can to be passed to perform automated configuration tasks **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ - -# Troubleshooting - -If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) - -If an unauthorized error is returned while attempting to modify or import the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) - -For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). - -# AWS Service Events - -To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). - -# Security and Compliance - -By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not be imported in Rancher unless the credentials used to import the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. 
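As an illustration of that mapping, the extra user is added by editing the `aws-auth` ConfigMap. This is a sketch only: the account ID, user name, and group below are placeholders, and the AWS article linked below remains the authoritative procedure.

```bash
# Sketch: authorize an additional IAM user on the EKS cluster by appending a
# mapUsers entry to the aws-auth ConfigMap (all values are placeholders).
kubectl -n kube-system edit configmap aws-auth

# Inside the editor, add an entry under "data" similar to:
#   mapUsers: |
#     - userarn: arn:aws:iam::111122223333:user/another-admin
#       username: another-admin
#       groups:
#         - system:masters
```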
For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). - -For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). - -# Tutorial - -This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. - -# Minimum EKS Permissions - -Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. Additional permissions are required for Rancher to provision the `Service Role` and `VPC` resources. Optionally these resources can be created **before** the cluster creation and will be selectable when defining the cluster configuration. - -Resource | Description ----------|------------ -Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#service-role-permissions). -VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/eks/#vpc-permissions). - - -Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. 
- -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "EC2Permisssions", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances", - "ec2:RevokeSecurityGroupIngress", - "ec2:RevokeSecurityGroupEgress", - "ec2:DescribeVpcs", - "ec2:DescribeTags", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeRouteTables", - "ec2:DescribeLaunchTemplateVersions", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeKeyPairs", - "ec2:DescribeInternetGateways", - "ec2:DescribeImages", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeAccountAttributes", - "ec2:DeleteTags", - "ec2:DeleteSecurityGroup", - "ec2:DeleteKeyPair", - "ec2:CreateTags", - "ec2:CreateSecurityGroup", - "ec2:CreateLaunchTemplateVersion", - "ec2:CreateLaunchTemplate", - "ec2:CreateKeyPair", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:AuthorizeSecurityGroupEgress" - ], - "Resource": "*" - }, - { - "Sid": "CloudFormationPermisssions", - "Effect": "Allow", - "Action": [ - "cloudformation:ListStacks", - "cloudformation:ListStackResources", - "cloudformation:DescribeStacks", - "cloudformation:DescribeStackResources", - "cloudformation:DescribeStackResource", - "cloudformation:DeleteStack", - "cloudformation:CreateStackSet", - "cloudformation:CreateStack" - ], - "Resource": "*" - }, - { - "Sid": "IAMPermissions", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:ListInstanceProfilesForRole", - "iam:ListInstanceProfiles", - "iam:ListAttachedRolePolicies", - "iam:GetRole", - "iam:GetInstanceProfile", - "iam:DetachRolePolicy", - "iam:DeleteRole", - "iam:CreateRole", - "iam:AttachRolePolicy" - ], - "Resource": "*" - }, - { - "Sid": "KMSPermisssions", - "Effect": "Allow", - "Action": "kms:ListKeys", - "Resource": "*" - }, - { - "Sid": "EKSPermisssions", - "Effect": "Allow", - "Action": [ - "eks:UpdateNodegroupVersion", - "eks:UpdateNodegroupConfig", - "eks:UpdateClusterVersion", - "eks:UpdateClusterConfig", - "eks:UntagResource", - "eks:TagResource", - "eks:ListUpdates", - "eks:ListTagsForResource", - "eks:ListNodegroups", - "eks:ListFargateProfiles", - "eks:ListClusters", - "eks:DescribeUpdate", - "eks:DescribeNodegroup", - "eks:DescribeFargateProfile", - "eks:DescribeCluster", - "eks:DeleteNodegroup", - "eks:DeleteFargateProfile", - "eks:DeleteCluster", - "eks:CreateNodegroup", - "eks:CreateFargateProfile", - "eks:CreateCluster" - ], - "Resource": "*" - } - ] -} -``` - -### Service Role Permissions - -Rancher will create a service role with the following trust policy: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": "sts:AssumeRole", - "Principal": { - "Service": "eks.amazonaws.com" - }, - "Effect": "Allow", - "Sid": "" - } - ] -} -``` - -This role will also have two role policy attachments with the following policies ARNs: - -``` -arn:aws:iam::aws:policy/AmazonEKSClusterPolicy -arn:aws:iam::aws:policy/AmazonEKSServicePolicy -``` - -Permissions required for Rancher to create service role on users behalf during the EKS cluster creation process. 
- -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "IAMPermisssions", - "Effect": "Allow", - "Action": [ - "iam:AddRoleToInstanceProfile", - "iam:AttachRolePolicy", - "iam:CreateInstanceProfile", - "iam:CreateRole", - "iam:CreateServiceLinkedRole", - "iam:DeleteInstanceProfile", - "iam:DeleteRole", - "iam:DetachRolePolicy", - "iam:GetInstanceProfile", - "iam:GetRole", - "iam:ListAttachedRolePolicies", - "iam:ListInstanceProfiles", - "iam:ListInstanceProfilesForRole", - "iam:ListRoles", - "iam:ListRoleTags", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile" - ], - "Resource": "*" - } - ] -} -``` - -### VPC Permissions - -Permissions required for Rancher to create VPC and associated resources. - -```json -{ - "Sid": "VPCPermissions", - "Effect": "Allow", - "Action": [ - "ec2:ReplaceRoute", - "ec2:ModifyVpcAttribute", - "ec2:ModifySubnetAttribute", - "ec2:DisassociateRouteTable", - "ec2:DetachInternetGateway", - "ec2:DescribeVpcs", - "ec2:DeleteVpc", - "ec2:DeleteTags", - "ec2:DeleteSubnet", - "ec2:DeleteRouteTable", - "ec2:DeleteRoute", - "ec2:DeleteInternetGateway", - "ec2:CreateVpc", - "ec2:CreateSubnet", - "ec2:CreateSecurityGroup", - "ec2:CreateRouteTable", - "ec2:CreateRoute", - "ec2:CreateInternetGateway", - "ec2:AttachInternetGateway", - "ec2:AssociateRouteTable" - ], - "Resource": "*" -} -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md deleted file mode 100644 index dca60a832a..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Creating a Tencent TKE Cluster -shortTitle: Tencent Kubernetes Engine -weight: 2125 ---- - -_Available as of v2.2.0_ - -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. - -## Prerequisites in Tencent - ->**Note** ->Deploying to TKE will incur charges. - -1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. - -2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). - -3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region that you want to deploy your Kubernetes cluster. - -4. Create a [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. - -## Create a TKE Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Tencent TKE**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. 
Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Region | From the drop-down chooses the geographical region in which to build your cluster. | - | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | - | Secret Key | Enter the Secret key that you obtained from Tencent Cloud Console. | - -6. Click `Next: Configure Cluster` to set your TKE cluster configurations. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Kubernetes Version | The TKE only supports Kubernetes version 1.10.5 now. | - | Node Count | Enter the amount of worker node you want to purchase for your Kubernetes cluster, up to 100. | - | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | - | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster, you may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Default to 172.16.0.0/16. | - - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -7. Click `Next: Select Instance Type` to choose the instance type that will use for your TKE cluster. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Availability Zone | Choose the availability zone of the VPC region. | - | Subnet | Select the Subnet that you have created within the VPC, and add a new one if you don't have it in the chosen availability zone. | - | Instance Type | From the drop-down chooses the VM instance type that you want to use for the TKE cluster, default to S2.MEDIUM4 (CPU 2 Memory 4 GiB). | - -8. Click `Next: Configure Instance` to configure the VM instance that will use for your TKE cluster. - - Option | Description - -------|------------ - Operating System | The name of the operating system, currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 - Security Group | Security group ID, default does not bind any security groups. - Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). - Root Disk Size | System disk size. Linux system adjustment range is 20 - 50G, step size is 1. - Data Disk Type | Data disk type, default value to the SSD cloud drive - Data Disk Size | Data disk size (GB), the step size is 10 - Band Width Type | Type of bandwidth, PayByTraffic or PayByHour - Band Width | Public network bandwidth (Mbps) - Key Pair | Key id, after associating the key can be used to logging to the VM node - -9. Click **Create**. 
- -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/_index.md deleted file mode 100644 index afc04b75e2..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/_index.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: Importing Existing Clusters -description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. Then, you can manage it using Rancher -metaTitle: 'Kubernetes Cluster Management' -metaDescription: 'Learn how you can import an existing Kubernetes cluster and then manage it using Rancher' -weight: 5 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/import-cluster/ ---- - -_Available as of v2.0.x-v2.4.x_ - -When managing an imported cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. Note that Rancher does not automate the provisioning or scaling of imported clusters. - -For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. - -Rancher v2.4 added the capability to import a K3s cluster into Rancher, as well as the ability to upgrade Kubernetes by editing the cluster in the Rancher UI. 
- -- [Features](#features) -- [Prerequisites](#prerequisites) -- [Importing a cluster](#importing-a-cluster) -- [Imported K3s clusters](#imported-k3s-clusters) - - [Additional features for imported K3s clusters](#additional-features-for-imported-k3s-clusters) - - [Configuring a K3s Cluster to Enable Importation to Rancher](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - - [Debug Logging and Troubleshooting for Imported K3s clusters](#debug-logging-and-troubleshooting-for-imported-k3s-clusters) -- [Annotating imported clusters](#annotating-imported-clusters) - -# Features - -After importing a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/) and [logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) -- Enable [Istio]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) -- Use [pipelines]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/) -- Configure [alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) and [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) -- Manage [projects]({{}}/rancher/v2.0-v2.4/en/project-admin/) and [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) - -After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes/) - -# Prerequisites - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. - -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to import the cluster. - -By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -> If you are importing a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) - -# Importing a Cluster - -1. From the **Clusters** page, click **Add Cluster**. -2. Choose **Import**. -3. Enter a **Cluster Name**. -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user.} -5. Click **Create**. -6. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. -7. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. -8. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. 
To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. -9. When you finish running the command(s) on your node, click **Done**. - -**Result:** - -- Your cluster is imported and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. -- You can access your cluster after its state is updated to **Active.** -- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). - -> **Note:** -> You can not re-import a cluster that is currently active in a Rancher setup. - -# Imported K3s Clusters - -You can now import a K3s Kubernetes cluster into Rancher. [K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. - -### Additional Features for Imported K3s Clusters - -_Available as of v2.4.0_ - -When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: - -- The ability to upgrade the K3s version -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. - -### Configuring K3s Cluster Upgrades - -> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. - -The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. - -- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes -- **Worker concurrency:** The maximum number worker nodes to upgrade at the same time; also the maximum unavailable worker nodes - -In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. - -Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. - -### Configuring a K3s Cluster to Enable Importation to Rancher - -The K3s server needs to be configured to allow writing to the kubeconfig file. 
- -This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: - -``` -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` - -The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: - -``` -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -### Debug Logging and Troubleshooting for Imported K3s Clusters - -Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. - -To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. - -Logs created by the `system-upgrade-controller` can be viewed by running this command: - -``` -kubectl logs -n cattle-system system-upgrade-controller -``` - -The current status of the plans can be viewed with this command: - -``` -kubectl get plans -A -o yaml -``` - -If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. - -To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. - -# Annotating Imported Clusters - -For all types of imported Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. - -Therefore, when Rancher imports a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the imported cluster. - -However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. - -By annotating an imported cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. - -This example annotation indicates that a pod security policy is enabled: - -``` -"capabilities.cattle.io/pspEnabled": "true" -``` - -The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. 
- -``` -"capabilities.cattle.io/ingressCapabilities": "[ - { - "customDefaultBackend":true, - "ingressProvider":"asdf" - } -]" -``` - -These capabilities can be annotated for the cluster: - -- `ingressCapabilities` -- `loadBalancerCapabilities` -- `nodePoolScalingSupported` -- `nodePortRange` -- `pspEnabled` -- `taintSupport` - -All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. - -To annotate an imported cluster, - -1. Go to the cluster view in Rancher and select **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. -1. Click **Save.** - -**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/_index.md deleted file mode 100644 index 81b0972581..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Node Requirements for Rancher Managed Clusters -weight: 1 ---- - -This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. - -> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -Make sure the nodes for the Rancher server fulfill the following requirements: - -- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) -- [Networking Requirements](#networking-requirements) -- [Optional: Security Considerations](#optional-security-considerations) - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) The capability to use Windows worker nodes in downstream clusters was added in Rancher v2.3.0. - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. 
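A quick, informal way to confirm that a prospective node matches these expectations is to check its reported architecture and Docker version. This assumes Docker is already installed; it is a convenience check, not a substitute for the support matrix linked above.

```bash
# Convenience check on a prospective node: architecture and Docker version.
uname -m                                        # 64-bit x86 nodes report "x86_64"
docker version --format '{{.Server.Version}}'   # confirms the Docker engine is installed and running
```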
- -If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.0-v2.4/en/installation/options/arm64-platform/) - -For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) - -### Oracle Linux and RHEL Derived Linux Nodes - -Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - -### SUSE Linux Nodes - -SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. - -### Flatcar Container Linux Nodes - -When [Launching Kubernetes with Rancher]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) - -{{% tabs %}} -{{% tab "Canal"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: canal - options: - canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} - -{{% tab "Calico"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: calico - options: - calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} -{{% /tabs %}} - -It is also required to enable the Docker service, you can enable the Docker service using the following command: - -``` -systemctl enable docker.service -``` - -The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/#node-drivers). - -### Windows Nodes - -_Windows worker nodes can be used as of Rancher v2.3.0_ - -Nodes with Windows Server must run Docker Enterprise Edition. - -Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/) - -# Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. 
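As a quick sanity check against the 1 CPU / 1 GB minimum mentioned above (a sketch only; real workloads usually need considerably more than the minimum):

```bash
# Report the CPU core count and total memory available on a prospective node.
nproc
free -h
```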
- -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) - -# Networking Requirements - -For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. - -The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) - -Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). - -# Optional: Security Considerations - -If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. - -For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.0-v2.4/en/security/#rancher-hardening-guide) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/_index.md deleted file mode 100644 index ba2d48fdde..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Checklist for Production-Ready Clusters -weight: 2 ---- - -In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. - -For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) - -This is a shortlist of best practices that we strongly recommend for all production clusters. - -For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.0-v2.4/en/best-practices) - -### Node Requirements - -* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) including the port requirements. - -### Back up etcd - -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure [etcd Recurring Snapshots]({{}}/rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. 
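One lightweight way to follow up on the snapshot recommendation above is to confirm that snapshot files are actually accumulating. The sketch below assumes an RKE-provisioned cluster that writes snapshots to the default local directory; local copies are not a substitute for storing snapshots off the node (for example, in S3).

```bash
# On an etcd node, list the snapshots written by the recurring snapshot job.
# /opt/rke/etcd-snapshots is the default location for RKE-provisioned clusters;
# adjust the path if your cluster is configured differently.
ls -lh /opt/rke/etcd-snapshots/
```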
- -### Cluster Architecture - -* Nodes should have one of the following role configurations: - * `etcd` - * `controlplane` - * `etcd` and `controlplane` - * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -* Assign two or more nodes the `controlplane` role for master component high availability. -* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles) - -For more information about the -number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/) - -### Logging and Monitoring - -* Configure alerts/notifiers for Kubernetes components (System Service). -* Configure logging for cluster analysis and post-mortems. - -### Reliability - -* Perform load tests on your cluster to verify that its hardware can support your workloads. - -### Networking - -* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). -* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles/_index.md deleted file mode 100644 index 4013efb7e0..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Roles for Nodes in Kubernetes -weight: 1 ---- - -This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. - -This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -# etcd - -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. - ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -# controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. - ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -# worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -# References - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/recommended-architecture/_index.md deleted file mode 100644 index 7e5fc92f89..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/production/recommended-architecture/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Recommended Cluster Architecture -weight: 1 ---- - -There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. - -# Separating Worker Nodes from Nodes with Other Roles - -When designing your cluster(s), you have two options: - -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements). -* Assign the `etcd` and `controlplane` roles to the same nodes. 
These nodes must meet the hardware requirements for both roles. - -In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. - -Therefore, each node should have one of the following role configurations: - - * `etcd` - * `controlplane` - * Both `etcd` and `controlplane` - * `worker` - -# Recommended Number of Nodes with Each Role - -The cluster should have: - -- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -- At least two nodes with the role `controlplane` for master component high availability. -- At least two nodes with the role `worker` for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production/nodes-and-roles) - - -### Number of Controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. - -### Number of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. - -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) -* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Number of Worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications - -You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. 
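Referring back to the etcd failure-tolerance table above, the quorum and failure-tolerance columns follow directly from the (n/2)+1 rule. The loop below is only an illustration of that arithmetic, not part of any Rancher tooling:

```bash
# For an etcd cluster of n members, quorum is floor(n/2)+1 and the cluster
# tolerates n - quorum member failures while remaining available.
for n in 1 2 3 4 5 6 7; do
  quorum=$(( n / 2 + 1 ))
  tolerance=$(( n - quorum ))
  echo "members=$n quorum=$quorum failure_tolerance=$tolerance"
done
```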
- -# References - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/_index.md deleted file mode 100644 index 507a4524b7..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Launching Kubernetes with Rancher -weight: 4 ---- - -You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - -- Bare-metal servers -- On-premise virtual machines -- Virtual machines hosted by an infrastructure provider - -Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. - -RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. - -### Requirements - -If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. - -### Launching Kubernetes on New Nodes in an Infrastructure Provider - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.0-v2.4/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. 
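The exact `docker run` command for the Rancher agent, including the cluster-specific token and CA checksum, is generated in the Rancher UI when you create the custom cluster. The sketch below only illustrates its general shape, using placeholder values and the role flags described in the Rancher agent options reference later in this document; the image tag shown is an example:

```bash
# Illustrative only -- copy the real command from the Rancher UI.
# <RANCHER_URL>, <TOKEN>, <CA_CHECKSUM>, and the image tag are placeholders.
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:v2.4.x \
  --server https://<RANCHER_URL> \
  --token <TOKEN> \
  --ca-checksum <CA_CHECKSUM> \
  --etcd --controlplane --worker
```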
- -For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md deleted file mode 100644 index 586c827112..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Setting up Cloud Providers -weight: 2300 -aliases: - - /rancher/v2.0-v2.4/en/concepts/clusters/cloud-providers/ - - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers ---- -A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. - -Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. - -By default, the **Cloud Provider** option is set to `None`. - -The following cloud providers can be enabled: - -* Amazon -* Azure -* GCE (Google Compute Engine) -* vSphere - -### Setting up the Amazon Cloud Provider - -For details on enabling the Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon) - -### Setting up the Azure Cloud Provider - -For details on enabling the Azure cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure) - -### Setting up the GCE Cloud Provider - -For details on enabling the Google Compute Engine cloud provider, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce) - -### Setting up the vSphere Cloud Provider - -For details on enabling the vSphere cloud provider, refer to [this page.](./vsphere) - -### Setting up a Custom Cloud Provider - -The `Custom` cloud provider is available if you want to configure any [Kubernetes cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). - -For the custom cloud provider option, you can refer to the [RKE docs]({{}}/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. 
There are specific cloud providers that have more detailed configuration: - -* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/) -* [OpenStack]({{}}/rke/latest/en/config-options/cloud-providers/openstack/) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md deleted file mode 100644 index bd449cadc5..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Setting up the Amazon Cloud Provider -weight: 1 ---- - -When using the `Amazon` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. -- **Persistent Volumes**: Allows you to use AWS Elastic Block Store (EBS) for persistent volumes. - -See [cloud-provider-aws README](https://github.com/kubernetes/cloud-provider-aws/blob/master/README.md) for all information regarding the Amazon cloud provider. - -To set up the Amazon cloud provider, - -1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) -2. [Configure the ClusterID](#2-configure-the-clusterid) - -### 1. Create an IAM Role and attach to the instances - -All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) for instructions on how to create an IAM role. There are two example policies: - -* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example; remove any permissions that your use case does not need. -* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. - -While creating an [Amazon EC2 cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. - -While creating a [Custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes), you must manually attach the IAM role to the instance(s).
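As a rough sketch of that manual path with the AWS CLI, the role, profile, and policy names below are placeholders, the instance ID is an example, and the policy document file is assumed to contain one of the example policies shown next, saved locally:

```bash
# Sketch: create a role with the control plane policy below, wrap it in an
# instance profile, and attach it to an existing EC2 instance.
aws iam create-role --role-name rancher-controlplane-role \
  --assume-role-policy-document file://ec2-trust-policy.json
aws iam put-role-policy --role-name rancher-controlplane-role \
  --policy-name rancher-controlplane-policy \
  --policy-document file://controlplane-policy.json
aws iam create-instance-profile --instance-profile-name rancher-controlplane-profile
aws iam add-role-to-instance-profile \
  --instance-profile-name rancher-controlplane-profile \
  --role-name rancher-controlplane-role
aws ec2 associate-iam-instance-profile --instance-id i-0123456789abcdef0 \
  --iam-instance-profile Name=rancher-controlplane-profile
```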
- -IAM Policy for nodes with the `controlplane` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] -} -``` - -IAM policy for nodes with the `etcd` or `worker` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } -] -} -``` - -### 2. Configure the ClusterID - -The following resources need to tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster. -- **Security Group**: The security group used for your cluster. - ->**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). - -When you create an [Amazon EC2 Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. 
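Using the tag format described next, manually tagging the remaining resources could look like the following AWS CLI sketch; the cluster ID, subnet ID, and security group ID are placeholders:

```bash
# Sketch: tag the subnet and security group with the same ClusterID used on the nodes.
CLUSTERID=my-rancher-cluster
aws ec2 create-tags \
  --resources subnet-0123456789abcdef0 sg-0123456789abcdef0 \
  --tags Key=kubernetes.io/cluster/${CLUSTERID},Value=owned
```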
- -Use the following tag: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` - -`CLUSTERID` can be any string you like, as long as it is equal across all tags set. - -Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. - -### Using Amazon Elastic Container Registry (ECR) - -The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md deleted file mode 100644 index 2ecc8a4e6a..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Setting up the vSphere Cloud Provider -weight: 4 ---- - -In this section, you'll learn how to set up the vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. - -Follow these steps while creating the vSphere cluster in Rancher: - -1. Set **Cloud Provider** option to `Custom`. - - {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} - -1. Click on **Edit as YAML** -1. Insert the following structure to the pre-populated cluster YAML. As of Rancher v2.3+, this structure must be placed under `rancher_kubernetes_engine_config`. In versions before v2.3, it has to be defined as a top-level field. Note that the `name` *must* be set to `vsphere`. - - ```yaml - rancher_kubernetes_engine_config: # Required as of Rancher v2.3+ - cloud_provider: - name: vsphere - vsphereCloudProvider: - [Insert provider configuration] - ``` - -Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. 
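Once the cluster is up with the cloud provider enabled, one way to sanity-check that the vSphere cloud provider registered the nodes is to inspect the `providerID` field it sets on each Kubernetes node (a sketch, assuming kubectl access to the cluster):

```bash
# Nodes registered by the vSphere cloud provider should carry a providerID of
# the form vsphere://<uuid>; an empty value suggests the node was not picked up.
kubectl get nodes -o custom-columns=NAME:.metadata.name,PROVIDER_ID:.spec.providerID
```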
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md deleted file mode 100644 index 65e38b6431..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Launching Kubernetes on Existing Custom Nodes -description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements -metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" -weight: 2225 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ - - /rancher/v2.0-v2.4/en/cluster-provisioning/custom-clusters/ ---- - -When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster on on-prem bare-metal servers, on-prem virtual machines, or any nodes hosted by an infrastructure provider. - -To use this option, you'll need access to the servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements), which include hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. - -This section describes how to set up a custom cluster. - -# Creating a Cluster with Custom Nodes - ->**Want to use Windows hosts as Kubernetes workers?** -> ->See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. - - - -- [1. Provision a Linux Host](#1-provision-a-linux-host) -- [2. Create the Custom Cluster](#2-create-the-custom-cluster) -- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) - - - -### 1. Provision a Linux Host - -Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-hosted virtual machine (VM) -- An on-prem VM -- A bare-metal server - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.0-v2.4/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -Provision the host according to the [installation requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production) - -### 2. Create the Custom Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Custom**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation.
To see more cluster options, click on **Show advanced options.** - - >**Using Windows nodes as Kubernetes workers?** - > - >- See [Enable the Windows Support Option]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). - >- The only Network Provider available for clusters with Windows support is Flannel. -6. Click **Next**. - -7. From **Node Role**, choose the roles that you want filled by a cluster node. - - >**Notes:** - > - >- Using Windows nodes as Kubernetes workers? See [this section]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). - >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). - -8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.0-v2.4/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - - >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. - -11. When you finish running the command(s) on your Linux host(s), click **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -### 3. Amazon Only: Tag Resources - -If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. - -[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) - ->**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - - -The following resources need to tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster -- **Security Group**: The security group used for your cluster. - - >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating Elastic Load Balancer. - -The tag that should be used is: - -``` -Key=kubernetes.io/cluster/, Value=owned -``` - -`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster. 
- -If you share resources between clusters, you can change the tag to: - -``` -Key=kubernetes.io/cluster/CLUSTERID, Value=shared -``` - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md deleted file mode 100644 index 716fdbaaaf..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Rancher Agent Options -weight: 2500 -aliases: - - /rancher/v2.0-v2.4/en/admin-settings/agent-options/ - - /rancher/v2.0-v2.4/en/cluster-provisioning/custom-clusters/agent-options ---- - -Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. - -For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#3-node-agents) - -## General options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | -| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | -| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | -| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | -| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | -| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. 
(`--taints key=value:effect`) | - -## Role options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | -| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | -| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | -| `--worker` | `WORKER=true` | Apply the role `worker` to the node | - -## IP address options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | -| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | - -### Dynamic IP address options - -For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. - -| Value | Example | Description | -| ---------- | -------------------- | ----------- | -| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | -| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | -| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | -| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | -| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | -| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | -| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | -| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | -| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | -| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | -| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | -| `packetpublic` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git 
a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/_index.md deleted file mode 100644 index 89778a7738..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Launching Kubernetes on New Nodes in an Infrastructure Provider -weight: 2205 -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/node-templates/ ---- - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). - -This section covers the following topics: - -- [Node templates](#node-templates) - - [Node labels](#node-labels) - - [Node taints](#node-taints) - - [Administrator control of node templates](#administrator-control-of-node-templates) -- [Node pools](#node-pools) - - [Node pool taints](#node-pool-taints) - - [About node auto-replace](#about-node-auto-replace) - - [Enabling node auto-replace](#enabling-node-auto-replace) - - [Disabling node auto-replace](#disabling-node-auto-replace) -- [Cloud credentials](#cloud-credentials) -- [Node drivers](#node-drivers) - -# Node Templates - -A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. - -After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. - -### Node Labels - -You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. - -### Node Taints - -_Available as of Rancher v2.3.0_ - -You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. - -Since taints can be added at a node template and node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### Administrator Control of Node Templates - -_Available as of v2.3.3_ - -Administrators can control all node templates. 
Admins can now maintain all the node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained. - -To access all node templates, an administrator will need to do the following: - -1. In the Rancher UI, click the user profile icon in the upper right corner. -1. Click **Node Templates.** - -**Result:** All node templates are listed and grouped by owner. The templates can be edited or cloned by clicking the **⋮.** - -# Node Pools - -Using Rancher, you can create pools of nodes based on a [node template](#node-templates). - -A node template defines the configuration of a node, such as which operating system to use, the number of CPUs, and the amount of memory. - -The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. - -Each node pool must have one or more node roles assigned. - -Each node role (i.e. etcd, control plane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters. - -The recommended setup is to have: - -- a node pool with the etcd node role and a count of three -- a node pool with the control plane node role and a count of at least two -- a node pool with the worker node role and a count of at least two - -### Node Pool Taints - -_Available as of Rancher v2.3.0_ - -If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than at the node template level is that you can swap out node templates without having to track which taints are set on each template. - -Each taint will automatically be added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to existing nodes in the node pool, but any new node added into the node pool will get the taint. - -When there are taints on the node pool and node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### About Node Auto-replace - -_Available as of Rancher v2.3.0_ - -If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. - -> **Important:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated as ephemeral. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -Node auto-replace works on top of the Kubernetes node controller.
The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. - -### Enabling Node Auto-replace - -When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. - -1. In the form for creating a cluster, go to the **Node Pools** section. -1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Fill out the rest of the form for creating a cluster. - -**Result:** Node auto-replace is enabled for the node pool. - -You can also enable node auto-replace after the cluster is created with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Click **Save.** - -**Result:** Node auto-replace is enabled for the node pool. - -### Disabling Node Auto-replace - -You can disable node auto-replace from the Rancher UI with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter 0. -1. Click **Save.** - -**Result:** Node auto-replace is disabled for the node pool. - -# Cloud Credentials - -_Available as of v2.2.0_ - -Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: - -- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. - -- After the cloud credential is created, it can be re-used to create additional node templates. - -- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. - -> **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. 
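Because cloud credentials are stored as Kubernetes secrets on the Rancher management (local) cluster, an administrator with access to that cluster can confirm they exist with kubectl. A minimal sketch, assuming the secrets live in the `cattle-global-data` namespace and that a kubeconfig for the local cluster is available at the example path shown (both are assumptions that may vary by Rancher version):

```bash
# List cloud-credential secrets on the Rancher local cluster (assumed namespace).
kubectl --kubeconfig ~/.kube/rancher-local.yaml \
  get secrets -n cattle-global-data
```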
- -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/). - -# Node Drivers - -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md deleted file mode 100644 index 263db5c0d3..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Creating an Azure Cluster -shortTitle: Azure -weight: 2220 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ ---- - -In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. - -First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. - -Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - ->**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: - -> - Terminate the SSL/TLS on the internal load balancer -> - Use the L7 load balancer - -> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). - -For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) - -For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) - -- [Preparation in Azure](#preparation-in-azure) -- [Creating an Azure Cluster](#creating-an-azure-cluster) - -# Preparation in Azure - -Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. - -To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. 
- -The following is a template `az cli` script that you run to create a service principal; fill in your SP name, role, and scope: - -``` -az ad sp create-for-rbac \ - --name="" \ - --role="Contributor" \ - --scopes="/subscriptions/" -``` - -Creating this service principal returns three pieces of identification information: the *application ID* (also called the client ID), the *client secret*, and the *tenant ID*. This information will be used when you create a node template for Azure. - -# Creating an Azure Cluster - -{{%tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Azure**. -1. Enter your Azure credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in Azure. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Azure**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster.
- -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -Use Rancher to create a Kubernetes cluster in Azure. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Azure**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Azure Options** form. For help filling out the form, refer to the [Azure node template configuration reference.](./azure-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
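For example, after downloading the kubeconfig file for the cluster from the Rancher UI, access through the authentication proxy looks like ordinary kubectl usage; the file name below is only an example:

```bash
# Point kubectl at the kubeconfig downloaded from the Rancher UI.
export KUBECONFIG=$HOME/Downloads/my-azure-cluster.yaml
kubectl get nodes -o wide
kubectl get pods --all-namespaces
```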
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md deleted file mode 100644 index 1c2db8c79c..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Azure Node Template Configuration -weight: 1 ---- - -For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. - -- **Placement** sets the geographical region where your cluster is hosted and other location metadata. -- **Network** configures the networking used in your cluster. -- **Instance** customizes your VM configuration. - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -- **Account Access** stores your account information for authenticating with Azure. -- **Placement** sets the geographical region where your cluster is hosted and other location metadata. -- **Network** configures the networking used in your cluster. -- **Instance** customizes your VM configuration. - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
-- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md deleted file mode 100644 index c22ef45317..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Creating a DigitalOcean Cluster -shortTitle: DigitalOcean -weight: 2215 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. - -First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. - -Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **DigitalOcean**. -1. Enter your Digital Ocean credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) - -### 3. Create a cluster with node pools using the node template - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. 
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Digital Ocean Options** form. For help filling out the form, refer to the [Digital Ocean node template configuration reference.](./do-node-template-config) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -{{% /tab %}} -{{% /tabs %}} - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md deleted file mode 100644 index 4d9a0066f4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: DigitalOcean Node Template Configuration -weight: 1 ----- - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. - -### Droplet Options - -The **Droplet Options** provision your cluster's geographical region and specifications. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. -- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -### Access Token - -The **Access Token** stores your DigitalOcean Personal Access Token. Refer to [DigitalOcean Instructions: How To Generate a Personal Access Token](https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2#how-to-generate-a-personal-access-token). - -### Droplet Options - -The **Droplet Options** provision your cluster's geographical region and specifications. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
-- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md deleted file mode 100644 index b320cfc9f4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Creating an Amazon EC2 Cluster -shortTitle: Amazon EC2 -description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher -weight: 2210 ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. - -First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. - -Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -### Prerequisites - -- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) -- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. - -> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. - -# Creating an EC2 Cluster - -The steps to create a cluster differ based on your Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. 
[Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Amazon.** -1. In the **Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key.** -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials and information from EC2 - -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Amazon EC2**. -1. Enter a **Cluster Name**. -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) -1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Amazon EC2**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. 
Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** Refer to [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) To create a node template, click **Add Node Template**. For help filling out the node template, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) -1. Click **Create**. -1. **Optional:** Add additional node pools. -1. Review your cluster settings to confirm they are correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# IAM Policies - -> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. 
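If you manage IAM from the command line instead of the console, the example policies below can also be created and attached with the AWS CLI. The following is only a sketch: the policy name, the IAM user name, and the file name are placeholders, and the policy document must have `REGION` and `AWS_ACCOUNT_ID` filled in first.

```bash
# Placeholders: RancherNodeDriver, rancher-node-user, and rancher-ec2-policy.json
# are example names, not values required by Rancher.
# 1. Save one of the example policies below as rancher-ec2-policy.json and create it.
aws iam create-policy \
  --policy-name RancherNodeDriver \
  --policy-document file://rancher-ec2-policy.json

# 2. Attach the policy to the IAM user whose Access Key and Secret Key you will
#    enter in Rancher.
aws iam attach-user-policy \
  --user-name rancher-node-user \
  --policy-arn arn:aws:iam::AWS_ACCOUNT_ID:policy/RancherNodeDriver
```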
- -### Example IAM Policy - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` - -### Example IAM Policy with PassRole - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", - "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` -### Example IAM Policy to allow encrypted EBS volumes -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "kms:Decrypt", - "kms:GenerateDataKeyWithoutPlaintext", - "kms:Encrypt", - "kms:DescribeKey", - "kms:CreateGrant", - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:DeleteSnapshot", - "ec2:DeleteTags", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:CreateSnapshot" - ], - "Resource": [ - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", - "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" - ] - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md deleted file mode 100644 index 4b7110fe78..0000000000 --- 
a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: EC2 Node Template Configuration -weight: 1 ---- - -For more details about EC2, nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} - -### Region - -In the **Region** field, select the same region that you used when creating your cloud credentials. - -### Cloud Credentials - -Your AWS account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) - -See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. - -See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. - -See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM - -See our three example JSON policies: - -- [Example IAM Policy]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. - -### Authenticate & Configure Nodes - -Choose an availability zone and network settings for your cluster. - -### Security Group - -Choose the default security group or configure a security group. - -Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. - -### Instance Options - -Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. - -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -### Engine Options - -In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. - -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -### Account Access - -**Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. 
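Before pasting an Access Key and Secret Key into Rancher, it can be worth confirming that the key pair works and belongs to the account you expect. A minimal sketch with the AWS CLI; the key values are placeholders:

```bash
# Use the key pair you plan to give Rancher (placeholders shown here).
export AWS_ACCESS_KEY_ID=<access key>
export AWS_SECRET_ACCESS_KEY=<secret key>

# Prints the account ID and user ARN the keys resolve to.
aws sts get-caller-identity

# Optionally confirm the keys can talk to EC2 in the region you plan to use.
aws ec2 describe-regions --region us-east-1 --query 'Regions[].RegionName' --output text
```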
- -See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. - -See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. - -See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM - -See our three example JSON policies: - -- [Example IAM Policy]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. - -### Zone and Network - -**Zone and Network** configures the availability zone and network settings for your cluster. - -### Security Groups - -**Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. - -### Instance - -**Instance** configures the instances that will be created. - -### SSH User - -Make sure you configure the correct **SSH User** for the configured AMI. - -### IAM Instance Profile Name - -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -### Docker Daemon - -The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: - -- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) -- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
-- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon -- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md deleted file mode 100644 index 963a18ac45..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Creating a vSphere Cluster -shortTitle: vSphere -description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -weight: 2225 -aliases: - - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ ---- - -By using Rancher with vSphere, you can bring cloud operations on-premises. - -Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. - -A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. - -- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) -- [Provisioning Storage](#provisioning-storage) -- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider) - -# vSphere Enhancements in Rancher v2.3 - -The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: - -### Self-healing Node Pools - -_Available as of v2.3.0_ - -One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. - -> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -### Dynamically Populated Options for Instances and Scheduling - -_Available as of v2.3.3_ - -Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. 
- -For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) - -### More Supported Operating Systems - -In Rancher v2.3.3+, you can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) - -In Rancher before v2.3.3, the vSphere node driver included in Rancher only supported the provisioning of VMs with [RancherOS]({{}}/os/v1.x/en/) as the guest operating system. - -### Video Walkthrough of v2.3.3 Node Template Features - -In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. - -{{< youtube id="dPIwg6x1AlU">}} - -# Creating a vSphere Cluster - -In [this section,](./provisioning-vsphere-clusters) you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. - -# Provisioning Storage - -For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -# Enabling the vSphere Cloud Provider - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. - -For details, refer to the section on [enabling the vSphere cloud provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md deleted file mode 100644 index ce7a1fe5d3..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Creating Credentials in the vSphere Console -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials ---- - -This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. - -The following table lists the permissions required for the vSphere user account: - -| Privilege Group | Operations | -|:----------------------|:-----------------------------------------------------------------------| -| Datastore | AllocateSpace
<br/>Browse<br/>FileManagement (Low level file operations)<br/>UpdateVirtualMachineFiles<br/>UpdateVirtualMachineMetadata | -| Network | Assign | -| Resource | AssignVMToPool | -| Virtual Machine | Config (All)<br/>GuestOperations (All)<br/>Interact (All)<br/>Inventory (All)<br/>
Provisioning (All) | - -The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: - -1. From the **vSphere** console, go to the **Administration** page. - -2. Go to the **Roles** tab. - -3. Create a new role. Give it a name and select the privileges listed in the permissions table above. - - {{< img "/img/rancher/rancherroles1.png" "image" >}} - -4. Go to the **Users and Groups** tab. - -5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. - - {{< img "/img/rancher/rancheruser.png" "image" >}} - -6. Go to the **Global Permissions** tab. - -7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - - {{< img "/img/rancher/globalpermissionuser.png" "image" >}} - - {{< img "/img/rancher/globalpermissionrole.png" "image" >}} - -**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md deleted file mode 100644 index d299c958c4..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Provisioning Kubernetes Clusters in vSphere -weight: 1 ---- - -In this section, you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. - -First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. - -Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) - -For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) - -- [Preparation in vSphere](#preparation-in-vsphere) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) - -# Preparation in vSphere - -This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. - -The node templates are documented and tested with the vSphere Web Services API version 6.5. - -### Create Credentials in vSphere - -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. 
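Optionally, before entering these credentials in Rancher, you can confirm that they can reach the vCenter API at all. The sketch below uses the community `govc` CLI, which is not part of Rancher; the vCenter hostname and the account name are placeholders:

```bash
# Placeholders: vcenter.example.com and rancher@vsphere.local.
export GOVC_URL='https://vcenter.example.com'
export GOVC_USERNAME='rancher@vsphere.local'
export GOVC_PASSWORD='<password>'
export GOVC_INSECURE=true   # only if vCenter uses a self-signed certificate

# Prints basic vCenter version information if the credentials and network path work.
govc about
```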
- -Refer to this [how-to guide]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. - -### Network Permissions - -It must be ensured that the hosts running the Rancher server are able to establish the following network connections: - -- To the vSphere API on the vCenter server (usually port 443/TCP). -- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher before v2.3.3 or when using the ISO creation method in later versions*). -- To port 22/TCP and 2376/TCP on the created VMs - -See [Node Networking Requirements]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. - -### Valid ESXi License for vSphere API Access - -The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. - -### VM-VM Affinity Rules for Clusters with DRS - -If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. - -# Creating a vSphere Cluster - -The way a vSphere cluster is created in Rancher depends on the Rancher version. - -{{% tabs %}} -{{% tab "Rancher v2.2.0+" %}} -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **vSphere**. -1. Enter your vSphere credentials. For help, refer to **Account Access** in the [configuration reference for your Rancher version.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/) -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for vSphere.
For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: - - [v2.3.3]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3) - - [v2.3.0]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0) - - [v2.2.0]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in vSphere. - -1. Navigate to **Clusters** in the **Global** view. -1. Click **Add Cluster** and select the **vSphere** infrastructure provider. -1. Enter a **Cluster Name.** -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -{{% /tab %}} -{{% tab "Rancher before v2.2.0" %}} - -Use Rancher to create a Kubernetes cluster in vSphere. - -For Rancher versions before v2.0.4, when you create the cluster, you will also need to follow the steps in [this section](http://localhost:9001/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vpshere-node-template-config/prior-to-2.0.4/#disk-uuids) to enable disk UUIDs. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **vSphere**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more [node pools]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **vSphere Options** form. For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: - - [v2.0.4]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4) - - [before v2.0.4]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4) -1. Review your options to confirm they're correct. Then click **Create** to start provisioning the VMs and Kubernetes services. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} - - - - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
-- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md deleted file mode 100644 index 665733f833..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: VSphere Node Template Configuration -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference - - /rancher/v2.0-v2.4/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids ---- - -The vSphere node templates in Rancher were updated in the following Rancher versions. Refer to the newest configuration reference that is less than or equal to your Rancher version: - -- [v2.3.3](./v2.3.3) -- [v2.3.0](./v2.3.0) -- [v2.2.0](./v2.2.0) -- [v2.0.4](./v2.0.4) - -For Rancher versions before v2.0.4, refer to [this version.](./prior-to-2.0.4) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md deleted file mode 100644 index f66a4fa794..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher before v2.0.4 -shortTitle: Before v2.0.4 -weight: 5 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/ ---- - -- [Account access](#account-access) -- [Scheduling](#scheduling) -- [Instance options](#instance-options) -- [Disk UUIDs](#disk-uuids) -- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access -In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. 
- -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Disk UUIDs - -In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. Follow these instructions to enable UUIDs for the nodes in your vSphere cluster. - -To enable disk UUIDs for all VMs created for a cluster, - -1. Navigate to the **Node Templates** in the Rancher UI while logged in as an administrator. -2. Add or edit an existing vSphere node template. -3. Under **Instance Options** click on **Add Parameter**. -4. Enter `disk.enableUUID` as key with a value of **TRUE**. - - {{< img "/img/rke/vsphere-nodedriver-enable-uuid.png" "vsphere-nodedriver-enable-uuid" >}} - -5. Click **Create** or **Save**. - -**Result:** The disk UUID is enabled in the vSphere node template. - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. 
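The **Disk UUIDs** steps above enable `disk.enableUUID` for VMs that Rancher provisions from the node template. If you need the same setting on a VM that already exists outside of Rancher (for example, a base VM you are preparing by hand), it can be applied as an advanced configuration parameter with the community `govc` CLI. This is only a sketch; the inventory path is a placeholder and `govc` must be configured with your vCenter URL and credentials as shown earlier:

```bash
# Placeholder inventory path; adjust the datacenter, folder, and VM name.
govc vm.change -vm '/Datacenter/vm/rancher-base-vm' -e disk.enableUUID=TRUE

# Verify that the extra configuration value was applied.
govc vm.info -e '/Datacenter/vm/rancher-base-vm' | grep -i disk.enableUUID
```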
- -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md deleted file mode 100644 index 658c575eaa..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.0.4 -shortTitle: v2.0.4 -weight: 4 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/ ---- -- [Account access](#account-access) -- [Scheduling](#scheduling) -- [Instance options](#instance-options) -- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access -In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | * | Port to use when connecting to the server. Defaults to `443`. | -| Username | * | vCenter/ESXi user to authenticate with the server. | -| Password | * | User's password. | - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. 
| - -# Instance Options -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. 
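For reference, a minimal RancherOS cloud-config of the kind the **Cloud Init** field can point to might look like the sketch below. The hostname, SSH key, and hosting location are placeholders; the only requirement from the table above is that the file is a valid cloud-config that the new VMs can reach over the network:

```bash
# Write a minimal RancherOS cloud-config; the hostname and SSH key are placeholders.
cat > cloud-config.yaml <<'EOF'
#cloud-config
hostname: rancher-worker-01
ssh_authorized_keys:
  - ssh-rsa AAAAB3Nza... user@example.com
EOF

# Host cloud-config.yaml on a web server the VMs can reach, for example
# http://config.example.internal/cloud-config.yaml, and use that URL in the
# Cloud Init field of the node template.
```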
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md deleted file mode 100644 index feab925f58..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.2.0 -shortTitle: v2.2.0 -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/ ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------|----------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. | - -# Scheduling -Choose what hypervisor the virtual machine will be scheduled to. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# Cloud Init -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md deleted file mode 100644 index 829e7edea5..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/_index.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.3.0 -shortTitle: v2.3.0 -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/ ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [Cloud Init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------------|-----------------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. 
vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. | - -# Scheduling -Choose what hypervisor the virtual machine will be scheduled to. - -In the **Scheduling** section, enter: - -- The name/path of the **Data Center** to create the VMs in -- The name of the **VM Network** to attach to -- The name/path of the **Datastore** to store the disks in - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| Data Center | * | Name/path of the datacenter to create VMs in. | -| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | -| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | -| Network | * | Name of the VM network to attach VMs to. | -| Data Store | * | Datastore to store the VM disks. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -Only VMs booting from RancherOS ISO are supported. - -Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. - -| Parameter | Required | Description | -|:------------------------|:--------:|:------------------------------------------------------------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Cloud Init | | URL of a [RancherOS cloud-config]({{< baseurl >}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| -| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | -| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - - -# Node Tags and Custom Attributes - -These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -Optionally, you can: - -- Provide a set of configuration parameters (instance-options) for the VMs. -- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. -- Customize the configuration of the Docker daemon on the VMs that will be created. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. 
- -# Cloud Init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md deleted file mode 100644 index 9b4c539037..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: vSphere Node Template Configuration in Rancher v2.3.3 -shortTitle: v2.3.3 -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/ ---- -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Networks](#networks) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [cloud-init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------------|--------------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. | - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. - -| Field | Required | Explanation | -|---------|---------------|-----------| -| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. | -| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | -| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. 
The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. | -| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -| Parameter | Required | Description | -|:----------------|:--------:|:-----------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | -| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | -| Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - - -### About VM Creation Methods - -In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). - -The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). - -Choose the way that the VM will be created: - -- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. -- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates.** -- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. 
-- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). Note that this URL must be accessible from the nodes running your Rancher server installation. - -# Networks - -The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. - -# Node Tags and Custom Attributes - -Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -For tags, all your vSphere tags will show up as options to select from in your node template. - -In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# cloud-init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. - -Note that cloud-init is not supported when using the ISO creation method. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/_index.md deleted file mode 100644 index a02c277da8..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/_index.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: RKE Cluster Configuration Reference -weight: 2250 ---- - -When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) as the Kubernetes distribution. - -This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. - -You can configure the Kubernetes options one of two ways: - -- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -In Rancher v2.0.0-v2.2.x, the RKE cluster config file in Rancher is identical to the [cluster config file for the Rancher Kubernetes Engine]({{}}/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the section about the [cluster config file.](#cluster-config-file) - -This section is a cluster configuration reference, covering the following topics: - -- [Rancher UI Options](#rancher-ui-options) - - [Kubernetes version](#kubernetes-version) - - [Network provider](#network-provider) - - [Kubernetes cloud providers](#kubernetes-cloud-providers) - - [Private registries](#private-registries) - - [Authorized cluster endpoint](#authorized-cluster-endpoint) - - [Node pools](#node-pools) -- [Advanced Options](#advanced-options) - - [NGINX Ingress](#nginx-ingress) - - [Node port range](#node-port-range) - - [Metrics server monitoring](#metrics-server-monitoring) - - [Pod security policy support](#pod-security-policy-support) - - [Docker version on nodes](#docker-version-on-nodes) - - [Docker root directory](#docker-root-directory) - - [Recurring etcd snapshots](#recurring-etcd-snapshots) -- [Cluster config file](#cluster-config-file) - - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0) - - [Config file structure in Rancher v2.0.0-v2.2.x](#config-file-structure-in-rancher-v2-0-0-v2-2-x) - - [Default DNS provider](#default-dns-provider) -- [Rancher specific parameters](#rancher-specific-parameters) - -# Rancher UI Options - -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. - -### Kubernetes Version - -The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). - -### Network Provider - -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/). - ->**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. - -Out of the box, Rancher is compatible with the following network providers: - -- [Canal](https://github.com/projectcalico/canal) -- [Flannel](https://github.com/coreos/flannel#flannel) -- [Calico](https://docs.projectcalico.org/v3.11/introduction/) -- [Weave](https://github.com/weaveworks/weave) (Available as of v2.2.0) - -**Notes on Canal:** - -In v2.0.0 - v2.0.4 and v2.0.6, this was the default option for these clusters was Canal with network isolation. With the network isolation automatically enabled, it prevented any pod communication between [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). - -As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). 
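In the cluster config file described later in this reference, Project Network Isolation corresponds to the top-level `enable_network_policy` field (see [Rancher specific parameters](#rancher-specific-parameters)). A minimal sketch:

```yaml
# Rancher-specific setting; it sits at the top level of the cluster config,
# outside the rancher_kubernetes_engine_config section
enable_network_policy: true
```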
- ->**Attention Rancher v2.0.0 - v2.0.6 Users** -> ->- In previous Rancher releases, Canal isolates project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects. ->- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option. - -**Notes on Flannel:** - -In v2.0.5, this was the default option, which did not prevent any network isolation between projects. - -**Notes on Weave:** - -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). - -### Kubernetes Cloud Providers - -You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. - ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. - -If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: - -### Private registries - -_Available as of v2.2.0_ - -The cluster-level private registry configuration is only used for provisioning clusters. - -There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.0-v2.4/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. - -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - -- **System images** are components needed to maintain the Kubernetes cluster. -- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. 
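As a sketch only (the registry address and credentials below are placeholders), a cluster-level private registry entered through the cluster config file in Rancher v2.3.0+ is nested under the RKE section, along these lines:

```yaml
rancher_kubernetes_engine_config:
  private_registries:
    - url: registry.example.com    # placeholder registry address
      user: registry-user          # placeholder credentials
      password: registry-password
      is_default: true             # pull system and add-on images from this registry
```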
- -See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. - -### Authorized Cluster Endpoint - -_Available as of v2.2.0_ - -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. - -> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are imported into Rancher; it is available only on Rancher-launched Kubernetes clusters. - -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. - -For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -### Node Pools - -For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools) - -# Advanced Options - -The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** - -### NGINX Ingress - -Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -### Node Port Range - -Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. - -### Metrics Server Monitoring - -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). - -### Pod Security Policy Support - -Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. - -### Docker Version on Nodes - -Option to require [a supported Docker version]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. - -### Docker Root Directory - -If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. - -### Recurring etcd Snapshots - -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). - -# Cluster Config File - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. 
Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - ->**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from a file**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. - -### Config File Structure in Rancher v2.3.0+ - -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. - -{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.3.0+" %}} - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` -{{% /accordion %}} - -### Config File Structure in Rancher v2.0.0-v2.2.x - -An example cluster config file is included below. - -{{% accordion id="before-v2.3.0-cluster-config-file" label="Example Cluster Config File for Rancher v2.0.0-v2.2.x" %}} -```yaml -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.15.3-rancher3-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 -ssh_agent_auth: false -``` -{{% /accordion %}} - -### Default DNS provider - -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. 
- -| Rancher version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v2.2.5 and higher | v1.13.x and lower | kube-dns | -| v2.2.4 and lower | any | kube-dns | - -# Rancher specific parameters - -_Available as of v2.2.0_ - -Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): - -### docker_root_dir - -See [Docker Root Directory](#docker-root-directory). - -### enable_cluster_monitoring - -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/). - -### enable_network_policy - -Option to enable or disable Project Network Isolation. - -### local_cluster_auth_endpoint - -See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). - -Example: - -```yaml -local_cluster_auth_endpoint: - enabled: true - fqdn: "FQDN" - ca_certs: "BASE64_CACERT" -``` - -### Custom Network Plug-in - -_Available as of v2.2.4_ - -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. - -There are two ways that you can specify an add-on: - -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) - -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md deleted file mode 100644 index aeb89d2378..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Assigning Pod Security Policies -weight: 2260 ---- - -_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). - -## Adding a Default Pod Security Policy - -When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. - ->**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). ->**Note:** ->For security purposes, we recommend assigning a PSP as you create your clusters. - -To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. - -When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. 
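For reference, the same settings can be expressed in the cluster config file. The sketch below assumes a PSP template named `restricted` already exists in Rancher; the `default_pod_security_policy_template_id` field name is an assumption based on the Rancher cluster API, so verify it against your Rancher version before relying on it.

```yaml
# Cluster config file excerpt (sketch, not a complete config)
default_pod_security_policy_template_id: restricted   # assumed field name; the PSP template must already exist
rancher_kubernetes_engine_config:
  services:
    kube_api:
      pod_security_policy: true   # enables PSP admission in the RKE-managed kube-apiserver
```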
diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md deleted file mode 100644 index 5af2f49ba9..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Rancher Agents -weight: 2400 ---- - -There are two different agent resources deployed on Rancher managed clusters: - -- [cattle-cluster-agent](#cattle-cluster-agent) -- [cattle-node-agent](#cattle-node-agent) - -For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.0-v2.4/en/overview/architecture/) - -### cattle-cluster-agent - -The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. - -### cattle-node-agent - -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. - -> **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. - -### Scheduling rules - -_Applies to v2.3.0 up to v2.5.3_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. 
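Expressed as a standard Kubernetes `nodeAffinity` stanza, that preference looks roughly like the following sketch. It is an illustration assembled from the weights and expressions in the table that follows, not a verbatim copy of the manifest Rancher deploys.

```yaml
affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      # Strongly prefer controlplane nodes
      - weight: 100
        preference:
          matchExpressions:
            - key: node-role.kubernetes.io/controlplane
              operator: In
              values: ["true"]
      # Fall back to etcd nodes
      - weight: 1
        preference:
          matchExpressions:
            - key: node-role.kubernetes.io/etcd
              operator: In
              values: ["true"]
```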
- -The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md deleted file mode 100644 index db83f053eb..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -title: Launching Kubernetes on Windows Clusters -weight: 2240 ---- - -_Available as of v2.3.0_ - -When provisioning a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. - -In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. - -Some other requirements for Windows clusters include: - -- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. -- Kubernetes 1.15+ is required. -- The Flannel network provider must be used. -- Windows nodes must have 50 GB of disk space. - -For the full list of requirements, see [this section.](#requirements-for-windows-clusters) - -For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). - -This guide covers the following topics: - - - -- [Requirements](#requirements-for-windows-clusters) -- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) -- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) - - -# Requirements for Windows Clusters - -The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). - -### OS and Docker Requirements - -In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): - -- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. -- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. - -> **Notes:** -> -> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). -> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. 
- -### Kubernetes Version - -Kubernetes v1.15+ is required. - -### Node Requirements - -The hosts in the cluster need to have at least: - -- 2 core CPUs -- 5 GB memory -- 50 GB disk space - -Rancher will not provision the node if the node does not meet these requirements. - -### Networking Requirements - -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.0-v2.4/en/installation/) before proceeding with this guide. - -Rancher only supports Windows using Flannel as the network provider. - -There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. - -For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. - -If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) - -> Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name. - -### Architecture Requirements - -The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. - -The `worker` nodes, which is where your workloads will be deployed on, will typically be Windows nodes, but there must be at least one `worker` node that is run on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers. 
- -We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: - - - -| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | -| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | Control plane, etcd, worker | Manage the Kubernetes cluster | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | Worker | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | -| Node 3 | Windows (Windows Server core version 1809 or above) | Worker | Run your Windows containers | - -### Container Requirements - -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. - -### Cloud Provider Specific Requirements - -If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. - -If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: - -- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce) -- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. - -# Tutorial: How to Create a Cluster with Windows Support - -This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) - -When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. - -To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below. - - - -1. [Provision Hosts](#1-provision-hosts) -1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes) -1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) -1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files) - - -# 1. Provision Hosts - -To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts. 
- -Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -You will provision three nodes: - -- One Linux node, which manages the Kubernetes control plane and stores your `etcd` -- A second Linux node, which will be another worker node -- The Windows node, which will run your Windows containers as a worker node - -| Node | Operating System | -| ------ | ------------------------------------------------------------ | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | -| Node 3 | Windows (Windows Server core version 1809 or above required) | - -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) - -# 2. Create the Cluster on Existing Nodes - -The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) with some Windows-specific requirements. - -1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. -1. Click **From existing nodes (Custom)**. -1. Enter a name for your cluster in the **Cluster Name** text box. -1. In the **Kubernetes Version** dropdown menu, select v1.15 or above. -1. In the **Network Provider** field, select **Flannel.** -1. In the **Windows Support** section, click **Enable.** -1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. -1. Click **Next**. - -> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -# 3. Add Nodes to the Cluster - -This section describes how to register your Linux and Worker nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node. - -### Add Linux Master Node - -In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. - -The first node in your cluster should be a Linux host has both the **Control Plane** and **etcd** roles. 
At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. - -1. In the **Node Operating System** section, click **Linux**. -1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.0-v2.4/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -1. Copy the command displayed on the screen to your clipboard. -1. SSH into your Linux host and run the command that you copied to your clipboard. -1. When you are finished provisioning your Linux node(s), select **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -It may take a few minutes for the node to be registered in your cluster. - -### Add Linux Worker Node - -In this section, we run a command to register the Linux worker node to the cluster. - -After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Linux**. -1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. -1. Copy the command displayed on screen to your clipboard. -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. -1. From **Rancher**, click **Save**. - -**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. - -> **Note:** Taints on Linux Worker Nodes -> -> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the Windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. - -> | Taint Key | Taint Value | Taint Effect | -> | -------------- | ----------- | ------------ | -> | `cattle.io/os` | `linux` | `NoSchedule` | - -### Add a Windows Worker Node - -In this section, we run a command to register the Windows worker node to the cluster. - -You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. -1. Copy the command displayed on screen to your clipboard. -1. 
Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. -1. From Rancher, click **Save**. -1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. - -**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# Configuration for Storage Classes in Azure - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. For details, refer to [this section.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md deleted file mode 100644 index 0677c360b5..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Configuration for Storage Classes in Azure -weight: 3 ---- - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. - -In order to have the Azure platform create the required storage resources, follow these steps: - -1. [Configure the Azure cloud provider.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure) -1. Configure `kubectl` to connect to your cluster. -1. 
Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: - - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: system:azure-cloud-provider - rules: - - apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: system:azure-cloud-provider - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider - subjects: - - kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system - -1. Create these in your cluster using one of the follow command. - - ``` - # kubectl create -f - ``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md b/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md deleted file mode 100644 index c1f5ce3bae..0000000000 --- a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: v2.1.x and v2.2.x Windows Documentation (Experimental) -weight: 9100 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/ ---- - -_Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ - -This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/). - -When you create a [custom cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. - -You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. - ->**Important:** In versions of Rancher before v2.3, support for Windows nodes is experimental. Therefore, it is not recommended to use Windows nodes for production environments if you are using Rancher before v2.3. - -This guide walks you through create of a custom cluster that includes three nodes: - -- A Linux node, which serves as a Kubernetes control plane node -- Another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster -- A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers - -For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). - -## OS and Container Requirements - -- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. -- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. - -## Objectives for Creating Cluster with Windows Support - -When setting up a custom cluster with support for Windows nodes and containers, complete the series of tasks below. - - - -- [1. Provision Hosts](#1-provision-hosts) -- [2. Cloud-host VM Networking Configuration](#2-cloud-hosted-vm-networking-configuration) -- [3. Create the Custom Cluster](#3-create-the-custom-cluster) -- [4. Add Linux Host for Ingress Support](#4-add-linux-host-for-ingress-support) -- [5. 
Adding Windows Workers](#5-adding-windows-workers) -- [6. Cloud-host VM Routes Configuration](#6-cloud-hosted-vm-routes-configuration) - - - -## 1. Provision Hosts - -To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/)—two Linux, one Windows. Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -The table below lists the Kubernetes node roles you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. - -Node | Operating System | Future Cluster Role(s) ---------|------------------|------ -Node 1 | Linux (Ubuntu Server 16.04 recommended) | Control plane, etcd, worker -Node 2 | Linux (Ubuntu Server 16.04 recommended) | Worker (This node is used for Ingress support) -Node 3 | Windows (Windows Server core version 1809 or above) | Worker - -### Requirements - -- You can view node requirements for Linux and Windows nodes in the [installation section]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). -- All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. -- To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. -- Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. - - -## 2. Cloud-hosted VM Networking Configuration - ->**Note:** This step only applies to nodes hosted on cloud-hosted virtual machines. If you're using virtualization clusters or bare-metal servers, skip ahead to [Create the Custom Cluster](#3-create-the-custom-cluster). - -If you're hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. - -Service | Directions to disable private IP address checks ---------|------------------------------------------------ -Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) -Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) -Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) - -## 3. Create the Custom Cluster - -To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/), starting from 2. Create the Custom Cluster. 
While completing the linked instructions, look for steps that require special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. - - -### Enable the Windows Support Option - -While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. - -After you select this option, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 6]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/#step-6). - -### Networking Option - -When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. - -If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. - -### Node Configuration - -The first node in your cluster should be a Linux host that fills the Control Plane role. This role must be fulfilled before you can add Windows hosts to your cluster. At a minimum, the node must have this role enabled, but we recommend enabling all three. The following table lists our recommended settings (we'll provide the recommended settings for nodes 2 and 3 later). - -Option | Setting --------|-------- -Node Operating System | Linux -Node Roles | etcd
Control Plane
Worker - -When you're done with these configurations, resume [Creating a Cluster with Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/) from [step 8]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/#step-8). - - - -## 4. Add Linux Host for Ingress Support - -After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Add another Linux host, which will be used to support Ingress for your cluster. - -1. Using the content menu, open the custom cluster your created in [2. Create the Custom Cluster](#3-create-the-custom-cluster). - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Linux**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. - -1. From **Rancher**, click **Save**. - -**Result:** The worker role is installed on your Linux host, and the node registers with Rancher. - -## 5. Adding Windows Workers - -You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. - -1. From the main menu, select **Nodes**. - -1. Click **Edit Cluster**. - -1. Scroll down to **Node Operating System**. Choose **Windows**. - -1. Select the **Worker** role. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. - -1. From Rancher, click **Save**. - -1. **Optional:** Repeat these instruction if you want to add more Windows nodes to your cluster. - -**Result:** The worker role is installed on your Windows host, and the node registers with Rancher. - -## 6. Cloud-hosted VM Routes Configuration - -In Windows clusters, containers communicate with each other using the `host-gw` mode of Flannel. In `host-gw` mode, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. - -- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. - -- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. - -To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: - -```bash -kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR -``` - -Then follow the instructions for each cloud provider to configure routing rules for each node: - -Service | Instructions ---------|------------- -Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). 
-Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). - - -` ` diff --git a/content/rancher/v2.0-v2.4/en/contributing/_index.md b/content/rancher/v2.0-v2.4/en/contributing/_index.md deleted file mode 100644 index bc01f70c5e..0000000000 --- a/content/rancher/v2.0-v2.4/en/contributing/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Contributing to Rancher -weight: 27 -aliases: - - /rancher/v2.0-v2.4/en/faq/contributing/ ---- - -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. - -For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: - -- How to set up the Rancher development environment and run tests -- The typical flow of an issue through the development lifecycle -- Coding guidelines and development best practices -- Debugging and troubleshooting -- Developing the Rancher API - -On the Rancher Users Slack, the channel for developers is **#developer**. - -# Repositories - -All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. - -Repository | URL | Description ------------|-----|------------- -Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. -Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. -API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. -(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. -machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. -kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. -RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. -CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. -(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. 
-Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. -loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. - -To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. - -![Rancher diagram]({{}}/img/rancher/ranchercomponentsdiagram.svg)
-Rancher components used for provisioning/managing Kubernetes clusters. - -# Building - -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling. - -The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. - -# Bugs, Issues or Questions - -If you find any bugs or are having any trouble, please search the [reported issues](https://github.com/rancher/rancher/issues), as someone may have experienced the same issue or we may already be working on a solution. - -If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about a use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). - -### Checklist for Filing Issues - -Please follow this checklist when filing an issue, which will help us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to it. - ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. - -- **Resources:** Provide as much detail as possible on the resources used. As the source of the issue can be many things, including as much detail as possible helps to determine the root cause. See some examples below: - - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce - - **Operating System:** What operating system are you using? Providing specifics helps here, like the output of `cat /etc/os-release` for the exact OS release and `uname -r` for the exact kernel used - - **Docker:** What Docker version are you using, and how did you install it? Most of the details of Docker can be found by supplying the output of `docker version` and `docker info` - - **Environment:** Are you in a proxy environment, are you using recognized CA/self-signed certificates, are you using an external load balancer - - **Rancher:** What version of Rancher are you using? This can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host - - **Clusters:** What kind of cluster did you create, how did you create it, and what did you specify when you were creating it -- **Steps to reproduce the issue:** Provide as much detail as possible on how you got into the reported situation. This helps the person to reproduce the situation you are in. 
- - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. -- **Logs:** Provide data/logs from the used resources. - - Rancher - - Docker install - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') - ``` - - Kubernetes install using `kubectl` - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - -l app=rancher \ - --timestamps=true - ``` - - Docker install using `docker` on each of the nodes in the RKE cluster - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') - ``` - - Kubernetes Install with RKE Add-On - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - --timestamps=true \ - -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') - ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` -- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -# Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. diff --git a/content/rancher/v2.0-v2.4/en/deploy-across-clusters/_index.md b/content/rancher/v2.0-v2.4/en/deploy-across-clusters/_index.md deleted file mode 100644 index f02706abbd..0000000000 --- a/content/rancher/v2.0-v2.4/en/deploy-across-clusters/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Deploying Applications across Clusters -weight: 13 -aliases: - - /rancher/v2.0-v2.4/en/deploy-across-clusters/multi-cluster-apps ---- - -_Available as of v2.2.0_ - -Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. 
With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. - -Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. - -After creating a multi-cluster application, you can program a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/) to make it easier to access the application. - -- [Prerequisites](#prerequisites) -- [Launching a multi-cluster app](#launching-a-multi-cluster-app) -- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) - - [Targets](#targets) - - [Upgrades](#upgrades) - - [Roles](#roles) -- [Application configuration options](#application-configuration-options) - - [Using a questions.yml file](#using-a-questions-yml-file) - - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) - - [Members](#members) - - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) -- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) -- [Multi-cluster application management](#multi-cluster-application-management) -- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) - -# Prerequisites - -To create a multi-cluster app in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the clusters(s) that include the target project(s) - -# Launching a Multi-Cluster App - -1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. - -2. Find the application that you want to launch, and then click **View Details**. - -3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. - -4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. - -5. Select a **Template Version**. - -6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). - -7. Select the **Members** who can [interact with the multi-cluster application](#members). - -8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: - -# Multi-cluster App Configuration Options - -Rancher has divided the configuration option for the multi-cluster application into several sections. 
- -### Targets - -In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. - -### Upgrades - -In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. - -* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. - -* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. - -### Roles - -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. - -For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. - -Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. - -- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. - -- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. - -When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. - -> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. 
These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. - -# Application Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.0-v2.4/en/catalog/custom/), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -### Members - -By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. - -1. Find the user that you want to add by typing in the member's name in the **Member** search box. - -2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. - - - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. 
- - > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. - -### Overriding Application Configuration Options for Specific Projects - -The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. - -1. In the **Answer Overrides** section, click **Add Override**. - -2. For each override, you can select the following: - - - **Scope**: Select which target projects you want to override the answer in the configuration option. - - - **Question**: Select which question you want to override. - - - **Answer**: Enter the answer that you want to be used instead. - -# Upgrading Multi-Cluster App Roles and Projects - -- **Changing Roles on an existing Multi-Cluster app** -The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. - -- **Adding/Removing target projects** -1. The creator and any users added with access-type "owner" to a multi-cluster app, can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on multi-cluster app, in the new projects they want to add. The roles checks are again relaxed for global admins, cluster-owners and project-owners. -2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from targets list. - - -# Multi-Cluster Application Management - -One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options: - - * **Clone**: Creates another multi-cluster application with the same configuration. By using this option, you can easily duplicate a multi-cluster application. - * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members). - * **Rollback**: Rollback your application to a specific version. If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. 
Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the target(s) affected by the upgrade issue. - -# Deleting a Multi-Cluster Application - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to delete and click **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. - - > **Note:** The applications that are created in the target projects for a multi-cluster application cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/content/rancher/v2.0-v2.4/en/faq/_index.md b/content/rancher/v2.0-v2.4/en/faq/_index.md deleted file mode 100644 index d0eb15d8b2..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: FAQ -weight: 25 -aliases: - - /rancher/v2.0-v2.4/en/about/ ---- - -This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. - -See the [Technical FAQ]({{}}/rancher/v2.0-v2.4/en/faq/technical/) for frequently asked technical questions. - -
- -**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** - -When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. - -
- -**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** - -Yes. - -
- -**Does Rancher support Windows?** - -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/) - -
- -**Does Rancher support Istio?** - -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) - -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF compliant Kubernetes cluster. You can read more about it [here](https://rio.io/) - -
- -**Will Rancher v2.x support HashiCorp's Vault for storing secrets?** - -Secrets management is on our roadmap, but we haven't assigned it to a specific release yet. - -
- -**Does Rancher v2.x support RKT containers as well?** - -At this time, we only support Docker. - -
- -**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. - -
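For reference, the CNI selection made in the Rancher UI maps to the `network` section of the underlying RKE-style cluster configuration. The snippet below is only an illustrative sketch (the file name is an assumption, not something this FAQ prescribes):

```bash
# Illustrative only: write out a cluster configuration excerpt that selects a
# CNI provider. For RKE-provisioned clusters, valid plugin values include
# "canal" (the default), "flannel", "calico", and "weave".
cat <<'EOF' > cluster-network-excerpt.yml
network:
  plugin: canal
EOF
```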
- -**Are you planning on supporting Traefik for existing setups?** - -We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. - -
- -**Can I import OpenShift Kubernetes clusters into v2.x?** - -Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -
- -**Are you going to integrate Longhorn?** - -Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/faq/networking/_index.md b/content/rancher/v2.0-v2.4/en/faq/networking/_index.md deleted file mode 100644 index 9551f35a3a..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/networking/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Networking -weight: 8005 ---- - -Networking FAQ's - -- [CNI Providers]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/) - diff --git a/content/rancher/v2.0-v2.4/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.0-v2.4/en/faq/networking/cni-providers/_index.md deleted file mode 100644 index a1fbf15e9f..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/networking/cni-providers/_index.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Container Network Interface (CNI) Providers -description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you -weight: 2300 ---- - -## What is CNI? - -CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. - -Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. - -![CNI Logo]({{}}/img/rancher/cni-logo.png) - -For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). - -### What Network Models are Used in CNI? - -CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). - -#### What is an Encapsulated Network? - -This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. - -In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. - -This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. 
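As a rough illustration of the encapsulation described above, assuming a cluster that uses Flannel or Canal with the default VXLAN backend (interface names and ports may differ in other setups), you can inspect the overlay from a Kubernetes worker:

```bash
# Show VXLAN details for the overlay interface Flannel/Canal typically creates
# on each worker (commonly named flannel.1); the detailed output includes the
# VXLAN ID and the UDP destination port (8472 by default).
ip -d link show flannel.1

# List the pod subnets of other nodes that are reached through the overlay device.
ip route show dev flannel.1
```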
- -CNI network providers using this network model include Flannel, Canal, and Weave. - -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) - -#### What is an Unencapsulated Network? - -This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). - -In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. - -This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. - -CNI network providers using this network model include Calico and Romana. - -![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) - -### What CNI Providers are Provided by Rancher? - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Canal - -![Canal Logo]({{}}/img/rancher/canal-logo.png) - -Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. - -In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) - -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} - -For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) - -#### Flannel - -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) - -Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). - -Encapsulated traffic is unencrypted by default. 
Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) - -For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). - -#### Calico - -![Calico Logo]({{}}/img/rancher/calico-logo.png) - -Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. - -Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. - -Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) - -For more information, see the following pages: - -- [Project Calico Official Site](https://www.projectcalico.org/) -- [Project Calico GitHub Page](https://github.com/projectcalico/calico) - - -#### Weave - -![Weave Logo]({{}}/img/rancher/weave-logo.png) - -_Available as of v2.2.0_ - -Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. - -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -For more information, see the following pages: - -- [Weave Net Official Site](https://www.weave.works/) - -### CNI Features by Provider - -The following table summarizes the different features available for each CNI network provider provided by Rancher. - -| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | -| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | -| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | -| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | -| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | - -- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) - -- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. 
BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. - -- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. - -- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. - -- External Datastore: CNI network providers with this feature need an external datastore for its data. - -- Encryption: This feature allows cyphered and secure network control and data planes. - -- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. - -#### CNI Community Popularity - -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. - -| Provider | Project | Stars | Forks | Contributors | -| ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | -| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | -| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | -| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | - -
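To make the network policy support compared in the feature table above more concrete, here is a minimal sketch of a Kubernetes NetworkPolicy that a policy-capable provider (such as Canal, Calico, or Weave) can enforce. The namespace and labels are illustrative assumptions:

```bash
# Illustrative only: allow ingress to pods labeled app=api in the "demo"
# namespace exclusively from pods labeled app=frontend; all other ingress to
# those pods is denied. Requires a CNI provider that enforces NetworkPolicy.
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-api
  namespace: demo
spec:
  podSelector:
    matchLabels:
      app: api
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend
EOF
```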
-### Which CNI Provider Should I Use? - -It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. - -As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. - -### How can I configure a CNI network provider? - -Please see [Cluster Options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.0-v2.4/en/faq/removing-rancher/_index.md b/content/rancher/v2.0-v2.4/en/faq/removing-rancher/_index.md deleted file mode 100644 index f3b448c249..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/removing-rancher/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Rancher is No Longer Needed -weight: 8010 -aliases: - - /rancher/v2.0-v2.4/en/installation/removing-rancher/cleaning-cluster-nodes/ - - /rancher/v2.0-v2.4/en/installation/removing-rancher/ - - /rancher/v2.0-v2.4/en/admin-settings/removing-rancher/ - - /rancher/v2.0-v2.4/en/admin-settings/removing-rancher/rancher-cluster-nodes/ ---- - -This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. - -- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) -- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) -- [What if I don't want my imported cluster managed by Rancher?](#what-if-i-don-t-want-my-imported-cluster-managed-by-rancher) -- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) - -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? - -If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. - -### If the Rancher server is deleted, how do I access my downstream clusters? - -The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: - -- **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. -- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. 
-- **RKE clusters:** To access an [RKE cluster,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. - -### What if I don't want Rancher anymore? - -If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) remove Rancher by using the [System Tools]({{}}/rancher/v2.0-v2.4/en/system-tools/) with the `remove` subcommand. - -If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. - -Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) - -### What if I don't want my imported cluster managed by Rancher? - -If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. - -To detach the cluster, - -1. From the **Global** view in Rancher, go to the **Clusters** tab. -2. Go to the imported cluster that should be detached from Rancher and click **⋮ > Delete.** -3. Click **Delete.** - -**Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. - -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? - -At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. - -The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) - -For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/content/rancher/v2.0-v2.4/en/faq/security/_index.md b/content/rancher/v2.0-v2.4/en/faq/security/_index.md deleted file mode 100644 index 048455e8f3..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/security/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Security -weight: 8007 - ---- - -**Is there a Hardening Guide?** - -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.0-v2.4/en/security/) section. - -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** - -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.0-v2.4/en/security/) section. diff --git a/content/rancher/v2.0-v2.4/en/faq/technical/_index.md b/content/rancher/v2.0-v2.4/en/faq/technical/_index.md deleted file mode 100644 index 61d8de2142..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/technical/_index.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: Technical -weight: 8006 ---- - -### How can I reset the administrator password? - -Docker Install: -``` -$ docker exec -ti reset-password -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password -New password for default administrator (user-xxxxx): - -``` - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password -New password for default administrator (user-xxxxx): - -``` - -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: -``` -$ docker exec -ti ensure-default-admin -New default administrator (user-xxxxx) -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin -New password for default administrator (user-xxxxx): - -``` - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Kubernetes install (RKE add-on): -``` -$ KUBECONFIG=./kube_config_rancher-cluster.yml -$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin -New password for default admin user (user-xxxxx): - -``` - -### How can I enable debug logging? - -See [Troubleshooting: Logging]({{}}/rancher/v2.0-v2.4/en/troubleshooting/logging/) - -### My ClusterIP does not respond to ping - -ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. - -### Where can I manage Node Templates? 
- -Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. - -### Why is my Layer-4 Load Balancer in `Pending` state? - -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - -### Where is the state of Rancher stored? - -- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. -- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. - -### How are the supported Docker versions determined? - -We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. - -### How can I access nodes created by Rancher? - -SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. - -![Download Keys]({{}}/img/rancher/downloadsshkeys.png) - -Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) - -``` -$ ssh -i id_rsa user@ip_of_node -``` - -### How can I automate task X in Rancher? - -The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: - -* Visit `https://your_rancher_ip/v3` and browse the API options. -* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) - -### The IP address of a node changed, how can I recover? - -A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. - -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.0-v2.4/en/faq/cleaning-cluster-nodes/) to clean the node. - -When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. - -### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? 
- -You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). - -### How do I check if my certificate chain is valid? - -Use the `openssl verify` command to validate your certificate chain: - ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. If you already have this certificate, you can use it in the verification of the certificate like shown below: - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). - -``` ------BEGIN CERTIFICATE----- -%YOUR_CERTIFICATE% ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -%YOUR_INTERMEDIATE_CERTIFICATE% ------END CERTIFICATE----- -``` - -If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: - -``` -openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem -subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com -issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA -``` - -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? - -Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. - -Check `Common Name`: - -``` -openssl x509 -noout -subject -in cert.pem -subject= /CN=rancher.my.org -``` - -Check `Subject Alternative Names`: - -``` -openssl x509 -noout -in cert.pem -text | grep DNS - DNS:rancher.my.org -``` - -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? 
- -This is due to a combination of the following default Kubernetes settings: - -* kubelet - * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) -* kube-controller-manager - * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) - * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) - * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) - -See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. - -In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. - -* kube-apiserver (Kubernetes v1.13 and up) - * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. - * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. - -### Can I use keyboard shortcuts in the UI? - -Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md b/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md deleted file mode 100644 index db5cc1e85b..0000000000 --- a/content/rancher/v2.0-v2.4/en/faq/upgrades-to-2x/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Questions about Upgrading to Rancher v2.x -weight: 1 -aliases: - - /rancher/v2.x/en/faq/upgrades-to-2x/ ---- - -This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x. - -# Kubernetes - -**What does it mean when you say Rancher v2.x is built on Kubernetes?** - -Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. - -
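As a quick way to see this CRD-based design from the command line, the sketch below lists the custom resources Rancher registers in the cluster it runs on. It assumes `kubectl` access to that cluster; the exact resource and group names (for example `management.cattle.io`) are illustrative and can vary between Rancher versions.

```
# List the CustomResourceDefinitions registered by Rancher's controllers
kubectl get crd | grep cattle.io

# Inspect one of them, e.g. the resource that backs Rancher projects
# (fully qualified name shown; adjust to what the first command returns)
kubectl get projects.management.cattle.io --all-namespaces
```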
- -**Do you plan to implement upstream Kubernetes, or continue to work on your own fork?** - -We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. - -
- -**Does this release mean that we need to re-train our support staff in Kubernetes?** - -Yes. Rancher will offer the native Kubernetes functionality via `kubectl`, but it will also offer our own UI dashboard that lets you deploy Kubernetes workloads without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend learning it. We plan to improve our UX in subsequent releases to make Kubernetes easier to use. - -
- -**Is a Rancher Compose file going to create a Kubernetes pod? Do we have to learn both now? We usually work with the files on the filesystem, not the UI.** - -No. Unfortunately, the differences are significant enough that we cannot support Rancher Compose in v2.x. We will be providing both a tool and guides to help with this migration. - -
- -**If we use native Kubernetes YAML files for creating resources, should we expect that to work, or do we need to use Rancher/Docker Compose files to deploy infrastructure?** - -Absolutely. Native Kubernetes YAML files will work as expected; there is no need for Compose files. - -# Cattle - -**How does Rancher v2.x affect Cattle?** - -Cattle will not be supported in v2.x, as Rancher has been re-architected to be based on Kubernetes. You can, however, expect that the majority of the Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. - -
- -**Can I migrate existing Cattle workloads into Kubernetes?** - -Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. - -# Feature Changes - -**Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x?** - -Yes. You can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. - -
- -**Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC?** - -The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. - -
- -**Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x?** - -Yes. You can do so by leveraging Kubernetes' network policies. - -
- -**What about the CLI? Will that work the same way with the same features?** - -Yes. Definitely. - -# Environments & Clusters - -**Can I still create templates for environments and clusters?** - -Starting with v2.0, the concept of an environment has been replaced by the Kubernetes cluster, since going forward only the Kubernetes orchestration engine is supported. - -Kubernetes RKE Templates are on our roadmap for v2.x. Please refer to our Release Notes and documentation for all the features that we currently support. - -
- -**Can you still add an existing host to an environment? (i.e. one not provisioned directly from Rancher)** - -Yes. We still provide the same way of running our Rancher agents directly on hosts. - -# Upgrading/Migrating - -**How would the migration from v1.x to v2.x work?** - -Due to the technical difficulty of transforming a Docker container into a pod running on Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. - -
- -**Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters?** - -At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. - -# Support - -**Are you planning some long-term support releases for Rancher v1.6?** - -That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it. New releases of the v1.6 stream are announced in the [Rancher forums.](https://forums.rancher.com/c/announcements) The Rancher wiki contains the [v1.6 release notes.](https://github.com/rancher/rancher/wiki/Rancher-1.6) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/_index.md deleted file mode 100644 index 989a22e38c..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Helm Charts in Rancher -weight: 12 -description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. -aliases: - - /rancher/v2.0-v2.4/en/concepts/global-configuration/catalog/ - - /rancher/v2.0-v2.4/en/concepts/catalogs/ - - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/ - - /rancher/v2.0-v2.4/en/catalog - - /rancher/v2.0-v2.4/en/catalog/apps ---- - -Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. - -- **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. -- **Helm charts** are a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. - -Rancher improves on Helm catalogs and charts. All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. - -This section covers the following topics: - -- [Catalog scopes](#catalog-scopes) -- [Catalog Helm Deployment Versions](#catalog-helm-deployment-versions) -- [When to use Helm 3](#when-to-use-helm-3) -- [Helm 3 Backwards Compatibility](#helm-3-backwards-compatibility) -- [Built-in global catalogs](#built-in-global-catalogs) -- [Custom catalogs](#custom-catalogs) -- [Creating and launching applications](#creating-and-launching-applications) -- [Chart compatibility with Rancher](#chart-compatibility-with-rancher) -- [Global DNS](#global-dns) - -# Catalog Scopes - -Within Rancher, you can manage catalogs at three different scopes. Global catalogs are shared across all clusters and project. There are some use cases where you might not want to share catalogs between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. 
- -Scope | Description | Available As of | ---- | --- | --- | -Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | -Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | -Project | This specific cluster can access the Helm charts in this catalog | v2.2.0 | - -# Catalog Helm Deployment Versions - -_Applicable as of v2.4.0_ - -In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. - -When you create a custom catalog, you will have to configure the catalog to use either Helm 2 or Helm 3. This version cannot be changed later. If the catalog is added with the wrong Helm version, it will need to be deleted and re-added. - -When you launch a new app from a catalog, the app will be managed by the catalog's Helm version. A Helm 2 catalog will use Helm 2 to manage all of the apps, and a Helm 3 catalog will use Helm 3 to manage all apps. - -By default, catalogs are assumed to be deployed using Helm 2. If you run an app in Rancher before v2.4.0, then upgrade to Rancher v2.4.0+, the app will still be managed by Helm 2. If the app was already using a Helm 3 Chart (API version 2) it will no longer work in v2.4.0+. You must either downgrade the chart's API version or recreate the catalog to use Helm 3. - -Charts that are specific to Helm 2 should only be added to a Helm 2 catalog, and Helm 3 specific charts should only be added to a Helm 3 catalog. - -# When to use Helm 3 - -_Applicable as of v2.4.0_ - -- If you want to ensure that the security permissions are being pulled from the kubeconfig file -- If you want to utilize apiVersion `v2` features such as creating a library chart to reduce code duplication, or moving your requirements from the `requirements.yaml` into the `Chart.yaml` - -Overall Helm 3 is a movement towards a more standardized Kubernetes feel. As the Kubernetes community has evolved, standards and best practices have as well. Helm 3 is an attempt to adopt those practices and streamline how charts are maintained. - -# Helm 3 Backwards Compatibility - -_Applicable as of v2.4.0_ - -With the use of the OpenAPI schema to validate your rendered templates in Helm 3, you will find charts that worked in Helm 2 may not work in Helm 3. This will require you to update your chart templates to meet the new validation requirements. This is one of the main reasons support for Helm 2 and Helm 3 was provided starting in Rancher 2.4.x, as not all charts can be deployed immediately in Helm 3. - -Helm 3 does not create a namespace for you, so you will have to provide an existing one. This can cause issues if you have integrated code with Helm 2, as you will need to make code changes to ensure a namespace is being created and passed in for Helm 3. Rancher will continue to manage namespaces for Helm to ensure this does not impact your app deployment. - -apiVersion `v2` is now reserved for Helm 3 charts. 
This apiVersion enforcement could cause issues as older versions of Helm 2 did not validate the apiVersion in the `Chart.yaml` file. In general, your Helm 2 chart’s apiVersion should be set to `v1` and your Helm 3 chart’s apiVersion should be set to `v2`. You can install charts with apiVersion `v1` with Helm 3, but you cannot install `v2` charts into Helm 2. - -# Built-in Global Catalogs - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. For details, refer to the section on managing [built-in global catalogs.]({{}}/rancher/v2.0-v2.4/en/catalog/built-in) - -# Custom Catalogs - -There are two types of catalogs in Rancher: [Built-in global catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/built-in/) and [custom catalogs.]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs/) - -Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. For details, refer to the [section on adding custom catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs) and the [catalog configuration reference.]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) - -# Creating and Launching Applications - -In Rancher, applications are deployed from the templates in a catalog. This section covers the following topics: - -* [Multi-cluster applications]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) -* [Creating catalog apps]({{}}/rancher/v2.0-v2.4/en/catalog/creating-apps) -* [Launching catalog apps within a project]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps) -* [Managing catalog apps]({{}}/rancher/v2.0-v2.4/en/catalog/managing-apps) -* [Tutorial: Example custom chart creation]({{}}/rancher/v2.0-v2.4/en/catalog/tutorial) - -# Chart Compatibility with Rancher - -Charts now support the fields `rancher_min_version` and `rancher_max_version` in the [`questions.yml` file](https://github.com/rancher/integration-test-charts/blob/master/charts/chartmuseum/v1.6.0/questions.yml) to specify the versions of Rancher that the chart is compatible with. When using the UI, only app versions that are valid for the version of Rancher running will be shown. API validation is done to ensure apps that don't meet the Rancher requirements cannot be launched. An app that is already running will not be affected on a Rancher upgrade if the newer Rancher version does not meet the app's requirements. - -# Global DNS - -_Available as v2.2.0_ - -When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. - -For more information on how to use this feature, see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). 
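To make the apiVersion distinction described above concrete, here is a minimal sketch of the chart metadata a Helm 3 catalog expects. The chart name, versions, and repository URL are placeholders, not charts from any Rancher catalog; a chart destined for a Helm 2 catalog would instead declare `apiVersion: v1` and keep its dependencies in `requirements.yaml`.

```
# Chart.yaml for a chart added to a Helm 3 catalog (illustrative)
apiVersion: v2
name: example-app            # placeholder chart name
version: 0.2.0               # chart version
appVersion: "1.0.0"          # version of the application being packaged
description: Placeholder chart used to illustrate the apiVersion field
dependencies:                # apiVersion v2 moves requirements.yaml here
  - name: example-lib
    version: 0.1.0
    repository: https://charts.example.com
```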
diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/adding-catalogs/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/adding-catalogs/_index.md deleted file mode 100644 index 139fcc7336..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/adding-catalogs/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Creating Custom Catalogs -weight: 200 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/adding-custom-catalogs/ - - /rancher/v2.0-v2.4/en/catalog/custom/adding - - /rancher/v2.0-v2.4/en/catalog/adding-catalogs - - /rancher/v2.0-v2.4/en/catalog/custom/ - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/adding-catalogs ---- - -Custom catalogs can be added into Rancher at a global scope, cluster scope, or project scope. - -- [Adding catalog repositories](#adding-catalog-repositories) - - [Add custom Git repositories](#add-custom-git-repositories) - - [Add custom Helm chart repositories](#add-custom-helm-chart-repositories) - - [Add private Git/Helm chart repositories](#add-private-git-helm-chart-repositories) -- [Adding global catalogs](#adding-global-catalogs) -- [Adding cluster level catalogs](#adding-cluster-level-catalogs) -- [Adding project level catalogs](#adding-project-level-catalogs) -- [Custom catalog configuration reference](#custom-catalog-configuration-reference) - -# Adding Catalog Repositories - -Adding a catalog is as simple as adding a catalog name, a URL and a branch name. - -**Prerequisite:** An [admin]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) of Rancher has the ability to add or remove catalogs globally in Rancher. - -### Add Custom Git Repositories -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. - -### Add Custom Helm Chart Repositories - -A Helm chart repository is an HTTP server that houses one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with built-in package server for developer testing (helm serve). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. - -### Add Private Git/Helm Chart Repositories -_Available as of v2.2.0_ - -Private catalog repositories can be added using credentials like Username and Password. You may also want to use the OAuth token if your Git or Helm repository server supports that. - -For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form and click **Create**. - - **Result:** Your catalog is added to Rancher. 
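If you are hosting your own chart repository rather than pointing Rancher at a Git repository, the commands below sketch how a packaged chart and its `index.yaml` are typically produced before the directory is served over HTTP. The chart path and URL are placeholders.

```
# Package a chart directory into a versioned .tgz archive
helm package ./example-app

# Generate (or update) index.yaml so the repository can be discovered
helm repo index . --url https://charts.example.com

# The resulting directory (chart archives plus index.yaml) can now be
# served by any static HTTP server and added to Rancher as a catalog URL
```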
- -# Adding Global Catalogs - ->**Prerequisites:** In order to manage the [built-in catalogs]({{}}/rancher/v2.0-v2.4/en/catalog/built-in/) or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) role assigned. - - 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - 2. Click **Add Catalog**. - 3. Complete the form. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -4. Click **Create**. - - **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or [applications in any project]({{}}/rancher/v2.0-v2.4/en/catalog/launching-apps/) from this catalog. - -# Adding Cluster Level Catalogs - -_Available as of v2.2.0_ - ->**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Custom Cluster Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Cluster Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-role-reference) role assigned. - -1. From the **Global** view, navigate to your cluster that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Cluster** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) from this catalog. 
- -# Adding Project Level Catalogs - -_Available as of v2.2.0_ - ->**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Cluster Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) ->- [Project Owner Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) ->- [Custom Project Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) with the [Manage Project Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) role assigned. - -1. From the **Global** view, navigate to your project that you want to start adding custom catalogs. -2. Choose the **Tools > Catalogs** in the navigation bar. -2. Click **Add Catalog**. -3. Complete the form. By default, the form will provide the ability to select `Scope` of the catalog. When you have added a catalog from the **Project** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.]( -{{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) -5. Click **Create**. - -**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project]({{}}/rancher/v2.0-v2.4/en/catalog/apps/) from this catalog. - -# Custom Catalog Configuration Reference - -Refer to [this page]({{}}/rancher/v2.0-v2.4/en/catalog/catalog-config) more information on configuring custom catalogs. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/built-in/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/built-in/_index.md deleted file mode 100644 index 597d2d6cf1..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/built-in/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Enabling and Disabling Built-in Global Catalogs -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/enabling-default-catalogs/ - - /rancher/v2.0-v2.4/en/catalog/built-in - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/built-in ---- - -There are default global catalogs packaged as part of Rancher. - -Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. - ->**Prerequisites:** In order to manage the built-in catalogs or manage global catalogs, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Catalogs]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/#custom-global-permissions-reference) role assigned. - -1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. - -2. Toggle the default catalogs that you want to be enabled or disabled: - - - **Library:** The Library Catalog includes charts curated by Rancher. 
Rancher stores charts in a Git repository to expedite the fetch and update of charts. This catalog features Rancher Charts, which include some [notable advantages]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#rancher-charts) over native Helm charts. - - **Helm Stable:** This catalog, which is maintained by the Kubernetes community, includes native [Helm charts](https://helm.sh/docs/chart_template_guide/). This catalog features the largest pool of apps. - - **Helm Incubator:** Similar in user experience to Helm Stable, but this catalog is filled with applications in **beta**. - - **Result**: The chosen catalogs are enabled. Wait a few minutes for Rancher to replicate the catalog charts. When replication completes, you'll be able to see them in any of your projects by selecting **Apps** from the main navigation bar. In versions before v2.2.0, within a project, you can select **Catalog Apps** from the main navigation bar. diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/catalog-config/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/catalog-config/_index.md deleted file mode 100644 index d5f0a99a17..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/catalog-config/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Custom Catalog Configuration Reference -weight: 300 -aliases: - - /rancher/v2.0-v2.4/en/catalog/catalog-config - - /rancher/v2.0-v2.4/en/catalog/catalog-config - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/catalog-config ---- - -Any user can create custom catalogs to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. - -- [Types of Repositories](#types-of-repositories) -- [Custom Git Repository](#custom-git-repository) -- [Custom Helm Chart Repository](#custom-helm-chart-repository) -- [Catalog Fields](#catalog-fields) -- [Private Repositories](#private-repositories) - - [Using Username and Password](#using-username-and-password) - - [Using an OAuth token](#using-an-oauth-token) - -# Types of Repositories - -Rancher supports adding in different types of repositories as a catalog: - -* Custom Git Repository -* Custom Helm Chart Repository - -# Custom Git Repository - -The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will default to use the `master` branch. Whenever you add a catalog to Rancher, it will be available almost immediately. - -# Custom Helm Chart Repository - -A Helm chart repository is an HTTP server that contains one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. - -Helm comes with a built-in package server for developer testing (`helm serve`). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). - -In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. 
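Before adding a chart repository URL as a catalog, it can help to confirm that the server really exposes a repository index. The URL below is a placeholder; a standard Helm chart repository answers GET requests for an `index.yaml` at its root.

```
# Check that the repository serves an index
curl -sf https://charts.example.com/index.yaml | head

# Optionally verify with the Helm CLI as well (repo name is arbitrary;
# `helm search repo` is Helm 3 syntax, Helm 2 uses `helm search`)
helm repo add example https://charts.example.com
helm search repo example
```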
- -# Catalog Fields - -When [adding your catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/) to Rancher, you'll provide the following information: - - -| Variable | Description | -| -------------------- | ------------- | -| Name | Name for your custom catalog to distinguish the repositories in Rancher | -| Catalog URL | URL of your custom chart repository| -| Use Private Catalog | Selected if you are using a private repository that requires authentication | -| Username (Optional) | Username or OAuth Token | -| Password (Optional) | If you are authenticating using a username, enter the associated password. If you are using an OAuth token, use `x-oauth-basic`. | -| Branch | For a Git repository, the branch name. Default: `master`. For a Helm Chart repository, this field is ignored. | -| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. For more information, refer to the [section on Helm versions.]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) | - -# Private Repositories - -_Available as of v2.2.0_ - -Private Git or Helm chart repositories can be added into Rancher using either credentials, i.e. `Username` and `Password`. Private Git repositories also support authentication using OAuth tokens. - -### Using Username and Password - -1. When [adding the catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -2. Provide the `Username` and `Password` for your Git or Helm repository. - -### Using an OAuth token - -Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/) for more details on how OAuth authentication works. - -1. Create an [OAuth token](https://github.com/settings/tokens) -with `repo` permission selected, and click **Generate token**. - -2. When [adding the catalog]({{}}/rancher/v2.0-v2.4/en/catalog/custom/adding/), select the **Use private catalog** checkbox. - -3. For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/creating-apps/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/creating-apps/_index.md deleted file mode 100644 index 73d1f6efe4..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/creating-apps/_index.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Creating Catalog Apps -weight: 400 -aliases: - - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/customizing-charts/ - - /rancher/v2.0-v2.4/en/catalog/custom/creating - - /rancher/v2.0-v2.4/en/catalog/custom - - /rancher/v2.0-v2.4/en/catalog/creating-apps - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps ---- - -Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. - -> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. 
- -- [Chart types](#chart-types) - - [Helm charts](#helm-charts) - - [Rancher charts](#rancher-charts) -- [Chart directory structure](#chart-directory-structure) -- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) - - [questions.yml](#questions-yml) - - [Min/Max Rancher versions](#min-max-rancher-versions) - - [Question variable reference](#question-variable-reference) -- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) - -# Chart Types - -Rancher supports two different types of charts: Helm charts and Rancher charts. - -### Helm Charts - -Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you'll learn the chart's parameters and then configure them using **Answers**, which are sets of key value pairs. - -The Helm Stable and Helm Incubators are populated with native Helm charts. However, you can also use native Helm charts in Custom catalogs (although we recommend Rancher Charts). - -### Rancher Charts - -Rancher charts mirror native helm charts, although they add two files that enhance user experience: `app-readme.md` and `questions.yaml`. Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) - -Advantages of Rancher charts include: - -- **Enhanced revision tracking:** While Helm supports versioned deployments, Rancher adds tracking and revision history to display changes between different versions of the chart. -- **Streamlined application launch:** Rancher charts add simplified chart descriptions and configuration forms to make catalog application deployment easy. Rancher users need not read through the entire list of Helm variables to understand how to launch an application. -- **Application resource management:** Rancher tracks all the resources created by a specific application. Users can easily navigate to and troubleshoot on a page listing all the workload objects used to power an application. - -# Chart Directory Structure - -The following table demonstrates the directory structure for a Rancher Chart. The `charts` directory is the top level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. This information is helpful when customizing charts for a custom catalog. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. - -``` -/ - │ - ├── charts/ - │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name - │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI - │ │ │ ├── Chart.yaml # Required Helm chart information file. - │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* - │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. - │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. - │ │ │ ├── values.yml # Default configuration values for the chart. - │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. -``` - -# Additional Files for Rancher Charts - -Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. 
Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. - -- `app-readme.md` - - A file that provides descriptive text in the chart's UI header. The following image displays the difference between a Rancher chart (which includes `app-readme.md`) and a native Helm chart (which does not). - -
Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
- - ![app-readme.md]({{}}/img/rancher/app-readme.png) - -- `questions.yml` - - A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using key value pairs, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). - - -
Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
- - ![questions.yml]({{}}/img/rancher/questions.png) - - -### questions.yml - -Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. - -### Min/Max Rancher versions - -_Available as of v2.3.0_ - -For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. - -> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. - -``` -rancher_min_version: 2.3.0 -rancher_max_version: 2.3.99 -``` - -### Question Variable Reference - -This reference contains variables that you can use in `questions.yml` nested under `questions:`. - -| Variable | Type | Required | Description | -| ------------- | ------------- | --- |------------- | -| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | -| label | string | true | Define the UI label. | -| description | string | false | Specify the description of the variable.| -| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| -| required | bool | false | Define if the variable is required or not (true \| false)| -| default | string | false | Specify the default value. | -| group | string | false | Group questions by input value. | -| min_length | int | false | Min character length.| -| max_length | int | false | Max character length.| -| min | int | false | Min integer length. | -| max | int | false | Max integer length. | -| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"| -| valid_chars | string | false | Regular expression for input chars validation. | -| invalid_chars | string | false | Regular expression for invalid input chars validation.| -| subquestions | []subquestion | false| Add an array of subquestions.| -| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | -| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| - ->**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. - -# Tutorial: Example Custom Chart Creation - -For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/catalog/tutorial) diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/globaldns/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/globaldns/_index.md deleted file mode 100644 index f5de5931b5..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/globaldns/_index.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: Global DNS -weight: 5010 -aliases: - - /rancher/v2.0-v2.4/en/catalog/globaldns - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/globaldns ---- - -_Available as of v2.2.0_ - -Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. - -> **Note:** Global DNS is only available in [Kubernetes installations]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) with the `local` cluster enabled. - -- [Global DNS Providers](#global-dns-providers) -- [Global-DNS-Entries](#global-dns-entries) -- [Permissions for Global DNS Providers and Entries](#permissions-for-global-dns-providers-and-entries) -- [Setting up Global DNS for Applications](#setting-up-global-dns-for-applications) -- [Adding a Global DNS Entry](#adding-a-global-dns-entry) -- [Editing a Global DNS Provider](#editing-a-global-dns-provider) -- [Global DNS Entry Configuration](#global-dns-entry-configuration) -- [DNS Provider Configuration](#dns-provider-configuration) - - [Route53](#route53) - - [CloudFlare](#cloudflare) - - [AliDNS](#alidns) -- [Adding Annotations to Ingresses to program the External DNS](#adding-annotations-to-ingresses-to-program-the-external-dns) - -# Global DNS Providers - -Before adding in Global DNS entries, you will need to configure access to an external provider. - -The following table lists the first version of Rancher each provider debuted. - -| DNS Provider | Available as of | -| --- | --- | -| [AWS Route53](https://aws.amazon.com/route53/) | v2.2.0 | -| [CloudFlare](https://www.cloudflare.com/dns/) | v2.2.0 | -| [AliDNS](https://www.alibabacloud.com/product/dns) | v2.2.0 | - -# Global DNS Entries - -For each application that you want to route traffic to, you will need to create a Global DNS Entry. This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. 
The applications can either resolve to a single [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. - -# Permissions for Global DNS Providers and Entries - -By default, only [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. - -# Setting up Global DNS for Applications - -1. From the **Global View**, select **Tools > Global DNS Providers**. -1. To add a provider, choose from the available provider options and configure the Global DNS Provider with necessary credentials and an optional domain. For help, see [DNS Provider Configuration.](#dns-provider-configuration) -1. (Optional) Add additional users so they could use the provider when creating Global DNS entries as well as manage the Global DNS provider. -1. (Optional) Pass any custom values in the Additional Options section. - -# Adding a Global DNS Entry - -1. From the **Global View**, select **Tools > Global DNS Entries**. -1. Click on **Add DNS Entry**. -1. Fill out the form. For help, refer to [Global DNS Entry Configuration.](#global-dns-entry-configuration) -1. Click **Create.** - -# Editing a Global DNS Provider - -The [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: - -- Root Domain -- Access Key & Secret Key -- Members -- Custom values - -1. From the **Global View**, select **Tools > Global DNS Providers**. - -1. For the Global DNS provider that you want to edit, click the **⋮ > Edit**. - -# Editing a Global DNS Entry - -The [global administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: - -- FQDN -- Global DNS Provider -- Target Projects or Multi-Cluster App -- DNS TTL -- Members - -Any users who can access the Global DNS entry can **only** add target projects that they have access to. However, users can remove **any** target project as there is no check to confirm if that user has access to the target project. - -Permission checks are relaxed for removing target projects in order to support situations where the user's permissions might have changed before they were able to delete the target project. Another use case could be that the target project was removed from the cluster before being removed from a target project of the Global DNS entry. - -1. From the **Global View**, select **Tools > Global DNS Entries**. - -1. For the Global DNS entry that you want to edit, click the **⋮ > Edit**. - - -# Global DNS Entry Configuration - -| Field | Description | -|----------|--------------------| -| FQDN | Enter the **FQDN** you wish to program on the external DNS. 
| -| Provider | Select a Global DNS **Provider** from the list. | -| Resolves To | Select if this DNS entry will be for a [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or for workloads in different [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/). | -| Multi-Cluster App Target | The target for the global DNS entry. You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. | -| DNS TTL | Configure the DNS time to live value in seconds. By default, it will be 300 seconds. | -| Member Access | Search for any users that you want to have the ability to manage this Global DNS entry. | - -# DNS Provider Configuration - -### Route53 - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | (Optional) Enter the **Root Domain** of the hosted zone on AWS Route53. If this is not provided, Rancher's Global DNS Provider will work with all hosted zones that the AWS keys can access. | -| Credential Path | The [AWS credential path.](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-where) | -| Role ARN | An [Amazon Resource Name.](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) | -| Region | An [AWS region.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) | -| Zone | An [AWS zone.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.AvailabilityZones) | -| Access Key | Enter the AWS **Access Key**. | -| Secret Key | Enter the AWS **Secret Key**. | -| Member Access | Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - - -### CloudFlare - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. | -| Proxy Setting | When set to yes, the global DNS entry that gets created for the provider has proxy settings on. | -| API Email | Enter the CloudFlare **API Email**. | -| API Key | Enter the CloudFlare **API Key**. | -| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - -### AliDNS - ->**Notes:** -> ->- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running `local` cluster, and it is mounted to the external DNS pods. If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. ->- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. 
Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. - -| Field | Explanation | -|---------|---------------------| -| Name | Enter a **Name** for the provider. | -| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. | -| Access Key | Enter the **Access Key**. | -| Secret Key | Enter the **Secret Key**. | -| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | - -# Adding Annotations to Ingresses to program the External DNS - -In order for Global DNS entries to be programmed, you will need to add a specific annotation on an ingress in your application or target project. - -For any application that you want targeted for your Global DNS entry, find an ingress associated with the application. - -This ingress needs to use a specific `hostname` and an annotation that should match the FQDN of the Global DNS entry. - -In order for the DNS to be programmed, the following requirements must be met: - -* The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. -* The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. - -Once the ingress in your [multi-cluster application]({{}}/rancher/v2.0-v2.4/en/catalog/multi-cluster-apps/) or in your target projects is in an `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/launching-apps/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/launching-apps/_index.md deleted file mode 100644 index e3af01f5d4..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/launching-apps/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Launching Catalog Apps -weight: 700 -aliases: - - /rancher/v2.0-v2.4/en/catalog/launching-apps - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/launching-apps ---- - -Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/#catalog-scopes). - -If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). - -- [Prerequisites](#prerequisites) -- [Launching a catalog app](#launching-a-catalog-app) -- [Configuration options](#configuration-options) - -# Prerequisites - -When Rancher deploys a catalog app, it launches an ephemeral instance of a Helm service account that has the permissions of the user deploying the catalog app. Therefore, a user cannot gain more access to the cluster through Helm or a catalog application than they otherwise would have. 
- -To launch an app from a catalog in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster, which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the cluster that include the target project - -Before launching an app, you'll need to either [enable a built-in global catalog]({{}}/rancher/v2.0-v2.4/en/catalog/built-in) or [add your own custom catalog.]({{}}/rancher/v2.0-v2.4/en/catalog/adding-catalogs) - -# Launching a Catalog App - -1. From the **Global** view, open the project that you want to deploy an app to. - -2. From the main navigation bar, choose **Apps**. In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. - -3. Find the app that you want to launch, and then click **View Now**. - -4. Under **Configuration Options** enter a **Name**. By default, this name is also used to create a Kubernetes namespace for the application. - - * If you would like to change the **Namespace**, click **Customize** and enter a new name. - * If you want to use a different namespace that already exists, click **Customize**, and then click **Use an existing namespace**. Choose a namespace from the list. - -5. Select a **Template Version**. - -6. Complete the rest of the **Configuration Options**. - - * For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs), answers are provided as key value pairs in the **Answers** section. - * Keys and values are available within **Detailed Descriptions**. - * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. For example, when entering an answer that includes two values separated by a comma (i.e., `abc, bcd`), wrap the values with double quotes (i.e., `"abc, bcd"`). - -7. Review the files in **Preview**. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's **Workloads** view or **Apps** view. In versions before v2.2.0, this is the **Catalog Apps** view. - -# Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -{{% tabs %}} -{{% tab "UI" %}} - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. 
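
For illustration only, a minimal `questions.yml` along the lines of the sketch below would be rendered as a single checkbox in that form. The chart value `service.enabled` is a hypothetical placeholder, not a key from any particular chart:

```yaml
# Hypothetical sketch: one question rendered as a checkbox in the Rancher UI.
# The answer is passed to the chart as the value of `service.enabled`.
questions:
- variable: service.enabled
  default: "true"
  description: "Expose the application through a Kubernetes service"
  type: boolean
  label: Enable Service
```

The variable names, types, and grouping all come from the chart's own `questions.yml`, so the generated form differs from chart to chart.
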
- -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/catalog-config/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -{{% /tab %}} -{{% tab "Editing YAML Files" %}} - -_Available as of v2.1.0_ - -If you do not want to input answers using the UI, you can choose the **Edit as YAML** option. - -With this example YAML: - -```YAML -outer: - inner: value -servers: -- port: 80 - host: example -``` - -### Key Value Pairs - -You can have a YAML file that translates these fields to match how to [format custom values so that it can be used with `--set`](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set). - -These values would be translated to: - -``` -outer.inner=value -servers[0].port=80 -servers[0].host=example -``` - -### YAML files - -_Available as of v2.2.0_ - -You can directly paste that YAML formatted structure into the YAML editor. By allowing custom values to be set using a YAML formatted structure, Rancher has the ability to easily customize for more complicated input values (e.g. multi-lines, array and JSON objects). -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/multi-cluster-apps/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/multi-cluster-apps/_index.md deleted file mode 100644 index 4bcd3b4b42..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/multi-cluster-apps/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Multi-Cluster Apps -weight: 600 -aliases: - - /rancher/v2.0-v2.4/en/catalog/multi-cluster-apps - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/multi-cluster-apps ---- -_Available as of v2.2.0_ - -The documentation about multi-cluster apps has moved [here.]({{}}/rancher/v2.0-v2.4/en/deploy-across-clusters/multi-cluster-apps) diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/tutorial/_index.md b/content/rancher/v2.0-v2.4/en/helm-charts/tutorial/_index.md deleted file mode 100644 index 09b5c493fc..0000000000 --- a/content/rancher/v2.0-v2.4/en/helm-charts/tutorial/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Tutorial: Example Custom Chart Creation" -weight: 800 -aliases: - - /rancher/v2.0-v2.4/en/catalog/tutorial - - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/tutorial ---- - -In this tutorial, you'll learn how to create a Helm chart and deploy it to a repository. The repository can then be used as a source for a custom catalog in Rancher. - -You can fill your custom catalogs with either Helm Charts or Rancher Charts, although we recommend Rancher Charts due to their enhanced user experience. - -> For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). - -1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in the [Chart Directory Structure]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#chart-directory-structure). - - Rancher requires this directory structure, although `app-readme.md` and `questions.yml` are optional. 
- - >**Tip:** - > - >- To begin customizing a chart, copy one from either the [Rancher Library](https://github.com/rancher/charts) or the [Helm Stable](https://github.com/kubernetes/charts/tree/master/stable). - >- For a complete walk through of developing charts, see the upstream Helm chart [developer reference](https://docs.helm.sh/developing_charts/). - -2. **Recommended:** Create an `app-readme.md` file. - - Use this file to create custom text for your chart's header in the Rancher UI. You can use this text to notify users that the chart is customized for your environment or provide special instruction on how to use it. -
-
- **Example**: - - ``` - $ cat ./app-readme.md - - # Wordpress ROCKS! - ``` - -3. **Recommended:** Create a `questions.yml` file. - - This file creates a form for users to specify deployment parameters when they deploy the custom chart. Without this file, users **must** specify the parameters manually using key value pairs, which isn't user-friendly. -
-
- The example below creates a form that prompts users for a persistent volume size and a storage class. -
-
- For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference]({{}}/rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps/#question-variable-reference). - - ```yaml - categories: - - Blog - - CMS - questions: - - variable: persistence.enabled - default: "false" - description: "Enable persistent volume for WordPress" - type: boolean - required: true - label: WordPress Persistent Volume Enabled - show_subquestion_if: true - group: "WordPress Settings" - subquestions: - - variable: persistence.size - default: "10Gi" - description: "WordPress Persistent Volume Size" - type: string - label: WordPress Volume Size - - variable: persistence.storageClass - default: "" - description: "If undefined or null, uses the default StorageClass. Default to null" - type: storageclass - label: Default StorageClass for WordPress - ``` - -4. Check the customized chart into your GitHub repo. - -**Result:** Your custom chart is added to the repo. Your Rancher Server will replicate the chart within a few minutes. diff --git a/content/rancher/v2.0-v2.4/en/installation/_index.md b/content/rancher/v2.0-v2.4/en/installation/_index.md deleted file mode 100644 index 44c5f923fe..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Installing/Upgrading Rancher -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/how-ha-works/ ---- - -This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. - -# Terminology - -In this section, - -- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. -- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. -- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. As of Rancher v2.4, Rancher can be installed on a K3s cluster. - -# Overview of Installation Options - -Rancher can be installed on these main architectures: - -### High-availability Kubernetes Install with the Helm CLI - -We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. - -### Single-node Kubernetes Install - -Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. - -However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. - -### Docker Install - -For test and demonstration purposes, Rancher can be installed with Docker on a single node. - -For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. 
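
For reference, the single-node Docker install generally comes down to one `docker run` command. The sketch below assumes the published `rancher/rancher` image and uses `latest` only as a stand-in for the tag you actually want:

```
# Minimal sketch of a single-node Docker install (the image tag is a placeholder)
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  rancher/rancher:latest
```

Certificate, persistent data, and air gap variations are covered in the Docker installation docs linked below.
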
- -### Other Options - -There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: - -| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | -| ---------------------------------- | ------------------------------ | ---------- | -| With direct access to the Internet | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) | -| Behind an HTTP proxy | These [docs,]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) plus this [configuration]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) | These [docs,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) plus this [configuration]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) | -| In an air gap environment | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) | [Docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) | - -We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. - -For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. - -> The type of cluster that Rancher needs to be installed on depends on the Rancher version. -> -> For Rancher v2.4.x, either an RKE Kubernetes cluster or K3s Kubernetes cluster can be used. -> For Rancher before v2.4, an RKE cluster must be used. - -For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. - -Our [instructions for installing Rancher on Kubernetes]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. - -When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,]({{}}/rancher/v2.0-v2.4/en/installation/requirements) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. 
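
Once the chart is installed, a quick way to confirm that the replicas are up and spread across nodes is with `kubectl`; this assumes the chart's default `cattle-system` namespace and a release named `rancher`:

```
# The deployment should report all replicas as available
kubectl -n cattle-system get deploy rancher

# -o wide shows which node each Rancher pod is scheduled on
kubectl -n cattle-system get pods -o wide
```
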
- -For a longer discussion of Rancher architecture, refer to the [architecture overview,]({{}}/rancher/v2.0-v2.4/en/overview/architecture) [recommendations for production-grade architecture,]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) or our [best practices guide.]({{}}/rancher/v2.0-v2.4/en/best-practices/deployment-types) - -# Prerequisites -Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -# Architecture Tip - -For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. - -For more architecture recommendations, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) - -### More Options for Installations on a Kubernetes Cluster - -Refer to the [Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) -- With [TLS termination on a load balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) -- With a [custom Ingress]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#customizing-your-ingress) - -In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. For the full list of their capabilities, refer to their documentation: - -- [RKE configuration options]({{}}/rke/latest/en/config-options/) -- [K3s configuration options]({{}}/k3s/latest/en/installation/install-options/) - -### More Options for Installations with Docker - -Refer to the [docs about options for Docker installs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) for details about other configurations including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) -- With an [external load balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb/) -- With a [persistent data store]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#persistent-data) diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/_index.md deleted file mode 100644 index 0e79ce86a0..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/_index.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -title: Install Rancher on a Kubernetes Cluster -description: Learn how to install Rancher in development and production environments. 
Read about single node and high availability installation -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/k8s-install/ - - /rancher/v2.0-v2.4/en/installation/k8s-install/helm-rancher - - /rancher/v2.0-v2.4/en/installation/k8s-install/kubernetes-rke - - /rancher/v2.0-v2.4/en/installation/ha-server-install - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/install ---- - -# Prerequisite - -Set up the Rancher server's local Kubernetes cluster. - -The cluster requirements depend on the Rancher version: - -- **In Rancher v2.4.x,** Rancher needs to be installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. -- **In Rancher before v2.4,** Rancher needs to be installed on an RKE Kubernetes cluster. - -For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) - -For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) - -# Install the Rancher Helm Chart - -Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. - -With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/). - -To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags) - -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) - -> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -To set up Rancher, - -1. [Install the required CLI tools](#1-install-the-required-cli-tools) -2. [Add the Helm chart repository](#2-add-the-helm-chart-repository) -3. [Create a namespace for Rancher](#3-create-a-namespace-for-rancher) -4. [Choose your SSL configuration](#4-choose-your-ssl-configuration) -5. [Install cert-manager](#5-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) -6. [Install Rancher with Helm and your chosen certificate option](#6-install-rancher-with-helm-and-your-chosen-certificate-option) -7. 
[Verify that the Rancher server is successfully deployed](#7-verify-that-the-rancher-server-is-successfully-deployed) -8. [Save your options](#8-save-your-options) - -### 1. Install the Required CLI Tools - -The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. - -Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### 2. Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### 3. Create a Namespace for Rancher - -We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: - -``` -kubectl create namespace cattle-system -``` - -### 4. Choose your SSL Configuration - -The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: - -- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. -- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. -- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. 
Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. - - -| Configuration | Helm Chart Option | Requires cert-manager | -| ------------------------------ | ----------------------- | ------------------------------------- | -| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#5-install-cert-manager) | -| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#5-install-cert-manager) | -| Certificates from Files | `ingress.tls.source=secret` | no | - -### 5. Install cert-manager - -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). - -{{% accordion id="cert-manager" label="Click to Expand" %}} - -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - -These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). - -``` -# Install the CustomResourceDefinition resources separately -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml - -# **Important:** -# If you are running Kubernetes v1.15 or below, you -# will need to add the `--validate=false` flag to your -# kubectl apply command, or else you will receive a -# validation error relating to the -# x-kubernetes-preserve-unknown-fields field in -# cert-manager’s CustomResourceDefinition resources. -# This is a benign error and occurs due to the way kubectl -# performs resource validation. - -# Create the namespace for cert-manager -kubectl create namespace cert-manager - -# Add the Jetstack Helm repository -helm repo add jetstack https://charts.jetstack.io - -# Update your local Helm chart repository cache -helm repo update - -# Install the cert-manager Helm chart -helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v1.0.4 -``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -{{% /accordion %}} - -### 6. Install Rancher with Helm and Your Chosen Certificate Option - -The exact command to install Rancher differs depending on the certificate configuration. - -{{% tabs %}} -{{% tab "Rancher-generated Certificates" %}} - - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. 
- -Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Let's Encrypt" %}} - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. - -In the following command, - -- `hostname` is set to the public DNS record, -- `ingress.tls.source` is set to `letsEncrypt` -- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org \ -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Certificates from Files" %}} -In this option, Kubernetes secrets are created from your own certificates for Rancher to use. - -When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. - -Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. - -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set the `hostname`. -- Set `ingress.tls.source` to `secret`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. 
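
As a condensed sketch of that step (the certificate file names here are placeholders, and the linked page remains the authoritative reference), the secrets are created with `kubectl`:

```
# Certificate and key that the Rancher ingress will serve
kubectl -n cattle-system create secret tls tls-rancher-ingress \
  --cert=tls.crt \
  --key=tls.key

# Additionally required when using a private CA (--set privateCA=true)
kubectl -n cattle-system create secret generic tls-ca \
  --from-file=cacerts.pem=./cacerts.pem
```
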
-{{% /tab %}} -{{% /tabs %}} - -The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. - -- [HTTP Proxy]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) -- [Private Docker Image Registry]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) for the full list of options. - - -### 7. Verify that the Rancher Server is Successfully Deployed - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### 8. Save Your Options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it. You should have a functional Rancher server. - -In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. - -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) Page - - -### Optional Next Steps - -Enable the Enterprise Cluster Manager. diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md deleted file mode 100644 index f027000a6c..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: Rancher Helm Chart Options -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/ - - /rancher/v2.0-v2.4/en/installation/options/chart-options/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/ - - /rancher/v2.0-v2.4/en/installation/resources/chart-options ---- - -This page is a configuration reference for the Rancher Helm chart. 
- -For help choosing a Helm chart version, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/) - -For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/feature-flags/) - -- [Common Options](#common-options) -- [Advanced Options](#advanced-options) -- [API Audit Log](#api-audit-log) -- [Setting Extra Environment Variables](#setting-extra-environment-variables) -- [TLS Settings](#tls-settings) -- [Customizing your Ingress](#customizing-your-ingress) -- [HTTP Proxy](#http-proxy) -- [Additional Trusted CAs](#additional-trusted-cas) -- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) -- [External TLS Termination](#external-tls-termination) - -### Common Options - -| Option | Default Value | Description | -| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
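
To illustrate how the common options combine, an illustrative Let's Encrypt install might look like the following; the repository name, hostname, and email address are placeholders:

```plain
helm install rancher rancher-<CHART_REPO>/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set ingress.tls.source=letsEncrypt \
  --set letsEncrypt.email=me@example.org \
  --set letsEncrypt.environment=production
```
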
- -### Advanced Options - -| Option | Default Value | Description | -| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. In v2.5.0, the `restrictedAdmin` option is used to prevent users from modifying the local cluster._ | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) level. 0 is off. [0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | -| `certmanager.version` | "" | `string` - set cert-manager compatibility | -| `debug` | false | `bool` - set debug flag on rancher server | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. 
| | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma separated list of hostnames or ip address not to use the proxy | | -| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | -| `rancherImage` | "rancher/rancher" | `string` - rancher image source | -| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | -| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | -| `replicas` | 3 | `int` - Number of replicas of Rancher pods | -| `resources` | {} | `map` - rancher pod resource requests & limits | -| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ | -| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | -| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | - - - -### API Audit Log - -Enabling the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing/). - -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. - -```plain ---set auditLog.level=1 -``` - -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. - -Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. - -### Setting Extra Environment Variables - -You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -### TLS Settings - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. - -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. 
- -> **Important if you are considering upgrading to Rancher v2.5:** If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. In Rancher v2.5, the restrictedAdmin option is used to prevent users from modifying the local cluster. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name -``` - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. - -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. - -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) - -# External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. 
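
For example, an illustrative install command for this scenario (the repository name and hostname are placeholders) adds `tls=external`:

```plain
helm install rancher rancher-<CHART_REPO>/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set tls=external
```
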
- -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. - -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: 'true' -``` - -### Required Headers - -- `Host` -- `X-Forwarded-Proto` -- `X-Forwarded-Port` -- `X-Forwarded-For` - -### Recommended Timeouts - -- Read Timeout: `1800 seconds` -- Write Timeout: `1800 seconds` -- Connect Timeout: `30 seconds` - -### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - -### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
- proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/rollbacks/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/rollbacks/_index.md deleted file mode 100644 index 9596006b5c..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/rollbacks/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Rollbacks -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/rollbacks - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/rollbacks - - /rancher/v2.0-v2.4/en/upgrades/ha-server-rollbacks - - /rancher/v2.0-v2.4/en/upgrades/rollbacks/ha-server-rollbacks - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks ---- - -### Rolling Back to Rancher v2.2-v2.4 - -For Rancher installed on Kubernetes, follow the procedure detailed here: [Restoring Backups for Kubernetes installs.]({{}}/rancher/v2.0-v2.4/en/backups/restorations/ha-restoration) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. - -For information on how to roll back Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks) - -> Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. - -### Rolling Back to v2.0.0-v2.1.5 - -If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. - -- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. -- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. - -Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: - -1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. - - **Rancher Installed with Docker** - ``` - docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - - **Rancher Installed on a Kubernetes Cluster** - ``` - kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json - ``` - -2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. 
**If you lose this file, you may lose access to your clusters.** - -3. Rollback Rancher following the [normal instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). - -4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. - -5. Apply the backed up tokens based on how you installed Rancher. - - **Rancher Installed with Docker** - - Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. - ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - docker exec $1 kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from Unavailable back to Available. - - **Rancher Installed on a Kubernetes Cluster** - - Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. - ``` - set -e - - tokens=$(jq .[] -c tokens.json) - for token in $tokens; do - name=$(echo $token | jq -r .name) - value=$(echo $token | jq -r .token) - - kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" - done - ``` - Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: - ``` - ./apply_tokens.sh - ``` - After a few moments the clusters will go from `Unavailable` back to `Available`. - -6. Continue using Rancher as normal. diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/_index.md deleted file mode 100644 index cb3cf8b965..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/_index.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -title: Upgrades -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/upgrades - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades - - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade-helm-airgap - - /rancher/v2.0-v2.4/en/upgrades/air-gap-upgrade/ - - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/upgrades/ha - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ - - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade-helm/ - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/ - - /rancher/v2.0-v2.4/en/upgrades/ ---- -The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. 
- -For the instructions to upgrade Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) - -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - -If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on). - -- [Prerequisites](#prerequisites) -- [Upgrade Outline](#upgrade-outline) -- [Known Upgrade Issues](#known-upgrade-issues) -- [RKE Add-on Installs](#rke-add-on-installs) - -# Prerequisites - -### Access to kubeconfig - -Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. - -If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. - -The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) - -### Review Known Issues - -Review the [known upgrade issues](#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. - -A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren't supported. - -### Helm Version - -The upgrade instructions assume you are using Helm 3. - -For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -### For air gap installs: Populate private registry - --For [air gap installs only,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version. Follow the guide to [populate your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. 
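For reference, a minimal sketch of that image collection step, assuming the `rancher-images.txt`, `rancher-save-images.sh`, and `rancher-load-images.sh` assets published with the target release (the asset names, flags, and the v2.4.11 tag are illustrative; follow the linked guide for the authoritative procedure):

```plain
# On a host with internet access, download the image list and helper scripts
# attached to the release you are upgrading to (the version tag is an example).
wget https://github.com/rancher/rancher/releases/download/v2.4.11/rancher-images.txt
wget https://github.com/rancher/rancher/releases/download/v2.4.11/rancher-save-images.sh
wget https://github.com/rancher/rancher/releases/download/v2.4.11/rancher-load-images.sh
chmod +x rancher-save-images.sh rancher-load-images.sh

# Save the images to a tarball, transfer it into the air gapped environment,
# then push the images to the private registry used by Rancher.
./rancher-save-images.sh --image-list ./rancher-images.txt
./rancher-load-images.sh --image-list ./rancher-images.txt --registry <REGISTRY.YOURDOMAIN.COM:PORT>
```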
- -### For upgrades from v2.0-v2.2 with external TLS termination - -If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) - -### For upgrades with cert-manager older than 0.8.0 - -[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) -- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) -- [3. Upgrade Rancher](#3-upgrade-rancher) -- [4. Verify the Upgrade](#4-verify-the-upgrade) - -# 1. Back up Your Kubernetes Cluster that is Running Rancher Server - - -[Take a one-time snapshot]({{}}/rancher/v2.0-v2.4/en/backups/backup/rke-backups/#option-b-one-time-snapshots) -of your Kubernetes cluster running Rancher server. - -You'll use the backup as a restoration point if something goes wrong during upgrade. - -# 2. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: - - ```plain - helm fetch rancher-/rancher --version=v2.4.11 - ``` - -# 3. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -{{% tabs %}} -{{% tab "Kubernetes Upgrade" %}} - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. - -``` -helm get values rancher -n cattle-system - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. 
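Before changing anything, it can also help to record which chart version is currently deployed. With Helm 3, `helm list` reports the release name, chart version, and app version for the namespace:

```plain
# Shows the currently deployed release, chart version, and app version
helm list -n cattle-system
```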
- -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow [Option B: Reinstalling Rancher and cert-manager.](#option-b-reinstalling-rancher-and-cert-manager) - -Otherwise, follow [Option A: Upgrading Rancher.](#option-a-upgrading-rancher) - -### Option A: Upgrading Rancher - -Upgrade Rancher to the latest version with all your settings. - -Take all the values from the previous step and append them to the command using `--set key=value`: - -``` -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -> **Note:** The above is an example, there may be more values from the previous step that need to be appended. - -Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: - -``` -helm get values rancher -n cattle-system -o yaml > values.yaml - -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - -f values.yaml \ - --version=2.4.5 -``` - -### Option B: Reinstalling Rancher and cert-manager - -If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher -n cattle-system - ``` - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -{{% /tab %}} - -{{% tab "Kubernetes Air Gap Upgrade" %}} - -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -Based on the choice you made during installation, complete one of the procedures below. - -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. - - -### Option A: Default Self-signed Certificate - - ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -### Option B: Certificates from Files using Kubernetes Secrets - -```plain -helm template ./rancher-.tgz --output-dir . 
\ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - -{{% /tab %}} -{{% /tabs %}} - -# 4. Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - -# Known Upgrade Issues - -The following table lists some of the most noteworthy issues to be considered when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -Upgrade Scenario | Issue ----|--- -Upgrading to v2.4.6 or v2.4.7 | These Rancher versions had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. -Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. -Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). -Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. -Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. 
Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration) - -# RKE Add-on Installs - -**Important: RKE add-on install is only supported up to Rancher v2.0.8** - -Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). - -If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md deleted file mode 100644 index e0f9ac2787..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/helm2/_index.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Upgrading Rancher Installed on Kubernetes with Helm 2 -weight: 1050 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha/helm2 - - /rancher/v2.0-v2.4/en/upgrades/helm2 - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2 - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha/helm2 - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/helm2 - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/ ---- - -> Helm 3 has been released. If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. -> -> The [current instructions for Upgrading Rancher Installed on Kubernetes](https://rancher.com/docs/rancher/v2.0-v2.4/en/upgrades/upgrades/ha/) use Helm 3. -> -> This section provides a copy of the older instructions for upgrading Rancher with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. - -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - -If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on). 
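As recommended above, migrating the release to Helm 3 is preferable where feasible. A minimal sketch of that migration with the upstream `helm-2to3` plugin is shown below; it assumes the Helm 3 binary is available as `helm3` alongside Helm 2, and the exact steps should be verified against the linked migration guide:

```plain
# Install the migration plugin into the Helm 3 client
helm3 plugin install https://github.com/helm/helm-2to3

# Copy Helm 2 configuration (repositories, plugins) over to Helm 3
helm3 2to3 move config

# Convert the Helm 2 "rancher" release into a Helm 3 release
helm3 2to3 convert rancher

# After verifying the converted release, remove the leftover Helm 2 data
helm3 2to3 cleanup
```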
- ->**Notes:** -> -> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager) -> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#configuring-ingress-for-external-tls-when-using-nginx-v0-25) -> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) -- **For [air gap installs only,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [A. Back up your Kubernetes cluster that is running Rancher server](#a-back-up-your-kubernetes-cluster-that-is-running-rancher-server) -- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) -- [C. Upgrade Rancher](#c-upgrade-rancher) -- [D. Verify the Upgrade](#d-verify-the-upgrade) - -### A. Back up Your Kubernetes Cluster that is Running Rancher Server - -[Take a one-time snapshot]({{}}/rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups/#option-b-one-time-snapshots) -of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restore point if something goes wrong during upgrade. - -### B. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). 
- - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. - - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - -### C. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -{{% tabs %}} -{{% tab "Kubernetes Upgrade" %}} - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. - -``` -helm get values rancher - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. - -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher`. Otherwise, follow `Option A: Upgrading Rancher`. - -{{% accordion label="Option A: Upgrading Rancher" %}} - -Upgrade Rancher to the latest version with all your settings. - -Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. - -``` -helm upgrade --install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` -{{% /accordion %}} - -{{% accordion label="Option B: Reinstalling Rancher chart" %}} - -If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher - ``` - In case this results in an error that the release "rancher" was not found, make sure you are using the correct deployment name. Use `helm list` to list the helm-deployed releases. - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -{{% /accordion %}} - -{{% /tab %}} - -{{% tab "Kubernetes Air Gap Upgrade" %}} - -1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. 
- - Based on the choice you made during installation, complete one of the procedures below. - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - - ```plain -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template ./rancher-.tgz --output-dir . \ ---name rancher \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} - -2. Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - - Use `kubectl` to apply the rendered manifests. - - ```plain - kubectl -n cattle-system apply -R -f ./rancher - ``` - -{{% /tab %}} -{{% /tabs %}} - -### D. Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - -## Rolling Back - -Should something go wrong, follow the [roll back]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/ha-server-rollbacks/) instructions to restore the snapshot you took before you preformed the upgrade. 
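For an RKE-provisioned Rancher server cluster, that restore is typically driven by the RKE CLI from the directory containing the cluster configuration file. A minimal sketch, assuming a one-time snapshot named `before-upgrade` and the usual `rancher-cluster.yml` file (see the linked roll back instructions for the full procedure):

```plain
# Restore the etcd snapshot taken before the upgrade
rke etcd snapshot-restore --name before-upgrade --config rancher-cluster.yml
```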
diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md deleted file mode 100644 index 50b771e626..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Migrating from a Kubernetes Install with an RKE Add-on -weight: 1030 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/ha-server-upgrade/ - - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade/ - - /rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/migrating-from-rke-add-on - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/migrating-from-rke-add-on - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/ ---- - -> **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. - - -The following instructions will help guide you through migrating from the RKE Add-on install to managing Rancher with the Helm package manager. - -You will need to have [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) installed and the kubeconfig YAML file (`kube_config_rancher-cluster.yml`) generated by RKE. - -> **Note:** This guide assumes a standard Rancher install. If you have modified any of the object names or namespaces, please adjust accordingly. - -> **Note:** If you are upgrading from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. - -### Point kubectl at your Rancher Cluster - -Make sure `kubectl` is using the correct kubeconfig YAML file. Set the `KUBECONFIG` environment variable to point to `kube_config_rancher-cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -After setting the `KUBECONFIG` environment variable, verify that it contains the correct `server` parameter. It should point directly to one of your cluster nodes on port `6443`. - -``` -kubectl config view -o=jsonpath='{.clusters[*].cluster.server}' -https://NODE:6443 -``` - -If the output from the command shows your Rancher hostname with the suffix `/k8s/clusters`, the wrong kubeconfig YAML file is configured. It should be the file that was created when you used RKE to create the cluster to run Rancher. - -### Save your certificates - -If you have terminated SSL on the Rancher cluster ingress, recover your certificate and key for use in the Helm install. - -Use `kubectl` to get the secret, decode the value and direct the output to a file.
- -``` -kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.crt }' | base64 -d > tls.crt -kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.key }' | base64 -d > tls.key -``` - -If you specified a private CA root cert - -``` -kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ .data.cacerts\.pem }' | base64 -d > cacerts.pem -``` - -### Remove previous Kubernetes objects - -Remove the Kubernetes objects created by the RKE install. - -> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups) for details. - -``` -kubectl -n cattle-system delete ingress cattle-ingress-http -kubectl -n cattle-system delete service cattle-service -kubectl -n cattle-system delete deployment cattle -kubectl -n cattle-system delete clusterrolebinding cattle-crb -kubectl -n cattle-system delete serviceaccount cattle-admin -``` - -### Remove addons section from `rancher-cluster.yml` - -The addons section from `rancher-cluster.yml` contains all the resources needed to deploy Rancher using RKE. By switching to Helm, this part of the cluster configuration file is no longer needed. Open `rancher-cluster.yml` in your favorite text editor and remove the addons section: - ->**Important:** Make sure you only remove the addons section from the cluster configuration file. - -``` -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Remove addons section from here til end of file -addons: |- - --- - ... -# End of file -``` - -### Follow Helm and Rancher install steps - -From here follow the standard install steps. - -* [3 - Initialize Helm]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) -* [4 - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md b/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md deleted file mode 100644 index 773bb97f9a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/_index.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Upgrading to v2.0.7+ — Namespace Migration -weight: 1040 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/upgrades/namespace-migration - - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/namespace-migration - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/namespace-migration - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/ ---- ->This section applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. - -In Rancher v2.0.6 and prior, system namespaces crucial for Rancher and Kubernetes operations were not assigned to any Rancher project by default. 
Instead, these namespaces existed independently from all Rancher projects, but you could move these namespaces into any project without affecting cluster operations. - -These namespaces include: - -- `kube-system` -- `kube-public` -- `cattle-system` -- `cattle-alerting`1 -- `cattle-logging`1 -- `cattle-pipeline`1 -- `ingress-nginx` - ->1 Only displays if this feature is enabled for the cluster. - -However, with the release of Rancher v2.0.7, the `System` project was introduced. This project, which is automatically created during the upgrade, is assigned the system namespaces above to hold these crucial components for safe keeping. - -During upgrades from Rancher v2.0.6- to Rancher v2.0.7+, all system namespaces are moved from their default location outside of all projects into the newly created `System` project. However, if you assigned any of your system namespaces to a project before upgrading, your cluster networking may encounter issues afterwards. This issue occurs because the system namespaces are not where the upgrade expects them to be during the upgrade, so it cannot move them to the `System` project. - -- To prevent this issue from occurring before the upgrade, see [Preventing Cluster Networking Issues](#preventing-cluster-networking-issues). -- To fix this issue following upgrade, see [Restoring Cluster Networking](#restoring-cluster-networking). - -> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps]({{}}/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. - -## Preventing Cluster Networking Issues - -You can prevent cluster networking issues from occurring during your upgrade to v2.0.7+ by unassigning system namespaces from all of your Rancher projects. Complete this task if you've assigned any of a cluster's system namespaces into a Rancher project. - -1. Log into the Rancher UI before upgrade. - -1. From the context menu, open the **local** cluster (or any of your other clusters). - -1. From the main menu, select **Project/Namespaces**. - -1. Find and select the following namespaces. Click **Move** and then choose **None** to move them out of your projects. Click **Move** again. - - >**Note:** Some or all of these namespaces may already be unassigned from all projects. - - - `kube-system` - - `kube-public` - - `cattle-system` - - `cattle-alerting`1 - - `cattle-logging`1 - - `cattle-pipeline`1 - - `ingress-nginx` - - >1 Only displays if this feature is enabled for the cluster. - -
Moving namespaces out of projects
- ![Moving Namespaces]({{}}/img/rancher/move-namespaces.png) - -1. Repeat these steps for each cluster where you've assigned system namespaces to projects. - -**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades). - -## Restoring Cluster Networking - -Reset the cluster nodes' network policies to restore connectivity. - ->**Prerequisites:** -> ->Download and setup [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -{{% tabs %}} -{{% tab "Kubernetes Install" %}} -1. From **Terminal**, change directories to your kubectl file that's generated during Rancher install, `kube_config_rancher-cluster.yml`. This file is usually in the directory where you ran RKE during Rancher installation. - -1. Before repairing networking, run the following two commands to make sure that your nodes have a status of `Ready` and that your cluster components are `Healthy`. - - ``` - kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes - - NAME STATUS ROLES AGE VERSION - 165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 - 165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 - 165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cs - - NAME STATUS MESSAGE ERROR - scheduler Healthy ok - controller-manager Healthy ok - etcd-0 Healthy {"health": "true"} - etcd-2 Healthy {"health": "true"} - etcd-1 Healthy {"health": "true"} - ``` - -1. Check the `networkPolicy` for all clusters by running the following command. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" - - ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION - c-59ptz custom - local local - - -1. Disable the `networkPolicy` for all clusters, still pointing toward your `kube_config_rancher-cluster.yml`. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster {} --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster local --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' - ``` - -1. 
Remove annotations for network policy for all clusters - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster {} "networking.management.cattle.io/enable-network-policy"="false" --overwrite - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster local "networking.management.cattle.io/enable-network-policy"="false" --overwrite - ``` - -1. Check the `networkPolicy` for all clusters again to make sure the policies have a status of `false `. - - kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" - - ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION - c-59ptz custom false false false - local local false false false - -1. Remove all network policies from all namespaces. Run this command for each cluster, using the kubeconfig generated by RKE. - - ``` - for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all; - done - ``` - -1. Remove all the projectnetworkpolicies created for the clusters, to make sure networkpolicies are not recreated. - - ``` - for cluster in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get clusters -o custom-columns=NAME:.metadata.name --no-headers); do - for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n $cluster -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml delete projectnetworkpolicy -n $project --all - done - done - ``` - - >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): - > - >``` - for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n local -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml -n $project delete projectnetworkpolicy --all; - done - ``` - -1. Wait a few minutes and then log into the Rancher UI. - - - If you can access Rancher, you're done, so you can skip the rest of the steps. - - If you still can't access Rancher, complete the steps below. - -1. Force your pods to recreate themselves by entering the following command. - - ``` - kubectl --kubeconfig kube_config_rancher-cluster.yml delete pods -n cattle-system --all - ``` - -1. Log into the Rancher UI and view your clusters. Created clusters will show errors from attempting to contact Rancher while it was unavailable. However, these errors should resolve automatically. - -{{% /tab %}} -{{% tab "Rancher Launched Kubernetes" %}} -
-If you can access Rancher, but one or more of the clusters that you launched using Rancher has no networking, you can repair them by moving them: - -- Using the cluster's [embedded kubectl shell]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/). -- By [downloading the cluster kubeconfig file and running it]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl) from your workstation. - - ``` - for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do - kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all; - done - ``` - -{{% /tab %}} -{{% /tabs %}} - - diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/_index.md deleted file mode 100644 index bc79e37d9a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Other Installation Methods -weight: 3 ---- - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -The Docker installation is for development and testing environments only. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -There is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/_index.md deleted file mode 100644 index bdc2faf239..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Air Gapped Helm CLI Install -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-installation/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/ ---- - -This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. - -For more information on each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/) - -Throughout the installation instructions, there will be _tabs_ for each installation option. 
- -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -# Installation Outline - -1. [Set up infrastructure and private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) -2. [Collect and publish images to your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) -3. [Set up a Kubernetes cluster (Skip this step for Docker installations)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -4. [Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) - -# Upgrades - -To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/_index.md deleted file mode 100644 index f94562e900..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ /dev/null @@ -1,358 +0,0 @@ ---- -title: 4. Install Rancher -weight: 400 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher - - /rancher/v2.0-v2.4/en/installation/air-gap/install-rancher - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-rancher/ ---- - -This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher in five parts: - -- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) -- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) -- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) -- [4. Install Rancher](#4-install-rancher) -- [5. For Rancher versions before v2.3.0, Configure System Charts](#5-for-rancher-versions-before-v2-3-0-configure-system-charts) - -# 1. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, install `helm` locally on a workstation that has internet access. 
Note: Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. - ```plain - helm fetch rancher-/rancher - ``` - - If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: - ```plain - helm fetch rancher-stable/rancher --version=v2.4.8 - ``` - -# 2. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -# 3. Render the Rancher Helm Template - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. - -### Option A: Default Self-Signed Certificate - -{{% accordion id="k8s-1" label="Click to expand" %}} - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - -1. From a system connected to the internet, add the cert-manager repo to Helm. - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v1.0.4 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - ```plain - helm template cert-manager ./cert-manager-v1.0.4.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml - ``` - -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. 
- - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. - - ```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -{{% /accordion %}} - -### Option B: Certificates From Files using Kubernetes Secrets - -{{% accordion id="k8s-2" label="Click to expand" %}} - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -{{% /accordion %}} - -# 4. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. 
- -### For Self-Signed Certificate Installs, Install Cert-manager - -{{% accordion id="install-cert-manager" label="Click to expand" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). -```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -### Install Rancher with kubectl - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` -**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -# 5. For Rancher versions before v2.3.0, Configure System Charts - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). - -# Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users who want to test out Rancher. - -Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** There is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. 
- -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). - -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/) - -Choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. 
-> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | - - - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -If you are installing Rancher v2.3.0+, the installation is complete. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/). - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md deleted file mode 100644 index 232e69ee60..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -title: '3. Install Kubernetes (Skip for Docker Installs)' -weight: 300 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-kube ---- - -> Skip this section if you are installing Rancher on a single node with Docker. - -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. - -For Rancher before v2.4, Rancher should be installed on an [RKE]({{}}/rke/latest/en/) (Rancher Kubernetes Engine) Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. - -In Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. The Rancher management server can only be run on a Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. - -The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. - -{{% tabs %}} -{{% tab "K3s" %}} - -In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. - -### Installation Outline - -1. [Prepare Images Directory](#1-prepare-images-directory) -2. [Create Registry YAML](#2-create-registry-yaml) -3. [Install K3s](#3-install-k3s) -4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) - -### 1. 
Prepare Images Directory -Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. - -Place the tar file in the `images` directory before starting K3s on each node, for example: - -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` - -### 2. Create Registry YAML -Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. - -The registries.yaml file should look like this before plugging in the necessary information: - -``` ---- -mirrors: - customreg: - endpoint: - - "https://ip-to-server:5000" -configs: - customreg: - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: - key_file: - ca_file: -``` - -Note, at this time only secure registries are supported with K3s (SSL with custom CA). - -For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) - -### 3. Install K3s - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. -Also obtain the K3s install script at https://get.k3s.io - -Place the binary in `/usr/local/bin` on each node. -Place the install script anywhere on each node, and name it `install.sh`. - -Install K3s on each server: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh -``` - -Install K3s on each agent: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh -``` - -Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. -The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` - ->**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -### 4. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. 
(The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### Note on Upgrading - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. -2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. -3. Restart the K3s service (if not restarted automatically by installer). -{{% /tab %}} -{{% tab "RKE" %}} -We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. - -### 1. Install RKE - -Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) - -### 2. Create an RKE Config File - -From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. - -This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts) you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -
**RKE Options**
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | -| `user` | ✓ | A user that can run Docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### 3. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### 4. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ -{{% /tab %}} -{{% /tabs %}} - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md deleted file mode 100644 index ea5b7142c3..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ /dev/null @@ -1,298 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/prepare-private-registry/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/prepare-private-registry/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ ---- - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. - -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. - -> **Prerequisites:** -> -> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. -> -> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) -2. 
[Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) -3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) -4. [Populate the private registry](#4-populate-the-private-registry) - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### 1. Find the required assets for your Rancher version - -1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### 2. Collect the cert-manager image - -> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v1.0.4 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### 4. 
Populate the private registry - -Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -_Available as of v2.3.0_ - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -# Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -1. Find the required assets for your Rancher version -2. Save the images to your Windows Server workstation -3. Prepare the Docker daemon -4. Populate the private registry - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|----------------------------|------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - ```plain - ./rancher-save-images.ps1 - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - - - -### 3. 
Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... - "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - - - -### 4. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -# Linux Steps - -The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. - -1. Find the required assets for your Rancher version -2. Collect all the required images -3. Save the images to your Linux workstation -4. Populate the private registry - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -|----------------------------| -------------------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. 
Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - - - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - -**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - - - -### 4. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. - -The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - -```plain -docker login -``` - -1. Make `rancher-load-images.sh` an executable: - -``` -chmod +x rancher-load-images.sh -``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - -```plain -./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry -``` - - -{{% /tab %}} -{{% /tabs %}} - -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md deleted file mode 100644 index efd93d093c..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: '1. 
Set up Infrastructure and Private Registry' -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/provision-host ---- - -In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). - -An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. - -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/) - -{{% tabs %}} -{{% tab "K3s" %}} -We recommend setting up the following infrastructure for a high-availability installation: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set up one of the following external databases: - -* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) -* [MySQL](https://www.mysql.com/) (certified against version 5.7) -* [etcd](https://etcd.io/) (certified against version 3.3.15) - -When you install Kubernetes, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/rds) for setting up a MySQL database on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. 
This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 5. 
Set up a Private Docker Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) -{{% /tab %}} -{{% tab "RKE" %}} - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. 
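To make the traffic path concrete: with the layer-4 approach recommended below, the load balancer only forwards TCP/80 and TCP/443 to the nodes, and the NGINX Ingress controller on the nodes handles the rest. The following is a minimal sketch of such a configuration using NGINX's `stream` module; the node IPs are placeholders (the same example IPs used for the RKE cluster configuration elsewhere in this guide), and the NGINX load balancer guide linked later in this section covers a complete setup.

```
worker_processes 4;

events {
    worker_connections 8192;
}

stream {
    # Forward plain HTTP to the cluster nodes; the Ingress controller
    # redirects it to HTTPS.
    upstream rancher_http {
        server 10.10.3.187:80;
        server 10.10.3.254:80;
        server 10.10.3.89:80;
    }
    server {
        listen 80;
        proxy_pass rancher_http;
    }

    # Forward TLS traffic untouched; SSL/TLS terminates on the nodes.
    upstream rancher_https {
        server 10.10.3.187:443;
        server 10.10.3.254:443;
        server 10.10.3.89:443;
    }
    server {
        listen 443;
        proxy_pass rancher_https;
    }
}
```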
- -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 4. Set up a Private Docker Registry - -Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file]({{}}/rke/latest/en/config-options/private-registries/) with details from this registry. 
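As a preview of what that configuration looks like, the `private_registries` section of the RKE cluster configuration file resembles the following sketch; the registry URL and credentials are placeholders for your own values.

```yaml
# Excerpt from the RKE cluster configuration created in a later step.
private_registries:
  - url: registry.example.com:5000   # your private registry URL and port
    user: rancher                    # registry username
    password: '*********'            # registry password
    is_default: true                 # pull all system images from this registry by default
```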
- -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) - -{{% /tab %}} -{{% tab "Docker" %}} -> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. -> -> For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -### 1. Set up a Linux Node - -This host will be disconnected from the Internet, but needs to be able to connect to your private registry. - -Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up a Private Docker Registry - -Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/_index.md deleted file mode 100644 index a40a5a02db..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Installing Rancher behind an HTTP Proxy -weight: 4 ---- - -In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. - -Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/). - -# Installation Outline - -1. [Set up infrastructure]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/prepare-nodes/) -2. [Set up a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) -3. 
[Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/install-rancher/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md deleted file mode 100644 index c0f70adda0..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 3. Install Rancher -weight: 300 ---- - -Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use-case you will likely use Let's Encrypt or provide your own certificate. - -> **Note:** These installation instructions assume you are using Helm 3. - -### Install cert-manager - -Add the cert-manager helm repository: - -``` -helm repo add jetstack https://charts.jetstack.io -``` - -Create a namespace for cert-manager: - -``` -kubectl create namespace cert-manager -``` - -Install the CustomResourceDefinitions of cert-manager: - -``` -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.2/cert-manager.crds.yaml -``` - -And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers: - -``` -helm upgrade --install cert-manager jetstack/cert-manager \ - --namespace cert-manager --version v0.15.2 \ - --set http_proxy=http://${proxy_host} \ - --set https_proxy=http://${proxy_host} \ - --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local -``` - -Now you should wait until cert-manager is finished starting up: - -``` -kubectl rollout status deployment -n cert-manager cert-manager -kubectl rollout status deployment -n cert-manager cert-manager-webhook -``` - -### Install Rancher - -Next you can install Rancher itself. First add the helm repository: - -``` -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest -``` - -Create a namespace: - -``` -kubectl create namespace cattle-system -``` - -And install Rancher with Helm. Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata: - -``` -helm upgrade --install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.example.com \ - --set proxy=http://${proxy_host} -``` - -After waiting for the deployment to finish: - -``` -kubectl rollout status deployment -n cattle-system rancher -``` - -You can now navigate to `https://rancher.example.com` and start using Rancher. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.0-v2.4/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. 
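If the Rancher UI comes up but Rancher cannot reach external endpoints (for example, application catalogs or Kubernetes version metadata), one quick check, shown here as a sketch rather than an official step, is to confirm that the proxy settings were actually applied to the Rancher deployment. The proxy values passed to Helm should show up as environment variables on the Rancher container; if they are missing or wrong, re-run the `helm upgrade --install` command above with corrected values.

```
# List the environment variables on the Rancher container; the proxy
# settings (HTTP_PROXY, HTTPS_PROXY, NO_PROXY) should appear here.
kubectl -n cattle-system get deployment rancher \
  -o jsonpath='{.spec.template.spec.containers[0].env}'
```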
- -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md deleted file mode 100644 index 851e501a4f..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md +++ /dev/null @@ -1,151 +0,0 @@ ---- -title: '2. Install Kubernetes' -weight: 200 ---- - -Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in. - -### Installing Docker - -First, you have to install Docker and setup the HTTP proxy on all three Linux nodes. For this perform the following steps on all three nodes. - -For convenience export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell: - -``` -export proxy_host="10.0.0.5:8888" -export HTTP_PROXY=http://${proxy_host} -export HTTPS_PROXY=http://${proxy_host} -export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16 -``` - -Next configure apt to use this proxy when installing packages. If you are not using Ubuntu, you have to adapt this step accordingly: - -``` -cat <<'EOF' | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null -Acquire::http::Proxy "http://${proxy_host}/"; -Acquire::https::Proxy "http://${proxy_host}/"; -EOF -``` - -Now you can install Docker: - -``` -curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh -``` - -Then ensure that your current user is able to access the Docker daemon without sudo: - -``` -sudo usermod -aG docker YOUR_USERNAME -``` - -And configure the Docker daemon to use the proxy to pull images: - -``` -sudo mkdir -p /etc/systemd/system/docker.service.d -cat <<'EOF' | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null -[Service] -Environment="HTTP_PROXY=http://${proxy_host}" -Environment="HTTPS_PROXY=http://${proxy_host}" -Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" -EOF -``` - -To apply the configuration, restart the Docker daemon: - -``` -sudo systemctl daemon-reload -sudo systemctl restart docker -``` - -### Creating the RKE Cluster - -You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: - -* [RKE CLI binary]({{}}/rke/latest/en/installation/#download-the-rke-binary) - -``` -sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 -sudo chmod +x /usr/local/bin/rke -``` - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -``` -curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" -chmod +x ./kubectl -sudo mv ./kubectl 
/usr/local/bin/kubectl -``` - -* [helm](https://helm.sh/docs/intro/install/) - -``` -curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -chmod +x get_helm.sh -sudo ./get_helm.sh -``` - -Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -``` -nodes: - - address: 10.0.1.200 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.201 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.202 - user: ubuntu - role: [controlplane,worker,etcd] - -services: - etcd: - backup_config: - interval_hours: 12 - retention: 6 -``` - -After that, you can create the Kubernetes cluster by running: - -``` -rke up --config rancher-cluster.yaml -``` - -RKE creates a state file called `rancher-cluster.rkestate`, this is needed if you want to perform updates, modify your cluster configuration or restore it from a backup. It also creates a `kube_config_rancher-cluster.yaml` file, that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system. - -To have a look at your cluster run: - -``` -export KUBECONFIG=kube_config_rancher-cluster.yaml -kubectl cluster-info -kubectl get pods --all-namespaces -``` - -You can also verify that your external load balancer works, and the DNS entry is set up correctly. If you send a request to either, you should receive HTTP 404 response from the ingress controller: - -``` -$ curl 10.0.1.100 -default backend - 404 -$ curl rancher.example.com -default backend - 404 -``` - -### Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md deleted file mode 100644 index eb8ab34b4e..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: '1. Set up Infrastructure' -weight: 100 ---- - -In this section, you will provision the underlying infrastructure for your Rancher management server with internete access through a HTTP proxy. 
- -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -These hosts will connect to the internet through an HTTP proxy. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. 
Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - - -### [Next: Set up a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/_index.md deleted file mode 100644 index 22dab597f2..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/_index.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Installing Rancher on a Single Node Using Docker -description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/single-node-install/ - - /rancher/v2.0-v2.4/en/installation/single-node - - /rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node ---- - -Rancher can be installed by running a single Docker container. - -In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. - -> **Want to use an external load balancer?** -> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb) instead. - -A Docker installation of Rancher is recommended only for development and testing purposes. 
- -For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -# Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -# 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements) to launch your Rancher server. - -# 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) -> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -> - Record all transactions with the Rancher API? See [API Auditing](./advanced/#api-audit-log) - -Choose from the following options: - -- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate) -- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed) -- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca) -- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate) - -### Option A: Default Rancher-generated Self-signed Certificate - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the minimum installation command below. - - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest -``` - -### Option B: Bring Your Own Certificate, Self-signed -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. 
- -| Placeholder | Description | -| ------------------- | --------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest -``` - -### Option C: Bring Your Own Certificate, Signed by a Recognized CA - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After obtaining your certificate, run the Docker command below. - -- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. -- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -| Placeholder | Description | -| ------------------- | ----------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | - - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - rancher/rancher:latest \ - --no-cacerts -``` - -### Option D: Let's Encrypt Certificate - -> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. - -> **Prerequisites:** -> -> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. -> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). -> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. 
- -| Placeholder | Description | -| ----------------- | ------------------- | -| `` | Your domain address | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest \ - --acme-domain -``` - -## Advanced Options - -When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: - -- Custom CA Certificate -- API Audit Log -- TLS Settings -- Air Gap -- Persistent Data -- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -Refer to [this page](./advanced) for details. - -## Troubleshooting - -Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. - -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/_index.md deleted file mode 100644 index a924970a46..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/_index.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Advanced Options for Docker Installs -weight: 5 ---- - -When installing Rancher, there are several [advanced options]({{}}/rancher/v2.0-v2.4/en/installation/options/) that can be enabled: - -- [Custom CA Certificate](#custom-ca-certificate) -- [API Audit Log](#api-audit-log) -- [TLS Settings](#tls-settings) -- [Air Gap](#air-gap) -- [Persistent Data](#persistent-data) -- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) - -### Custom CA Certificate - -If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. - -Use the command example to start a Rancher container with your private CA certificates mounted. - -- The volume flag (`-v`) should specify the host directory containing the CA root certificates. -- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. -- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. -- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. - -The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /host/certs:/container/certs \ - -e SSL_CERT_DIR="/container/certs" \ - rancher/rancher:latest -``` - -### API Audit Log - -The API Audit Log records all the user and system transactions made through Rancher server. 
- -The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. - -See [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) for more information and options. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /var/log/rancher/auditlog:/var/log/auditlog \ - -e AUDIT_LEVEL=1 \ - rancher/rancher:latest -``` - -### TLS settings - -_Available as of v2.1.7_ - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_TLS_MIN_VERSION="1.0" \ - rancher/rancher:latest -``` - -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. - -### Air Gap - -If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - rancher/rancher:latest -``` - -### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. - -If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. - -Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. - -To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: - -``` -docker run -d --restart=unless-stopped \ - -p 8080:80 -p 8443:443 \ - rancher/rancher:latest -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/_index.md deleted file mode 100644 index d21818b4c6..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: HTTP Proxy Configuration -weight: 251 -aliases: - - /rancher/v2.0-v2.4/en/installation/proxy-configuration/ - - /rancher/v2.0-v2.4/en/installation/single-node/proxy ---- - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. 
- -Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. - -| Environment variable | Purpose | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | -| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | -| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | -| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | - -> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Docker Installation - -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.0-v2.4/en/installation/single-node-install/) are: - -- `localhost` -- `127.0.0.1` -- `0.0.0.0` -- `10.0.0.0/8` -- `cattle-system.svc` -- `.svc` -- `.cluster.local` - -The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e HTTP_PROXY="http://192.168.10.1:3128" \ - -e HTTPS_PROXY="http://192.168.10.1:3128" \ - -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ - rancher/rancher:latest -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md deleted file mode 100644 index 2d11611926..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Rolling Back Rancher Installed with Docker -weight: 1015 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/single-node-rollbacks - - /rancher/v2.0-v2.4/en/upgrades/rollbacks/single-node-rollbacks ---- - -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade). Rolling back restores: - -- Your previous version of Rancher. -- Your data backup created before upgrade. - -## Before You Start - -During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker pull rancher/rancher: -``` - -In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. 
- -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | ------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that the backup is for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Rolling Back Rancher - -If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. - ->**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. - - For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. - - ``` - docker pull rancher/rancher: - ``` - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - You can obtain the name for your Rancher container by entering `docker ps`. - -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). - -1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. - - ``` - docker run --volumes-from rancher-data \ - -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ - && tar zxvf /backup/rancher-data-backup--.tar.gz" - ``` - -1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. - ``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher: - ``` - - >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. - -**Result:** Rancher is rolled back to its version and data state before upgrade. 
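As a quick cross-check of the result, the running container's image tag and startup logs can be inspected from the same shell. This is only an optional sketch, not part of the documented procedure; `<RESTORED_CONTAINER_NAME>` is an illustrative placeholder for the name printed by `docker ps`.

```
# The IMAGE column should show the rancher/rancher tag you rolled back to
docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}'

# Tail the logs of the restored container to make sure it started cleanly
docker logs --tail 20 <RESTORED_CONTAINER_NAME>
```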
diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md b/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md deleted file mode 100644 index 7e7e935097..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md +++ /dev/null @@ -1,351 +0,0 @@ ---- -title: Upgrading Rancher Installed with Docker -weight: 1010 -aliases: - - /rancher/v2.0-v2.4/en/upgrades/single-node-upgrade/ - - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-air-gap-upgrade - - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node - - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/ - - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/ ---- - -The following instructions will guide you through upgrading a Rancher server that was installed with Docker. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. -- **For [air gap installs only,]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Placeholder Review - -During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). - -Here's an **example** of a command with a placeholder: - -``` -docker stop -``` - -In this command, `` is the name of your Rancher container. - -# Get Data for Upgrade Commands - -To obtain the data to replace the placeholders, run: - -``` -docker ps -``` - -Write down or copy this information before starting the upgrade. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | -| `` | `2018-12-19` | The date that the data container or backup was created. | -
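If you prefer to capture just the values needed for the placeholders, `docker ps` accepts a Go-template format string. This is an optional shortcut rather than part of the original steps; it prints the same information shown in the screenshot referenced above.

```
# Print only the container name and image tag of each running container;
# the Rancher container's row gives the name and version to substitute
# for the placeholders in the commands that follow
docker ps --format 'table {{.Names}}\t{{.Image}}'
```

It is worth saving this output alongside the backup tarball, since the same values are needed again if you have to roll back.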
- -You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -# Upgrade Outline - -During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: - -- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) -- [2. Create a backup tarball](#2-create-a-backup-tarball) -- [3. Pull the new Docker image](#3-pull-the-new-docker-image) -- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) -- [5. Verify the Upgrade](#5-verify-the-upgrade) -- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) - -# 1. Create a copy of the data from your Rancher server container - -1. Using a remote Terminal connection, log into the node running your Rancher server. - -1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data rancher/rancher: - ``` - -# 2. Create a backup tarball - -1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). - - This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. - - - ``` - docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** When you enter this command, a series of commands should run. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - - ``` - [rancher@ip-10-0-0-50 ~]$ ls - rancher-data-backup-v2.1.3-20181219.tar.gz - ``` - -1. Move your backup tarball to a safe location external from your Rancher server. - -# 3. Pull the New Docker Image - -Pull the image of the Rancher version that you want to upgrade to. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker pull rancher/rancher: -``` - -# 4. Start the New Rancher Server Container - -Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. - ->**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
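If you no longer remember which environment variables or bind mounts the original container was started with, they can be read back from the stopped container before starting the new one. This is an optional helper sketch; `<RANCHER_CONTAINER_NAME>` is a placeholder for the container name shown by `docker ps -a`, and the output also includes defaults baked into the image.

```
# Environment variables the old container was started with (pass the relevant
# ones to the new container with -e); the list includes image defaults such as PATH
docker inspect --format '{{range .Config.Env}}{{println .}}{{end}}' <RANCHER_CONTAINER_NAME>

# Bind mounts and volumes of the old container (source followed by destination)
docker inspect --format '{{range .Mounts}}{{println .Source .Destination}}{{end}}' <RANCHER_CONTAINER_NAME>
```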
- -If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/proxy/) - -If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -To see the command to use when starting the new Rancher server container, choose from the following options: - -- Docker Upgrade -- Docker Upgrade for Air Gap Installs - -{{% tabs %}} -{{% tab "Docker Upgrade" %}} - -Select which option you had installed Rancher server - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -Placeholder | Description -------------|------------- - `` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher: -``` - - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. 
Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - rancher/rancher: \ - --no-cacerts -``` - -{{% /accordion %}} - -### Option D: Let's Encrypt Certificate - -{{% accordion id="option-d" label="Click to expand" %}} - ->**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. - ->**Reminder of the Cert Prerequisites:** -> ->- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). ->- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. -`` | The domain address that you had originally started with - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher: \ - --acme-domain -``` - -{{% /accordion %}} - -{{% /tab %}} -{{% tab "Docker Air Gap Upgrade" %}} - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/) - -When starting the new Rancher server container, choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | Your private registry URL and port. 
-`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to to upgrade to. - -``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. - - >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/resources/chart-options/) that you want to upgrade to. - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. 
- -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% /tab %}} -{{% /tabs %}} - -**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. - -# 5. Verify the Upgrade - -Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. - ->**Having network issues in your user clusters following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/#restoring-cluster-networking). - - -# 6. Clean up Your Old Rancher Server Container - -Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. - -# Rolling Back - -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/single-node-rollbacks/). diff --git a/content/rancher/v2.0-v2.4/en/installation/requirements/_index.md b/content/rancher/v2.0-v2.4/en/installation/requirements/_index.md deleted file mode 100644 index fbf442d663..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/requirements/_index.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Installation Requirements -description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup -weight: 1 ---- - -This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. - -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/) which will run your apps and services. - -Make sure the node(s) for the Rancher server fulfill the following requirements: - -- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) - - [CPU and Memory](#cpu-and-memory) - - [CPU and Memory for Rancher before v2.4.0](#cpu-and-memory-for-rancher-before-v2-4-0) - - [Disks](#disks) -- [Networking Requirements](#networking-requirements) - - [Node IP Addresses](#node-ip-addresses) - - [Port Requirements](#port-requirements) - -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.0-v2.4/en/best-practices/deployment-types/) - -The Rancher UI works best in Firefox or Chrome. - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution. 
- -For details on which OS, Docker, and Kubernetes versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. - -Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.0-v2.4/en/installation/options/arm64-platform/) - -### RKE Specific Requirements - -For the container runtime, RKE should work with any modern Docker version. - -### K3s Specific Requirements - -For the container runtime, K3s should work with any modern version of Docker or containerd. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. - -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. - - -### Installing Docker - -Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.0-v2.4/en/installation/requirements/installing-docker) to install Docker with one command. -# Hardware Requirements - -This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. - -### CPU and Memory - -Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. - -{{% tabs %}} -{{% tab "RKE" %}} - -These requirements apply to each host in an [RKE Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) - -Performance increased in Rancher v2.4.0. For the requirements of Rancher before v2.4.0, refer to [this section.](#cpu-and-memory-for-rancher-before-v2-4-0) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | ---------- | ------------ | -------| ------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | - -Every use case and environment is different. 
Please [contact Rancher](https://rancher.com/contact/) to review yours. - -{{% /tab %}} - -{{% tab "K3s" %}} - -These requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | -| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | - -Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. - -{{% /tab %}} - -{{% tab "Docker" %}} - -These requirements apply to a host with a [single-node]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) installation of Rancher. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 1 | 4 GB | -| Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -{{% /tab %}} -{{% /tabs %}} - -### CPU and Memory for Rancher before v2.4.0 - -{{% accordion label="Click to expand" %}} -These requirements apply to installing Rancher on an RKE Kubernetes cluster before Rancher v2.4.0: - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | -| Small | Up to 5 | Up to 50 | 2 | 8 GB | -| Medium | Up to 15 | Up to 200 | 4 | 16 GB | -| Large | Up to 50 | Up to 500 | 8 | 32 GB | -| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | -| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | -{{% /accordion %}} - -### Disks - -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. - -# Networking Requirements - -This section describes the networking requirements for the node(s) where the Rancher server is installed. - -### Node IP Addresses - -Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -### Port Requirements - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. 
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/requirements/ports/_index.md b/content/rancher/v2.0-v2.4/en/installation/requirements/ports/_index.md deleted file mode 100644 index 682497174d..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/requirements/ports/_index.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -title: Port Requirements -description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes -weight: 300 ---- - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. - -- [Rancher Nodes](#rancher-nodes) - - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) - - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) - - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) -- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) - - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) - - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) - - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) - - [Ports for Imported Clusters](#ports-for-imported-clusters) -- [Other Port Considerations](#other-port-considerations) - - [Commonly Used Ports](#commonly-used-ports) - - [Local Node Traffic](#local-node-traffic) - - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) - - [Opening SUSE Linux Ports](#opening-suse-linux-ports) - -# Rancher Nodes - -The following table lists the ports that need to be open to and from nodes that are running the Rancher server. - -The port requirements differ based on the Rancher server architecture. - -> **Notes:** -> -> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -> - Kubernetes recommends TCP 30000-32767 for node port services. -> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. - -### Ports for Rancher Server Nodes on K3s - -{{% accordion label="Click to expand" %}} - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 |
  • server nodes
  • agent nodes
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | -| TCP | 6443 | K3s server nodes | Kubernetes API -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. -| TCP | 10250 | K3s server and agent nodes | kubelet - -
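Before moving on to the outbound rules, it can be useful to spot-check that the inbound ports above are actually reachable from the other nodes. This is a minimal sketch and not part of the original tables; the address shown is a placeholder for one of your K3s server nodes, and UDP probes with `nc` are only indicative:

```
# Run from another node; replace 10.10.3.187 with a K3s server node address
nc -zv 10.10.3.187 6443      # Kubernetes API
nc -zvu 10.10.3.187 8472     # Flannel VXLAN (UDP, result is indicative only)
nc -zv 10.10.3.187 10250     # kubelet / metrics server
```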
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RKE - -{{% accordion label="Click to expand" %}} - -Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. - -The following tables break down the port requirements for traffic between the Rancher nodes: - -
Rules for traffic between Rancher nodes
- -| Protocol | Port | Description | -|-----|-----|----------------| -| TCP | 443 | Rancher agents | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| TCP | 6443 | Kubernetes apiserver | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 10250 | kubelet | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Nodes
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | -| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | -| TCP | 443 |
  • Load Balancer/Reverse Proxy
  • IPs of all cluster nodes and other API/UI clients
| HTTPS traffic to Rancher UI/API | -| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | - -
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -|-----|-----|----------------|---| -| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | -| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | -| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | -| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | - -{{% /accordion %}} - -### Ports for Rancher Server in Docker - -{{% accordion label="Click to expand" %}} - -The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: - -
Inbound Rules for Rancher Node
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used -| TCP | 443 |
  • hosted/imported Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl - -
Outbound Rules for Rancher Node
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -# Downstream Kubernetes Cluster Nodes - -Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. - -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning). - -
Port Requirements for the Rancher Management Plane
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - ->**Tip:** -> ->If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. - -### Ports for Rancher Launched Kubernetes Clusters using Node Pools - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). - ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -{{% /accordion %}} - -### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -{{% /accordion %}} - -### Ports for Hosted Kubernetes Clusters - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - -### Ports for Imported Clusters - - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [imported clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - - -# Other Port Considerations - -### Commonly Used Ports - -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -{{% include file="/rancher/v2.0-v2.4/en/installation/requirements/ports/common-ports-table" %}} - ----- - -### Local Node Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. -- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. - -### Rancher AWS EC2 Security Group - -When using the [AWS EC2 node driver]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | - -### Opening SUSE Linux Ports - -SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, - -1. SSH into the instance. -1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: - ``` - FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" - FW_SERVICES_EXT_UDP="8472 30000:32767" - FW_ROUTE=yes - ``` -1. Restart the firewall with the new ports: - ``` - SuSEfirewall2 - ``` - -**Result:** The node has the open ports required to be added to a custom cluster. diff --git a/content/rancher/v2.0-v2.4/en/installation/requirements/ports/common-ports-table/index.md b/content/rancher/v2.0-v2.4/en/installation/requirements/ports/common-ports-table/index.md deleted file mode 100644 index 86bb7177bb..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/requirements/ports/common-ports-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Protocol | Port | Description | -|:--------: |:----------------: |---------------------------------------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 179 | Calico BGP Port | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 8443 | Rancher webhook | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | -| TCP | 9443 | Rancher webhook | -| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | kubelet API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/_index.md deleted file mode 100644 index bfdf93ba6b..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Resources -weight: 5 -aliases: -- /rancher/v2.0-v2.4/en/installation/options ---- - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Advanced Options - -When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: - -| Advanced Option | Available as of | -| ----------------------------------------------------------------------------------------------------------------------- | --------------- | -| [Custom CA Certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/) | v2.0.0 | -| [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/options/api-audit-log/) | v2.0.0 | -| [TLS Settings]({{}}/rancher/v2.0-v2.4/en/installation/options/tls-settings/) | v2.1.7 | -| [etcd configuration]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/) | v2.2.0 | -| [Local System Charts for Air Gap Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md deleted file mode 100644 index 40243da33a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Installing Rancher in an Air Gapped Environment with Helm 2 -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-installation/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/ - - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2 - - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/ ---- - -> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. -> -> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. -> -> This section provides a copy of the older instructions for installing Rancher on a Kubernetes cluster using Helm 2 in an air air gap environment, and it is intended to be used if upgrading to Helm 3 is not feasible. 
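If you do later migrate, the Helm project's `helm-2to3` plugin handles the conversion. The sketch below is a hedged outline only, not part of the original instructions, and assumes `helm` already points at a Helm 3 binary and that the Rancher release is named `rancher`:

```
# Install the migration plugin into the Helm 3 client
helm plugin install https://github.com/helm/helm-2to3

# Copy Helm 2 configuration (repositories, plugins) into Helm 3
helm 2to3 move config

# Convert the existing Helm 2 release of Rancher, previewing the changes first
helm 2to3 convert rancher --dry-run
helm 2to3 convert rancher

# Remove Helm 2 configuration and release data once everything is verified
helm 2to3 cleanup --dry-run
```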
- -This section is about installations of Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -Throughout the installations instructions, there will be _tabs_ for either a high availability Kubernetes installation or a single-node Docker installation. - -### Air Gapped Kubernetes Installations - -This section covers how to install Rancher on a Kubernetes cluster in an air gapped environment. - -A Kubernetes installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -### Air Gapped Docker Installations - -These instructions also cover how to install Rancher on a single node in an air gapped environment. - -The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -# Installation Outline - -- [1. Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) -- [2. Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) -- [3. Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -- [4. Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md deleted file mode 100644 index 5ba2568932..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/install-rancher/_index.md +++ /dev/null @@ -1,335 +0,0 @@ ---- -title: 4. 
Install Rancher -weight: 400 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher - - /rancher/v2.0-v2.4/en/installation/air-gap/install-rancher - - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/install-rancher - - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/ ---- - -This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes Installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher in five parts: - -- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) -- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) -- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) -- [D. Install Rancher](#d-install-rancher) -- [E. For Rancher versions before v2.3.0, Configure System Charts](#e-for-rancher-versions-before-v2-3-0-configure-system-charts) - -### A. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - ```plain - helm init -c - ``` - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version/). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. -```plain -helm fetch rancher-/rancher -``` - -> Want additional options? See the Rancher [Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options). - -### B. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination). 
- -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -### C. Render the Rancher Helm Template - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -Based on the choice your made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. - -{{% accordion id="self-signed" label="Option A-Default Self-Signed Certificate" %}} - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - -1. From a system connected to the internet, add the cert-manager repo to Helm. - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.14.2 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - ```plain - helm template ./cert-manager-v0.14.2.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml - ``` -1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. 
- - - Placeholder | Description - ------------|------------- - `` | The version number of the output tarball. - `` | The DNS name you pointed at your load balancer. - `` | The DNS name for your private registry. - `` | Cert-manager version running on k8s cluster. - - ```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -{{% /accordion %}} - -{{% accordion id="secret" label="Option B: Certificates From Files using Kubernetes Secrets" %}} - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts -``` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -{{% /accordion %}} - -### D. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -{{% accordion id="install-cert-manager" label="Self-Signed Certificate Installs - Install Cert-manager" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). 
-```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - -> **Important:** -> If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false flag to your kubectl apply command above else you will receive a validation error relating to the x-kubernetes-preserve-unknown-fields field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -Install Rancher: - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` - -**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. - -### E. For Rancher versions before v2.3.0, Configure System Charts - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/). - -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users that are wanting to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. 
These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#additional-trusted-cas). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). - -- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/) - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Default Self-Signed Certificate" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Self-Signed" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. 
- -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} -{{% accordion id="option-c" label="Option C-Bring Your Own Certificate: Signed by Recognized CA" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.0-v2.4/en/installation/options/server-tags/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts - /rancher/rancher: -``` - -{{% /accordion %}} - -If you are installing Rancher v2.3.0+, the installation is complete. - -If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts]({{}}/rancher/v2.0-v2.4/en/installation/options/local-system-charts/). 
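Whichever certificate option you chose above, you can confirm that the container started cleanly before continuing. This is a minimal sketch, not part of the original steps; `<container-id>` and `<rancher-hostname>` are placeholders for your own values:

```
# List running containers and note the ID of the Rancher container
docker ps

# Follow the container logs and watch for errors during startup
docker logs -f <container-id>

# From a machine that can reach the host, check the Rancher healthcheck endpoint
curl --insecure https://<rancher-hostname>/ping
```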
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md deleted file mode 100644 index afac79574b..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: '3. Install Kubernetes with RKE (Kubernetes Installs Only)' -weight: 300 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-kube - - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/launch-kubernetes - - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/ ---- - -This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. - -Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE]({{}}/rke/latest/en/installation/) and create a RKE config file. - -- [A. Create an RKE Config File](#a-create-an-rke-config-file) -- [B. Run RKE](#b-run-rke) -- [C. Save Your Files](#c-save-your-files) - -### A. Create an RKE Config File - -From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts) you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -
RKE Options
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gap network. | -| `user` | ✓ | A user that can run docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### B. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### C. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md deleted file mode 100644 index 75e024e218..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/_index.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/prepare-private-registry/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/prepare-private-registry/ - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/populate-private-registry - - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/ ---- - -> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. -> -> **Note:** Populating the private registry with images is the same process for HA and Docker installations, the differences in this section is based on whether or not you are planning to provision a Windows cluster or not. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) or launch any [tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed for a Windows cluster. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -A. Find the required assets for your Rancher version
-B. Collect all the required images
-C. Save the images to your workstation
-D. Populate the private registry - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets*.* - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### B. Collect all the required images (For Kubernetes Installs using Rancher Generated Self-Signed Certificate) - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### C. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### D. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. 
Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -_Available as of v2.3.0_ - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -### Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -A. Find the required assets for your Rancher version
-B. Save the images to your Windows Server workstation
-C. Prepare the Docker daemon
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Windows Images into the Private Registry"%}} - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|------------------------|-------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - -### B. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - - ```plain - ./rancher-save-images.ps1 - ``` - - **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - -### C. Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... - "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - -### D. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -{{% /accordion %}} - -### Linux Steps - -The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. 
These steps are different from the Linux-only steps, as the Linux images that are pushed will actually be manifests that support both Windows and Linux images. - -A. Find the required assets for your Rancher version
-B. Collect all the required images
-C. Save the images to your Linux workstation
-D. Populate the private registry - -{{% accordion label="Collecting and Populating Linux Images into the Private Registry" %}} - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -### A. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -|----------------------------|------| -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### B. Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - - 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.14.2 - helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt - ``` - - 2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### C. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. 
When the process completes, a tarball named `rancher-images.tar.gz` will be created in your current directory. Check that the tarball is present in that directory. - -### D. Populate the private registry - -Move the images in `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh` script to load the images. The `rancher-images.txt` and `rancher-windows-images.txt` image lists are expected to be in the same directory on the workstation where you run the `rancher-load-images.sh` script. - -1. Log into your private registry if required: - ```plain - docker login - ``` - -1. Make `rancher-load-images.sh` executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag, and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry - ``` - -{{% /accordion %}} - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next: Docker Installs - Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md deleted file mode 100644 index 71c94aecb8..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/_index.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: '1. Prepare your Node(s)' -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/provision-host - - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/prepare-nodes - - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/ ---- - -This section describes how to prepare your node(s) to install Rancher in an air gapped environment. An air gapped environment is one where the Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. - -# Prerequisites - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -### OS, Docker, Hardware, and Networking - -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -### Private Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). - -### CLI Tools - -The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, CLI for building Kubernetes clusters.
-- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -{{% /tab %}} -{{% tab "Docker Install" %}} - -### OS, Docker, Hardware, and Networking - -Make sure that your node(s) fulfill the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -### Private Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). -{{% /tab %}} -{{% /tabs %}} - -# Set up Infrastructure - -{{% tabs %}} -{{% tab "Kubernetes Install (Recommended)" %}} - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
- -![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -### A. Provision three air gapped Linux hosts according to our requirements - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -### B. Set up your Load Balancer - -When setting up the Kubernetes cluster that will run the Rancher server components, an Ingress controller pod will be deployed on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. - -You will need to configure a load balancer as a basic Layer 4 TCP forwarder to direct traffic to these ingress controller pods. The exact configuration will vary depending on your environment. - -> **Important:** -> Only use this load balancer (i.e, the `local` cluster Ingress) to load balance the Rancher server. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. - -**Load Balancer Configuration Samples:** - -- For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx) -- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb) - -{{% /tab %}} -{{% tab "Docker Install" %}} - -The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation. - -Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. - -### A. Provision a single, air gapped Linux host according to our Requirements - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -View hardware and software requirements for each of your cluster nodes in [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). 
- -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/api-audit-log/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/api-audit-log/_index.md deleted file mode 100644 index 0ed83fad70..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/api-audit-log/_index.md +++ /dev/null @@ -1,569 +0,0 @@ ---- -title: Enabling the API Audit Log to Record System Events -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/api-audit-log/ - - /rancher/v2.0-v2.4/en/installation/api-auditing ---- - -You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. - -You can enable API Auditing during Rancher installation or upgrade. - -## Enabling API Audit Log - -The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. - -- [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -- [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) - -## API Audit Log Options - -The usage below defines rules about what the audit log should record and what data it should include: - -| Parameter | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.
`2` - Log event metadata and request body.
`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | -| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.

Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| -| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. | -| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | -| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | - -
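As a quick reference, the sketch below shows one way these variables might be passed to a Docker-installed Rancher server. It is a minimal example only: the host path `/opt/rancher/auditlog` and the image tag are placeholder values, and the Docker Install and Kubernetes Install pages linked above remain the authoritative instructions.

```bash
# Minimal sketch: enable metadata-level auditing and keep the audit log on the host.
# The host path and image tag below are example values, not requirements.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e AUDIT_LEVEL=1 \
  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
  -e AUDIT_LOG_MAXAGE=20 \
  -e AUDIT_LOG_MAXBACKUP=20 \
  -e AUDIT_LOG_MAXSIZE=100 \
  -v /opt/rancher/auditlog:/var/log/auditlog \
  rancher/rancher:latest
```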
- -### Audit Log Levels - -The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. - -| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | -| --------------------- | ---------------- | ------------ | ----------------- | ------------- | -| `0` | | | | | -| `1` | ✓ | | | | -| `2` | ✓ | ✓ | | | -| `3` | ✓ | ✓ | ✓ | ✓ | - -## Viewing API Audit Logs - -### Docker Install - -Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. - -### Kubernetes Install - -Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. - -The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. - -#### CLI - -```bash -kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log -``` - -#### Rancher Web GUI - -1. From the context menu, select **Cluster: local > System**. -1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. -1. Pick one of the `rancher` pods and select **⋮ > View Logs**. -1. From the **Logs** drop-down, select `rancher-audit-log`. - -#### Shipping the Audit Log - -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging) for details. - -## Audit Log Samples - -After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. - -### Metadata Level - -If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. - -```json -{ - "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", - "requestURI": "/v3/schemas", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "GET", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:22:43 +0800" -} -``` - -### Metadata and Request Body Level - -If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. - -The code sample below depicts an API request, with both its metadata header and body. 
- -```json -{ - "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:28:08 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - 
"field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my description", - "volumes": [] - } -} -``` - -### Metadata, Request Body, and Response Body Level - -If you set your `AUDIT_LEVEL` to `3`, Rancher logs: - -- The metadata header and body for every API request. -- The metadata header and body for every API response. - -#### Request - -The code sample below depicts an API request, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": 
"default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my decript", - "volumes": [] - } -} -``` - -#### Response - -The code sample below depicts an API response, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "responseStatus": "200", - "stage": "ResponseComplete", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "responseBody": { - "actionLinks": { - "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", - "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", - "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" - }, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements" - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container" - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": 
"ClusterFirst", - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "links": { - "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", - "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" - }, - "name": "nginx", - "namespaceId": "default", - "paused": false, - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - } - } -} -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/arm64-platform/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/arm64-platform/_index.md deleted file mode 100644 index 03bcfbede0..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/arm64-platform/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Running on ARM64 (Experimental)" -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/arm64-platform ---- - -> **Important:** -> -> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. - -The following options are available when using an ARM64 platform: - -- Running Rancher on ARM64 based node(s) - - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) link: - - ``` - # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. 
- docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:vX.Y.Z - ``` -> **Note:** To check if your specific released version is compatible with the ARM64 architecture, you may navigate to your -> version's release notes in the following two ways: -> -> - Manually find your version using https://github.com/rancher/rancher/releases. -> - Go directly to your version using the tag and the specific version number. If you plan to use v2.5.8, for example, you may -> navigate to https://github.com/rancher/rancher/releases/tag/v2.5.8. - -- Create custom cluster and adding ARM64 based node(s) - - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{}}/rancher/v2.0-v2.4/en/faq/networking/cni-providers/#flannel) - -- Importing clusters that contain ARM64 based nodes - - Kubernetes cluster version must be 1.12 or higher - -Please see [Cluster Options]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/) for information on how to configure the cluster options. - -The following features are not tested: - -- Monitoring, alerts, notifiers, pipelines and logging -- Launching apps from the catalog diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md deleted file mode 100644 index c546bb51f1..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/_index.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -title: Template for an RKE Cluster with a Certificate Signed by Recognized CA and a Layer 4 Load Balancer -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca - - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/ ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Certificate signed by a recognized CA -- Layer 4 load balancer -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). 
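Once the placeholders in the template below are filled in for your environment, the file is consumed by the RKE CLI. A minimal sketch, assuming the finished template is saved as `cluster.yml` in the current working directory:

```bash
# Provision the cluster from the template below (assumes it is saved as cluster.yml).
rke up --config cluster.yml
```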
- -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: # ssl cert for ingress. If self-signed, must be signed by same CA as cattle server - tls.key: # ssl key for ingress. If self-signed, must be signed by same CA as cattle server - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: # FQDN to access cattle server - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - # FQDN to access cattle server - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - args: - - --no-cacerts - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md deleted file mode 100644 index 9f7552a58e..0000000000 --- 
a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/_index.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Template for an RKE Cluster with a Self-signed Certificate and Layer 4 Load Balancer -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate - - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/ ---- -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Self-signed SSL -- Layer 4 load balancer -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: # ssl cert for ingress. If selfsigned, must be signed by same CA as cattle server - tls.key: # ssl key for ingress. 
If selfsigned, must be signed by same CA as cattle server - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: # CA cert used to sign cattle server cert and key - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - - port: 443 - targetPort: 443 - protocol: TCP - name: https - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: # FQDN to access cattle server - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - # FQDN to access cattle server - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md deleted file mode 100644 index 8b2e38ac17..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/_index.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: Template for an RKE Cluster with a Self-signed Certificate and SSL Termination on Layer 7 Load Balancer -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate - - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/ ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. 
- -The following template can be used for the cluster.yml if you have a setup with: - -- Layer 7 load balancer with self-signed SSL termination (HTTPS) -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: # CA cert used to sign cattle server cert and key - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl - spec: - rules: - - host: - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server -``` \ No newline at end of file diff --git 
a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md deleted file mode 100644 index ee5d81eaec..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Template for an RKE Cluster with a Recognized CA Certificate and SSL Termination on Layer 7 Load Balancer -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca - - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/ ---- - -RKE uses a cluster.yml file to install and configure your Kubernetes cluster. - -This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. - -The following template can be used for the cluster.yml if you have a setup with: - -- Layer 7 load balancer with SSL termination (HTTPS) -- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) - -> For more options, refer to [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - -```yaml -nodes: - - address: # hostname or IP to access nodes - user: # root user (usually 'root') - role: [controlplane,etcd,worker] # K8s roles for node - ssh_key_path: # path to PEM file - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - - address: - user: - role: [controlplane,etcd,worker] - ssh_key_path: - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -addons: |- - --- - kind: Namespace - apiVersion: v1 - metadata: - name: cattle-system - --- - kind: ServiceAccount - apiVersion: v1 - metadata: - name: cattle-admin - namespace: cattle-system - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: cattle-crb - namespace: cattle-system - subjects: - - kind: ServiceAccount - name: cattle-admin - namespace: cattle-system - roleRef: - kind: ClusterRole - name: cluster-admin - apiGroup: rbac.authorization.k8s.io - --- - apiVersion: v1 - kind: Service - metadata: - namespace: cattle-system - name: cattle-service - labels: - app: cattle - spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: cattle - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl - spec: - rules: - - host: - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - --- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - # Rancher install via RKE addons is only supported up to v2.0.8 - - image: rancher/rancher:v2.0.8 - args: - - 
--no-cacerts - imagePullPolicy: Always - name: cattle-server - # env: - # - name: HTTP_PROXY - # value: "http://your_proxy_address:port" - # - name: HTTPS_PROXY - # value: "http://your_proxy_address:port" - # - name: NO_PROXY - # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" - livenessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 60 - periodSeconds: 60 - readinessProbe: - httpGet: - path: /ping - port: 80 - initialDelaySeconds: 20 - periodSeconds: 10 - ports: - - containerPort: 80 - protocol: TCP -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/firewall/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/firewall/_index.md deleted file mode 100644 index 67c6f88032..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/firewall/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Opening Ports with firewalld -weight: 1 ---- - -> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. - -Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. - -For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: - -``` -Chain INPUT (policy ACCEPT) -target prot opt source destination -ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED -ACCEPT icmp -- anywhere anywhere -ACCEPT all -- anywhere anywhere -ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain FORWARD (policy ACCEPT) -target prot opt source destination -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain OUTPUT (policy ACCEPT) -target prot opt source destination -``` - -You can check the default firewall rules with this command: - -``` -sudo iptables --list -``` - -This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.0-v2.4/en/installation/references) for nodes in a high-availability Rancher server cluster. - -# Prerequisite - -Install v7.x or later ofv`firewalld`: - -``` -yum install firewalld -systemctl start firewalld -systemctl enable firewalld -``` - -# Applying Firewall Port Rules - -In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. 
If your Rancher server nodes have all three roles, run the following commands on each node: - -``` -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` -If your Rancher server nodes have separate roles, use the following commands based on the role of the node: - -``` -# For etcd nodes, run the following commands: -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp - -# For control plane nodes, run the following commands: -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp - -# For worker nodes, run the following commands: -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` - -After the `firewall-cmd` commands have been run on a node, use the following command to enable the firewall rules: - -``` -firewall-cmd --reload -``` - -**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md deleted file mode 100644 index 552053f181..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Kubernetes Installation Using Helm 2 -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2 - - /rancher/v2.x/en/installation/resources/advanced/helm2/ ---- - -> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. -> -> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. -> -> This section provides a copy of the older high-availability Kubernetes Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. 
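If you do migrate, the Helm project provides a `2to3` plugin that converts Helm 2 releases and configuration in place. The commands below are only a sketch, assuming the Helm 3 binary is installed alongside Helm 2 as `helm3` and that the Rancher release is named `rancher`; review the migration blog post linked above and use the `--dry-run` flags before making changes.

```plain
# Sketch: migrate a Helm 2 "rancher" release with the official helm-2to3 plugin
helm3 plugin install https://github.com/helm/helm-2to3

# Copy Helm 2 configuration (repositories, plugins) over to Helm 3
helm3 2to3 move config

# Convert the release itself; preview with --dry-run first
helm3 2to3 convert rancher --dry-run
helm3 2to3 convert rancher

# Only after verifying the converted release: remove Helm 2 data and Tiller
helm3 2to3 cleanup --dry-run
```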
- -For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. - -This procedure walks you through setting up a 3-node cluster with Rancher Kubernetes Engine (RKE) and installing the Rancher chart with the Helm package manager. - -> **Important:** The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. - -> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. - -## Recommended Architecture - -- DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Install]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers - -## Required Tools - -The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [rke]({{}}/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -## Installation Outline - -- [Create Nodes and Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/) -- [Install Kubernetes with RKE]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/) -- [Initialize Helm (tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) -- [Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) - -## Additional Install Options - -- [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) - -## Previous Methods - -[RKE add-on install]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/) - -> **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> -> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> -> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md deleted file mode 100644 index 87cbb05f59..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "1. Create Nodes and Load Balancer" -weight: 185 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb - - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/ ---- - -Use your provider of choice to provision 3 nodes and a Load Balancer endpoint for your RKE install. - -> **Note:** These nodes must be in the same region/datacenter. You may place these servers in separate availability zones. - -### Node Requirements - -View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -View the OS requirements for RKE at [RKE Requirements]({{}}/rke/latest/en/os/) - -### Load Balancer - -RKE will configure an Ingress controller pod, on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. - -Configure a load balancer as a basic Layer 4 TCP forwarder. 
The exact configuration will vary depending on your environment. - ->**Important:** ->Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -#### Examples - -* [Nginx]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nginx/) -* [Amazon NLB]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nlb/) - -### [Next: Install Kubernetes with RKE]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md deleted file mode 100644 index b81f530349..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: NGINX -weight: 270 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nginx - - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/ ---- -NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. - ->**Note:** -> In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. -> -> One caveat: do not use one of your Rancher nodes as the load balancer. - -## Install NGINX - -Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. - -## Create NGINX Configuration - -After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your [nodes]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/). - - >**Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. - -
Example NGINX config
- ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - stream { - upstream rancher_servers_http { - least_conn; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - server :80 max_fails=3 fail_timeout=5s; - } - server { - listen 80; - proxy_pass rancher_servers_http; - } - - upstream rancher_servers_https { - least_conn; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - server :443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers_https; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -## Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md deleted file mode 100644 index 4f15be0455..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Amazon NLB -weight: 277 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nlb - - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/ ---- -## Objectives - -Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Groups](#create-target-groups) - - Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target groups. - -3. [Create Your NLB](#create-your-nlb) - - Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - -> **Note:** Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ELB or ALB. - -## Create Target Groups - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX ingress controller on the nodes will make sure that port 80 gets redirected to port 443. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. 
- -The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. - -{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} - -Click **Create target group** to create the first target group, regarding TCP port 443. - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-443` -Protocol | `TCP` -Port | `443` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `override`,`80` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
-**Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} - -
-**Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} - -
- -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-80` -Protocol | `TCP` -Port | `80` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `traffic port` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
-**Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} - -
-**Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} - -
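If you prefer to script this instead of using the console, the two target groups described in the tables above can also be created with the AWS CLI. The following is only a sketch that mirrors those settings; the VPC ID is a placeholder, and settings such as the health check timeout and success codes may need to be adjusted separately or left at their defaults depending on your CLI and API version.

```plain
# Sketch: create both target groups with the AWS CLI (replace vpc-xxxxxxxx)
aws elbv2 create-target-group \
  --name rancher-tcp-443 \
  --protocol TCP --port 443 \
  --target-type instance \
  --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP \
  --health-check-path /healthz \
  --health-check-port 80 \
  --healthy-threshold-count 3 \
  --unhealthy-threshold-count 3 \
  --health-check-interval-seconds 10

aws elbv2 create-target-group \
  --name rancher-tcp-80 \
  --protocol TCP --port 80 \
  --target-type instance \
  --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP \
  --health-check-path /healthz \
  --health-check-port traffic-port \
  --healthy-threshold-count 3 \
  --unhealthy-threshold-count 3 \
  --health-check-interval-seconds 10
```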
- -## Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -## Create Your NLB - -Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher` - - Scheme: `internal` or `internet-facing` - - The Scheme that you choose for your NLB is dependent on the configuration of your instances/VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `TCP`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - From the **Name** drop-down, choose `rancher-tcp-443`. - - - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. - -8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -9. After AWS creates the NLB, click **Close**. - -## Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md deleted file mode 100644 index f318630896..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "Initialize Helm: Install the Tiller Service" -description: "With Helm, you can create configurable deployments instead of using static files. In order to use Helm, the Tiller service needs to be installed on your cluster." -weight: 195 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init - - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/ ---- - -Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. 
For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. - -For systems without direct internet access, see [Helm - Air Gap]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap) for install details. - -Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) - -### Install Tiller on the Cluster - -> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. - -Helm installs the `tiller` service on your cluster to manage charts. Since RKE enables RBAC by default we will need to use `kubectl` to create a `serviceaccount` and `clusterrolebinding` so `tiller` has permission to deploy to the cluster. - -* Create the `ServiceAccount` in the `kube-system` namespace. -* Create the `ClusterRoleBinding` to give the `tiller` account access to the cluster. -* Finally use `helm` to install the `tiller` service - -```plain -kubectl -n kube-system create serviceaccount tiller - -kubectl create clusterrolebinding tiller \ - --clusterrole=cluster-admin \ - --serviceaccount=kube-system:tiller - -helm init --service-account tiller - -# Users in China: You will need to specify a specific tiller-image in order to initialize tiller. -# The list of tiller image tags are available here: https://dev.aliyun.com/detail.html?spm=5176.1972343.2.18.ErFNgC&repoId=62085. -# When initializing tiller, you'll need to pass in --tiller-image - -helm init --service-account tiller \ ---tiller-image registry.cn-hangzhou.aliyuncs.com/google_containers/tiller: -``` - -> **Note:** This`tiller`install has full cluster access, which should be acceptable if the cluster is dedicated to Rancher server. Check out the [helm docs](https://docs.helm.sh/using_helm/#role-based-access-control) for restricting `tiller` access to suit your security requirements. - -### Test your Tiller installation - -Run the following command to verify the installation of `tiller` on your cluster: - -``` -kubectl -n kube-system rollout status deploy/tiller-deploy -Waiting for deployment "tiller-deploy" rollout to finish: 0 of 1 updated replicas are available... -deployment "tiller-deploy" successfully rolled out -``` - -And run the following command to validate Helm can talk to the `tiller` service: - -``` -helm version -Client: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} -Server: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} -``` - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/troubleshooting/) page. 
- -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md deleted file mode 100644 index 789e01310f..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-init/troubleshooting/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Troubleshooting -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/troubleshooting - - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/ ---- - -### Helm commands show forbidden - -When Helm is initiated in the cluster without specifying the correct `ServiceAccount`, the command `helm init` will succeed but you won't be able to execute most of the other `helm` commands. The following error will be shown: - -``` -Error: configmaps is forbidden: User "system:serviceaccount:kube-system:default" cannot list configmaps in the namespace "kube-system" -``` - -To resolve this, the server component (`tiller`) needs to be removed and added with the correct `ServiceAccount`. You can use `helm reset --force` to remove the `tiller` from the cluster. Please check if it is removed using `helm version --server`. - -``` -helm reset --force -Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster. -helm version --server -Error: could not find tiller -``` - -When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) to install `tiller` with the correct `ServiceAccount`. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md deleted file mode 100644 index dc96db26d4..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/_index.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: "4. Install Rancher" -weight: 200 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher - - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/ ---- - -Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/). - -Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) - -### Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.0-v2.4/en/installation/resources/choosing-version). 
- -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. - -There are three recommended options for the source of the certificate. - -> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -|-----|-----|-----|-----| -| [Rancher Generated Certificates](#rancher-generated-certificates) | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** | [yes](#optional-install-cert-manager) | -| [Let’s Encrypt](#let-s-encrypt) | `ingress.tls.source=letsEncrypt` | Use [Let's Encrypt](https://letsencrypt.org/) to issue a certificate | [yes](#optional-install-cert-manager) | -| [Certificates from Files](#certificates-from-files) | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s) | no | - -### Optional: Install cert-manager - -**Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). - -> **Important:** -> Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. - -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/). - -Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. - -These instructions are adapted from the [official cert-manager documentation](https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html#installing-with-helm). - - -1. Install the CustomResourceDefinition resources separately - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml - ``` - -1. Create the namespace for cert-manager - ```plain - kubectl create namespace cert-manager - ``` - -1. Label the cert-manager namespace to disable resource validation - ```plain - kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true - ``` - -1. Add the Jetstack Helm repository - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - ```plain - helm repo update - ``` - -1. Install the cert-manager Helm chart - ```plain - helm install \ - --name cert-manager \ - --namespace cert-manager \ - --version v0.14.2 \ - jetstack/cert-manager - ``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m -cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m -cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m -``` - -If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check the [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. - -
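As an additional sanity check (resource names vary slightly between cert-manager versions, so adjust as needed), you can confirm that the CustomResourceDefinitions were registered and that the webhook finished rolling out:

```plain
kubectl get crds | grep -iE 'cert-?manager'
kubectl -n cert-manager rollout status deploy/cert-manager-webhook
```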
- -#### Rancher Generated Certificates - -> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -#### Let's Encrypt - -> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. This configuration uses HTTP validation (`HTTP-01`) so the load balancer must have a public DNS record and be accessible from the internet. - -In the following command, - -- Set `hostname` to the public DNS record that resolves to your load balancer. -- Set `ingress.tls.source` to `letsEncrypt`. -- Set `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices). -- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org \ - --set letsEncrypt.ingress.class=nginx -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -#### Certificates from Files - -Create Kubernetes secrets from your own certificates for Rancher to use. - - -> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.0-v2.4/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set `hostname` and set `ingress.tls.source` to `secret`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. 
- -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher-/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### Advanced Configurations - -The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. - -* [HTTP Proxy]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/) -* [Private Docker Image Registry]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -* [TLS Termination on an External Load Balancer]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/) for the full list of options. - -### Save your options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. - -Doesn't work? Take a look at the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/troubleshooting/) Page diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md deleted file mode 100644 index e13a88a1a6..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/chart-options/_index.md +++ /dev/null @@ -1,248 +0,0 @@ ---- -title: Chart Options -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options - - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/ ---- - -### Common Options - -| Option | Default Value | Description | -| --- | --- | --- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. 
- "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
- -### Advanced Options - -| Option | Default Value | Description | -| --- | --- | --- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "auto" | `string` - Have Rancher detect and import the local Rancher server cluster | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) level. 0 is off. [0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | -| `debug` | false | `bool` - set debug flag on rancher server | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | -| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | -| `resources` | {} | `map` - rancher pod resource requests & limits | -| `rancherImage` | "rancher/rancher" | `string` - rancher image source | -| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | -| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | -| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ _Available as of v2.3.0_ | -| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. _Available as of v2.3.0_ - -
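The advanced options are passed the same way as the common ones. As an illustration only (the image tag shown is an example, not a recommendation), pinning the Rancher image, requiring pod anti-affinity, and enabling the audit log could look like the following, again assuming the `rancher-latest` repository:

```plain
helm install rancher-latest/rancher \
  --name rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set rancherImageTag=v2.2.8 \
  --set antiAffinity=required \
  --set auditLog.level=1
```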
- -### API Audit Log - -Enabling the [API Audit Log]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing/). - -You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. - -```plain ---set auditLog.level=1 -``` - -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) for the Rancher server cluster or System Project. - -Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. - -### Setting Extra Environment Variables - -_Available as of v2.2.0_ - -You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -### TLS settings - -_Available as of v2.2.0_ - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -See [TLS settings]({{}}/rancher/v2.0-v2.4/en/admin-settings/tls-settings) for more information and options. - -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -> Note: This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair -``` - -_Available as of v2.0.15, v2.1.10 and v2.2.4_ - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Service cluster IP range (default: 10.43.0.1/16) and any worker cluster `controlplane` nodes. 
Rancher supports CIDR notation ranges in this list. - -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. - -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) - - -### External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. - -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/tls-secrets/) to add the CA cert for Rancher. - -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -#### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -#### Required Headers - -* `Host` -* `X-Forwarded-Proto` -* `X-Forwarded-Port` -* `X-Forwarded-For` - -#### Recommended Timeouts - -* Read Timeout: `1800 seconds` -* Write Timeout: `1800 seconds` -* Connect Timeout: `30 seconds` - -#### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - - -#### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - - >**Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). 
- -* Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -* Replace both occurrences of `FQDN` to the DNS name for Rancher. -* Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md deleted file mode 100644 index aeb3d54a9d..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/_index.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: "2. Install Kubernetes with RKE" -weight: 190 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke - - /rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/ ---- - -Use RKE to install Kubernetes with a high availability etcd configuration. - ->**Note:** For systems without direct internet access see [Air Gap: Kubernetes install]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) for install details. - -### Create the `rancher-cluster.yml` File - -Using the sample below create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -> **Note:** If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
- - -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane,worker,etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h -``` - -#### Common RKE Nodes Options - -| Option | Required | Description | -| --- | --- | --- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -#### Advanced Configurations - -RKE has many configuration options for customizing the install to suit your specific environment. - -Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. - -For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/). - -### Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### Testing Your Cluster - -RKE should have created a file `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`. - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state. - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy are ready to continue. - -* Pods are in `Running` or `Completed` state. -* `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -* Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
- -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -### Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke/troubleshooting/) page. - -### [Next: Initialize Helm (Install tiller)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md deleted file mode 100644 index a6989a9fe0..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: RKE Add-On Install -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - - -* [Kubernetes installation with External Load Balancer (TCP/Layer 4)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb) -* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb) -* [HTTP Proxy Configuration for a Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/proxy/) -* [Troubleshooting RKE Add-on Installs]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md deleted file mode 100644 index 9425665cd7..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Enable API Auditing -weight: 300 -aliases: - - /rke/latest/en/config-options/add-ons/api-auditing/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/ ---- - ->**Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. 
API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. - -## In-line Arguments - -Enable API Auditing using RKE by adding arguments to your Rancher container. - -To enable API auditing: - -- Add API Auditing arguments (`args`) to your Rancher container. -- Declare a `mountPath` in the `volumeMounts` directive of the container. -- Declare a `path` in the `volumes` directive. - -For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing). - -```yaml -... -containers: - - image: rancher/rancher:latest - imagePullPolicy: Always - name: cattle-server - args: ["--audit-log-path", "/var/log/auditlog/rancher-api-audit.log", "--audit-log-maxbackup", "5", "--audit-log-maxsize", "50", "--audit-level", "2"] - ports: - - containerPort: 80 - protocol: TCP - - containerPort: 443 - protocol: TCP - volumeMounts: - - mountPath: /etc/rancher/ssl - name: cattle-keys-volume - readOnly: true - - mountPath: /var/log/auditlog - name: audit-log-dir - volumes: - - name: cattle-keys-volume - secret: - defaultMode: 420 - secretName: cattle-keys-server - - name: audit-log-dir - hostPath: - path: /var/log/rancher/auditlog - type: Directory -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md deleted file mode 100644 index 7a84ec9360..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/_index.md +++ /dev/null @@ -1,401 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (TCP/Layer 4) -weight: 275 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 4 load balancer (TCP) -- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) - -In a Kubernetes setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. 
- -Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers -![High-availability Kubernetes installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -
- -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -## 2. Configure Load Balancer - -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](./nlb) - ->**Note:** -> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. -> ->One caveat: do not use one of your Rancher nodes as the load balancer. - -### A. Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. - -### B. Create NGINX Configuration - -After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). - - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - - **Example NGINX config:** - ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - http { - server { - listen 80; - return 301 https://$host$request_uri; - } - } - - stream { - upstream rancher_servers { - least_conn; - server IP_NODE_1:443 max_fails=3 fail_timeout=5s; - server IP_NODE_2:443 max_fails=3 fail_timeout=5s; - server IP_NODE_3:443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -### Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
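If `nslookup` is not available on your workstation, `dig` gives the same confirmation. This is an optional sketch, assuming the package that provides `dig` (`dnsutils` or `bind-utils`, depending on the distribution) is installed, and it reuses the example FQDN from above:

```
# Resolve the FQDN and print only the address it points to.
# The answer should be the IP address of your load balancer.
dig +short rancher.yourdomain.com
```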
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate.yml) - - [Template for certificate signed by recognized CA
`3-node-certificate-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate-recognizedca.yml) - - >**Advanced Config Options:** - > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} - ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -1. 
In `kind: Secret` with `name: cattle-keys-ingress`: - - * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) - * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - - >**Note:** - > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== - ``` - -2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). - - >**Note:** - > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
- - - **Step Result:** The file should look like the example below (the base64 encoded string should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - ``` - -{{% /accordion %}} - -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} - -If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. - -In the `kind: Secret` with `name: cattle-keys-ingress`: - -* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) -* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - ->**Note:** -> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: cattle-keys-ingress - namespace: cattle-system -type: Opaque -data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTB
PelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== -``` - -{{% /accordion %}} - - - -## 8. Configure FQDN - -There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). - -In the `kind: Ingress` with `name: cattle-ingress-http`: - -* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). - -After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): - -```yaml - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - rancher.yourdomain.com -``` - -Save the `.yml` file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - -``` -rke up --config rancher-cluster.yml -``` - -**Step Result:** The output should be similar to the snippet below: - -``` -INFO[0000] Building Kubernetes cluster -INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] -INFO[0000] [network] Deploying port listener containers -INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] -... -INFO[0101] Finished building Kubernetes cluster successfully -``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? 
- -You have a couple of options: - -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -
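Before moving on, you may also want to confirm that the Rancher pods actually came up. This is an optional check rather than part of the procedure above; it assumes the `kube_config_rancher-cluster.yml` file generated by RKE and the `cattle-system` namespace and `app: cattle` label used in the RKE template:

```
# List the Rancher workload; the cattle pod(s) should reach the Running state.
kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -n cattle-system

# Tail the Rancher server logs if a pod stays in CrashLoopBackOff or Pending.
kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=cattle -n cattle-system
```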
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md deleted file mode 100644 index c8b155bb6c..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Amazon NLB Configuration -weight: 277 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha-server-install/nlb/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Objectives - -Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Groups](#create-target-groups) - - Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target groups. - -3. [Create Your NLB](#create-your-nlb) - - Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - - -## Create Target Groups - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX controller on the nodes will make sure that port 80 gets redirected to port 443. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. - -The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. - -{{< img "/img/rancher/ha/nlb/ec2-loadbalancing.png" "EC2 Load Balancing section">}} - -Click **Create target group** to create the first target group, regarding TCP port 443. - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-443` -Protocol | `TCP` -Port | `443` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `override`,`80` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
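If you prefer scripting over the console, the same target group can be sketched with the AWS CLI. This is only an illustration of the settings in the table above; the VPC ID is a placeholder, and the advanced thresholds and interval can also be set with the corresponding `--healthy-threshold-count`, `--unhealthy-threshold-count` and `--health-check-interval-seconds` flags if your CLI version supports them:

```
# Create the TCP/443 target group with an HTTP health check on /healthz (port 80).
aws elbv2 create-target-group \
  --name rancher-tcp-443 \
  --protocol TCP \
  --port 443 \
  --target-type instance \
  --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP \
  --health-check-path /healthz \
  --health-check-port 80
```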
-**Screenshot Target group TCP port 443 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443.png" "Target group 443">}} - -
-**Screenshot Target group TCP port 443 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-443-advanced.png" "Target group 443 Advanced">}} - -
- -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. - -Option | Setting ---------------------------------------|------------------------------------ -Target Group Name | `rancher-tcp-80` -Protocol | `TCP` -Port | `80` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` -Port (Advanced health check) | `traffic port` -Healthy threshold (Advanced health) | `3` -Unhealthy threshold (Advanced) | `3` -Timeout (Advanced) | `6 seconds` -Interval (Advanced) | `10 seconds` -Success codes | `200-399` - -
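The AWS CLI equivalent looks the same as for the 443 group, with the port changed and the health check left on the traffic port (again only a sketch; the VPC ID is a placeholder):

```
# Create the TCP/80 target group; the health check defaults to the traffic port.
aws elbv2 create-target-group \
  --name rancher-tcp-80 \
  --protocol TCP \
  --port 80 \
  --target-type instance \
  --vpc-id vpc-xxxxxxxx \
  --health-check-protocol HTTP \
  --health-check-path /healthz
```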
-**Screenshot Target group TCP port 80 settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80.png" "Target group 80">}} - -
-**Screenshot Target group TCP port 80 Advanced settings**
-{{< img "/img/rancher/ha/nlb/create-targetgroup-80-advanced.png" "Target group 80 Advanced">}} - -
- -## Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
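If you created the target groups with the AWS CLI, the registration step can be scripted as well. A rough sketch; the target group ARN and instance IDs are placeholders for your own values:

```
# Register the three Linux nodes with the rancher-tcp-443 target group.
aws elbv2 register-targets \
  --target-group-arn arn:aws:elasticloadbalancing:REGION:ACCOUNT:targetgroup/rancher-tcp-443/TARGET_GROUP_ID \
  --targets Id=i-INSTANCE_1 Id=i-INSTANCE_2 Id=i-INSTANCE_3
```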
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -## Create Your NLB - -Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher` - - Scheme: `internet-facing` - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `TCP`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - From the **Name** drop-down, choose `rancher-tcp-443`. - - - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. - -8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -9. After AWS creates the NLB, click **Close**. - -## Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md deleted file mode 100644 index 0dbb1290d6..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/_index.md +++ /dev/null @@ -1,290 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. 
The setup is based on: - -- Layer 7 Loadbalancer with SSL termination (HTTPS) -- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) - -In an Kubernetes setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. - -Kubernetes Rancher install with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -## 2. Configure Load Balancer - -When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. - -The load balancer has to be configured to support the following: - -* **WebSocket** connections -* **SPDY** / **HTTP/2** protocols -* Passing / setting the following headers: - -| Header | Value | Description | -|---------------------|----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. | -| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | -| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the port that the client used to connect to the load balancer. | -| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. | - -Health checks can be executed on the `/healthz` endpoint of the node; this will return HTTP 200. - -We have example configurations for the following load balancers: - -* [Amazon ALB configuration](alb/) -* [NGINX configuration](nginx/) - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a YAML config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-certificate.yml) - - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-recognizedca.yml) - - >**Advanced Config Options:** - > - >- Want records of all transactions with the Rancher API? Enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing/). - >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options]({{}}/rke/latest/en/config-options/). - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) - ->**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} -If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. -{{% /accordion %}} - -## 8. Configure FQDN - -There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). - -1. Open `rancher-cluster.yml`. - -2. In the `kind: Ingress` with `name: cattle-ingress-http:` - - Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - ``` - - -3. Save the file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. 
The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - - ``` - rke up --config rancher-cluster.yml - ``` - - **Step Result:** The output should be similar to the snippet below: - - ``` - INFO[0000] Building Kubernetes cluster - INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] - INFO[0000] [network] Deploying port listener containers - INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] - ... - INFO[0101] Finished building Kubernetes cluster successfully - ``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? - -- **Recommended:** Review [Creating Backups—High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md deleted file mode 100644 index cda6cd4f1d..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Amazon ALB Configuration -weight: 277 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/alb/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/alb - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Objectives - -Configuring an Amazon ALB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. - -1. [Create Target Group](#create-target-group) - - Begin by creating one target group for the http protocol. You'll add your Linux nodes to this group. - -2. [Register Targets](#register-targets) - - Add your Linux nodes to the target group. - -3. [Create Your ALB](#create-your-alb) - - Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. - - -## Create Target Group - -Your first ALB configuration step is to create one target group for HTTP. - -Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. - -The document below will guide you through this process. Use the data in the tables below to complete the procedure. - -[Amazon Documentation: Create a Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-target-group.html) - -### Target Group (HTTP) - -Option | Setting -----------------------------|------------------------------------ -Target Group Name | `rancher-http-80` -Protocol | `HTTP` -Port | `80` -Target type | `instance` -VPC | Choose your VPC -Protocol
(Health Check) | `HTTP` -Path
(Health Check) | `/healthz` - -## Register Targets - -Next, add your Linux nodes to your target group. - -[Amazon Documentation: Register Targets with Your Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-register-targets.html) - -### Create Your ALB - -Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target group you created in [Create Target Group](#create-target-group). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Application Load Balancer**. - -5. Complete the **Step 1: Configure Load Balancer** form. - - **Basic Configuration** - - - Name: `rancher-http` - - Scheme: `internet-facing` - - IP address type: `ipv4` - - **Listeners** - - Add the **Load Balancer Protocols** and **Load Balancer Ports** below. - - `HTTP`: `80` - - `HTTPS`: `443` - - - **Availability Zones** - - - Select Your **VPC** and **Availability Zones**. - -6. Complete the **Step 2: Configure Security Settings** form. - - Configure the certificate you want to use for SSL termination. - -7. Complete the **Step 3: Configure Security Groups** form. - -8. Complete the **Step 4: Configure Routing** form. - - - From the **Target Group** drop-down, choose **Existing target group**. - - - Add target group `rancher-http-80`. - -9. Complete **Step 5: Register Targets**. Since you registered your targets earlier, all you have to do it click **Next: Review**. - -10. Complete **Step 6: Review**. Look over the load balancer details and click **Create** when you're satisfied. - -11. After AWS creates the ALB, click **Close**. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md deleted file mode 100644 index c1e1c8024f..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: NGINX Configuration -weight: 277 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/nginx/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -## Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. - -For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). 
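As a quick illustration, on a Debian or Ubuntu based load balancer host the install typically boils down to the commands below. Package names differ per distribution, so treat this as an assumption and defer to the NGINX install documentation linked above:

```
# Install NGINX from the distribution repositories and confirm the version.
sudo apt-get update
sudo apt-get install -y nginx
nginx -v
```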
- -## Create NGINX Configuration - -See [Example NGINX config]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/#example-nginx-config). - -## Run NGINX - -* Reload or restart NGINX - - ```` - # Reload NGINX - nginx -s reload - - # Restart NGINX - # Depending on your Linux distribution - service nginx restart - systemctl restart nginx - ```` - -## Browse to Rancher UI - -You should now be to able to browse to `https://FQDN`. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md deleted file mode 100644 index 80cf52b95b..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/proxy/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: HTTP Proxy Configuration -weight: 277 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/proxy - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. - -Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. - -Environment variable | Purpose ---------------------------|--------- -HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) -HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) -NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) - -> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Kubernetes installation - -When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. - -* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/#5-download-rke-config-file-template) -* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/#5-download-rke-config-file-template) - -The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. 
Required values for `NO_PROXY` are: - -* `localhost` -* `127.0.0.1` -* `0.0.0.0` -* Configured `service_cluster_ip_range` (default: `10.43.0.0/16`) - -The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage of the proxy when accessing network range `192.168.10.0/24`, the configured `service_cluster_ip_range` (`10.43.0.0/16`) and every hostname under the domain `example.com`. If you have changed the `service_cluster_ip_range`, you have to update the value below accordingly. - -```yaml -... ---- - kind: Deployment - apiVersion: extensions/v1beta1 - metadata: - namespace: cattle-system - name: cattle - spec: - replicas: 1 - template: - metadata: - labels: - app: cattle - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:latest - imagePullPolicy: Always - name: cattle-server - env: - - name: HTTP_PROXY - value: "http://192.168.10.1:3128" - - name: HTTPS_PROXY - value: "http://192.168.10.1:3128" - - name: NO_PROXY - value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,192.168.10.0/24,example.com" - ports: -... -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md deleted file mode 100644 index 0c2697ec11..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: 404 - default backend -weight: 30 -aliases: - - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/404-default-backend/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend - - /404-default-backend/ - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. - -When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. - -### Possible causes - -The nginx ingress controller is not able to serve the configured host in `rancher-cluster.yml`. This should be the FQDN you configured to access Rancher. 
You can check if it is properly configured by viewing the ingress that is created by running the following command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress -n cattle-system -o wide -``` - -Check if the `HOSTS` column is displaying the FQDN you configured in the template, and that the used nodes are listed in the `ADDRESS` column. If that is configured correctly, we can check the logging of the nginx ingress controller. - -The logging of the nginx ingress controller will show why it cannot serve the requested host. To view the logs, you can run the following command - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx -``` - -Errors - -* `x509: certificate is valid for fqdn, not your_configured_fqdn` - -The used certificates do not contain the correct hostname. Generate new certificates that contain the chosen FQDN to access Rancher and redeploy. - -* `Port 80 is already in use. Please check the flag --http-port` - -There is a process on the node occupying port 80, this port is needed for the nginx ingress controller to route requests to Rancher. You can find the process by running the command: `netstat -plant | grep \:80`. - -Stop/kill the process and redeploy. - -* `unexpected error creating pem file: no valid PEM formatted block found` - -The base64 encoded string configured in the template is not valid. Please check if you can decode the configured string using `base64 -D STRING`, this should return the same output as the content of the file you used to generate the string. If this is correct, please check if the base64 encoded string is placed directly after the key, without any newlines before, in between or after. (For example: `tls.crt: LS01..`) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md deleted file mode 100644 index aa383d0591..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Troubleshooting HA RKE Add-On Install -weight: 370 -aliases: - - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting - - /rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This section contains common errors seen when setting up a Kubernetes installation. - -Choose from the following options: - -- [Generic troubleshooting](generic-troubleshooting/) - - In this section, you can find generic ways to debug your Kubernetes cluster. 
- -- [Failed to set up SSH tunneling for host]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) - - In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. - -- [Failed to get job complete status](./job-complete-status/) - - In this section, you can find errors related to deploying addons. - -- [404 - default backend]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/) - - In this section, you can find errors related to the `404 - default backend` page that is shown when trying to access Rancher. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md deleted file mode 100644 index 9019f0b737..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/_index.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Generic troubleshooting -weight: 5 -aliases: - - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/generic-troubleshooting/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -Below are steps that you can follow to determine what is wrong in your cluster. - -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. - -### All nodes should be present and in **Ready** state - -To check, run the command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes -``` - -If a node is not shown in this output or a node is not in **Ready** state, you can check the logging of the `kubelet` container. Login to the node and run `docker logs kubelet`. 
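For example, assuming the default RKE setup where the kubelet runs as a Docker container named `kubelet`:

```
# On the node that is not Ready: follow the most recent kubelet log lines
docker logs --tail 100 -f kubelet
```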
- -### All pods/jobs should be in **Running**/**Completed** state - -To check, run the command: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -#### Describe pod - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml describe pod POD_NAME -n NAMESPACE -``` - -#### Pod container logs - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -#### Describe job - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml describe job JOB_NAME -n NAMESPACE -``` - -#### Logs from the containers of pods of the job - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Check ingress - -Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (address(es) it will be routed to). - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress --all-namespaces -``` - -### List all Kubernetes cluster events - -Kubernetes cluster events are stored, and can be retrieved by running: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml get events --all-namespaces -``` - -### Check Rancher container logging - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=cattle -n cattle-system -``` - -### Check NGINX ingress controller logging - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx -``` - -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. This will run an `alpine` container on every host, which we will use to run a `ping` test between containers on all hosts. - -1. Save the following file as `ds-alpine.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: alpine - spec: - selector: - matchLabels: - name: alpine - template: - metadata: - labels: - name: alpine - spec: - tolerations: - - effect: NoExecute - key: "node-role.kubernetes.io/etcd" - value: "true" - - effect: NoSchedule - key: "node-role.kubernetes.io/controlplane" - value: "true" - containers: - - image: alpine - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl --kubeconfig kube_config_rancher-cluster.yml create -f ds-alpine.yml` -3. Wait until `kubectl --kubeconfig kube_config_rancher-cluster.yml rollout status ds/alpine -w` returns: `daemon set "alpine" successfully rolled out`. -4. Run the following command to let each container on every host ping each other (it's a single line command). 
- - ``` - echo "=> Start"; kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --kubeconfig kube_config_rancher-cluster.yml --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start - => End - ``` - -If you see error in the output, that means that the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened between the hosts indicated. - -Example error output of a situation where NODE1 had the UDP ports blocked. - -``` -=> Start -command terminated with exit code 1 -NODE2 cannot reach NODE1 -command terminated with exit code 1 -NODE3 cannot reach NODE1 -command terminated with exit code 1 -NODE1 cannot reach NODE2 -command terminated with exit code 1 -NODE1 cannot reach NODE3 -=> End -``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md deleted file mode 100644 index f6591e3cd7..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Failed to get job complete status -weight: 20 -aliases: - - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/job-complete-status/ - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status - - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. - -When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. 
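A typical iteration after editing the configuration looks like the sketch below, run from the directory that contains `rancher-cluster.yml` and the `rke` binary:

```
# Clean the nodes so the previous (faulty) configuration cannot conflict
rke remove --config rancher-cluster.yml

# Re-apply the corrected configuration
rke up --config rancher-cluster.yml
```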
- -### Failed to deploy addon execute job [rke-user-includes-addons]: Failed to get job complete status - -Something is wrong in the addons definitions, you can run the following command to get the root cause in the logging of the job: - -``` -kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=rke-user-addon-deploy-job -n kube-system -``` - -#### error: error converting YAML to JSON: yaml: line 9: - -The structure of the addons definition in `rancher-cluster.yml` is wrong. In the different resources specified in the addons section, there is a error in the structure of the YAML. The pointer `yaml line 9` references to the line number of the addon that is causing issues. - -Things to check -
- Is each base64 encoded certificate string placed directly after its key, for example `tls.crt: LS01...`? There should be no newline or space before, in between, or after the string.
- Is the YAML properly formatted? Each indentation level should be 2 spaces, as shown in the template files.
- Verify the integrity of your certificate by running `cat MyCertificate | base64 -d` on Linux or `cat MyCertificate | base64 -D` on macOS. If there is any error, the command output will tell you.
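Optionally, you can sanity-check the YAML structure locally before running `rke up` again. This sketch assumes a YAML linter such as `yamllint` is installed:

```
# Optional: validate the YAML structure of the cluster configuration file
yamllint rancher-cluster.yml
```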
- -#### Error from server (BadRequest): error when creating "/etc/config/rke-user-addon.yaml": Secret in version "v1" cannot be handled as a Secret - -The base64 string of one of the certificate strings is wrong. The log message will try to show you what part of the string is not recognized as valid base64. - -Things to check -
- Check if the base64 string is valid by running one of the commands below:

  ```
  # MacOS
  echo BASE64_CRT | base64 -D
  # Linux
  echo BASE64_CRT | base64 -d
  # Windows
  certutil -decode FILENAME.base64 FILENAME.verify
  ```
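When regenerating an invalid string, make sure the encoder writes a single unwrapped line. A hedged example, where `cert.pem` is a placeholder for your certificate file:

```
# GNU coreutils (most Linux distributions): disable line wrapping
base64 -w0 cert.pem

# Portable alternative using OpenSSL (-A keeps the output on a single line)
openssl base64 -A -in cert.pem
```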
- -#### The Ingress "cattle-ingress-http" is invalid: spec.rules[0].host: Invalid value: "IP": must be a DNS name, not an IP address - -The host value can only contain a host name, as it is needed by the ingress controller to match the hostname and pass to the correct backend. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md deleted file mode 100644 index cb9001f348..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-4-lb/_index.md +++ /dev/null @@ -1,399 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (TCP/Layer 4) -weight: 275 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-4-lb - - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb - - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-4-lb - - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 4 load balancer (TCP) -- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) - -In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. - -Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers -![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -
- -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -## 2. Configure Load Balancer - -We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb) - ->**Note:** -> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. -> ->One caveat: do not use one of your Rancher nodes as the load balancer. - -### A. Install NGINX - -Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). - -The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. - -### B. Create NGINX Configuration - -After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. - -1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. - -2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). - - >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - - **Example NGINX config:** - ``` - worker_processes 4; - worker_rlimit_nofile 40000; - - events { - worker_connections 8192; - } - - http { - server { - listen 80; - return 301 https://$host$request_uri; - } - } - - stream { - upstream rancher_servers { - least_conn; - server IP_NODE_1:443 max_fails=3 fail_timeout=5s; - server IP_NODE_2:443 max_fails=3 fail_timeout=5s; - server IP_NODE_3:443 max_fails=3 fail_timeout=5s; - } - server { - listen 443; - proxy_pass rancher_servers; - } - } - ``` - -3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. - -4. Load the updates to your NGINX configuration by running the following command: - - ``` - # nginx -s reload - ``` - -### Option - Run NGINX as Docker container - -Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/nginx.conf:/etc/nginx/nginx.conf \ - nginx:1.14 -``` - -## 3. Configure DNS - -Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server a create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
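If `dig` is available, you can optionally cross-check the record; it should print the IP address of your load balancer:

```
dig +short rancher.yourdomain.com
```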
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate) - - [Template for certificate signed by recognized CA
]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca) - - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} - ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -1. In `kind: Secret` with `name: cattle-keys-ingress`: - - * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) - * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - - >**Note:** - > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-ingress - namespace: cattle-system - type: Opaque - data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5T
HptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== - ``` - -2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). - - >**Note:** - > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. - - - **Step Result:** The file should look like the example below (the base64 encoded string should be different): - - ```yaml - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - ``` - -{{% /accordion %}} - -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} - -If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. - -In the `kind: Secret` with `name: cattle-keys-ingress`: - -* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) -* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) - -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - ->**Note:** -> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. 
- -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: cattle-keys-ingress - namespace: cattle-system -type: Opaque -data: - tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTB
PelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== -``` - -{{% /accordion %}} - - - -## 8. Configure FQDN - -There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). - -In the `kind: Ingress` with `name: cattle-ingress-http`: - -* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). - -After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): - -```yaml - --- - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - tls: - - secretName: cattle-keys-ingress - hosts: - - rancher.yourdomain.com -``` - -Save the `.yml` file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. Back Up Your RKE Config File - -After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - -``` -rke up --config rancher-cluster.yml -``` - -**Step Result:** The output should be similar to the snippet below: - -``` -INFO[0000] Building Kubernetes cluster -INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] -INFO[0000] [network] Deploying port listener containers -INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] -... -INFO[0101] Finished building Kubernetes cluster successfully -``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? 
- -You have a couple of options: - -- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration). -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md deleted file mode 100644 index 236813e01e..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/rke-add-on/layer-7-lb/_index.md +++ /dev/null @@ -1,287 +0,0 @@ ---- -title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) -weight: 276 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-7-lb - - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/ - - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb - - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/ ---- - -> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** -> ->Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). -> ->If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. - -This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: - -- Layer 7 load balancer with SSL termination (HTTPS) -- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) - -In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. - -Rancher installed on a Kubernetes cluster with layer 7 load balancer, depicting SSL termination at load balancer -![Rancher HA]({{}}/img/rancher/ha/rancher2ha-l7.svg) - -## Installation Outline - -Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. - - - -- [1. Provision Linux Hosts](#1-provision-linux-hosts) -- [2. Configure Load Balancer](#2-configure-load-balancer) -- [3. Configure DNS](#3-configure-dns) -- [4. Install RKE](#4-install-rke) -- [5. Download RKE Config File Template](#5-download-rke-config-file-template) -- [6. Configure Nodes](#6-configure-nodes) -- [7. Configure Certificates](#7-configure-certificates) -- [8. Configure FQDN](#8-configure-fqdn) -- [9. Configure Rancher version](#9-configure-rancher-version) -- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) -- [11. Run RKE](#11-run-rke) -- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) - - - -## 1. Provision Linux Hosts - -Provision three Linux hosts according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements). - -## 2. Configure Load Balancer - -When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. 
The load balancer has to be configured to support the following:

* **WebSocket** connections
* **SPDY** / **HTTP/2** protocols
* Passing / setting the following headers:

| Header | Value | Description |
|---------------------|-------------------------------|:------------|
| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. |
| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer. **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. |
| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the port that the client used to connect to the load balancer. |
| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. |

Health checks can be executed on the `/healthz` endpoint of the node; this returns HTTP 200.

We have example configurations for the following load balancers:

* [Amazon ELB configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/)
* [NGINX configuration]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/)

## 3. Configure DNS

Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

- -1. Log into your DNS server a create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). - -2. Validate that the `DNS A` is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: - - `nslookup HOSTNAME.DOMAIN.COM` - - **Step Result:** Terminal displays output similar to the following: - - ``` - $ nslookup rancher.yourdomain.com - Server: YOUR_HOSTNAME_IP_ADDRESS - Address: YOUR_HOSTNAME_IP_ADDRESS#53 - - Non-authoritative answer: - Name: rancher.yourdomain.com - Address: HOSTNAME.DOMAIN.COM - ``` - -
- -## 4. Install RKE - -RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. - -1. Follow the [RKE Install]({{}}/rke/latest/en/installation) instructions. - -2. Confirm that RKE is now executable by running the following command: - - ``` - rke --version - ``` - -## 5. Download RKE Config File Template - -RKE uses a YAML config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. - -1. Download one of following templates, depending on the SSL certificate you're using. - - - [Template for self-signed certificate
`3-node-externalssl-certificate.yml`]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate) - - [Template for certificate signed by recognized CA
`3-node-externalssl-recognizedca.yml`]({{}}/rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca) - - - -2. Rename the file to `rancher-cluster.yml`. - -## 6. Configure Nodes - -Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. - -1. Open `rancher-cluster.yml` in your favorite text editor. - -1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). - - For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. - - >**Note:** - > - >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements]({{}}/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. - - nodes: - # The IP address or hostname of the node - - address: IP_ADDRESS_1 - # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) - # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 - user: USER - role: [controlplane,etcd,worker] - # Path the SSH key that can be used to access to node with the specified user - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_2 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - - address: IP_ADDRESS_3 - user: USER - role: [controlplane,etcd,worker] - ssh_key_path: ~/.ssh/id_rsa - -1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. - - services: - etcd: - backup: false - -## 7. Configure Certificates - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A—Bring Your Own Certificate: Self-Signed" %}} ->**Prerequisites:** ->Create a self-signed certificate. -> ->- The certificate files must be in PEM format. ->- The certificate files must be encoded in [base64](#base64). ->- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) - ->**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
- -After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - --- - apiVersion: v1 - kind: Secret - metadata: - name: cattle-keys-server - namespace: cattle-system - type: Opaque - data: - cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B—Bring Your Own Certificate: Signed by Recognized CA" %}} -If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. -{{% /accordion %}} - -## 8. Configure FQDN - -There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). - -1. Open `rancher-cluster.yml`. - -2. In the `kind: Ingress` with `name: cattle-ingress-http:` - - Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). - - **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): - - ``` - apiVersion: extensions/v1beta1 - kind: Ingress - metadata: - namespace: cattle-system - name: cattle-ingress-http - annotations: - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" - nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open - nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open - spec: - rules: - - host: rancher.yourdomain.com - http: - paths: - - backend: - serviceName: cattle-service - servicePort: 80 - ``` - - -3. Save the file and close it. - -## 9. Configure Rancher version - -The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. - -``` - spec: - serviceAccountName: cattle-admin - containers: - - image: rancher/rancher:v2.0.6 - imagePullPolicy: Always -``` - -## 10. 
Back Up Your RKE Config File - -After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. - -## 11. Run RKE - -With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. - -1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. - -2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. - -3. Enter one of the `rke up` commands listen below. - - ``` - rke up --config rancher-cluster.yml - ``` - - **Step Result:** The output should be similar to the snippet below: - - ``` - INFO[0000] Building Kubernetes cluster - INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] - INFO[0000] [network] Deploying port listener containers - INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] - ... - INFO[0101] Finished building Kubernetes cluster successfully - ``` - -## 12. Back Up Auto-Generated Config File - -During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. - -## What's Next? - -- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration]({{}}/rancher/v2.0-v2.4/en/backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. -- Create a Kubernetes cluster: [Creating a Cluster]({{}}/rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/). - -
- -## FAQ and Troubleshooting - -{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/single-node-install-external-lb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/advanced/single-node-install-external-lb/_index.md deleted file mode 100644 index cf9a01efa2..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/single-node-install-external-lb/_index.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer -weight: 252 -aliases: - - /rancher/v2.0-v2.4/en/installation/single-node/single-node-install-external-lb/ - - /rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb - - /rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb - - /rancher/v2.0-v2.4/en/installation/single-node-install-external-lb ---- - -For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. - -A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. - -This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. - -> **Want to skip the external load balancer?** -> See [Docker Installation]({{}}/rancher/v2.0-v2.4/en/installation/single-node) instead. - -## Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -## Installation Outline - - - -- [1. Provision Linux Host](#1-provision-linux-host) -- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) -- [3. Configure Load Balancer](#3-configure-load-balancer) - - - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements) to launch your Rancher Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Complete an Air Gap Installation? -> - Record all transactions with the Rancher API? -> -> See [Advanced Options](#advanced-options) below before continuing. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} -If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. - -> **Prerequisites:** -> Create a self-signed certificate. -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Self-Signed Cert:** - -1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. 
- - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} -If your cluster is public facing, it's best to use a certificate signed by a recognized CA. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Cert Signed by a Recognized CA:** - -If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. - -1. Enter the following command. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` - - {{% /accordion %}} - -## 3. Configure Load Balancer - -When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. - -The load balancer or proxy has to be configured to support the following: - -- **WebSocket** connections -- **SPDY** / **HTTP/2** protocols -- Passing / setting the following headers: - - | Header | Value | Description | - |--------|-------|-------------| - | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. - | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. - | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. - | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. -### Example NGINX configuration - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server rancher-server:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` - -
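-One way to try this configuration out is to run NGINX itself in a Docker container with the file mounted in. This is only a sketch; it assumes the configuration above was saved as `/etc/nginx.conf` on the load balancer host and that the certificate files are in `/certs`:
-
-```
-docker run -d --restart=unless-stopped \
-  -p 80:80 -p 443:443 \
-  -v /etc/nginx.conf:/etc/nginx/nginx.conf:ro \
-  -v /certs:/certs:ro \
-  nginx:1.14
-```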
- -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.0-v2.4/en/installation/api-auditing) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - rancher/rancher:latest -``` - -This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - -``` -upstream rancher { - server rancher-server:80; -} - -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 443 ssl http2; - server_name rancher.yourdomain.com; - ssl_certificate /etc/your_certificate_directory/fullchain.pem; - ssl_certificate_key /etc/your_certificate_directory/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } -} - -server { - listen 80; - server_name rancher.yourdomain.com; - return 301 https://$server_name$request_uri; -} -``` - -
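-Putting the advanced options above together, a Rancher container started with API auditing enabled and a persistent data volume might look like the sketch below. The host paths and retention values are only examples; adjust them for your environment:
-
-```
-docker run -d --restart=unless-stopped \
-  -p 80:80 -p 443:443 \
-  -v /opt/rancher:/var/lib/rancher \
-  -v /var/log/rancher/auditlog:/var/log/auditlog \
-  -e AUDIT_LEVEL=1 \
-  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
-  -e AUDIT_LOG_MAXAGE=20 \
-  -e AUDIT_LOG_MAXBACKUP=20 \
-  -e AUDIT_LOG_MAXSIZE=100 \
-  rancher/rancher:latest
-```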
- diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/chart-options/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/chart-options/_index.md deleted file mode 100644 index 1b258db1b9..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/chart-options/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rancher Helm Chart Options -weight: 50 ---- - -The Rancher Helm chart options reference moved to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/choosing-version/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/choosing-version/_index.md deleted file mode 100644 index df137ded59..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/choosing-version/_index.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: Choosing a Rancher Version -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/server-tags ---- - -This section describes how to choose a Rancher version. - -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** - -{{% tabs %}} -{{% tab "Helm Charts" %}} - -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -Refer to the [Helm version requirements]({{}}/rancher/v2.0-v2.4/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -| Type | Command to Add the Repo | Description of the Repo | -| -------------- | ------------ | ----------------- | -| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | -| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. 
|
-| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless of repository, aren't supported. |
-
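-For example, to start working from the stable repository, you might add it and refresh your local chart cache as follows (a minimal sketch that assumes Helm is already installed):
-
-```
-helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
-helm repo update
-helm repo list
-```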
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository).
-
-> **Note:** The `rancher-latest` and `rancher-stable` Helm chart repositories were introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` before v2.1.0 are v2.0.4, v2.0.6, and v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`.
-
-### Helm Chart Versions
-
-Rancher Helm chart versions match the Rancher version (i.e., `appVersion`). Once you've added the repo, you can search it to show available versions with the following command:
-    `helm search repo --versions`
-
-If you have several repos, you can specify the repo name, for example: `helm search repo rancher-stable/rancher --versions`.
-For more information, see https://helm.sh/docs/helm/helm_search_repo/
-
-To fetch a specific version of your chosen repo, define the `--version` parameter as in the following example:
-    `helm fetch rancher-stable/rancher --version=2.4.8` - -For the Rancher v2.1.x versions, there were some Helm charts where the version was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - -> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ```plain - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ```plain - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. - - ```plain - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. -{{% /tab %}} -{{% tab "Docker Images" %}} -When performing [Docker installs]({{}}/rancher/v2.0-v2.4/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -| Tag | Description | -| -------------------------- | ------ | -| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | -| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | - -> **Notes:** -> -> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. -> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. 
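-As a quick illustration of pinning an exact release rather than a floating tag, a Docker install might look like the sketch below. The tag shown is only an example; substitute a version that is actually marked stable:
-
-```
-docker pull rancher/rancher:v2.4.8
-docker run -d --restart=unless-stopped \
-  -p 80:80 -p 443:443 \
-  rancher/rancher:v2.4.8
-```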
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/custom-ca-root-certificate/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/custom-ca-root-certificate/_index.md deleted file mode 100644 index 6474fc305a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/custom-ca-root-certificate/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: About Custom CA Root Certificates -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/ - - /rancher/v2.0-v2.4/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate ---- - -If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). - -Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. - -To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. - -Examples of services that Rancher can access: - -- Catalogs -- Authentication providers -- Accessing hosting/cloud API when using Node Drivers - -## Installing with the custom CA Certificate - -For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: - -- [Docker install Custom CA certificate options]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -- [Kubernetes install options for Additional Trusted CAs]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#additional-trusted-cas) - diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/_index.md deleted file mode 100644 index 3c0c500c5a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/_index.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Enabling Experimental Features -weight: 17 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/feature-flags/ - - /rancher/v2.0-v2.4/en/admin-settings/feature-flags/ ---- -Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. - -The features can be enabled in three ways: - -- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. -- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) in Rancher v2.3.3+ by going to the **Settings** page. 
-- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. - -Each feature has two values: - -- A default value, which can be configured with a flag or environment variable from the command line -- A set value, which can be configured with the Rancher API or UI - -If no value has been set, Rancher uses the default value. - -Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. - -For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. - -> **Note:** As of v2.4.0, there are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. - -The following is a list of the feature flags available in Rancher: - -- `dashboard`: This feature enables the new experimental UI that has a new look and feel. The dashboard also leverages a new API in Rancher which allows the UI to access the default Kubernetes resources without any intervention from Rancher. -- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. -- `proxy`: This feature enables Rancher to use a new simplified code base for the proxy, which can help enhance performance and security. The proxy feature is known to have issues with Helm deployments, which prevents any catalog applications to be deployed which includes Rancher's tools like monitoring, logging, Istio, etc. -- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. - -The below table shows the availability and default value for feature flags in Rancher: - -| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | -| ----------------------------- | ------------- | ------------ | --------------- |---| -| `dashboard` | `true` | Experimental | v2.4.0 | x | -| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | -| `istio-virtual-service-ui` | `true` | GA | v2.3.2 | | -| `proxy` | `false` | Experimental | v2.4.0 | | -| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | - -# Enabling Features when Starting Rancher - -When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. - -> **Note:** Values set from the Rancher API will override the value passed in through the command line. - -{{% tabs %}} -{{% tab "Kubernetes Install" %}} -When installing Rancher with a Helm chart, use the `--features` option. 
In the below example, two features are enabled by passing the feature flag names names in a comma separated list: - -``` -helm install rancher-latest/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -### Rendering the Helm Chart for Air Gap Installations - -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/air-gap/install-rancher) - -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm 3 command is as follows: - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -The Helm 2 command is as follows: - -``` -helm template ./rancher-.tgz --output-dir . \ - --name rancher \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 - --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% tab "Docker Install" %}} -When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -docker run -d -p 80:80 -p 443:443 \ - --restart=unless-stopped \ - rancher/rancher:rancher-latest \ - --features==true,=true # Available as of v2.3.0 -``` - -{{% /tab %}} -{{% /tabs %}} - -# Enabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** - -**Result:** The feature is disabled. - -# Enabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. 
Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **True.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **False.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is disabled. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md deleted file mode 100644 index e52edb1e60..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Allow Unsupported Storage Drivers -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers/ ---- - -This feature allows you to use types for storage providers and provisioners that are not enabled by default. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Description ----|---|--- - `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. - -### Types for Persistent Volume Plugins that are Enabled by Default -Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -### Types for StorageClass that are Enabled by Default -Below is a list of storage types for a StorageClass that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|-------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md deleted file mode 100644 index 7159860f4b..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: UI for Istio Virtual Services and Destination Rules -weight: 2 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/feature-flags/istio-virtual-service-ui ---- - -This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. - -> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup) in order to use the feature. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.0-v2.4/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Status | Available as of ----|---|---|--- -`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 -`istio-virtual-service-ui` | `true` | GA | v2.3.2 - -# About this Feature - -A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. - -When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. - -The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** - -- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) -- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) - -To see these tabs, - -1. Go to the project view in Rancher and click **Resources > Istio.** -1. You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. 
Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/helm-version/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/helm-version/_index.md deleted file mode 100644 index dc0a2a72a1..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/helm-version/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Helm Version Requirements -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/helm-version - - /rancher/v2.0-v2.4/en/installation/options/helm2 - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init - - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher ---- - -This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. - -> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.0-v2.4/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. -- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. -- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-RKE/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-RKE/_index.md deleted file mode 100644 index 9a8b8e758c..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-RKE/_index.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -title: Setting up a High-availability RKE Kubernetes Cluster -shortTitle: Set up RKE Kubernetes -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/k8s-install/kubernetes-rke ---- - - -This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. - -For Rancher before v2.4, Rancher should be installed on an RKE Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. - -As of Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. - -The Rancher management server can only be run on Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. 
- -For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/) - -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Installing Kubernetes - -### Required CLI Tools - -Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. - -Also install [RKE,]({{}}/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. - -### 1. Create the cluster configuration file - -In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. - -Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. - -RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. - -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane, worker, etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Required for external TLS termination with -# ingress-nginx v0.22+ -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -
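-Before running RKE, it can help to confirm that each node is reachable over SSH with the configured user and that the user can run Docker commands, since this is how RKE operates on the nodes. A quick manual check along these lines, assuming the default key location and the first example node above:
-
-```
-ssh -i ~/.ssh/id_rsa ubuntu@165.227.114.63 docker ps
-```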
Common RKE Node Options
- -| Option | Required | Description | -| ------------------ | -------- | -------------------------------------------------------------------------------------- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. -> -> Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. -> -> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide]({{}}/rancher/v2.0-v2.4/en/installation/options/etcd/). - -### 2. Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### 3. Test Your Cluster - -This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. - -Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. - -When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### 4. Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy are ready to continue. - -- Pods are in `Running` or `Completed` state. -- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
- -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. - -### 5. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.0-v2.4/en/installation/options/troubleshooting/) page. - - -### [Next: Install Rancher]({{}}/rancher/v2.0-v2.4/en/installation/k8s-install/helm-rancher/) - diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md deleted file mode 100644 index 7b9a8c3eb3..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Setting up a High-availability K3s Kubernetes Cluster for Rancher -shortTitle: Set up K3s for Rancher -weight: 2 ---- - -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) - -For systems without direct internet access, refer to the air gap installation instructions. - -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Prerequisites - -These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. -# Installing Kubernetes - -### 1. Install Kubernetes and Set up the K3s Server - -When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. - -1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. -1. 
On the Linux node, run this command to start the K3s server and connect it to the external datastore: - ``` - curl -sfL https://get.k3s.io | sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: - ```sh - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - Note: The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. - -1. Repeat the same command on your second K3s server node. - -### 2. Confirm that K3s is Running - -To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: -``` -sudo k3s kubectl get nodes -``` - -Then you should see two nodes with the master role: -``` -ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 -ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 -``` - -Then test the health of the cluster pods: -``` -sudo k3s kubectl get pods --all-namespaces -``` - -**Result:** You have successfully set up a K3s Kubernetes cluster. - -### 3. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -```yml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### 4. Check the Health of Your Cluster Pods - -Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. 
- -Check that all the required pods and containers are healthy are ready to continue: - -``` -ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d -kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d -kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d -``` - -**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/how-ha-works/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/how-ha-works/_index.md deleted file mode 100644 index e572c961b0..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/how-ha-works/_index.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: About High-availability Installations -weight: 1 ---- - -We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. - -In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. - -Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. - -The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. - -For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.0-v2.4/en/overview/architecture) - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
-![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md deleted file mode 100644 index 22a86b8d51..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. -shortTitle: Infrastructure Tutorials -weight: 5 ---- - -To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - - -To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md deleted file mode 100644 index 0b9927cb88..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Setting up Nodes in Amazon EC2 -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/ec2-node ---- - -In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. - -If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. - -If the Rancher server is installed in a single Docker container, you only need one instance. - -### 1. Optional Preparation - -- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/#port-requirements) - -### 2. Provision Instances - -1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. -1. In the left panel, click **Instances.** -1. Click **Launch Instance.** -1. 
In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** -1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. -1. Click **Next: Configure Instance Details.** -1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. -1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. -1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** -1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/#port-requirements) for Rancher nodes. -1. Click **Review and Launch.** -1. Click **Launch.** -1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. -1. Click **Launch Instances.** - -**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. Next, you will install Docker on each node. - -### 3. Install Docker and Create User - -1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. -1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** -1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: -``` -sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] -``` -1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: -``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh -``` -1. When you are connected to the instance, run the following command on the instance to create a user: -``` -sudo usermod -aG docker ubuntu -``` -1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. - -> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. - -**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. - -### Next Steps for RKE Kubernetes Cluster Nodes - -If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. - -RKE will also need access to the private key to connect to each node. Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. 
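For illustration, a node entry in `rancher-cluster.yml` might be recorded along the following lines. This is only a sketch: the IP addresses, key path, and role layout are placeholders, and the `ubuntu` user matches the AMI used above.

```
cat > rancher-cluster.yml <<'EOF'
nodes:
  - address: 203.0.113.10        # IPv4 Public IP of the instance
    internal_address: 10.0.0.10  # Private IP of the instance
    user: ubuntu
    role: [controlplane, worker, etcd]
    ssh_key_path: ~/.ssh/my-ec2-key.pem
  # ...repeat for the remaining instances...
EOF
```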
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md deleted file mode 100644 index 89bcefa558..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' -weight: 1 ---- - -This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. - -The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -For more information about each installation option, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation) - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. We recommend MySQL. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. - -When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the MySQL database, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/rds/) for setting up MySQL on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. 
This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
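As a quick sanity check, you can confirm that the record resolves to your load balancer before moving on. The hostname below is a placeholder, and `dig` is only one of several lookup tools you could use:

```
# For an A record, this should print the load balancer IP
dig +short rancher.example.com

# For a CNAME record, this should print the load balancer hostname
dig +short rancher.example.com CNAME
```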
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md deleted file mode 100644 index 0ca01f9a5d..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' -weight: 2 ---- - -This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, Azure, or vSphere. - * **Note:** When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. Please refer [here](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations) for more information on Azure load balancer limitations. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.0-v2.4/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.0-v2.4/en/installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. 
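Once the cluster is up (in a later step), one way to confirm that the Ingress controller is running is to list its pods. On a default RKE install the controller typically runs in the `ingress-nginx` namespace, although the namespace and labels can vary with your configuration:

```
kubectl -n ingress-nginx get pods -o wide
```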
- -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.0-v2.4/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
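Before continuing with the Rancher installation, you may also want to verify that the hostname resolves and that the load balancer accepts connections on the expected ports. The hostname below is a placeholder, and `nc` is only one of several tools that can perform this check:

```
nc -vz rancher.example.com 80
nc -vz rancher.example.com 443
```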
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md deleted file mode 100644 index 603fe9f7a2..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Setting up Amazon ELB Network Load Balancer -weight: 5 -aliases: - - /rancher/v2.0-v2.4/en/installation/ha/create-nodes-lb/nlb - - /rancher/v2.0-v2.4/en/installation/k8s-install/create-nodes-lb/nlb - - /rancher/v2.0-v2.4/en/installation/options/nlb ---- - -This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. - -These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. - -Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. - -# Setting up the Load Balancer - -Configuring an Amazon NLB is a multistage process: - -1. [Create Target Groups](#1-create-target-groups) -2. [Register Targets](#2-register-targets) -3. [Create Your NLB](#3-create-your-nlb) -4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) - -# Requirements - -These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. - -# 1. Create Target Groups - -Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. - -Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. - -1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -1. Click **Create target group** to create the first target group, regarding TCP port 443. 
- -> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. - -| Option | Setting | -|-------------------|-------------------| -| Target Group Name | `rancher-tcp-443` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `443` | -| VPC | Choose your VPC | - -Health check settings: - -| Option | Setting | -|---------------------|-----------------| -| Protocol | TCP | -| Port | `override`,`80` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. - -| Option | Setting | -|-------------------|------------------| -| Target Group Name | `rancher-tcp-80` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `80` | -| VPC | Choose your VPC | - - -Health check settings: - -| Option |Setting | -|---------------------|----------------| -| Protocol | TCP | -| Port | `traffic port` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -# 2. Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot: Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot: Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -# 3. Create Your NLB - -Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. Then complete each form. - -- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) -- [Step 2: Configure Routing](#step-2-configure-routing) -- [Step 3: Register Targets](#step-3-register-targets) -- [Step 4: Review](#step-4-review) - -### Step 1: Configure Load Balancer - -Set the following fields in the form: - -- **Name:** `rancher` -- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. -- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. -- **Availability Zones:** Select Your **VPC** and **Availability Zones**. - -### Step 2: Configure Routing - -1. From the **Target Group** drop-down, choose **Existing target group**. -1. From the **Name** drop-down, choose `rancher-tcp-443`. -1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -### Step 3: Register Targets - -Since you registered your targets earlier, all you have to do is click **Next: Review**. - -### Step 4: Review - -Look over the load balancer details and click **Create** when you're satisfied. - -After AWS creates the NLB, click **Close**. - -# 4. Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. - -# Health Check Paths for NGINX Ingress and Traefik Ingresses - -K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. - -For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. - -- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. -- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. - -To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md deleted file mode 100644 index cb88c11a85..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Setting up a MySQL Database in Amazon RDS -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/rds ---- -This tutorial describes how to set up a MySQL database in Amazon's RDS. - -This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. - -1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. In the left panel, click **Databases.** -1. Click **Create database.** -1. In the **Engine type** section, click **MySQL.** -1. In the **Version** section, choose **MySQL 5.7.22.** -1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. -1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. -1. Click **Create database.** - -You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. - -To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. - -- **Username:** Use the admin username. -- **Password:** Use the admin password. -- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. -- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. -- **Database name:** Confirm the name by going to the **Configuration** tab. 
The name is listed under **DB name.** - -This information will be used to connect to the database in the following format: - -``` -mysql://username:password@tcp(hostname:3306)/database-name -``` - -For more information on configuring the datastore for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/_index.md deleted file mode 100644 index 50f28c23fc..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/local-system-charts/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Setting up Local System Charts for Air Gapped Installations -weight: 120 -aliases: - - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md - - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md - - /rancher/v2.0-v2.4/en/installation/options/local-system-charts ---- - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. - -In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag in Rancher v2.3.0, and using a Git mirror for Rancher versions before v2.3.0. - -# Using Local System Charts in Rancher v2.3.0 - -In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. - -Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-rancher/) instructions. - -# Setting Up System Charts for Rancher Before v2.3.0 - -### A. Prepare System Charts - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. - -Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. - -### B. Configure System Charts - -Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. - -{{% tabs %}} -{{% tab "Rancher UI" %}} - -In the catalog management page in the Rancher UI, follow these steps: - -1. Go to the **Global** view. - -1. Click **Tools > Catalogs.** - -1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **⋮ > Edit.** - -1. 
In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. - -1. Click **Save.** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% tab "Rancher API" %}} - -1. Log into Rancher. - -1. Open `https:///v3/catalogs/system-library` in your browser. - - {{< img "/img/rancher/airgap/system-charts-setting.png" "Open">}} - -1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. - - {{< img "/img/rancher/airgap/system-charts-update.png" "Update">}} - -1. Click **Show Request** - -1. Click **Send Request** - -**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/_index.md deleted file mode 100644 index 351dc19b5a..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/tls-secrets/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Adding TLS Secrets -weight: 2 ---- - -Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. - -Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. - -For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. -This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. - -Use `kubectl` with the `tls` secret type to create the secrets. - -``` -kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. - -# Using a Private CA Signed Certificate - -If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. - -Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. - -``` -kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem=./cacerts.pem -``` - -> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. - -# Updating a Private CA Certificate - -Follow the steps on [this page]({{}}/rancher/v2.0-v2.4/en/installation/resources/update-rancher-cert) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. 
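Whether you are creating the secret for the first time or replacing it as part of a certificate update, a quick way to sanity-check what is stored in it is to decode the certificate and inspect its subject, issuer, and expiry. This assumes `openssl` is available on the machine where you run `kubectl`:

```
kubectl -n cattle-system get secret tls-rancher-ingress -o jsonpath='{.data.tls\.crt}' \
  | base64 --decode \
  | openssl x509 -noout -subject -issuer -enddate
```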
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/tls-settings/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/tls-settings/_index.md deleted file mode 100644 index 3cd06647f1..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/tls-settings/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: TLS Settings -weight: 3 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/tls-settings/ - - /rancher/v2.0-v2.4/en/admin-settings/tls-settings - - /rancher/v2.0-v2.4/en/installation/resources/encryption/tls-settings ---- - -In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 and secure TLS cipher suites. TLS 1.3 and TLS 1.3 exclusive cipher suites are not supported. - -# Configuring TLS settings - -TLS settings are configured by passing environment variables to the Rancher server container. See the following pages to configure them for your installation. - -- [TLS settings in Docker options]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/advanced/#tls-settings) - -- [TLS settings in Helm chart options]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/chart-options/#tls-settings) - -# TLS Environment Variables - -| Parameter | Description | Default | Available options | |-----|-----|-----|-----| | `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2` | | `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) | - - -# Legacy configuration - -If you need to configure TLS the same way as it was before Rancher v2.1.7, please use the following settings: - - -| Parameter | Legacy value | -|-----|-----| -| `CATTLE_TLS_MIN_VERSION` | `1.0` | -| `CATTLE_TLS_CIPHERS` | `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,`
`TLS_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_RSA_WITH_AES_256_GCM_SHA384,`
`TLS_RSA_WITH_AES_128_CBC_SHA,`
`TLS_RSA_WITH_AES_256_CBC_SHA,`
`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,`
`TLS_RSA_WITH_3DES_EDE_CBC_SHA` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/update-rancher-cert/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/update-rancher-cert/_index.md deleted file mode 100644 index f27a09a882..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/update-rancher-cert/_index.md +++ /dev/null @@ -1,234 +0,0 @@ ---- -title: Updating the Rancher Certificate -weight: 10 ---- - -# Updating a Private CA Certificate - -Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. - -A summary of the steps is as follows: - -1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. -2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). -3. Update the Rancher installation using the Helm CLI. -4. Reconfigure the Rancher agents to trust the new CA certificate. - -The details of these instructions are below. - -## 1. Create/update the certificate secret resource - -First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. - -If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -Alternatively, to update an existing certificate secret: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 2. Create/update the CA certificate secret resource - -If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca secret` in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). - -To create the initial secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem -``` - -To update an existing `tls-ca` secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 3. Reconfigure the Rancher deployment - -> Before proceeding, [generate an API token in the Rancher UI]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/#creating-an-api-key) (User > API & Keys). - -This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). - -It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. - -To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. 
Check with: - -``` -$ helm get values rancher -n cattle-system -``` - -Also get the version string of the currently deployed Rancher chart: - -``` -$ helm ls -A -``` - -Upgrade the Helm application instance using the original configuration values and making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade. - -If the certificate was signed by a private CA, add the `--set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates. - -``` -helm upgrade rancher rancher-stable/rancher \ - --namespace cattle-system \ - --version \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set ... -``` - -When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written in the `tls-ca` secret earlier. - -## 4. Reconfigure Rancher agents to trust the private CA - -This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true: - -- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`) -- The root CA certificate for the new custom certificate has changed - -### Why is this step required? - -When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on the Rancher server side, the environment variable `CATTLE_CA_CHECKSUM` must be updated accordingly. - -### Which method should I choose? - -Method 1 is the easiest one but requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (Step 3). - -If the clusters have lost connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/ace/) enabled, then go with method 2. - -Method 3 can be used as a fallback if methods 1 and 2 are unfeasible. - -### Method 1: Kubectl command - -For each cluster under Rancher management (including `local`) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). - -``` -kubectl patch clusters -p '{"status":{"agentImage":"dummy"}}' --type merge -``` - -This command will cause all Agent Kubernetes resources to be reconfigured with the checksum of the new certificate. - - -### Method 2: Manually update checksum - -Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: - -``` -$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp -$ sha256sum cacert.tmp | awk '{print $1}' -``` - -Using a Kubeconfig for each downstream cluster, update the environment variable for the two agent deployments. 
- -``` -$ kubectl edit -n cattle-system ds/cattle-node-agent -$ kubectl edit -n cattle-system deployment/cluster-agent -``` - -### Method 3: Recreate Rancher agents - -With this method you are recreating the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. - -First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 - -Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: -https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b - -# Updating from a Private CA Certificate to a Common Certificate - ->It is possible to perform the opposite procedure as shown above: you may change from a private certificate to a common, or non-private, certificate. The steps involved are outlined below. - -## 1. Create/update the certificate secret resource - -First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. - -If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -Alternatively, to update an existing certificate secret: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 2. Delete the CA certificate secret resource - -You will delete the `tls-ca secret` in the `cattle-system` namespace as it is no longer needed. You may also optionally save a copy of the `tls-ca secret` if desired. - -To save the existing secret: - -``` -kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml -``` - -To delete the existing `tls-ca` secret: - -``` -kubectl -n cattle-system delete secret tls-ca -``` - -## 3. Reconfigure the Rancher deployment - -> Before proceeding, [generate an API token in the Rancher UI](https://rancher.com/docs/rancher/v2.6/en/user-settings/api-keys/#creating-an-api-key) (User > API & Keys) and save the Bearer Token which you might need in step 4. - -This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). - -It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. - -To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: - -``` -$ helm get values rancher -n cattle-system -``` - -Also get the version string of the currently deployed Rancher chart: - -``` -$ helm ls -A -``` - -Upgrade the Helm application instance using the original configuration values and making sure to specify the current chart version to prevent an application upgrade. - -Also make sure to read the documentation describing the initial installation using custom certificates. - -``` -helm upgrade rancher rancher-stable/rancher \ - --namespace cattle-system \ - --version \ - --set hostname=rancher.my.org \ - --set ... 
-``` - -On upgrade, you can either - -- remove `--set ingress.tls.source=secret \` from the Helm upgrade command, as shown above, or - -- remove the `privateCA` parameter or set it to `false` because the CA is valid: - -``` -set privateCA=false -``` - -## 4. Reconfigure Rancher agents for the non-private/common certificate - -`CATTLE_CA_CHECKSUM` environment variable on the downstream cluster agents should be removed or set to "" (an empty string). \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/_index.md deleted file mode 100644 index 29d2e41144..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/_index.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: Upgrading Cert-Manager -weight: 4 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager - - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.0-v2.4/en/installation/resources/encryption/upgrading-cert-manager ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. - -# Upgrade Cert-Manager - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. 
You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions) - -In order to upgrade cert-manager, follow these instructions: - -### Option A: Upgrade cert-manager with Internet Access - -{{% accordion id="normal" label="Click to expand" %}} -1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) - - ```plain - helm uninstall cert-manager - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed - - ```plain - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager if needed - - ```plain - kubectl create namespace cert-manager - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.12.0 - ``` - -1. 
[Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Option B: Upgrade cert-manager in an Air Gap Environment - -{{% accordion id="airgap" label="Click to expand" %}} - -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - The Helm 3 command is as follows: - - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - - The Helm 2 command is as follows: - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager (old and new) - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. 
Delete the existing cert-manager installation - - ```plain - kubectl -n cert-manager \ - delete deployment,sa,clusterrole,clusterrolebinding \ - -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f cert-manager/cert-manager-crd-old.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager - - ```plain - kubectl create namespace cert-manager - ``` - -1. Install cert-manager - - ```plain - kubectl -n cert-manager apply -R -f ./cert-manager - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Verify the Deployment - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). 
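To make the new format concrete, the following is a minimal sketch (not taken from the Rancher chart) of an ACME Issuer written in the solvers style; the resource name, email address, account-key secret name, and ingress class are placeholder values, and an HTTP-01 challenge through an nginx ingress controller is assumed:

```yml
apiVersion: cert-manager.io/v1alpha2          # new API group used by cert-manager v0.11+
kind: Issuer
metadata:
  name: letsencrypt-example                   # placeholder name
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: user@example.com                   # placeholder email
    privateKeySecretRef:
      name: letsencrypt-example-account-key   # placeholder secret for the ACME account key
    solvers:                                  # challenge solver configuration now lives here
    - http01:
        ingress:
          class: nginx                        # assumes an nginx ingress controller
```

Issuers and Certificates still using the pre-v0.8 configuration format must be migrated to this style before upgrading to v0.11, as described in the linked upgrade guides.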
- -More information about upgrading cert-manager is available in the [cert-manager upgrade documentation](https://cert-manager.io/docs/installation/upgrading/). - diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md deleted file mode 100644 index 968cd6d666..0000000000 --- a/content/rancher/v2.0-v2.4/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Upgrading Cert-Manager with Helm 2 -weight: 2040 -aliases: - - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.0-v2.4/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/ ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that require action if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. -1. [Cert-manager is deprecating the `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If the version of cert-manager you are currently running is older than v0.11, and you want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on cert-manager's API version, and the upgrade will therefore be rejected. - -> For reinstalling Rancher with Helm, please see [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. - -## Upgrade Cert-Manager Only - -> **Note:** -> These instructions apply only if you do not plan to upgrade Rancher. - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in `kube-system`, use that namespace in the instructions below.
You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -In order to upgrade cert-manager, follow these instructions: - -{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}} -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. Delete the existing deployment - - ```plain - helm delete --purge cert-manager - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager - ``` -{{% /accordion %}} - -{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}} -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace kube-system \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - -1. 
Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - -1. Install cert-manager - - ```plain - kubectl -n kube-system apply -R -f ./cert-manager - ``` -{{% /accordion %}} - - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace kube-system - -NAME READY STATUS RESTARTS AGE -cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m -cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m -cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m -``` - -If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. - -> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. Here are additional resources that explain why this is necessary: -> -> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) -> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). 
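As a minimal illustration of the API group change described above, the most visible edit to each backed-up resource is its `apiVersion`; the sketch below shows a Certificate fragment before and after the upgrade (other fields are omitted, and annotation changes on related resources may also apply, as noted in the linked guides):

```yml
# Before the upgrade (API group removed in v0.11)
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
---
# After the upgrade (new API group)
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
```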
- -For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/_index.md deleted file mode 100644 index eeaab9d0ca..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Kubernetes Resources -weight: 19 -aliases: - - /rancher/v2.0-v2.4/en/concepts/ - - /rancher/v2.0-v2.4/en/tasks/ - - /rancher/v2.0-v2.4/en/concepts/resources/ ---- - -## Workloads - -Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. - -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. - -Following a workload deployment, you can continue working with it. You can: - -- [Upgrade]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. -- [Roll back]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. - -## Load Balancing and Ingress - -### Load Balancers - -After you launch an application, it's only available within the cluster. It can't be reached externally. - -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -#### Ingress - -Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. You can get around this issue by using an ingress. - -Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. - -For more information, see [Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress). 
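For reference, a minimal sketch of an ingress rule that routes one hostname to a service is shown below; the hostname, service name, and port are placeholders, and the `networking.k8s.io/v1beta1` API version available on clusters of this era is assumed:

```yml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: hello-world-ingress           # placeholder name
spec:
  rules:
  - host: hello.example.com           # requests for this hostname...
    http:
      paths:
      - path: /
        backend:
          serviceName: hello-world    # ...are routed to this service
          servicePort: 80
```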
- -When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -For more information, see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). - -## Service Discovery - -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. - -For more information, see [Service Discovery]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery). - -## Pipelines - -After your project has been [configured to a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. - -For more information, see [Pipelines]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/). - -## Applications - -Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. - -For more information, see [Applications in a Project]({{}}/rancher/v2.0-v2.4/en/catalog/apps/). - -## Kubernetes Resources - -Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. - -Resources include: - -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/_index.md deleted file mode 100644 index 9c5ed85d68..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Encrypting HTTP Communication -description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments -weight: 3060 -aliases: - - /rancher/v2.0-v2.4/en/tasks/projects/add-ssl-certificates/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/certificates ---- - -When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. 
You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. - -Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. - ->**Prerequisites:** You must have a TLS private key and certificate available to upload. - -1. From the **Global** view, select the project where you want to deploy your ingress. - -1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. (For Rancher before v2.3, click **Resources > Certificates.**) - -1. Enter a **Name** for the certificate. - - >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. - -1. Select the **Scope** of the certificate. - - - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. - - - **Available to a single namespace:** The certificate is only available for the deployments in one namespace. If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. - -1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Private key files end with an extension of `.key`. - -1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Certificate files end with an extension of `.crt`. - -**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. - -- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. -- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. -- Your certificate is added to the **Resources > Secrets > Certificates** view. (For Rancher before v2.3, it is added to **Resources > Certificates.**) - -## What's Next? - -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). 
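As an aside, the same certificate and key can also be stored directly as a Kubernetes TLS secret from the command line; a minimal sketch, assuming placeholder file names and a namespace called `my-namespace`:

```plain
kubectl -n my-namespace create secret tls my-certificate \
  --cert=./tls.crt \
  --key=./tls.key
```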
diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/_index.md deleted file mode 100644 index 124ae82895..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: ConfigMaps -weight: 3061 -aliases: - - /rancher/v2.0-v2.4/en/tasks/projects/add-configmaps - - /rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps ---- - -While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). - -ConfigMaps accept key value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. - ->**Note:** ConfigMaps can only be applied to namespaces and not projects. - -1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. - -1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. - -1. Enter a **Name** for the Config Map. - - >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. - -1. Select the **Namespace** you want to add Config Map to. You can also add a new namespace on the fly by clicking **Add to a new namespace**. - -1. From **Config Map Values**, click **Add Config Map Value** to add a key value pair to your ConfigMap. Add as many values as you need. - -1. Click **Save**. - - >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/). - > - >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. - -## What's Next? - -Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for you application to consume, such as: - -- Application environment variables. -- Specifying parameters for a Volume mounted to the workload. - -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). 
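For comparison with the UI steps above, the object that results from adding a few key-value pairs looks roughly like the sketch below; the names and values are placeholders:

```yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config              # placeholder name, unique within the namespace
  namespace: my-namespace       # placeholder namespace
data:
  LOG_LEVEL: "info"             # a simple key-value pair
  app.properties: |             # a key can also hold an entire config file
    greeting=hello
    debug=false
```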
diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md deleted file mode 100644 index f3e5fdd4bf..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: The Horizontal Pod Autoscaler -description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment -weight: 3026 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. - -Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. - -You can create, manage, and delete HPAs using the Rancher UI in Rancher v2.3.0-alpha4 and higher versions. It only supports HPA in the `autoscaling/v2beta2` API. - -## Managing HPAs - -The way that you manage HPAs is different based on your version of the Kubernetes API: - -- **For Kubernetes API version autoscaling/V2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. -- **For Kubernetes API Version autoscaling/V2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. - -HPAs are also managed differently based on your version of Rancher: - -- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). -- **For Rancher Before v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl). - -You might have additional HPA installation steps if you are using an older version of Rancher: - -- **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -- **For Rancher Before v2.0.7:** Clusters created in Rancher before v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). 
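For reference, a minimal sketch of an HPA written against the `autoscaling/v2beta2` API mentioned above, scaling a hypothetical `hello-world` deployment on CPU utilization (the target, replica bounds, and threshold are placeholders):

```yml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world            # placeholder deployment to scale
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50   # scale out above 50% average CPU utilization
```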
- -## Testing HPAs with a Service Deployment - -In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). - -You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md deleted file mode 100644 index 369f7a1a8d..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Background Information on HPAs -weight: 3027 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. - -## Why Use Horizontal Pod Autoscaler? - -Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: - -- A minimum and maximum number of pods allowed to run, as defined by the user. -- Observed CPU/memory use, as reported in resource metrics. -- Custom metrics provided by third-party metrics application like Prometheus, Datadog, etc. - -HPA improves your services by: - -- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. -- Increase/decrease performance as needed to accomplish service level agreements. - -## How HPA Works - -![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) - -HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: - -Flag | Default | Description | ----------|----------|----------| - `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. - `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operations. - `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. - - -For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). - -## Horizontal Pod Autoscaler API Objects - -HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. 
- -For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md deleted file mode 100644 index 2c81976930..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/_index.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: Manual HPA Installation for Clusters Created Before Rancher v2.0.7 -weight: 3050 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-for-rancher-before-2_0_7 - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/ ---- - -This section describes how to manually install HPAs for clusters created with Rancher before v2.0.7. This section also describes how to configure your HPA to scale up or down, and how to assign roles to your HPA. - -Before you can use HPA in your Kubernetes cluster, you must fulfill some requirements. - -### Requirements - -Be sure that your Kubernetes cluster services are running with these flags at minimum: - -- kube-api: `requestheader-client-ca-file` -- kubelet: `read-only-port` at 10255 -- kube-controller: Optional, just needed if distinct values than default are required. - - - `horizontal-pod-autoscaler-downscale-delay: "5m0s"` - - `horizontal-pod-autoscaler-upscale-delay: "3m0s"` - - `horizontal-pod-autoscaler-sync-period: "30s"` - -For an RKE Kubernetes cluster definition, add this snippet in the `services` section. To add this snippet using the Rancher v2.0 UI, open the **Clusters** view and select **⋮ > Edit** for the cluster in which you want to use HPA. Then, from **Cluster Options**, click **Edit as YAML**. Add the following snippet to the `services` section: - -``` -services: -... - kube-api: - extra_args: - requestheader-client-ca-file: "/etc/kubernetes/ssl/kube-ca.pem" - kube-controller: - extra_args: - horizontal-pod-autoscaler-downscale-delay: "5m0s" - horizontal-pod-autoscaler-upscale-delay: "1m0s" - horizontal-pod-autoscaler-sync-period: "30s" - kubelet: - extra_args: - read-only-port: 10255 -``` - -Once the Kubernetes cluster is configured and deployed, you can deploy metrics services. - ->**Note:** `kubectl` command samples in the sections that follow were tested in a cluster running Rancher v2.0.6 and Kubernetes v1.10.1. - -### Configuring HPA to Scale Using Resource Metrics - -To create HPA resources based on resource metrics such as CPU and memory use, you need to deploy the `metrics-server` package in the `kube-system` namespace of your Kubernetes cluster. This deployment allows HPA to consume the `metrics.k8s.io` API. - ->**Prerequisite:** You must be running `kubectl` 1.8 or later. - -1. Connect to your Kubernetes cluster using `kubectl`. - -1. Clone the GitHub `metrics-server` repo: - ``` - # git clone https://github.com/kubernetes-incubator/metrics-server - ``` - -1. Install the `metrics-server` package. - ``` - # kubectl create -f metrics-server/deploy/1.8+/ - ``` - -1. Check that `metrics-server` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check the service pod for a status of `running`. 
Enter the following command: - ``` - # kubectl get pods -n kube-system - ``` - Then check for the status of `running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - metrics-server-6fbfb84cdd-t2fk9 1/1 Running 0 8h - ... - ``` - 1. Check the service logs for service availability. Enter the following command: - ``` - # kubectl -n kube-system logs metrics-server-6fbfb84cdd-t2fk9 - ``` - Then review the log to confirm that the `metrics-server` package is running. - {{% accordion id="metrics-server-run-check" label="Metrics Server Log Output" %}} - I0723 08:09:56.193136 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:'' - I0723 08:09:56.193574 1 heapster.go:72] Metrics Server version v0.2.1 - I0723 08:09:56.194480 1 configs.go:61] Using Kubernetes client with master "https://10.43.0.1:443" and version - I0723 08:09:56.194501 1 configs.go:62] Using kubelet port 10255 - I0723 08:09:56.198612 1 heapster.go:128] Starting with Metric Sink - I0723 08:09:56.780114 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) - I0723 08:09:57.391518 1 heapster.go:101] Starting Heapster API server... - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] listing is available at https:///swaggerapi - [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ - I0723 08:09:57.394080 1 serve.go:85] Serving securely on 0.0.0.0:443 - {{% /accordion %}} - - -1. Check that the metrics api is accessible from `kubectl`. - - - - If you are accessing the cluster through Rancher, enter your Server URL in the `kubectl` config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/metrics.k8s.io/v1beta1 - ``` - If the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/metrics.k8s.io/v1beta1 - ``` - If the API is working correctly, you should receive output similar to the output below. - ``` - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} - ``` - -### Assigning Additional Required Roles to Your HPA - -By default, HPA reads resource and custom metrics with the user `system:anonymous`. Assign `system:anonymous` to `view-resource-metrics` and `view-custom-metrics` in the ClusterRole and ClusterRoleBindings manifests. These roles are used to access metrics. - -To do it, follow these steps: - -1. Configure `kubectl` to connect to your cluster. - -1. Copy the ClusterRole and ClusterRoleBinding manifest for the type of metrics you're using for your HPA. 
- {{% accordion id="cluster-role-resource-metrics" label="Resource Metrics: ApiGroups resource.metrics.k8s.io" %}} - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-resource-metrics - rules: - - apiGroups: - - metrics.k8s.io - resources: - - pods - - nodes - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-resource-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-resource-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - {{% /accordion %}} -{{% accordion id="cluster-role-custom-resources" label="Custom Metrics: ApiGroups custom.metrics.k8s.io" %}} - - ``` - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: view-custom-metrics - rules: - - apiGroups: - - custom.metrics.k8s.io - resources: - - "*" - verbs: - - get - - list - - watch - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: view-custom-metrics - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: view-custom-metrics - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: User - name: system:anonymous - ``` -{{% /accordion %}} -1. Create them in your cluster using one of the follow commands, depending on the metrics you're using. - ``` - # kubectl create -f - # kubectl create -f - ``` diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md deleted file mode 100644 index 6857b0ce29..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: Managing HPAs with kubectl -weight: 3029 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl ---- - -This section describes HPA management with `kubectl`. This document has instructions for how to: - -- Create an HPA -- Get information on HPAs -- Delete an HPA -- Configure your HPAs to scale with CPU or memory utilization -- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics - -### Note For Rancher v2.3.x - -In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. - -### Note For Rancher Before v2.0.7 - -Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7). 
- -##### Basic kubectl Command for Managing HPAs - -If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: - -- Creating HPA - - - With manifest: `kubectl create -f ` - - - Without manifest (Just support CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` - -- Getting HPA info - - - Basic: `kubectl get hpa hello-world` - - - Detailed description: `kubectl describe hpa hello-world` - -- Deleting HPA - - - `kubectl delete hpa hello-world` - -##### HPA Manifest Definition Example - -The HPA manifest is the config file used for managing an HPA with `kubectl`. - -The following snippet demonstrates use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. - -```yml -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi -``` - - -Directive | Description ----------|----------| - `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | - `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-word` deployment. | - `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | - `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. - `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. - `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more that 100Mi of memory. -
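One possible way to try the manifest above and watch the replica count change (a sketch, assuming it is saved to a hypothetical file named `hello-world-hpa.yaml`):

```plain
kubectl create -f hello-world-hpa.yaml
kubectl get hpa hello-world --watch
```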
- -##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) - -Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. Run the following commands to check if metrics are available in your installation: - -``` -$ kubectl top nodes -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -node-controlplane 196m 9% 1623Mi 42% -node-etcd 80m 4% 1090Mi 28% -node-worker 64m 3% 1146Mi 29% -$ kubectl -n kube-system top pods -NAME CPU(cores) MEMORY(bytes) -canal-pgldr 18m 46Mi -canal-vhkgr 20m 45Mi -canal-x5q5v 17m 37Mi -canal-xknnz 20m 37Mi -kube-dns-7588d5b5f5-298j2 0m 22Mi -kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi -metrics-server-97bc649d5-jxrlt 0m 12Mi -$ kubectl -n kube-system logs -l k8s-app=metrics-server -I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true -I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 -I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version -I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 -I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink -I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) -I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ -I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 -``` - -If you have created your cluster in Rancher v2.0.6 or before, please refer to the manual installation. - -##### Configuring HPA to Scale Using Custom Metrics with Prometheus - -You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. - -For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: - -- Prometheus is deployed in the cluster. -- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. -- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` - -Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. - -For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). - -1. 
Initialize Helm in your cluster. - ``` - # kubectl -n kube-system create serviceaccount tiller - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` - -1. Clone the `banzai-charts` repo from GitHub: - ``` - # git clone https://github.com/banzaicloud/banzai-charts - ``` - -1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. - ``` - # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system - ``` - -1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check that the service pod is `Running`. Enter the following command. - ``` - # kubectl get pods -n kube-system - ``` - From the resulting output, look for a status of `Running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h - ... - ``` - 1. Check the service logs to make sure the service is running correctly by entering the command that follows. - ``` - # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system - ``` - Then review the log output to confirm the service is running. - {{% accordion id="prometheus-logs" label="Prometheus Adaptor Logs" %}} - ... - I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds - I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: - I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT - I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json - I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 - I0724 10:18:45.696766 1 request.go:836] Response Body: 
{"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} - I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.sslip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK - I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} - I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] - I0724 10:18:51.727845 1 request.go:836] Request Body: 
{"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} - ... - {{% /accordion %}} - - - -1. Check that the metrics API is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. - {{% accordion id="custom-metrics-api-response" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","
singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. 
- {{% accordion id="custom-metrics-api-response-rancher" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/
memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md deleted file mode 100644 index 49d3a4866e..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Managing HPAs with the Rancher UI -weight: 3028 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui ---- - -_Available as of v2.3.0_ - -The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. - -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -## Creating an HPA - -1. From the **Global** view, open the project that you want to deploy a HPA to. - -1. Click **Resources > HPA.** - -1. Click **Add HPA.** - -1. Enter a **Name** for the HPA. - -1. Select a **Namespace** for the HPA. - -1. Select a **Deployment** as scale target for the HPA. - -1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. - -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -1. Click **Create** to create the HPA. - -> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. 
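For reference, the CPU-based scaling that these steps configure in the UI can be approximated from the command line. This is a minimal sketch, assuming an illustrative deployment named `hello-world` in the `default` namespace (not values taken from the steps above):

```
# Create an HPA that keeps hello-world between 1 and 10 replicas at 50% average CPU
kubectl -n default autoscale deployment hello-world --min=1 --max=10 --cpu-percent=50

# Confirm the HPA exists and inspect its current metrics and replica count
kubectl -n default get hpa
kubectl -n default describe hpa hello-world
```

Note that `kubectl autoscale` only covers the CPU case; memory-based or custom-metric HPAs still require a full HorizontalPodAutoscaler manifest, as described in the kubectl-based documentation.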
- -## Get HPA Metrics and Status - -1. From the **Global** view, open the project with the HPAs you want to look at. - -1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. - -1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. - - -## Deleting an HPA - -1. From the **Global** view, open the project that you want to delete an HPA from. - -1. Click **Resources > HPA.** - -1. Find the HPA which you would like to delete. - -1. Click **⋮ > Delete**. - -1. Click **Delete** to confirm. - -> **Result:** The HPA is deleted from the current cluster. diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md deleted file mode 100644 index a003fdc2bb..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ /dev/null @@ -1,494 +0,0 @@ ---- -title: Testing HPAs with kubectl -weight: 3031 - -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa ---- - -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). - -For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. - -1. Configure `kubectl` to connect to your Kubernetes cluster. - -2. Copy the `hello-world` deployment manifest below. -{{% accordion id="hello-world" label="Hello World Manifest" %}} -``` -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: hello-world - namespace: default -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world -``` -{{% /accordion %}} - -1. Deploy it to your cluster. - - ``` - # kubectl create -f - ``` - -1. 
Copy one of the HPAs below based on the metric type you're using: -{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi -``` -{{% /accordion %}} -{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m -``` -{{% /accordion %}} - -1. View the HPA info and description. Confirm that metric data is shown. - {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following commands. - ``` - # kubectl get hpa - NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE - hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 1253376 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - {{% accordion id="hpa-info-custom-metrics" label="Custom Metrics" %}} -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive the output that follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 3514368 / 100Mi - "cpu_system" on pods: 0 / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - - -1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). - -1. 
Test that pod autoscaling works as intended.
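The previous step mentions [Hey](https://github.com/rakyll/hey); as a minimal sketch, assuming the `hello-world` service has been exposed and is reachable at `http://<node_IP>:<port>` (replace with however you exposed it), sustained load can be generated like this:

```
# 50 concurrent workers sending requests for 5 minutes
hey -z 5m -c 50 http://<node_IP>:<port>/
```

Keep the load running while you watch the HPA in the scenarios below, so that CPU usage stays above the target long enough to trigger a scale-up.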

- **To Test Autoscaling Using Resource Metrics:** - {{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to two pods based on CPU Usage. - -1. View your HPA. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10928128 / 100Mi - resource cpu on pods (as a percentage of request): 56% (280m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm you've scaled to two pods. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 9424896 / 100Mi - resource cpu on pods (as a percentage of request): 66% (333m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - ``` -2. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. 
- ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-f46kh 0/1 Running 0 1m - hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10070016 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` - {{% /accordion %}} -
-**To Test Autoscaling Using Custom Metrics:** - {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale two pods based on CPU usage. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8159232 / 100Mi - "cpu_system" on pods: 7m / 20m - resource cpu on pods (as a percentage of request): 64% (321m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm two pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` - {{% /accordion %}} -{{% accordion id="observe-upscale-3-pods-cpu-cooldown-2" label="Upscale to 3 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows: - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - ``` -1. Enter the following command to confirm three pods are running. 
- ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - # kubectl get pods - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="observe-upscale-4-pods" label="Upscale to 4 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm four pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m - hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive similar output to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8101888 / 100Mi - "cpu_system" on pods: 8m / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` -1. Enter the following command to confirm a single pods is running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/_index.md deleted file mode 100644 index 5c18feeec1..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Set Up Load Balancer and Ingress Controller within Rancher -description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers -weight: 3040 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress ---- - -Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. - -## Load Balancers - -After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. - -If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -### Load Balancer Limitations - -Load Balancers have a couple of limitations you should be aware of: - -- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. 
Running multiple load balancers can be expensive. - -- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - - - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) - - - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) - -## Ingress - -As mentioned in the limitations above, the disadvantages of using a load balancer are: - -- Load Balancers can only handle one IP address per service. -- If you run multiple services in your cluster, you must have a load balancer for each service. -- It can be expensive to have a load balancer for every service. - -In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. - -Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. - -Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. - -Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. - -Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). - -Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. - ->**Using Rancher in a High Availability Configuration?** -> ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. - -- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress) -- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry; see [Global DNS]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/).
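To make the host- and path-based routing described above concrete, the sketch below shows a minimal Ingress that sends `www.mysite.com/` to one service and `www.mysite.com/contact-us` to another. The `networking.k8s.io/v1beta1` API version and the backend service names are assumptions chosen for illustration; adjust them for your cluster and workloads:

```
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: mysite
  namespace: default
spec:
  rules:
  - host: www.mysite.com
    http:
      paths:
      - path: /
        backend:
          serviceName: mysite-web      # default service for the hostname
          servicePort: 80
      - path: /contact-us
        backend:
          serviceName: mysite-contact  # separate service for /contact-us
          servicePort: 80
```

The Nginx Ingress Controller in an RKE-launched cluster turns each rule like this into the equivalent `server{}`/`location{}` configuration mentioned above.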
diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md deleted file mode 100644 index 82a6da5d5d..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Adding Ingresses to Your Project -description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project -weight: 3042 -aliases: - - /rancher/v2.0-v2.4/en/tasks/workloads/add-ingress/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress ---- - -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry]({{}}/rancher/v2.0-v2.4/en/helm-charts/globaldns/). - -1. From the **Global** view, open the project that you want to add ingress to. -1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions before v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. -1. Enter a **Name** for the ingress. -1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. -1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. -1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. - -**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. - - -# Ingress Rule Configuration - -- [Automatically generate a sslip.io hostname](#automatically-generate-a-sslip-io-hostname) -- [Specify a hostname to use](#specify-a-hostname-to-use) -- [Use as the default backend](#use-as-the-default-backend) -- [Certificates](#certificates) -- [Labels and Annotations](#labels-and-annotations) - -### Automatically generate a sslip.io hostname - -If you choose this option, ingress routes requests to hostname to a DNS name that's automatically generated. Rancher uses [sslip.io](http://sslip.io/) to automatically generates the DNS name. This option is best used for testing, _not_ production environments. - ->**Note:** To use this option, you must be able to resolve to `sslip.io` addresses. - -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on. 
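The generated name works because sslip.io answers DNS queries for any hostname that embeds an IP address with that embedded address. As an illustration only (the exact hostname Rancher generates for your ingress will differ, and `203.0.113.10` is a documentation IP used purely as a placeholder), a name of this shape resolves as follows:

```
# dig +short hello-world.default.203.0.113.10.sslip.io
203.0.113.10
```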
- -### Specify a hostname to use - -If you use this option, ingress routes requests for a hostname to the service or workload that you specify. - -1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on. - -### Use as the default backend - -Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. - ->**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. - -1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. -1. Select a service or workload from the **Target** drop-down list. - -### Certificates ->**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/). - -1. Click **Add Certificate**. -1. Select a **Certificate** from the drop-down list. -1. Enter the **Host** using encrypted communication. -1. To add additional hosts that use the certificate, click **Add Hosts**. - -### Labels and Annotations - -Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. - -For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md deleted file mode 100644 index c3b39f63b5..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "Layer 4 and Layer 7 Load Balancing" -description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" -weight: 3041 -aliases: - - /rancher/v2.0-v2.4/en/concepts/load-balancing/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers ---- -Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. - -## Layer-4 Load Balancer - -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. 
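As a minimal sketch of what this means in Kubernetes terms, a layer-4 load balancer is requested by creating a Service of type `LoadBalancer`; the service name, selector, and ports below are illustrative:

```
apiVersion: v1
kind: Service
metadata:
  name: hello-world-lb
  namespace: default
spec:
  type: LoadBalancer        # ask the cloud provider for an external layer-4 load balancer
  selector:
    app: hello-world        # pods that receive the forwarded traffic
  ports:
  - name: http
    protocol: TCP
    port: 80                # port exposed by the load balancer
    targetPort: 80          # container port the traffic is forwarded to
```

On a supported cloud provider, the service is assigned an external IP address or hostname; on unsupported infrastructure, it stays in the `Pending` state unless something like MetalLB is installed.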
- -Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. - -> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. - -### Support for Layer-4 Load Balancing - -Support for layer-4 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-4 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Limited NGINX or third-party Ingress* -RKE on vSphere | Limited NGINX or third party-Ingress* -RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* -Third-party MetalLB | Limited NGINX or third-party Ingress* - -\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) - -## Layer-7 Load Balancer - -Layer-7 load balancer (or the ingress controller) supports host- and path-based load balancing and SSL termination. The Layer-7 load balancer forwards only HTTP and HTTPS traffic, and therefore it listens only on ports 80 and 443. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller. - -### Support for Layer-7 Load Balancing - -Support for layer-7 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-7 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GKE cloud provider -Azure AKS | Not Supported -RKE on EC2 | Nginx Ingress Controller -RKE on DigitalOcean | Nginx Ingress Controller -RKE on vSphere | Nginx Ingress Controller -RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller - -### Host Names in Layer-7 Load Balancer - -Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. - -Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: - -1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposes by the Layer-7 load balancer. -2. Ask Rancher to generate an sslip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name ..a.b.c.d.sslip.io. - -The benefit of using sslip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. - -## Related Links - -- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/_index.md deleted file mode 100644 index a852a837e6..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Secrets -weight: 3062 -aliases: - - /rancher/v2.0-v2.4/en/tasks/projects/add-a-secret - - /rancher/v2.0-v2.4/en/k8s-in-rancher/secrets ---- - -[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. - -> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries) - -When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# Creating Secrets - -When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. - -2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. - -3. Enter a **Name** for the secret. - - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. - -4. 
Select a **Scope** for the secret. You can either make the secret available for the entire project or for a single namespace. - -5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. - - >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -1. Click **Save**. - -**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# What's Next? - -Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. - -For more information on adding a secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/_index.md deleted file mode 100644 index 412ffe112a..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/_index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "Kubernetes Workloads and Pods" -description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" -weight: 3025 -aliases: - - /rancher/v2.0-v2.4/en/concepts/workloads/ - - /rancher/v2.0-v2.4/en/tasks/workloads/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads ---- - -You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. - -### Pods - -[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore, when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. - -### Workloads - -_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. -Workloads let you define the rules for application scheduling, scaling, and upgrade. - -#### Workload Types - -Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: - -- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - - _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server.
- -- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - - _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An application would be something like Zookeeper—an application that requires a database for storage. - -- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - - _Daemonsets_ ensures that every node in the cluster runs a copy of pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best. - -- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) - - _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state. - -- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) - - _CronJobs_ are similar to jobs. CronJobs, however, runs to completion on a cron-based schedule. - -### Services - -In many use cases, a workload has to be either: - -- Accessed by other workloads in the cluster. -- Exposed to the outside world. - -You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. - -#### Service Types - -There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). - -- **ClusterIP** - - >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. - -- **NodePort** - - >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `:`. - -- **LoadBalancer** - - >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. - -## Workload Options - -This section of the documentation contains instructions for deploying workloads and using workload options. 
- -- [Deploy Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads/) - -## Related Links - -### External Links - -- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md deleted file mode 100644 index eda77f05ab..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Adding a Sidecar -weight: 3029 -aliases: - - /rancher/v2.0-v2.4/en/tasks/workloads/add-a-sidecar/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar ---- -A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. - -1. From the **Global** view, open the project running the workload you want to add a sidecar to. - -1. Click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. - -1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. - -1. Enter a **Name** for the sidecar. - -1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. - - - **Standard Container:** - - The sidecar container is deployed after the main container. - - - **Init Container:** - - The sidecar container is deployed before the main container. - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. - -1. Set the remaining options. You can read about them in [Deploying Workloads](../deploy-workloads). - -1. Click **Launch**. - -**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. - -## Related Links - -- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/_index.md deleted file mode 100644 index 249bd6e59d..0000000000 --- a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Deploying Workloads -description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. -weight: 3026 -aliases: - - /rancher/v2.0-v2.4/en/tasks/workloads/deploy-workloads/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads ---- - -Deploy a workload to run an application in one or more containers. - -1. From the **Global** view, open the project that you want to deploy a workload to. 
- -1. 1. Click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. - -1. Enter a **Name** for the workload. - -1. Select a [workload type]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. - -1. Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. - -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/#services). - -1. Configure the remaining options: - - - **Environment Variables** - - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/). - - - **Node Scheduling** - - **Health Check** - - **Volumes** - - Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/). - - When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. - - - **Scaling/Upgrade Policy** - - >**Amazon Note for Volumes:** - > - > To mount an Amazon EBS volume: - > - >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. - > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes). - - -1. Click **Show Advanced Options** and configure: - - - **Command** - - **Networking** - - **Labels & Annotations** - - **Security and Host Config** - -1. Click **Launch**. - -**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. 
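
The UI steps above ultimately create an ordinary Kubernetes Deployment in the chosen namespace. The sketch below is illustrative only: the workload name, namespace, image reference, and port are placeholder values rather than output captured from Rancher, but it shows roughly what a scalable deployment configured this way looks like.

```yaml
# Illustrative sketch of the Deployment the steps above configure.
# The name, namespace, image reference, and port are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-workload
  namespace: my-namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      workload: my-workload
  template:
    metadata:
      labels:
        workload: my-workload
    spec:
      containers:
      - name: my-workload
        image: registry.gitlab.com/user/path/image:tag   # same format as the Docker Image field
        ports:
        - containerPort: 80   # corresponds to the port mapping added with "Add Port"
```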
diff --git a/content/rancher/v2.0-v2.4/en/overview/_index.md b/content/rancher/v2.0-v2.4/en/overview/_index.md deleted file mode 100644 index 16d8a5d059..0000000000 --- a/content/rancher/v2.0-v2.4/en/overview/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Overview -weight: 1 ---- -Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. - -# Run Kubernetes Everywhere - -Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. - -# Meet IT requirements - -Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: - -- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. -- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. -- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. - -# Empower DevOps Teams - -Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. - -The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. - -![Platform]({{}}/img/rancher/platform.png) - -# Features of the Rancher API Server - -The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: - -### Authorization and Role-Based Access Control - -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/) policies. - -### Working with Kubernetes - -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.0-v2.4/en/catalog/) that make it easy to repeatedly deploy applications. -- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. 
A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.0-v2.4/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -### Working with Cloud Infrastructure - -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/nodes/) in all clusters. -- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) in the cloud. - -### Cluster Visibility - -- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. -- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. -- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. - -# Editing Downstream Clusters with Rancher - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/) - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-capabilities-table" %}} diff --git a/content/rancher/v2.0-v2.4/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.0-v2.4/en/overview/architecture-recommendations/_index.md deleted file mode 100644 index dade3593e0..0000000000 --- a/content/rancher/v2.0-v2.4/en/overview/architecture-recommendations/_index.md +++ /dev/null @@ -1,121 +0,0 @@ ---- -title: Architecture Recommendations -weight: 3 ---- - -Kubernetes cluster. 
If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the cluster running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) - -This section covers the following topics: - -- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) -- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) -- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) -- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) -- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) -- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) - -# Separation of Rancher and User Clusters - -A user cluster is a downstream Kubernetes cluster that runs your apps and services. - -If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. - -In Kubernetes installations of Rancher, the Rancher server cluster should also be separate from the user clusters. - -![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) - -# Why HA is Better for Rancher in Production - -We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. - -We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. - -As of v2.4, Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)]({{}}/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)]({{}}/k3s/latest/en/) Kubernetes cluster. Both RKE and K3s are fully certified Kubernetes distributions. - -Rancher versions before v2.4 need to be installed on an RKE cluster. - -### K3s Kubernetes Cluster Installations - -If you are installing Rancher v2.4 for the first time, we recommend installing it on a K3s Kubernetes cluster. One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. - -The option to install Rancher on a K3s cluster is a feature introduced in Rancher v2.4. K3s is easy to install, with half the memory of Kubernetes, all in a binary less than 100 MB. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### RKE Kubernetes Cluster Installations - -If you are installing Rancher before v2.4, you will need to install Rancher on an RKE cluster, in which the cluster data is stored on each node with the etcd role. As of Rancher v2.4, there is no migration path to transition the Rancher server from an RKE cluster to a K3s cluster. All versions of the Rancher server, including v2.4+, can be installed on an RKE cluster. - -In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Recommended Load Balancer Configuration for Kubernetes Installations - -We recommend the following configurations for the load balancer and Ingress controllers: - -* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -# Environment for Kubernetes Installations - -It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. - -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. - -It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. - -# Recommended Node Roles for Kubernetes Installations - -Our recommendations for the roles of each node differ depending on whether Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. - -### K3s Cluster Roles - -In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. - -For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. - -### RKE Cluster Roles - -If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. - -### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters - -Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. - -Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. - -For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. - -![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) - -RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. - -We recommend that downstream user clusters should have at least: - -- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available -- **Two nodes with only the controlplane role** to make the master component highly available -- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services - -With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. 
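
As a concrete illustration of this recommendation, the `nodes` section of an RKE `cluster.yml` for the Rancher server cluster might assign all three roles to each of three nodes. The addresses and SSH user below are placeholders.

```yaml
# Sketch of the nodes section of an RKE cluster.yml for the Rancher server
# cluster: three nodes, each with all three roles. Addresses and the SSH
# user are placeholders.
nodes:
- address: 192.168.1.11
  user: rancher
  role: [controlplane, worker, etcd]
- address: 192.168.1.12
  user: rancher
  role: [controlplane, worker, etcd]
- address: 192.168.1.13
  user: rancher
  role: [controlplane, worker, etcd]
```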
- -Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. - -For more best practices for downstream clusters, refer to the [production checklist]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.0-v2.4/en/best-practices/) - -# Architecture for an Authorized Cluster Endpoint - -If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. - -If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/#creating-an-api-key) for more information. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/overview/architecture/_index.md b/content/rancher/v2.0-v2.4/en/overview/architecture/_index.md deleted file mode 100644 index 8b7b065600..0000000000 --- a/content/rancher/v2.0-v2.4/en/overview/architecture/_index.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. - -For information on the different ways that Rancher can be installed, refer to the [overview of installation options.]({{}}/rancher/v2.0-v2.4/en/installation/#overview-of-installation-options) - -For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.0-v2.4/en/overview/#features-of-the-rancher-api-server) - -For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.0-v2.4/en/overview/architecture-recommendations) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.0-v2.4/en/overview/concepts) page. - -This section covers the following topics: - -- [Rancher server architecture](#rancher-server-architecture) -- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) - - [The authentication proxy](#1-the-authentication-proxy) - - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) - - [Node agents](#3-node-agents) - - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) -- [Important files](#important-files) -- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) -- [Rancher server components and source code](#rancher-server-components-and-source-code) - -# Rancher Server Architecture - -The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. - -The figure below illustrates the high-level architecture of Rancher 2.x. 
The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). - -For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/) for running your workloads. - -The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -You can install Rancher on a single node, or on a high-availability Kubernetes cluster. - -A high-availability Kubernetes installation is recommended for production. - -A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -For Rancher v2.0-v2.4, there was no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. - -The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. - -# Communicating with Downstream User Clusters - -This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. - -The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. - -
Communicating with Downstream Clusters
- -![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) - -The following descriptions correspond to the numbers in the diagram above: - -1. [The Authentication Proxy](#1-the-authentication-proxy) -2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) -3. [Node Agents](#3-node-agents) -4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) - -### 1. The Authentication Proxy - -In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see -the pods. Bob is authenticated through Rancher's authentication proxy. - -The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. - -Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. - -By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. - -### 2. Cluster Controllers and Cluster Agents - -Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. - -There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: - -- Watches for resource changes in the downstream cluster -- Brings the current state of the downstream cluster to the desired state -- Configures access control policies to clusters and projects -- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE - -By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. - -The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: - -- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters -- Manages workloads, pod creation and deployment within each cluster -- Applies the roles and bindings defined in each cluster's global policies -- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health - -### 3. Node Agents - -If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. - -The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. 
It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. - -### 4. Authorized Cluster Endpoint - -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. - -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -There are two main reasons why a user might need the authorized cluster endpoint: - -- To access a downstream user cluster while Rancher is down -- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. - -> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. - -With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. - -You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl) - -# Important Files - -The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_rancher-cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. -- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
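
To make the relationship between these files and the authorized cluster endpoint more concrete, the sketch below shows the general shape of a kubeconfig with two contexts: one that proxies requests through the Rancher server and one that reaches the downstream control plane directly. All names, URLs, tokens, and the certificate-authority entry are placeholders; the exact context names and credentials Rancher generates depend on your cluster.

```yaml
# Sketch of a kubeconfig with both a Rancher-proxied context and a direct
# (authorized cluster endpoint) context. All names, URLs, and credentials
# are placeholders.
apiVersion: v1
kind: Config
clusters:
- name: my-cluster
  cluster:
    server: https://rancher.example.com/k8s/clusters/<cluster-id>   # proxied through Rancher
- name: my-cluster-direct
  cluster:
    server: https://cluster-endpoint.example.com:6443               # authorized cluster endpoint
    certificate-authority-data: <base64-encoded CA, if a private CA is used>
users:
- name: my-cluster
  user:
    token: <Rancher-generated bearer token>
contexts:
- name: my-cluster
  context:
    cluster: my-cluster
    user: my-cluster
- name: my-cluster-direct
  context:
    cluster: my-cluster-direct
    user: my-cluster
current-context: my-cluster
```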
- -For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/) documentation. - -# Tools for Provisioning Kubernetes Clusters - -The tools that Rancher uses to provision downstream user clusters depends on the type of cluster that is being provisioned. - -### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider - -Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) - -### Rancher Launched Kubernetes for Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. - -Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) - -### Hosted Kubernetes Providers - -When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) - -### Imported Kubernetes Clusters - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -# Rancher Server Components and Source Code - -This diagram shows each component that the Rancher server is composed of: - -![Rancher Components]({{}}/img/rancher/rancher-architecture-rancher-components.svg) - -The GitHub repositories for Rancher can be found at the following links: - -- [Main Rancher server repository](https://github.com/rancher/rancher) -- [Rancher UI](https://github.com/rancher/ui) -- [Rancher API UI](https://github.com/rancher/api-ui) -- [Norman,](https://github.com/rancher/norman) Rancher's API framework -- [Types](https://github.com/rancher/types) -- [Rancher CLI](https://github.com/rancher/cli) -- [Catalog applications](https://github.com/rancher/helm) - -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{}}/rancher/v2.0-v2.4/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
diff --git a/content/rancher/v2.0-v2.4/en/overview/concepts/_index.md b/content/rancher/v2.0-v2.4/en/overview/concepts/_index.md deleted file mode 100644 index c637928995..0000000000 --- a/content/rancher/v2.0-v2.4/en/overview/concepts/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Kubernetes Concepts -weight: 4 ---- - -This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified interview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) - -This section covers the following topics: - -- [About Docker](#about-docker) -- [About Kubernetes](#about-kubernetes) -- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) -- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) - - [etcd Nodes](#etcd-nodes) - - [Controlplane Nodes](#controlplane-nodes) - - [Worker Nodes](#worker-nodes) -- [About Helm](#about-helm) - -# About Docker - -Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. - ->**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. - -# About Kubernetes - -Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. - -# What is a Kubernetes Cluster? - -A cluster is a group of computers that work together as a single system. - -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. - -# Roles for Nodes in Kubernetes Clusters - -Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. - -### etcd Nodes - -Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. - -The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. - -The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. - -Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. 
The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. - -Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Controlplane Nodes - -Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although three or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -### Worker Nodes - -Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: - -- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. -- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/). - -# About Helm - -For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. - -Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). - -For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) diff --git a/content/rancher/v2.0-v2.4/en/pipelines/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/_index.md deleted file mode 100644 index 2226fce30d..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/_index.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: Pipelines -weight: 11 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines ---- - -Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. - -Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - ->**Notes:** -> ->- Pipelines improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. ->- Still using v2.0.x? 
See the pipeline documentation for [previous versions]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/docs-for-v2.0.x). ->- Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of and is not a replacement of enterprise-grade Jenkins or other CI tools your team uses. - -This section covers the following topics: - -- [Concepts](#concepts) -- [How Pipelines Work](#how-pipelines-work) -- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) -- [Setting up Pipelines](#setting-up-pipelines) - - [Configure version control providers](#1-configure-version-control-providers) - - [Configure repositories](#2-configure-repositories) - - [Configure the pipeline](#3-configure-the-pipeline) -- [Pipeline Configuration Reference](#pipeline-configuration-reference) -- [Running your Pipelines](#running-your-pipelines) -- [Triggering a Pipeline](#triggering-a-pipeline) - - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) - -# Concepts - -For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/concepts) - -# How Pipelines Work - -After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. - -A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. - -Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. - -When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: - - - **Jenkins:** - - The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. - - >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. - - - **Docker Registry:** - - Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. - - - **Minio:** - - Minio storage is used to store the logs for pipeline executions. - - >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage). 
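
To give a sense of what the checked-in configuration looks like, the following is a minimal `.rancher-pipeline.yml` with a single stage and a single Run Script step. The image and command are arbitrary examples; the full syntax is covered in the pipeline configuration reference.

```yaml
# Minimal .rancher-pipeline.yml: one stage containing one Run Script step.
# The image and command are arbitrary examples.
stages:
- name: Test
  steps:
  - runScriptConfig:
      image: golang
      shellScript: go test ./...
```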
- -# Roles-based Access Control for Pipelines - -If you can access a project, you can enable repositories to start building pipelines. - -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. - -Project members can only configure repositories and pipelines. - -# Setting up Pipelines - -To set up pipelines, you will need to do the following: - -1. [Configure version control providers](#1-configure-version-control-providers) -2. [Configure repositories](#2-configure-repositories) -3. [Configure the pipeline](#3-configure-the-pipeline) - -### 1. Configure Version Control Providers - -Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider. - -| Provider | Available as of | -| --- | --- | -| GitHub | v2.0.0 | -| GitLab | v2.1.0 | -| Bitbucket | v2.2.0 | - -Select your provider's tab below and follow the directions. - -{{% tabs %}} -{{% tab "GitHub" %}} -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. - -1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - -1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "GitLab" %}} - -_Available as of v2.1.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. - -1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. - -1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. - -1. Click **Authenticate**. - ->**Note:** -> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. -> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. -{{% /tab %}} -{{% tab "Bitbucket Cloud" %}} - -_Available as of v2.2.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use public Bitbucket Cloud** option. - -1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. - -1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. - -1. Click **Authenticate**. 
- -{{% /tab %}} -{{% tab "Bitbucket Server" %}} - -_Available as of v2.2.0_ - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use private Bitbucket Server setup** option. - -1. Follow the directions displayed to **Setup a Bitbucket Server application**. - -1. Enter the host address of your Bitbucket server installation. - -1. Click **Authenticate**. - ->**Note:** -> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: -> -> 1. Setup Rancher server with a certificate from a trusted CA. -> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). -> -{{% /tab %}} -{{% /tabs %}} - -**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. - -### 2. Configure Repositories - -After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Click on **Configure Repositories**. - -1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. - -1. For each repository that you want to set up a pipeline, click on **Enable**. - -1. When you're done enabling all your repositories, click on **Done**. - -**Results:** You have a list of repositories that you can start configuring pipelines for. - -### 3. Configure the Pipeline - -Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository that you want to set up a pipeline for. - -1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. 
For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config) - - * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. - * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. - -1. Select which `branch` to use from the list of branches. - -1. _Available as of v2.2.0_ Optional: Set up notifications. - -1. Set up the trigger rules for the pipeline. - -1. Enter a **Timeout** for the pipeline. - -1. When all the stages and steps are configured, click **Done**. - -**Results:** Your pipeline is now configured and ready to be run. - - -# Pipeline Configuration Reference - -Refer to [this page]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: - -- Run a script -- Build and publish images -- Publish catalog templates -- Deploy YAML -- Deploy a catalog app - -The configuration reference also covers how to configure: - -- Notifications -- Timeouts -- The rules that trigger a pipeline -- Environment variables -- Secrets - - -# Running your Pipelines - -Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** (In versions before v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **⋮ > Run**. - -During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: - -- `docker-registry` -- `jenkins` -- `minio` - -This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. - -# Triggering a Pipeline - -When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. - -Available Events: - -* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. -* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. -* **Tag**: When a tag is created in the repository, the pipeline is triggered. - -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos/). - -### Modifying the Event Triggers for the Repository - -1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. - -1. 1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the repository that you want to modify the event triggers. Select the vertical **⋮ > Setting**. - -1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. - -1. Click **Save**. 
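
If you manage the pipeline with a `.rancher-pipeline.yml` file instead of the UI, you can also restrict when individual stages run by using the `when` conditions described in the configuration reference. The following sketch uses example values only:

```yaml
# Sketch: limit a stage to certain branches and events with `when`
# conditions. Values are examples only.
stages:
- name: Publish image
  when:
    branch: master
    event: [ push, tag ]
  steps:
  - publishImageConfig:
      dockerfilePath: ./Dockerfile
      buildContext: .
      tag: repo/my-image:dev
      pushRemote: true
      registry: reg.example.com
```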
diff --git a/content/rancher/v2.0-v2.4/en/pipelines/config/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/config/_index.md deleted file mode 100644 index a6d43d1e38..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/config/_index.md +++ /dev/null @@ -1,660 +0,0 @@ ---- -title: Pipeline Configuration Reference -weight: 1 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config ---- - -In this section, you'll learn how to configure pipelines. - -- [Step Types](#step-types) -- [Step Type: Run Script](#step-type-run-script) -- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) -- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) -- [Step Type: Deploy YAML](#step-type-deploy-yaml) -- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) -- [Notifications](#notifications) -- [Timeouts](#timeouts) -- [Triggers and Trigger Rules](#triggers-and-trigger-rules) -- [Environment Variables](#environment-variables) -- [Secrets](#secrets) -- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) -- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) - - [Executor Quota](#executor-quota) - - [Resource Quota for Executors](#resource-quota-for-executors) - - [Custom CA](#custom-ca) -- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) -- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) - -# Step Types - -Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. - -Step types include: - -- [Run Script](#step-type-run-script) -- [Build and Publish Images](#step-type-build-and-publish-images) -- [Publish Catalog Template](#step-type-publish-catalog-template) -- [Deploy YAML](#step-type-deploy-yaml) -- [Deploy Catalog App](#step-type-deploy-catalog-app) - - - -### Configuring Steps By UI - -If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. - -1. Add stages to your pipeline execution by clicking **Add Stage**. - - 1. Enter a **Name** for each stage of your pipeline. - 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. - -1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. - -### Configuring Steps by YAML - -For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com -``` -# Step Type: Run Script - -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. 
For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configuring Script by UI - -1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. - -1. Click **Add**. - -### Configuring Script by YAML -```yaml -# example -stages: -- name: Build something - steps: - - runScriptConfig: - image: golang - shellScript: go build -``` -# Step Type: Build and Publish Images - -_Available as of Rancher v2.1.0_ - -The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. - -The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. - -### Configuring Building and Publishing Images by UI -1. From the **Step Type** drop-down, choose **Build and Publish**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | - Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | - Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | - Build Context

(**Show advanced options**) | The build context for the Docker build. By default, this is the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). |

### Configuring Building and Publishing Images by YAML

You can use specific arguments for the Docker daemon and the build. They are not exposed in the UI, but they are available in the pipeline YAML format, as indicated in the example below. Available environment variables include:

Variable Name | Description
------------------------|------------------------------------------------------------
PLUGIN_DRY_RUN | Disables `docker push`
PLUGIN_DEBUG | Runs the Docker daemon in debug mode
PLUGIN_MIRROR | Sets a registry mirror for the Docker daemon
PLUGIN_INSECURE | Allows the Docker daemon to use insecure registries
PLUGIN_BUILD_ARGS | Docker build args, as a comma-separated list
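As an additional illustration to the insecure-registry example below, a hedged sketch of passing build arguments through `PLUGIN_BUILD_ARGS` might look like this; the argument names and values are placeholders:

```yaml
# Illustrative only: comma-separated build args forwarded to the image build.
stages:
- name: Publish Image
  steps:
  - publishImageConfig:
      dockerfilePath: ./Dockerfile
      buildContext: .
      tag: repo/app:v1
    env:
      PLUGIN_BUILD_ARGS: "HTTP_PROXY=http://proxy.example.com:3128,APP_VERSION=1.0"
```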
- -```yaml -# This example shows an environment variable being used -# in the Publish Image step. This variable allows you to -# publish an image to an insecure registry: - -stages: -- name: Publish Image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - pushRemote: true - registry: example.com - env: - PLUGIN_INSECURE: "true" -``` - -# Step Type: Publish Catalog Template - -_Available as of v2.2.0_ - -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository]({{}}/rancher/v2.0-v2.4/en/catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. - -### Configuring Publishing a Catalog Template by UI - -1. From the **Step Type** drop-down, choose **Publish Catalog Template**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | - Catalog Template Name | The name of the template. For example, wordpress. | - Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | - Protocol | You can choose to publish via HTTP(S) or SSH protocol. | - Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | - Git URL | The Git URL of the chart repository that the template will be published to. | - Git Branch | The Git branch of the chart repository that the template will be published to. | - Author Name | The author name used in the commit message. | - Author Email | The author email used in the commit message. | - - -### Configuring Publishing a Catalog Template by YAML - -You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: - -* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. -* CatalogTemplate: The name of the template. -* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. -* GitUrl: The git URL of the chart repository that the template will be published to. -* GitBranch: The git branch of the chart repository that the template will be published to. -* GitAuthor: The author name used in the commit message. -* GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. - -```yaml -# example -stages: -- name: Publish Wordpress Template - steps: - - publishCatalogConfig: - path: ./charts/wordpress/latest - catalogTemplate: wordpress - version: ${CICD_GIT_TAG} - gitUrl: git@github.com:myrepo/charts.git - gitBranch: master - gitAuthor: example-user - gitEmail: user@example.com - envFrom: - - sourceName: publish-keys - sourceKey: DEPLOY_KEY -``` - -# Step Type: Deploy YAML - -This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configure Deploying YAML by UI - -1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. - -1. Enter the **YAML Path**, which is the path to the manifest file in the source code. - -1. Click **Add**. - -### Configure Deploying YAML by YAML - -```yaml -# example -stages: -- name: Deploy - steps: - - applyYamlConfig: - path: ./deployment.yaml -``` - -# Step Type :Deploy Catalog App - -_Available as of v2.2.0_ - -The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. - -### Configure Deploying Catalog App by UI - -1. From the **Step Type** drop-down, choose **Deploy Catalog App**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Catalog | The catalog from which the app template will be used. | - Template Name | The name of the app template. For example, wordpress. | - Template Version | The version of the app template you want to deploy. | - Namespace | The target namespace where you want to deploy the app. | - App Name | The name of the app you want to deploy. | - Answers | Key-value pairs of answers used to deploy the app. | - - -### Configure Deploying Catalog App by YAML - -You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: - -* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. -* Version: The version of the template you want to deploy. -* Answers: Key-value pairs of answers used to deploy the app. -* Name: The name of the app you want to deploy. -* TargetNamespace: The target namespace where you want to deploy the app. - -```yaml -# example -stages: -- name: Deploy App - steps: - - applyAppConfig: - catalogTemplate: cattle-global-data:library-mysql - version: 0.3.8 - answers: - persistence.enabled: "false" - name: testmysql - targetNamespace: test -``` - -# Timeouts - -By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. - -### Configuring Timeouts by UI - -Enter a new value in the **Timeout** field. 
- -### Configuring Timeouts by YAML - -In the `timeout` section, enter the timeout value in minutes. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -# timeout in minutes -timeout: 30 -``` - -# Notifications - -You can enable notifications to any [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/) so it will be easy to add recipients immediately. - -### Configuring Notifications by UI - -_Available as of v2.2.0_ - -1. Within the **Notification** section, turn on notifications by clicking **Enable**. - -1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. - -1. If you don't have any existing [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. - - > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. - -1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. - -### Configuring Notifications by YAML -_Available as of v2.2.0_ - -In the `notification` section, you will provide the following information: - -* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. - * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. - * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. -* **Condition:** Select which conditions of when you want the notification to be sent. -* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. 
- -```yaml -# Example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` - -# Triggers and Trigger Rules - -After you configure a pipeline, you can trigger it using different methods: - -- **Manually:** - - After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. - -- **Automatically:** - - When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. - - To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. - -Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: - -- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. - -- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. - -If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. - -Wildcard character (`*`) expansion is supported in `branch` conditions. - -This section covers the following topics: - -- [Configuring pipeline triggers](#configuring-pipeline-triggers) -- [Configuring stage triggers](#configuring-stage-triggers) -- [Configuring step triggers](#configuring-step-triggers) -- [Configuring triggers by YAML](#configuring-triggers-by-yaml) - -### Configuring Pipeline Triggers - -1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Click on **Show Advanced Options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. - - 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - - 1. **Optional:** Add more branches that trigger a build. - -1. Click **Done.** - -### Configuring Stage Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. 
Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the stage. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the stage and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the stage. | - | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - -### Configuring Step Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the step. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the step and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the step. | - | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - - -### Configuring Triggers by YAML - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -``` - -# Environment Variables - -When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. - -### Configuring Environment Variables by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. - -1. Add your environment variable(s) into either the script or file. - -1. Click **Save**. - -### Configuring Environment Variables by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 -``` - -# Secrets - -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/). - -### Prerequisite -Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. -
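For illustration, a minimal Secret manifest of that kind might look like the sketch below. The name `my-secret` and key `secret-key` match the YAML example further down, while the namespace and value are placeholders you would replace with your own:

```yaml
# Illustrative only: a secret whose key a pipeline step can reference via envFrom.
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
  namespace: my-pipeline-namespace   # placeholder: the namespace where pipeline build pods run
type: Opaque
stringData:
  secret-key: my-secret-value        # placeholder value
```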
- ->**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). - -### Configuring Secrets by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. - -1. Click **Save**. - -### Configuring Secrets by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${ALIAS_ENV} - # environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV -``` - -# Pipeline Variable Substitution Reference - -For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. - -Variable Name | Description -------------------------|------------------------------------------------------------ -`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). -`CICD_GIT_URL` | URL of the Git repository. -`CICD_GIT_COMMIT` | Git commit ID being executed. -`CICD_GIT_BRANCH` | Git branch of this event. -`CICD_GIT_REF` | Git reference specification of this event. -`CICD_GIT_TAG` | Git tag name, set on tag event. -`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). -`CICD_PIPELINE_ID` | Rancher ID for the pipeline. -`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. -`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. -`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. -`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
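As a rough sketch of how these variables might be combined in a Deploy YAML manifest (the workload layout and the choice of tag are illustrative, not the exact linked example):

```yaml
# Illustrative only: substitution variables referenced from a Deploy YAML manifest.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ${CICD_GIT_REPO_NAME}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ${CICD_GIT_REPO_NAME}
  template:
    metadata:
      labels:
        app: ${CICD_GIT_REPO_NAME}
    spec:
      containers:
      - name: app
        # CICD_IMAGE carries no tag, so a commit-based tag is appended here.
        image: ${CICD_IMAGE}:${CICD_GIT_COMMIT}
```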

[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) - -# Global Pipeline Execution Settings - -After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. - -- [Executor Quota](#executor-quota) -- [Resource Quota for Executors](#resource-quota-for-executors) -- [Custom CA](#custom-ca) - -### Executor Quota - -Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. - -### Resource Quota for Executors - -_Available as of v2.2.0_ - -Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. - -Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. - -To configure compute resources for pipeline-step containers: - -You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. - -In a step, you will provide the following information: - -* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. -* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. -* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. -* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls - cpuRequest: 100m - cpuLimit: 1 - memoryRequest:100Mi - memoryLimit: 1Gi - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - cpuRequest: 100m - cpuLimit: 1 - memoryRequest:100Mi - memoryLimit: 1Gi -``` - ->**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. - -### Custom CA - -_Available as of v2.2.0_ - -If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. - -1. Click **Edit cacerts**. - -1. Paste in the CA root certificates and click **Save cacerts**. - -**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. - -# Persistent Data for Pipeline Components - -The internal Docker registry and the Minio workloads use ephemeral volumes by default. 
This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage) - -# Example rancher-pipeline.yml - -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example) diff --git a/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md deleted file mode 100644 index e4c584e74b..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/docs-for-v2.0.x/_index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: v2.0.x Pipeline Documentation -weight: 9000 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/pipelines/docs-for-v2.0.x - - /rancher/v2.0-v2.4/en/project-admin/pipelines/docs-for-v2.0.x - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/docs-for-v2.0.x - - /rancher/v2.x/en/pipelines/docs-for-v2.0.x/ ---- - ->**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/). - - - -Pipelines help you automate the software delivery process. You can integrate Rancher with GitHub to create a pipeline. - -You can set up your pipeline to run a series of stages and steps to test your code and deploy it. - -
-
- **Pipelines:** Contain a series of stages and steps. Out-of-the-box, the pipelines feature supports fan out and in capabilities.
- **Stages:** Executed sequentially. The next stage will not execute until all of the steps within the stage execute.
- **Steps:** Are executed in parallel within a stage.
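Purely as an illustration of that structure (v2.0.x pipelines are configured through the UI, so this sketch borrows the YAML notation from the newer configuration reference; stage names, images, and commands are placeholders), two sequential stages with parallel steps might be pictured as:

```yaml
# Illustrative only: the two steps in the Build stage run in parallel;
# the Publish stage starts only after both of them finish.
stages:
  - name: Build
    steps:
      - runScriptConfig:
          image: golang
          shellScript: go build
      - runScriptConfig:
          image: golang
          shellScript: go test ./...
  - name: Publish
    steps:
      - publishImageConfig:
          dockerfilePath: ./Dockerfile
          buildContext: .
          tag: example/app:dev
```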
- -## Enabling CI Pipelines - -1. Select cluster from drop down. - -2. Under tools menu select pipelines. - -3. Follow instructions for setting up github auth on page. - - -## Creating CI Pipelines - -1. Go to the project you want this pipeline to run in. - -2. Click **Resources > Pipelines.** In versions before v2.3.0,click **Workloads > Pipelines.** - -4. Click Add pipeline button. - -5. Enter in your repository name (Autocomplete should help zero in on it quickly). - -6. Select Branch options. - - - Only the branch {BRANCH NAME}: Only events triggered by changes to this branch will be built. - - - Everything but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. - - - All branches: Regardless of the branch that triggered the event always build. - - >**Note:** If you want one path for master, but another for PRs or development/test/feature branches, create two separate pipelines. - -7. Select the build trigger events. By default, builds will only happen by manually clicking build now in Rancher UI. - - - Automatically build this pipeline whenever there is a git commit. (This respects the branch selection above) - - - Automatically build this pipeline whenever there is a new PR. - - - Automatically build the pipeline. (Allows you to configure scheduled builds similar to Cron) - -8. Click Add button. - - By default, Rancher provides a three stage pipeline for you. It consists of a build stage where you would compile, unit test, and scan code. The publish stage has a single step to publish a docker image. - - -8. Add a name to the pipeline in order to complete adding a pipeline. - -9. Click on the ‘run a script’ box under the ‘Build’ stage. - - Here you can set the image, or select from pre-packaged envs. - -10. Configure a shell script to run inside the container when building. - -11. Click Save to persist the changes. - -12. Click the “publish an image’ box under the “Publish” stage. - -13. Set the location of the Dockerfile. By default it looks in the root of the workspace. Instead, set the build context for building the image relative to the root of the workspace. - -14. Set the image information. - - The registry is the remote registry URL. It is defaulted to Docker hub. - Repository is the `/` in the repository. - -15. Select the Tag. You can hard code a tag like ‘latest’ or select from a list of available variables. - -16. If this is the first time using this registry, you can add the username/password for pushing the image. You must click save for the registry credentials AND also save for the modal. - - - - -## Creating a New Stage - -1. To add a new stage the user must click the ‘add a new stage’ link in either create or edit mode of the pipeline view. - -2. Provide a name for the stage. - -3. Click save. - - -## Creating a New Step - -1. Go to create / edit mode of the pipeline. - -2. Click “Add Step” button in the stage that you would like to add a step in. - -3. 
Fill out the form as detailed above - - -## Environment Variables - -For your convenience the following environment variables are available in your build steps: - -Variable Name | Description -------------------------|------------------------------------------------------------ -CICD_GIT_REPO_NAME | Repository Name (Stripped of Github Organization) -CICD_PIPELINE_NAME | Name of the pipeline -CICD_GIT_BRANCH | Git branch of this event -CICD_TRIGGER_TYPE | Event that triggered the build -CICD_PIPELINE_ID | Rancher ID for the pipeline -CICD_GIT_URL | URL of the Git repository -CICD_EXECUTION_SEQUENCE | Build number of the pipeline -CICD_EXECUTION_ID | Combination of {CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE} -CICD_GIT_COMMIT | Git commit ID being executed. diff --git a/content/rancher/v2.0-v2.4/en/pipelines/example-repos/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/example-repos/_index.md deleted file mode 100644 index 4c0393fecd..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/example-repos/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Example Repositories -weight: 500 -aliases: - - /rancher/v2.0-v2.4/en/tools/pipelines/quick-start-guide/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos ---- - -Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: - -- Go -- Maven -- php - -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines). - -To start using these example repositories, - -1. [Enable the example repositories](#1-enable-the-example-repositories) -2. [View the example pipeline](#2-view-the-example-pipeline) -3. [Run the example pipeline](#3-run-the-example-pipeline) - -### 1. Enable the Example Repositories - -By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Click **Configure Repositories**. - - **Step Result:** A list of example repositories displays. - - >**Note:** Example repositories only display if you haven't fetched your own repos. - -1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. - -**Results:** - -- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. - -- The following workloads are deployed to a new namespace: - - - `docker-registry` - - `jenkins` - - `minio` - -### 2. View the Example Pipeline - -After enabling an example repository, review the pipeline to see how it is set up. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮**. 
There are two ways to view the pipeline: - * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. - * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. - -### 3. Run the Example Pipeline - -After enabling an example repository, run the pipeline to see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** - -1. Find the example repository, select the vertical **⋮ > Run**. - - >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. - -**Result:** The pipeline runs. You can see the results in the logs. - -### What's Next? - -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines), enable a repository and finally configure your pipeline. diff --git a/content/rancher/v2.0-v2.4/en/pipelines/example/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/example/_index.md deleted file mode 100644 index e94e24171b..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/example/_index.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Example YAML File -weight: 501 -aliases: - - /rancher/v2.0-v2.4/en/tools/pipelines/reference/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example ---- - -Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -In the [pipeline configuration reference]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. - -Below is a full example `rancher-pipeline.yml` for those who want to jump right in. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} - # Set environment variables in container for the step - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 - # Set environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . 
- tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com - - name: Deploy some workloads - steps: - - applyYamlConfig: - path: ./deployment.yaml -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -# timeout in minutes -timeout: 30 -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` diff --git a/content/rancher/v2.0-v2.4/en/pipelines/storage/_index.md b/content/rancher/v2.0-v2.4/en/pipelines/storage/_index.md deleted file mode 100644 index c176b2ac84..0000000000 --- a/content/rancher/v2.0-v2.4/en/pipelines/storage/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Configuring Persistent Data for Pipeline Components -weight: 600 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage ---- - -The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/how-storage-works/) - ->**Prerequisites (for both parts A and B):** -> ->[Persistent volumes]({{}}/rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/) must be available for the cluster. - -### A. Configuring Persistent Data for Docker Registry - -1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. - -1. Find the `docker-registry` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. - -1. Click **Upgrade**. - -### B. Configuring Persistent Data for Minio - -1. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} - -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. - -1. Click **Upgrade**. - -**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.0-v2.4/en/project-admin/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/_index.md deleted file mode 100644 index a0a86c22d0..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Project Administration -weight: 9 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/editing-projects/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/editing-projects/ ---- - -_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! - -Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. - -You can use projects to perform actions like: - -- [Assign users access to a group of namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/project-members) -- Assign users [specific roles in a project]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{}}/rancher/v2.0-v2.4/en/project-admin/resource-quotas/) -- [Manage namespaces]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces/) -- [Configure tools]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/) -- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.0-v2.4/en/project-admin/pipelines) -- [Configure pod security policies]({{}}/rancher/v2.0-v2.4/en/project-admin/pod-security-policies) - -### Authorization - -Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. 
- -Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles). - -## Switching between Projects - -To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. - -1. From the **Global** view, navigate to the project that you want to configure. - -1. Select **Projects/Namespaces** from the navigation bar. - -1. Select the link for the project that you want to open. diff --git a/content/rancher/v2.0-v2.4/en/project-admin/namespaces/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/namespaces/_index.md deleted file mode 100644 index db8845713e..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/namespaces/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Namespaces -weight: 2520 ---- - -Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. - -Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. - -Resources that you can assign directly to namespaces include: - -- [Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) -- [Certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.0-v2.4/en/project-admin/namespaces) to ensure that you will have permission to access the namespace. - - -### Creating Namespaces - -Create a new namespace to isolate apps and resources in a project. - ->**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps), etc.) you can create a namespace on the fly. - -1. From the **Global** view, open the project where you want to create a namespace. 
- - >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well. - -1. From the main menu, select **Namespace**. The click **Add Namespace**. - -1. **Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). - -1. Enter a **Name** and then click **Create**. - -**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. - -### Moving Namespaces to Another Project - -Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. - -1. From the **Global** view, open the cluster that contains the namespace you want to move. - -1. From the main menu, select **Projects/Namespaces**. - -1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at one. - - >**Notes:** - > - >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured. - >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. - -1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. - -**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. - -### Editing Namespace Resource Quotas - -You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -For more information, see how to [edit namespace resource quotas]({{}}/rancher/v2.0-v2.4/en/project-admin//resource-quotas/override-namespace-default/). \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/pipelines/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/pipelines/_index.md deleted file mode 100644 index 252f94f132..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/pipelines/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher's CI/CD Pipelines -description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users -weight: 4000 -aliases: - - /rancher/v2.0-v2.4/en/concepts/ci-cd-pipelines/ - - /rancher/v2.0-v2.4/en/tasks/pipelines/ - - /rancher/v2.0-v2.4/en/tools/pipelines/configurations/ ---- -Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - -For details, refer to the [pipelines]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines) section. 
\ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/pod-security-policies/_index.md deleted file mode 100644 index 5d57af5e41..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/pod-security-policies/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Pod Security Policies -weight: 5600 ---- - -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/). - -You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. - -### Prerequisites - -- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.0-v2.4/en/admin-settings/pod-security-policies/). -- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/pod-security-policy). - -### Applying a Pod Security Policy - -1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. -1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. - Assigning a PSP to a project will: - - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. - -1. Click **Save**. - -**Result:** The PSP is applied to the project and any namespaces added to the project. - ->**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/project-members/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/project-members/_index.md deleted file mode 100644 index a0a4a0922d..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/project-members/_index.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Adding Users to Projects -weight: 2505 -aliases: - - /rancher/v2.0-v2.4/en/tasks/projects/add-project-members/ - - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members/ ---- - -If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. - -You can add members to a project as it is created, or add them to an existing project. - ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/cluster-members/) instead. - -### Adding Members to a New Project - -You can add members to a project as you create it (recommended if possible). 
For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) - -### Adding Members to an Existing Project - -Following project creation, you can add users as project members so that they can access its resources. - -1. From the **Global** view, open the project that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the project. - - If external authentication is configured: - - - Rancher returns users from your external authentication source as you type. - - - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. - -1. Assign the user or group **Project** roles. - - [What are Project Roles?]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/) - - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. - > - >- For `Custom` roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/locked-roles/). - -**Result:** The chosen users are added to the project. - -- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/_index.md deleted file mode 100644 index af27d4b345..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Project Resource Quotas -weight: 2515 -aliases: - - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas ---- - -_Available as of v2.1.0_ - -In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. - -This page is a how-to guide for creating resource quotas in existing projects. 
- -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/#creating-projects) - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) - -### Applying Resource Quotas to Existing Projects - -_Available as of v2.0.1_ - -Edit [resource quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) when: - -- You want to limit the resources that a project and its namespaces can use. -- You want to scale the resources available to a project up or down when a research quota is already in effect. - -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. - -1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. - -1. Select a Resource Type. For more information on types, see the [quota type reference.](./quota-type-reference) - -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. - - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | - -1. **Optional:** Add more quotas. - -1. Click **Create**. - -**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, you may still create namespaces, but they will be given a resource quota of 0. Subsequently, Rancher will not allow you to create any resources restricted by this quota. diff --git a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-container-default/_index.md deleted file mode 100644 index 6b4d3c7101..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-container-default/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Setting Container Default Resource Limits -weight: 3 ---- - -_Available as of v2.2.0_ - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. 
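As a concrete illustration, here is a minimal pod sketch whose container declares the required CPU and memory fields. The namespace, pod name, and values are hypothetical.

```
# Assumes a namespace "quota-demo" that already has a CPU/memory quota applied.
cat <<EOF | kubectl apply -n quota-demo -f -
apiVersion: v1
kind: Pod
metadata:
  name: quota-demo-pod
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:
        cpu: 100m        # CPU reservation guaranteed to the container
        memory: 128Mi    # memory reservation guaranteed to the container
      limits:
        cpu: 200m        # maximum CPU the container may consume
        memory: 256Mi    # maximum memory the container may consume
EOF
```

Without the `resources` block, the API server rejects the pod in such a namespace unless a default limit (described next) fills in the values.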
- -To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -### Editing the Container Default Resource Limit - -_Available as of v2.2.0_ - -Edit [container default resource limit]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/) when: - -- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. -- You want to edit the default container resource limit. - -1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to edit the container default resource limit. From that project, select **⋮ > Edit**. -1. Expand **Container Default Resource Limit** and edit the values. - -### Resource Limit Propagation - -When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. - -> **Note:** Before v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you can set a default container resource limit on a project and launch any catalog applications. - -Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. - -### Container Resource Quota Types - -The following resource limits can be configured: - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| -| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | -| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | -| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-namespace-default/_index.md deleted file mode 100644 index c65eba128f..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/override-namespace-default/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Overriding the Default Limit for a Namespace -weight: 2 ---- - -Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. 
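Before or after adjusting a namespace in the UI, you can confirm what is currently enforced from the command line. A quick sketch, using a hypothetical namespace name:

```
# Show the quota Rancher has propagated to the namespace, including current usage.
kubectl describe resourcequota -n namespace-3
```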
- -In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) for `Namespace 3` so that the namespace can access more resources. - -Namespace Default Limit Override -![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) - -How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/) - -### Editing Namespace Resource Quotas - -If there is a [resource quota]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. - -1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - - For more information about each **Resource Type**, see [Resource Quotas]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/). - - >**Note:** - > - >- If a resource quota is not configured for the project, these options will not be available. - >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. - -**Result:** Your override is applied to the namespace's resource quota. diff --git a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/quotas-for-projects/_index.md deleted file mode 100644 index 63a18ba0f4..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/quotas-for-projects/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: How Resource Quotas Work in Rancher Projects -weight: 1 ---- - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. - -In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. - -In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. 
- -Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) - -Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. - -The resource quota includes two limits, which you set while creating or editing a project: - - -- **Project Limits:** - - This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. - -- **Namespace Default Limits:** - - This value is the default resource limit available for each namespace. When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you override it. - -In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. - -Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.png) - -Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. - -The following table explains the key differences between the two quota types. - -| Rancher Resource Quotas | Kubernetes Resource Quotas | -| ---------------------------------------------------------- | -------------------------------------------------------- | -| Applies to projects and namespace. | Applies to namespaces only. | -| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | -| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. 
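To observe this propagation from the Kubernetes side, you can list the quota objects that Rancher creates in each namespace. A small sketch; the namespace name is illustrative:

```
# Every namespace in the project carries a ResourceQuota created by Rancher.
kubectl get resourcequota --all-namespaces

# Inspect the propagated limits and current usage for a single namespace.
kubectl describe resourcequota -n namespace-1
```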
diff --git a/content/rancher/v2.0-v2.4/en/project-admin/tools/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/tools/_index.md deleted file mode 100644 index c1adfb7bc8..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/tools/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Tools for Logging, Monitoring, and More -weight: 2525 ---- - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - - -- [Notifiers](#notifiers) -- [Alerts](#alerts) -- [Logging](#logging) -- [Monitoring](#monitoring) - - - -# Notifiers - -[Notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. - -# Alerts - -[Alerts]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts) are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. - -For details on project-level alerts, see [this page.](./project-alerts) - -# Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debug and troubleshoot problems - -Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. - -For details on setting up logging at the cluster level, refer to the [logging section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging) - -For details on project-level logging, see [this section.](./project-logging) - -# Monitoring - -_Available as of v2.2.0_ - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-monitoring) diff --git a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md deleted file mode 100644 index e3710de9d0..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-alerts/_index.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -title: Project Alerts -weight: 2526 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/project-alerts - - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/ ---- - -To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. 
- -Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) and [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) of events they need to address. - -Before you can receive alerts, one or more [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) must be configured at the cluster level. - -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can manage project alerts. - -This section covers the following topics: - -- [Alerts scope](#alerts-scope) -- [Default project-level alerts](#default-project-level-alerts) -- [Adding project alerts](#adding-project-alerts) -- [Managing project alerts](#managing-project-alerts) -- [Project Alert Rule Configuration](#project-alert-rule-configuration) - - [Pod Alerts](#pod-alerts) - - [Workload Alerts](#workload-alerts) - - [Workload Selector Alerts](#workload-selector-alerts) - - [Metric Expression Alerts](#metric-expression-alerts) - - -# Alerts Scope - -The scope for alerts can be set at either the [cluster level]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/) or project level. - -At the project level, Rancher monitors specific deployments and sends alerts for: - -* Deployment availability -* Workloads status -* Pod status -* The Prometheus expression cross the thresholds - -# Default Project-level Alerts - -When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) for them is configured at the cluster level. - -| Alert | Explanation | -|-------|-------------| -| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | -| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | - -For information on other default alerts, refer to the section on [cluster-level alerts.]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts) - -# Adding Project Alerts - ->**Prerequisite:** Before you can receive project alerts, you must add a notifier. - -1. From the **Global** view, navigate to the project that you want to configure project alerts for. Select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**. - -1. Click **Add Alert Group**. - -1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. - -1. Based on the type of alert you want to create, fill out the form. For help, refer to the [configuration](#project-alert-rule-configuration) section below. - -1. Continue adding more alert rules to the group. - -1. Finally, choose the [notifiers]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers/) that send you alerts. 
- - - You can set up multiple notifiers. - - You can change notifier recipients on the fly. - -1. Click **Create.** - -**Result:** Your alert is configured. A notification is sent when the alert is triggered. - - -# Managing Project Alerts - -To manage project alerts, browse to the project that alerts you want to manage. Then select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**. You can: - -- Deactivate/Reactive alerts -- Edit alert settings -- Delete unnecessary alerts -- Mute firing alerts -- Unmute muted alerts - - -# Project Alert Rule Configuration - -- [Pod Alerts](#pod-alerts) -- [Workload Alerts](#workload-alerts) -- [Workload Selector Alerts](#workload-selector-alerts) -- [Metric Expression Alerts](#metric-expression-alerts) - -# Pod Alerts - -This alert type monitors for the status of a specific pod. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Pod** option, and then select a pod from the drop-down. - -### Is - -Select a pod status that triggers an alert: - -- **Not Running** -- **Not Scheduled** -- **Restarted times within the last Minutes** - -### Send a - -Select the urgency level of the alert. The options are: - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on pod state. For example, select **Info** for Job pod which stop running after job finished. However, if an important pod isn't scheduled, it may affect operations, so choose **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -# Workload Alerts - -This alert type monitors for the availability of a workload. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Choose the **Workload** option. Then choose a workload from the drop-down. - -### Is - -Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. 
-- **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -# Workload Selector Alerts - -This alert type monitors for the availability of all workloads marked with tags that you've specified. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When a - -Select the **Workload Selector** option, and then click **Add Selector** to enter the key value pair for a label. If one of the workloads drops below your specifications, an alert is triggered. This label should be applied to one or more of your workloads. - -### Is - -Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. - -### Send a - -Select the urgency level of the alert. - -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on the percentage you choose and the importance of the workload. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. - -You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. - -# Metric Expression Alerts -_Available as of v2.2.4_ - -If you enable [project monitoring]({{}}/rancher/v2.0-v2.4/en/project-admin/tools/#monitoring), this alert type monitors for the overload from Prometheus expression querying. - -Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. - -### When A - -Input or select an **Expression**. The dropdown shows the original metrics from Prometheus, including: - -- [**Container**](https://github.com/google/cadvisor) -- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) -- **Customize** -- [**Project Level Grafana**](http://docs.grafana.org/administration/metrics/) -- **Project Level Prometheus** - -### Is - -Choose a comparison. - -- **Equal**: Trigger alert when expression value equal to the threshold. -- **Not Equal**: Trigger alert when expression value not equal to the threshold. -- **Greater Than**: Trigger alert when expression value greater than to threshold. -- **Less Than**: Trigger alert when expression value equal or less than the threshold. -- **Greater or Equal**: Trigger alert when expression value greater to equal to the threshold. -- **Less or Equal**: Trigger alert when expression value less or equal to the threshold. - -If applicable, choose a comparison value or a threshold for the alert to be triggered. - -### For - -Select a duration for a trigger alert when the expression value crosses the threshold longer than the configured duration. - -### Send a - -Select the urgency level of the alert. 
- -- **Critical**: Most urgent -- **Warning**: Normal urgency -- **Info**: Least urgent - -Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a expression for container memory close to the limit raises above 60% deems an urgency of **Info**, but raised about 95% deems an urgency of **Critical**. - -### Advanced Options - -By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. - -- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. -- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. -- **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md b/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md deleted file mode 100644 index c4c54f55e3..0000000000 --- a/content/rancher/v2.0-v2.4/en/project-admin/tools/project-logging/_index.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Project Logging -shortTitle: Project Logging -weight: 2527 -aliases: - - /rancher/v2.0-v2.4/en/project-admin/tools/logging - - /rancher/v2.0-v2.4/en/logging/legacy/project-logging - - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/project-logging - - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/ - - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/ ---- - -Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. - -For background information about how logging integrations work, refer to the [cluster administration section.]({{}}/rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/#how-logging-integrations-work) - -Rancher supports the following services: - -- Elasticsearch -- Splunk -- Kafka -- Syslog -- Fluentd - ->**Note:** You can only configure one logging service per cluster or per project. - -Only [administrators]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure Rancher to send Kubernetes logs to a logging service. - -# Requirements - -The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: - -``` -$ docker info | grep 'Logging Driver' -Logging Driver: json-file -``` - -# Advantages - -Setting up a logging service to collect logs from your cluster/project has several advantages: - -- Logs errors and warnings in your Kubernetes infrastructure to a stream. The stream informs you of events like a container crashing, a pod eviction, or a node dying. -- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. -- Helps you when troubleshooting or debugging. 
-- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. - -# Logging Scope - -You can configure logging at either cluster level or project level. - -- [Cluster logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters), it also writes logs for all the Kubernetes system components. - -- Project logging writes logs for every pod in that particular project. - -Logs that are sent to your logging service are from the following locations: - - - Pod logs stored at `/var/log/containers`. - - - Kubernetes system components logs stored at `/var/lib/rancher/rke/logs/`. - -# Enabling Project Logging - -1. From the **Global** view, navigate to the project that you want to configure project logging. - -1. Select **Tools > Logging** in the navigation bar. In versions before v2.2.0, you can choose **Resources > Logging**. - -1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports the following services: - - - [Elasticsearch]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/elasticsearch/) - - [Splunk]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk/) - - [Kafka]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/kafka/) - - [Syslog]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/syslog/) - - [Fluentd]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/fluentd/) - -1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. - - - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. - - - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) - - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) - - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) - - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) - - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) - - - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. - 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. - - - You can use either a self-signed certificate or one provided by a certificate authority. - - - You can generate a self-signed certificate using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. - -1. (Optional) Complete the **Additional Logging Configuration** form. - - 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. 
These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. - - 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. - - 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. - -1. Click **Test**. Rancher sends a test log to the service. - - > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. - -1. Click **Save**. - -**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. - -# Related Links - -[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/_index.md deleted file mode 100644 index 5491d63787..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Rancher Deployment Quick Start Guides -metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. -short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. -weight: 2 ---- ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. - -We have Quick Start Guides for: - -- [Deploying Rancher Server]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. - -- [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload/): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. - -- [Using the CLI]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/cli/): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md deleted file mode 100644 index f991ddeef2..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/cli/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: CLI with Rancher -weight: 100 ---- - -Interact with Rancher using command line interface (CLI) tools from your workstation. - -## Rancher CLI - -Follow the steps in [rancher cli](../../cli). - -Ensure you can run `rancher kubectl get pods` successfully. - - -## kubectl -Install the `kubectl` utility. 
See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - - -Configure kubectl by visiting your cluster in the Rancher Web UI then clicking on `Kubeconfig`, copying contents and putting into your `~/.kube/config` file. - -Run `kubectl cluster-info` or `kubectl get pods` successfully. - -## Authentication with kubectl and kubeconfig Tokens with TTL - -_**Available as of v2.4.6**_ - -_Requirements_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher cli](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see error like: -`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. - -This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: - -1. Local -2. Active Directory -3. FreeIpa, OpenLdap -4. SAML providers - Ping, Okta, ADFS, Keycloak, Shibboleth - -When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. -The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid till [it expires](../../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../api/api-tokens/#deleting-tokens) -Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. - -_Note_ - -As of CLI [v2.4.10](https://github.com/rancher/cli/releases/tag/v2.4.10), the kubeconfig token can be cached at a chosen path with `cache-dir` flag or env var `RANCHER_CACHE_DIR`. - -_**Current Known Issues**_ - -1. If [authorized cluster endpoint]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) is enabled for RKE clusters to [authenticate directly with downstream cluster]({{}}/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) and Rancher server goes down, all kubectl calls will fail after the kubeconfig token expires. No new kubeconfig tokens can be generated if Rancher server isn't accessible. -2. If a kubeconfig token is deleted from Rancher [API tokens]({{}}/rancher/v2.0-v2.4/en/api/api-tokens/#deleting-tokens) page, and the token is still cached, cli won't ask you to login again until the token expires or is deleted. -`kubectl` calls will result into an error like `error: You must be logged in to the server (the server has asked for the client to provide credentials`. Tokens can be deleted using `rancher token delete`. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/_index.md deleted file mode 100644 index f7d4da476a..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Deploying Rancher Server -weight: 100 ---- - -Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. 
- -- [DigitalOcean](./digital-ocean-qs) (uses Terraform) -- [AWS](./amazon-aws-qs) (uses Terraform) -- [Azure](./microsoft-azure-qs) (uses Terraform) -- [GCP](./google-gcp-qs) (uses Terraform) -- [Vagrant](./quickstart-vagrant) - -If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. - -- [Manual Install](./quickstart-manual-setup) diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/_index.md deleted file mode 100644 index 92e07f38fc..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Rancher AWS Quick Start Guide -description: Read this step by step Rancher AWS guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher Server on AWS with a single node cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Amazon AWS will incur charges. - -- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. -- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. -- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the AWS folder containing the terraform files by executing `cd quickstart/aws`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `aws_access_key` - Amazon AWS Access Key - - `aws_secret_key` - Amazon AWS Secret Key - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. -Suggestions include: - - `aws_region` - Amazon AWS region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! 
Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/aws` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/_index.md deleted file mode 100644 index 06f557f5c8..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Rancher DigitalOcean Quick Start Guide -description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher Server on DigitalOcean with a single node cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to DigitalOcean will incur charges. - -- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. -- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/do`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `do_token` - DigitalOcean access key - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. 
-See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. -Suggestions include: - - `do_region` - DigitalOcean region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget - - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 15 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/do` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/google-gcp-qs/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/google-gcp-qs/_index.md deleted file mode 100644 index 31911cc0af..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/google-gcp-qs/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Rancher GCP Quick Start Guide -description: Read this step by step Rancher GCP guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher server on GCP in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Google GCP will incur charges. - -- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. -- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. -- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. 
-- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the GCP folder containing the terraform files by executing `cd quickstart/gcp`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `gcp_account_json` - GCP service account file path and file name - - `rancher_server_admin_password` - Admin password for created Rancher server - -1. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. -Suggestions include: - - `gcp_region` - Google GCP region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget - - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). - -#### Result - -Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/gcp` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md deleted file mode 100644 index 448549c677..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Rancher Azure Quick Start Guide -description: Read this step by step Rancher Azure guide to quickly deploy a Rancher Server with a single node cluster attached. -weight: 100 ---- - -The following steps will quickly deploy a Rancher server on Azure in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. 
For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. -- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. -- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -1. Go into the Azure folder containing the terraform files by executing `cd quickstart/azure`. - -1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -1. Edit `terraform.tfvars` and customize the following variables: - - `azure_subscription_id` - Microsoft Azure Subscription ID - - `azure_client_id` - Microsoft Azure Client ID - - `azure_client_secret` - Microsoft Azure Client Secret - - `azure_tenant_id` - Microsoft Azure Tenant ID - - `rancher_server_admin_password` - Admin password for created Rancher server - -2. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. -Suggestions include: - - `azure_location` - Microsoft Azure region, choose the closest instead of the default - - `prefix` - Prefix for all created resources - - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget - - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) - -1. Run `terraform init`. - -1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). 
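If you need these values again later, Terraform can re-print them from the `quickstart/azure` folder, for example:

```
# Re-display all outputs from the applied configuration.
terraform output

# Or fetch a single value, such as the Rancher server URL.
terraform output rancher_server_url
```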
- -#### Result - -Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/azure` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md deleted file mode 100644 index 753006eb6b..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Manual Quick Start -weight: 300 ---- -Howdy Partner! This tutorial walks you through: - -- Installation of Rancher 2.x -- Creation of your first cluster -- Deployment of an application, Nginx - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Quick Start Outline - -This Quick Start Guide is divided into different tasks for easier consumption. - - - - -1. [Provision a Linux Host](#1-provision-a-linux-host) - -1. [Install Rancher](#2-install-rancher) - -1. [Log In](#3-log-in) - -1. [Create the Cluster](#4-create-the-cluster) - - -
-### 1. Provision a Linux Host - - Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-prem VM -- A bare-metal server - - >**Note:** - > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. - > - > For a full list of port requirements, refer to [Docker Installation]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/). - - Provision the host according to our [Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). - -### 2. Install Rancher - -To install Rancher on your host, connect to it and then use a shell to install. - -1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. - -2. From your shell, enter the following command: - - ``` - sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher - ``` - -**Result:** Rancher is installed. - -### 3. Log In - -Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations. - -1. Open a web browser and enter the IP address of your host: `https://`. - - Replace `` with your host IP address. - -2. When prompted, create a password for the default `admin` account there cowpoke! - -3. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.
If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster.
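If the login page does not come up, it can help to confirm from the Linux host that Rancher is running and answering on port 443. A rough sketch (container names and output will vary):

```bash
sudo docker ps --filter ancestor=rancher/rancher                  # the rancher/rancher container should report a status of "Up"
curl -k -s -o /dev/null -w "%{http_code}\n" https://localhost/   # expect 200 once Rancher has finished starting
```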
- -### 4. Create the Cluster - -Welcome to Rancher! You are now able to create your first Kubernetes cluster. - -In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Custom**. - -3. Enter a **Cluster Name**. - -4. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. - -5. Click **Next**. - -6. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. - -7. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. - -8. Skip the **Labels** stuff. It's not important for now. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - -11. When you finish running the command on your Linux host, click **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -#### Finished - -Congratulations! You have created your first cluster. - -#### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/_index.md deleted file mode 100644 index a996135f28..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Vagrant Quick Start -weight: 200 ---- -The following steps quickly deploy a Rancher Server with a single node cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.0-v2.4/en/installation/). - -## Prerequisites - -- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. -- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. -- At least 4GB of free RAM. - -### Note -- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: - - `vagrant plugin install vagrant-vboxmanage` - - `vagrant plugin install vagrant-vbguest` - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the folder containing the Vagrantfile by executing `cd quickstart/vagrant`. - -3. 
**Optional:** Edit `config.yaml` to: - - - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) - - Change the password of the `admin` user for logging into Rancher. (`default_password`) - -4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. - -5. Once provisioning finishes, go to `https://172.22.101.101` in the browser. The default user/password is `admin/admin`. - -**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/vagrant` folder execute `vagrant destroy -f`. - -2. Wait for the confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md deleted file mode 100644 index e47fa946cc..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Workload with Ingress Quick Start -weight: 100 ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. Leave the remaining options on their default setting. We'll tell you about them later. - -9. Click **Launch**. - -**Result:** - -* Your workload is deployed. This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
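If you also have `kubectl` access to the cluster, the same status can be checked from the command line. A sketch, assuming the workload was deployed into the `default` namespace:

```bash
kubectl -n default get deployments   # the new workload should eventually report READY 1/1
kubectl -n default get pods          # its pod should reach the Running state
```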
-### 2. Expose The Application Via An Ingress - -Now that the application is up and running it needs to be exposed so that other services can connect. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects**. - -3. Open the **Default** project. - -4. Click **Resources > Workloads > Load Balancing.** In versions before v2.3.0, click the **Workloads** tab. Click on the **Load Balancing** tab. - -5. Click **Add Ingress**. - -6. Enter a name i.e. **hello**. - -7. In the **Target** field, drop down the list and choose the name that you set for your service. - -8. Enter `80` in the **Port** field. - -9. Leave everything else as default and click **Save**. - -**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. - -### View Your Application - -From the **Load Balancing** page, click the target link, which will look something like `hello.default.xxx.xxx.xxx.xxx.sslip.io > hello-world`. - -Your application will open in a separate window. - -#### Finished - -Congratulations! You have successfully deployed a workload exposed via an ingress. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md deleted file mode 100644 index 55e1fd93a2..0000000000 --- a/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Workload with NodePort Quick Start -weight: 200 ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. From **Port Mapping**, click **Add Port**. - -9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. - - ![As a dropdown, NodePort (On every node selected)]({{}}/img/rancher/nodeport-dropdown.png) - -10. From the **On Listening Port** field, leave the **Random** value in place. 
- - ![On Listening Port, Random selected]({{}}/img/rancher/listening-port-field.png) - -11. From the **Publish the container port** field, enter port `80`. - - ![Publish the container port, 80 entered]({{}}/img/rancher/container-port-field.png) - -12. Leave the remaining options on their default setting. We'll tell you about them later. - -13. Click **Launch**. - -**Result:** - -* Your workload is deployed. This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
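The randomly assigned listening port can also be read with `kubectl` once the workload is **Active**. A sketch, assuming the workload was deployed into the `default` namespace:

```bash
kubectl -n default get services   # the NodePort appears in the PORT(S) column, for example 80:31568/TCP
```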
- -### 2. Viewing Your Application - -From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens. - -### Attention: Cloud-Hosted Sandboxes - -When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test Nginx in an ssh session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. - -```sh -gettingstarted@rancher:~$ curl http://localhost:31568 - - - - Rancher - - - - - -

-    (HTML markup trimmed; visible page text shown below)
-    Hello world!
-    My hostname is hello-world-66b4b9d88b-78bhx
-    k8s services found 2
-      INGRESS_D1E1A394F61C108633C4BD37AEDDE757 tcp://10.43.203.31:80
-      KUBERNETES tcp://10.43.0.1:443
- - - -gettingstarted@rancher:~$ - -``` - -### Finished - -Congratulations! You have successfully deployed a workload exposed via a NodePort. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.0-v2.4/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.0-v2.4/en/security/_index.md b/content/rancher/v2.0-v2.4/en/security/_index.md deleted file mode 100644 index 876ac3e6e4..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Security -weight: 20 ---- - - - - - - - -
-| Security policy | Reporting process | Announcements |
-|-----------------|-------------------|---------------|
-| Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame. | Please submit possible security issues by emailing security@rancher.com | Subscribe to the Rancher announcements forum for release updates. |
- -Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,]({{}}/rancher/v2.0-v2.4/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. - -On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: - -- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) -- [Guide to hardening Rancher installations](#rancher-hardening-guide) -- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) -- [Third-party penetration test reports](#third-party-penetration-test-reports) -- [Rancher CVEs and resolutions](#rancher-cves-and-resolutions) - -### Running a CIS Security Scan on a Kubernetes Cluster - -_Available as of v2.4.0_ - -Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark. - -The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. - -The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace." - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related to only Scored recommendations. - -When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. - -For details, refer to the section on [security scans.]({{}}/rancher/v2.0-v2.4/en/cis-scans) - -### Rancher Hardening Guide - -The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. - -The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x and v.2.3.x. See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-sssessment) for the full list of security controls. - -> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. 
- -Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -[Hardening Guide v2.4]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.5]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 -[Hardening Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 -[Hardening Guide v2.3]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 -[Hardening Guide v2.2]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 -[Hardening Guide v2.1]({{}}/rancher/v2.0-v2.4/en/security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 - -### The CIS Benchmark and Self-Assessment - -The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). 
- -Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -[Self Assessment Guide v2.4]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.5]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 -[Self Assessment Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 -[Self Assessment Guide v2.3]({{}}/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 -[Self Assessment Guide v2.2]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 -[Self Assessment Guide v2.1]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 - -### Third-party Penetration Test Reports - -Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. - -Results: - -- [Cure53 Pen Test - 7/2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) -- [Untamed Theory Pen Test- 3/2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) - -### Rancher CVEs and Resolutions - -Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](./cve) diff --git a/content/rancher/v2.0-v2.4/en/security/cve/_index.md b/content/rancher/v2.0-v2.4/en/security/cve/_index.md deleted file mode 100644 index 41699809bd..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/cve/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Rancher CVEs and Resolutions -weight: 300 ---- - -Rancher is committed to informing the community of security issues in our products. Rancher will publish CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. - -| ID | Description | Date | Resolution | -|----|-------------|------|------------| -| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. 
This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server, i.e. local server, and return the requested information. You are vulnerable if you are running any Rancher 2.x version. Only valid Rancher users who have some level of permission on the cluster can perform the request. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher where users were granted access to resources regardless of the resource's API group. For example Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. You are vulnerable if you are running any Rancher 2.x version. The extent of the exploit increases if there are other matching CRD resources installed in the cluster. There is no direct mitigation besides upgrading to the patched versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud credential ID that was valid for a given cloud provider could make requests against that cloud provider's API through the proxy API, and the cloud credential would be attached. You are vulnerable if you are running any Rancher 2.2.0 or above and use cloud credentials. The exploit is limited to valid Rancher users. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.0-v2.4/en/upgrades/rollbacks/). 
| -| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | -| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | -| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. 
| 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md deleted file mode 100644 index 8e443a4991..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rancher v2.1 -weight: 5 -aliases: - - /rancher/v2.x/en/security/rancher-2.1/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.1) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.1 | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes 1.11 | Benchmark 1.3.0 - -### Hardening Guide - -This hardening [guide](./hardening-2.1) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md deleted file mode 100644 index aedd034a59..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/hardening-2.1/_index.md +++ /dev/null @@ -1,1177 +0,0 @@ ---- -title: Hardening Guide v2.1 -weight: 104 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.1 - - 
/rancher/v2.x/en/security/rancher-2.1/hardening-2.1/ ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) - -For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.1/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -kernel.panic=10 -kernel.panic_on_oops=1 -``` - -- Run `sysctl -p` to enable the settings. 
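For convenience, all three values can be audited in a single call:

``` bash
# Expected output on a remediated host:
#   vm.overcommit_memory = 1
#   kernel.panic = 10
#   kernel.panic_on_oops = 1
sysctl vm.overcommit_memory kernel.panic kernel.panic_on_oops
```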
- -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. - -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /etc/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: -- resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. - -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /etc/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/encryption.yaml -chmod 0600 /etc/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. 
- -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /etc/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /etc/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/audit.yaml -chmod 0600 /etc/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. - -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /etc/kubernetes/admission.yaml -stat /etc/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /etc/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 500 - burst: 5000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /etc/kubernetes/admission.yaml -touch /etc/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /etc/kubernetes/admission.yaml -chown root:root /etc/kubernetes/event.yaml -chmod 0600 /etc/kubernetes/admission.yaml -chmod 0600 /etc/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /etc/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 500 - burst: 5000 -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix A. 
for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. - -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) -- 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---repair-malformed-updates=false ---service-account-lookup=true ---enable-admission-plugins= 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---experimental-encryption-provider-config=/etc/kubernetes/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit.yaml -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - repair-malformed-updates: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive PodSecurityPolicy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- Upgrade to Rancher v2.1.2 via the Helm chart. While performing the upgrade, provide the following installation flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. 
- -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. - -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. - -**Audit** - -- In the Rancher UI select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
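-
-If scripted auditing is preferred, the same information can be pulled from the Rancher v3 API in the style of the script in section 3.3.1. This is a sketch only: the `/v3/nodeDrivers` collection and the `.name` / `.active` fields are assumptions to verify against your Rancher version, and `<RANCHER_URL>`, `<TOKEN_NAME>` and `<TOKEN_SECRET>` are placeholders; the UI steps above remain the authoritative audit.
-
-``` bash
-#!/bin/bash
-# Print each node driver with its assumed active flag; anything unexpectedly
-# active should be disabled from the Node Drivers page as described above.
-curl -sk -u '<TOKEN_NAME>:<TOKEN_SECRET>' https://<RANCHER_URL>/v3/nodeDrivers \
-  | jq -r '.data[] | "\(.name) active=\(.active)"'
-```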
- ---- - -## Appendix A - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - repair-malformed-updates: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml - admission-control-config-file: "/etc/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /etc/kubernetes/audit.yaml - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: 
ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md deleted file mode 100644 index a485c70735..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rancher v2.2 -weight: 4 -aliases: - - /rancher/v2.x/en/security/rancher-2.2/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.2) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes 1.13 | Benchmark v1.4.0 and v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.2) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md deleted file mode 100644 index 768e53ecca..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/hardening-2.2/_index.md +++ /dev/null @@ -1,1231 +0,0 @@ ---- -title: Hardening Guide v2.2 -weight: 103 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.2 - - /rancher/v2.x/en/security/rancher-2.2/hardening-2.2/ ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.2/). 
- -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -kernel.panic=10 -kernel.panic_on_oops=1 -``` - -- Run `sysctl -p` to enable the settings. - -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. - -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /opt/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. 
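-
-A compact way to run the file checks above in a single pass, assuming GNU coreutils `stat` and the standard `/opt/kubernetes/encryption.yaml` path used throughout this guide:
-
-``` bash
-# Expect "600 root:root", followed by the aescbc provider and its key list.
-stat -c '%a %U:%G' /opt/kubernetes/encryption.yaml
-grep -A 2 'aescbc' /opt/kubernetes/encryption.yaml
-```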
- -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /opt/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/encryption.yaml -chmod 0600 /opt/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. - -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /opt/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/audit.yaml -chmod 0600 /opt/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
- -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /opt/kubernetes/admission.yaml -stat /opt/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/admission.yaml -touch /opt/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/admission.yaml -chown root:root /opt/kubernetes/event.yaml -chmod 0600 /opt/kubernetes/admission.yaml -chmod 0600 /opt/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix A. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
- -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - authorization-mode: "Webhook" - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---encryption-provider-config=/opt/kubernetes/encryption.yaml ---admission-control-config-file=/opt/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/opt/kubernetes/audit.yaml ---tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: 
"json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - - "/opt/kubernetes:/opt/kubernetes" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.2.x, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
- -**Rationale** - -The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. - -**Audit** - -The following script uses the Rancher API to show users with administrator privileges: - -``` bash -#!/bin/bash -for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do - -curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' - -done - -``` - -The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. - -The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. - -**Remediation** - -Remove the `admin` role from any user that does not require administrative privileges. - -## 3.4 - Rancher Management Control Plane Configuration - -### 3.4.1 - Ensure only approved node drivers are active - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that node drivers that are not needed or approved are not active in the Rancher console. - -**Rationale** - -Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. - -**Audit** - -- In the Rancher UI select _Global_ -- Select _Node Drivers_ -- Review the list of node drivers that are in an _Active_ state. - -**Remediation** - -If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
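-
-Where CLI access to the Rancher local cluster is available, the driver state can also be inspected directly from the management CRDs. This is a sketch under assumptions: the `nodedrivers.management.cattle.io` resource and its `spec.displayName` / `spec.active` fields may differ between Rancher releases, so verify them before relying on this check.
-
-``` bash
-# Run with a kubeconfig for the Rancher local cluster; prints one line per
-# node driver with its assumed active flag.
-kubectl get nodedrivers.management.cattle.io \
-  -o jsonpath='{range .items[*]}{.spec.displayName}{"\t"}{.spec.active}{"\n"}{end}'
-```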
- ---- - -## Appendix A - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - authorization-mode: "Webhook" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-api: - pod_security_policy: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - encryption-provider-config: /opt/kubernetes/encryption.yaml - admission-control-config-file: "/opt/kubernetes/admission.yaml" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/var/log/kube-audit:/var/log/kube-audit" - - "/opt/kubernetes:/opt/kubernetes" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -addons: | - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - 
metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md deleted file mode 100644 index e50a8c2f17..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Rancher v2.3.x -weight: 3 -aliases: - - /rancher/v2.x/en/security/rancher-2.3.x/ ---- - -The relevant Hardening Guide and Self Assessment guide depends on your Rancher version: - -- [Rancher v2.3.5](./rancher-v2.3.5) -- [Rancher v2.3.3](./rancher-v2.3.3) -- [Rancher v2.3.0](./rancher-v2.3.0) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md deleted file mode 100644 index 5897146658..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rancher v2.3.0 -weight: 3 -aliases: - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes 1.15 | Benchmark v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md 
b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md deleted file mode 100644 index 7f77f12545..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/_index.md +++ /dev/null @@ -1,1546 +0,0 @@ ---- -title: Hardening Guide v2.3 -weight: 102 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.3 - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/ ---- -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.0-v2.3.2. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher HA Kubernetes cluster host configuration - -(See Appendix A. for full ubuntu `cloud-config` example) - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. 
- -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `vm.panic_on_oom = 0` - -``` bash -sysctl vm.panic_on_oom -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -- Verify `kernel.keys.root_maxkeys = 1000000` - -``` bash -sysctl kernel.keys.root_maxkeys -``` - -- Verify `kernel.keys.root_maxbytes = 25000000` - -``` bash -sysctl kernel.keys.root_maxbytes -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxkeys=1000000 -kernel.keys.root_maxbytes=25000000 -``` - -- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### 1.1.2 - Install the encryption provider configuration on all control plane nodes - -**Profile Applicability** - -- Level 1 - -**Description** - -Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: - -**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` - -**Rationale** - -This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. - -This supports the following controls: - -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) - -**Audit** - -On the control plane hosts for the Rancher HA cluster run: - -``` bash -stat /opt/kubernetes/encryption.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. - -**Remediation** - -- Generate a key and an empty configuration file: - -``` bash -head -c 32 /dev/urandom | base64 -i - -touch /opt/kubernetes/encryption.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/encryption.yaml -chmod 0600 /opt/kubernetes/encryption.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: v1 -kind: EncryptionConfig -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: <32-byte base64 encoded string> - - identity: {} -``` - -Where `secret` is the 32-byte base64-encoded string generated in the first step. - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.3 - Install the audit log configuration on all control plane nodes. - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. - -**Rationale** - -The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. 
- -This supports the following controls: - -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) - -**Audit** - -On each control plane node, run: - -``` bash -stat /opt/kubernetes/audit.yaml -``` - -Ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` -- The file contains: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/audit.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/audit.yaml -chmod 0600 /opt/kubernetes/audit.yaml -``` - -- Set the contents to: - -``` yaml -apiVersion: audit.k8s.io/v1beta1 -kind: Policy -rules: -- level: Metadata -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.1.4 - Place Kubernetes event limit configuration on each control plane host - -**Profile Applicability** - -- Level 1 - -**Description** - -Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. - -**Rationale** - -Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
- -This supports the following control: - -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) - -**Audit** - -On nodes with the `controlplane` role run: - -``` bash -stat /opt/kubernetes/admission.yaml -stat /opt/kubernetes/event.yaml -``` - -For each file, ensure that: - -- The file is present -- The file mode is `0600` -- The file owner is `root:root` - -For `admission.yaml` ensure that the file contains: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -For `event.yaml` ensure that the file contains: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**Remediation** - -On nodes with the `controlplane` role: - -- Generate an empty configuration file: - -``` bash -touch /opt/kubernetes/admission.yaml -touch /opt/kubernetes/event.yaml -``` - -- Set the file ownership to `root:root` and the permissions to `0600` - -``` bash -chown root:root /opt/kubernetes/admission.yaml -chown root:root /opt/kubernetes/event.yaml -chmod 0600 /opt/kubernetes/admission.yaml -chmod 0600 /opt/kubernetes/event.yaml -``` - -- For `admission.yaml` set the contents to: - -``` yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: EventRateLimit - path: /opt/kubernetes/event.yaml -``` - -- For `event.yaml` set the contents to: - -``` yaml -apiVersion: eventratelimit.admission.k8s.io/v1alpha1 -kind: Configuration -limits: -- type: Server - qps: 5000 - burst: 20000 -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory has permissions of 700 or more restrictive. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. - -**Audit** - -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %a /var/lib/rancher/etcd -``` - -Verify that the permissions are `700` or more restrictive. - -**Remediation** - -Follow the steps as documented in [1.4.12](#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. - -### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory ownership is set to `etcd:etcd`. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. - -**Audit** - -On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). 
For example, - -``` bash -stat -c %U:%G /var/lib/rancher/etcd -``` - -Verify that the ownership is set to `etcd:etcd`. - -**Remediation** - -- On the etcd server node(s) add the `etcd` user: - -``` bash -useradd etcd -``` - -Record the uid/gid: - -``` bash -id etcd -``` - -- Add the following to the RKE `cluster.yml` etcd section under `services`: - -``` yaml -services: - etcd: - uid: - gid: -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix B. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - extra_args: - authorization-mode: "Webhook" - streaming-connection-idle-timeout: "" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. 
-Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. - -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" ---encryption-provider-config=/opt/kubernetes/encryption.yaml ---admission-control-config-file=/opt/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=5 ---audit-log-maxbackup=5 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/opt/kubernetes/audit.yaml ---tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube-api: - pod_security_policy: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - audit-log-maxsize: "100" - audit-log-format: "json" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - … - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole psp:restricted -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding psp:restricted -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.3.x, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local administrator password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local administrator password should be changed from the default. - -**Rationale** - -The default administrator password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
-
-**Rationale**
-
-The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages.
-
-**Audit**
-
-The following script uses the Rancher API to show users with administrator privileges:
-
-``` bash
-#!/bin/bash
-for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do
-
-curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"'
-
-done
-
-```
-
-The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access.
-
-The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles.
-
-**Remediation**
-
-Remove the `admin` role from any user that does not require administrative privileges.
-
-## 3.4 - Rancher Management Control Plane Configuration
-
-### 3.4.1 - Ensure only approved node drivers are active
-
-**Profile Applicability**
-
-- Level 1
-
-**Description**
-
-Ensure that node drivers that are not needed or approved are not active in the Rancher console.
-
-**Rationale**
-
-Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes.
-
-**Audit**
-
-- In the Rancher UI select _Global_
-- Select _Node Drivers_
-- Review the list of node drivers that are in an _Active_ state.
-
-**Remediation**
-
-If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it.
-
----
-
-## Appendix A - Complete ubuntu `cloud-config` Example
-
-A `cloud-config` file to automate the manual hardening steps during node deployment.
- -``` -#cloud-config -bootcmd: -- apt-get update -- apt-get install -y apt-transport-https -apt: - sources: - docker: - source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" - keyid: 0EBFCD88 -packages: -- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] -- jq -write_files: -# 1.1.1 - Configure default sysctl settings on all hosts -- path: /etc/sysctl.d/90-kubelet.conf - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxkeys=1000000 - kernel.keys.root_maxbytes=25000000 -# 1.1.2 encription provider -- path: /opt/kubernetes/encryption.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: QRCexFindur3dzS0P/UmHs5xA6sKu58RbtWOQFarfh4= - - identity: {} -# 1.1.3 audit log -- path: /opt/kubernetes/audit.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: audit.k8s.io/v1beta1 - kind: Policy - rules: - - level: Metadata -# 1.1.4 event limit -- path: /opt/kubernetes/admission.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: apiserver.k8s.io/v1alpha1 - kind: AdmissionConfiguration - plugins: - - name: EventRateLimit - path: /opt/kubernetes/event.yaml -- path: /opt/kubernetes/event.yaml - owner: root:root - permissions: '0600' - content: | - apiVersion: eventratelimit.admission.k8s.io/v1alpha1 - kind: Configuration - limits: - - type: Server - qps: 5000 - burst: 20000 -# 1.4.12 etcd user -groups: - - etcd -users: - - default - - name: etcd - gecos: Etcd user - primary_group: etcd - homedir: /var/lib/etcd -# 1.4.11 etcd data dir -runcmd: - - chmod 0700 /var/lib/etcd - - usermod -G docker -a ubuntu - - sysctl -p /etc/sysctl.d/90-kubelet.conf -``` - -## Appendix B - Complete RKE `cluster.yml` Example - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] - -services: - kubelet: - extra_args: - streaming-connection-idle-timeout: "1800s" - authorization-mode: "Webhook" - protect-kernel-defaults: "true" - make-iptables-util-chains: "true" - event-qps: "0" - anonymous-auth: "false" - feature-gates: "RotateKubeletServerCertificate=true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - generate_serving_certificate: true - kube-api: - pod_security_policy: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: "false" - profiling: "false" - service-account-lookup: "true" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - audit-log-path: "/var/log/kube-audit/audit-log.json" - audit-log-maxage: "5" - audit-log-maxbackup: "5" - 
audit-log-maxsize: "100" - audit-log-format: "json" - audit-policy-file: /opt/kubernetes/audit.yaml - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - services: - etcd: - uid: 1001 - gid: 1001 -addons: | - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: extensions/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -## Appendix C - Complete RKE Template Example - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false 
-enable_cluster_monitoring: false -enable_network_policy: false -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - ignore_docker_version: true -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1001 - retention: 72h - snapshot: false - uid: 1001 - kube_api: - always_pull_images: false - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - audit-log-format: json - audit-log-maxage: '5' - audit-log-maxbackup: '5' - audit-log-maxsize: '100' - audit-log-path: /var/log/kube-audit/audit-log.json - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md deleted file mode 100644 index 98c78426c6..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rancher v2.3.3 -weight: 2 -aliases: - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3.3) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, 
and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 - -### Hardening Guide - -This hardening [guide](./hardening-2.3.3) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md deleted file mode 100644 index bf75ee6a04..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/_index.md +++ /dev/null @@ -1,2045 +0,0 @@ ---- -title: Hardening Guide v2.3.3 -weight: 101 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.3.3 - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/ ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.3. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf) - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3]({{}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.3/). - -### Profile Definitions - -The following profile definitions agree with the CIS benchmarks for Kubernetes. - -A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. - -#### Level 1 - -Items in this profile intend to: - -- offer practical advice appropriate for the environment; -- deliver an obvious security benefit; and -- not alter the functionality or utility of the environment beyond an acceptable margin - -#### Level 2 - -Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: - -- are intended for use in environments or use cases where security is paramount -- act as a defense in depth measure -- may negatively impact the utility or performance of the technology - ---- - -## 1.1 - Rancher RKE Kubernetes cluster host configuration - -(See Appendix A. 
for full ubuntu `cloud-config` example) - -### 1.1.1 - Configure default sysctl settings on all hosts - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure sysctl settings to match what the kubelet would set if allowed. - -**Rationale** - -We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. - -This supports the following control: - -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) - -**Audit** - -- Verify `vm.overcommit_memory = 1` - -``` bash -sysctl vm.overcommit_memory -``` - -- Verify `vm.panic_on_oom = 0` - -``` bash -sysctl vm.panic_on_oom -``` - -- Verify `kernel.panic = 10` - -``` bash -sysctl kernel.panic -``` - -- Verify `kernel.panic_on_oops = 1` - -``` bash -sysctl kernel.panic_on_oops -``` - -- Verify `kernel.keys.root_maxkeys = 1000000` - -``` bash -sysctl kernel.keys.root_maxkeys -``` - -- Verify `kernel.keys.root_maxbytes = 25000000` - -``` bash -sysctl kernel.keys.root_maxbytes -``` - -**Remediation** - -- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: - -``` plain -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxkeys=1000000 -kernel.keys.root_maxbytes=25000000 -``` - -- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory has permissions of 700 or more restrictive. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. - -**Audit** - -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %a /var/lib/etcd -``` - -Verify that the permissions are `700` or more restrictive. - -**Remediation** - -Follow the steps as documented in [1.4.12](#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. - -### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure that the etcd data directory ownership is set to `etcd:etcd`. - -**Rationale** - -etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. - -**Audit** - -On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -stat -c %U:%G /var/lib/etcd -``` - -Verify that the ownership is set to `etcd:etcd`. 
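-
-As a quick spot check, the permission (1.4.11) and ownership (1.4.12) audits above can be combined into a single `stat` call. This is a minimal sketch that assumes the default `/var/lib/etcd` data directory; substitute the `--data-dir` value found with `ps` if it differs.
-
-``` bash
-# Combined check for controls 1.4.11 and 1.4.12.
-# Adjust the path if the --data-dir found above is different.
-stat -c "permissions=%a owner=%U:%G" /var/lib/etcd
-# A compliant node prints: permissions=700 owner=etcd:etcd
-```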
- -**Remediation** - -- On the etcd server node(s) add the `etcd` user: - -``` bash -useradd -c "Etcd user" -d /var/lib/etcd etcd -``` - -Record the uid/gid: - -``` bash -id etcd -``` - -- Add the following to the RKE `cluster.yml` etcd section under `services`: - -``` yaml -services: - etcd: - uid: - gid: -``` - -## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE - -(See Appendix B. for full RKE `cluster.yml` example) - -### 2.1.1 - Configure kubelet options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure Kubelet options are configured to match CIS controls. - -**Rationale** - -To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. - -- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) -- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) -- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) -- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) -- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) -- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) -- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) - -**Audit** - -Inspect the Kubelet containers on all hosts and verify that they are running with the following options: - -- `--streaming-connection-idle-timeout=` -- `--authorization-mode=Webhook` -- `--protect-kernel-defaults=true` -- `--make-iptables-util-chains=true` -- `--event-qps=0` -- `--anonymous-auth=false` -- `--feature-gates="RotateKubeletServerCertificate=true"` -- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` - -**Remediation** - -- Add the following to the RKE `cluster.yml` kubelet section under `services`: - -``` yaml -services: - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" -``` - - Where `` is in a form like `1800s`. - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.2 - Configure kube-api options - -**Profile Applicability** - -- Level 1 - -**Description** - -Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. - -**NOTE:** - -Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. -Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. - -**Rationale** - -To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy ---encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=30 ---audit-log-maxbackup=10 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit-policy.yaml ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -For k8s 1.14 `enable-admission-plugins` should be - -``` yaml - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 2.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 2.1.5 - Configure addons and PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole restricted-clusterrole -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -kubectl get clusterrolebinding restricted-clusterrolebinding -``` - -- Verify the restricted PSP is present. 
- -``` bash -kubectl get psp restricted-psp -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -addons: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted-psp - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: restricted-clusterrole - rules: - - apiGroups: - - extensions - resourceNames: - - restricted-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: restricted-clusterrolebinding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: restricted-clusterrole - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -## 3.1 - Rancher Management Control Plane Installation - -### 3.1.1 - Disable the local cluster option - -**Profile Applicability** - -- Level 2 - -**Description** - -When deploying Rancher, disable the local cluster option on the Rancher Server. - -**NOTE:** This requires Rancher v2.1.2 or above. - -**Rationale** - -Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. - -**Audit** - -- Verify the Rancher deployment has the `--add-local=false` option set. 
- -``` bash -kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' -``` - -- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. - -**Remediation** - -- While upgrading or installing Rancher 2.3.3 or above, provide the following flag: - -``` text ---set addLocal="false" -``` - -### 3.1.2 - Enable Rancher Audit logging - -**Profile Applicability** - -- Level 1 - -**Description** - -Enable Rancher’s built-in audit logging capability. - -**Rationale** - -Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. - -**Audit** - -- Verify that the audit log parameters were passed into the Rancher deployment. - -``` -kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog -``` - -- Verify that the log is going to the appropriate destination, as set by -`auditLog.destination` - - - `sidecar`: - - 1. List pods: - - ``` bash - kubectl get pods -n cattle-system - ``` - - 2. Tail logs: - - ``` bash - kubectl logs -n cattle-system -c rancher-audit-log - ``` - - - `hostPath` - - 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. - -**Remediation** - -Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. - -#### Reference - -- - -## 3.2 - Rancher Management Control Plane Authentication - -### 3.2.1 - Change the local admin password from the default value - -**Profile Applicability** - -- Level 1 - -**Description** - -The local admin password should be changed from the default. - -**Rationale** - -The default admin password is common across all Rancher installations and should be changed immediately upon startup. - -**Audit** - -Attempt to login into the UI with the following credentials: - - Username: admin - - Password: admin - -The login attempt must not succeed. - -**Remediation** - -Change the password from `admin` to a password that meets the recommended password standards for your organization. - -### 3.2.2 - Configure an Identity Provider for Authentication - -**Profile Applicability** - -- Level 1 - -**Description** - -When running Rancher in a production environment, configure an identity provider for authentication. - -**Rationale** - -Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. - -**Audit** - -- In the Rancher UI, select _Global_ -- Select _Security_ -- Select _Authentication_ -- Ensure the authentication provider for your environment is active and configured correctly - -**Remediation** - -Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. - -#### Reference - -- - -## 3.3 - Rancher Management Control Plane RBAC - -### 3.3.1 - Ensure that administrator privileges are only granted to those who require them - -**Profile Applicability** - -- Level 1 - -**Description** - -Restrict administrator access to only those responsible for managing and operating the Rancher server. 
-
-**Rationale**
-
-The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages.
-
-**Audit**
-
-The following script uses the Rancher API to show users with administrator privileges. Replace `token-<id>:<secret>` with an API token and `<RANCHER_URL>` with the URL of your Rancher server:
-
-``` bash
-#!/bin/bash
-for i in $(curl -sk -u 'token-<id>:<secret>' https://<RANCHER_URL>/v3/users | jq -r .data[].links.globalRoleBindings); do
-
-curl -sk -u 'token-<id>:<secret>' $i | jq '.data[] | "\(.userId) \(.globalRoleId)"'
-
-done
-
-```
-
-The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access.
-
-The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles.
-
-**Remediation**
-
-Remove the `admin` role from any user that does not require administrative privileges.
-
-## 3.4 - Rancher Management Control Plane Configuration
-
-### 3.4.1 - Ensure only approved node drivers are active
-
-**Profile Applicability**
-
-- Level 1
-
-**Description**
-
-Ensure that node drivers that are not needed or approved are not active in the Rancher console.
-
-**Rationale**
-
-Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes.
-
-**Audit**
-
-- In the Rancher UI select _Global_
-- Select _Node Drivers_
-- Review the list of node drivers that are in an _Active_ state.
-
-**Remediation**
-
-If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it.
-
-## 4.1 - Rancher Kubernetes Custom Cluster Configuration via RKE
-
-(See Appendix C for a full RKE template example.)
-
-### 4.1.1 - Configure kubelet options
-
-**Profile Applicability**
-
-- Level 1
-
-**Description**
-
-Ensure Kubelet options are configured to match CIS controls.
-
-**Rationale**
-
-To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet.
-
-- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored)
-- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored)
-- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored)
-- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored)
-- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored)
-- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored)
-- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored)
-- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored)
-
-**Audit**
-
-Inspect the Kubelet containers on all hosts and verify that they are running with the following options:
-
-- `--streaming-connection-idle-timeout=<duration>`
-- `--authorization-mode=Webhook`
-- `--protect-kernel-defaults=true`
-- `--make-iptables-util-chains=true`
-- `--event-qps=0`
-- `--anonymous-auth=false`
-- `--feature-gates="RotateKubeletServerCertificate=true"`
-- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"`
-
-**Remediation**
-
-- Add the following to the RKE `cluster.yml` kubelet section under `services`:
-
-``` yaml
-services:
-  kubelet:
-    generate_serving_certificate: true
-    extra_args:
-      feature-gates: "RotateKubeletServerCertificate=true"
-      protect-kernel-defaults: "true"
-      tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
-```
-
-  Where `<duration>`, the value for the `--streaming-connection-idle-timeout` kubelet argument, is in a form like `1800s`.
-
-- Reconfigure the cluster:
-
-``` bash
-rke up --config cluster.yml
-```
-
-### 4.1.2 - Configure kube-api options
-
-**Profile Applicability**
-
-- Level 1
-
-**Description**
-
-Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls.
-
-**NOTE:**
-
-Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to the overhead of always pulling images.
-Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working.
-
-**Rationale**
-
-To pass the following controls for the kube-api server, ensure the RKE configuration passes the appropriate options.
- -- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) -- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) -- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) -- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) -- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) -- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) -- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) -- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) -- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) -- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) -- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) -- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) -- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) -- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) -- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: - - ``` bash - docker inspect kube-apiserver - ``` - -- Look for the following options in the command section of the output: - -``` text ---anonymous-auth=false ---profiling=false ---service-account-lookup=true ---enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy ---encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml ---admission-control-config-file=/etc/kubernetes/admission.yaml ---audit-log-path=/var/log/kube-audit/audit-log.json ---audit-log-maxage=30 ---audit-log-maxbackup=10 ---audit-log-maxsize=100 ---audit-log-format=json ---audit-policy-file=/etc/kubernetes/audit-policy.yaml ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -- In the `volume` section of the output ensure the bind mount is present: - -``` text -/var/log/kube-audit:/var/log/kube-audit -``` - -**Remediation** - -- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: - -``` yaml -services: - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" -``` - -For k8s 1.14 `enable-admission-plugins` should be - -``` yaml - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -**NOTE:** - -Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. - -### 4.1.3 - Configure scheduler options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate options for the Kubernetes scheduling service. - -**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. - -- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) -- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) - -**Audit** - -- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: - -``` bash -docker inspect kube-scheduler -``` - -- Verify the following options are set in the `command` section. - -``` text ---profiling=false ---address=127.0.0.1 -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 4.1.4 - Configure controller options - -**Profile Applicability** - -- Level 1 - -**Description** - -Set the appropriate arguments on the Kubernetes controller manager. - -5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. - -**Rationale** - -To address the following controls the options need to be passed to the Kubernetes controller manager. 
- -- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) -- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) -- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) -- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) - -**Audit** - -- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: - -``` bash -docker inspect kube-controller-manager -``` - -- Verify the following options are set in the `command` section: - -``` text ---terminated-pod-gc-threshold=1000 ---profiling=false ---address=127.0.0.1 ---feature-gates="RotateKubeletServerCertificate=true" -``` - -**Remediation** - -- In the RKE `cluster.yml` file ensure the following options are set: - -``` yaml -services: - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" -``` - -- Reconfigure the cluster: - -``` bash -rke up --config cluster.yml -``` - -### 4.1.5 - Check PSPs - -**Profile Applicability** - -- Level 1 - -**Description** - -Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. - -**Rationale** - -To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. - -- 1.7.1 - Do not admit privileged containers (Not Scored) -- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) -- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) -- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) -- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) -- 1.7.6 - Do not admit root containers (Not Scored) -- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) - -**Audit** - -- Verify that the `cattle-system` namespace exists: - -``` bash -kubectl get ns |grep cattle -``` - -- Verify that the roles exist: - -``` bash -kubectl get role default-psp-role -n ingress-nginx -kubectl get role default-psp-role -n cattle-system -kubectl get clusterrole restricted-clusterrole -``` - -- Verify the bindings are set correctly: - -``` bash -kubectl get rolebinding -n ingress-nginx default-psp-rolebinding -kubectl get rolebinding -n cattle-system default-psp-rolebinding -``` - -- Verify the restricted PSP is present. - -``` bash -kubectl get psp restricted-psp -``` - ---- - -## Appendix A - Complete ubuntu `cloud-config` Example - -`cloud-config` file to automate hardening manual steps on nodes deployment. 
- -``` -#cloud-config -bootcmd: -- apt-get update -- apt-get install -y apt-transport-https -apt: - sources: - docker: - source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" - keyid: 0EBFCD88 -packages: -- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] -- jq -write_files: -# 1.1.1 - Configure default sysctl settings on all hosts -- path: /etc/sysctl.d/90-kubelet.conf - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxkeys=1000000 - kernel.keys.root_maxbytes=25000000 -# 1.4.12 etcd user -groups: - - etcd -users: - - default - - name: etcd - gecos: Etcd user - primary_group: etcd - homedir: /var/lib/etcd -# 1.4.11 etcd data dir -runcmd: - - chmod 0700 /var/lib/etcd - - usermod -G docker -a ubuntu - - sysctl -p /etc/sysctl.d/90-kubelet.conf -``` - -## Appendix B - Complete RKE `cluster.yml` Example - -Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. - -{{% accordion id="cluster-1.14" label="RKE yaml for k8s 1.14" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -authorization: {} -bastion_host: - ssh_agent_auth: false -cloud_provider: {} -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.14.9-rancher1-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -restore: - restore: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube-api: - always_pull_images: true - audit_log: - enabled: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube-controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - protect-kernel-defaults: 'true' - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: {} - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' -ssh_agent_auth: false -``` - -{{% /accordion %}} - -{{% accordion id="cluster-1.15" label="RKE yaml for k8s 1.15" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.15.6-rancher1-2 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -ssh_agent_auth: false -``` - -{{% /accordion %}} - -{{% accordion id="cluster-1.16" label="RKE yaml for k8s 1.16" %}} - -``` yaml -nodes: -- address: 18.191.190.205 - internal_address: 172.31.24.213 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.203 - internal_address: 172.31.24.203 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -- address: 18.191.190.10 - internal_address: 172.31.24.244 - user: ubuntu - role: [ "controlplane", "etcd", "worker" ] -addon_job_timeout: 30 -authentication: - strategy: x509 -ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# -ingress: - provider: nginx -kubernetes_version: v1.16.3-rancher1-1 -monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# -network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# -services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" -ssh_agent_auth: false -``` - -{{% /accordion %}} - -## Appendix C - Complete RKE Template Example - -Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. 
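-
-For example, a quick way to confirm those IDs (assuming the `etcd` user and group were created during node provisioning, as in the Appendix A `cloud-config`) is to query them on an etcd node before filling in the template:
-
-``` bash
-# Print the numeric user and group IDs of the etcd account on an etcd node.
-# Use these values for the etcd uid and gid fields referenced above.
-id -u etcd
-id -g etcd
-```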
- - -{{% accordion id="k8s-1.14" label="RKE template for k8s 1.14" %}} - -``` yaml -# -# Cluster Config -# -answers: {} -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: false -name: test-35378 -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - authorization: {} - bastion_host: - ssh_agent_auth: false - cloud_provider: {} - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.14.9-rancher1-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal - restore: - restore: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube-api: - always_pull_images: true - audit_log: - enabled: true - event_rate_limit: - enabled: true - extra_args: - anonymous-auth: 'false' - enable-admission-plugins: >- - ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit - profiling: 'false' - service-account-lookup: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: - - '/opt/kubernetes:/opt/kubernetes' - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube-controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - protect-kernel-defaults: 'true' - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: {} - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} - -{{% accordion id="k8s-1.15" label="RKE template for k8s 1.15" %}} - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker 
-enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.6-rancher1-2 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} - -{{% accordion id="k8s-1.16" label="RKE template for k8s 1.16" %}} - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false 
-enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.16.3-rancher1-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 1000 - retention: 72h - snapshot: false - uid: 1000 - kube_api: - always_pull_images: true - pod_security_policy: true - service_node_port_range: 30000-32767 - event_rate_limit: - enabled: true - audit_log: - enabled: true - secrets_encryption_config: - enabled: true - extra_args: - anonymous-auth: "false" - enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" - profiling: "false" - service-account-lookup: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: - - "/opt/kubernetes:/opt/kubernetes" - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - kube-controller: - extra_args: - profiling: "false" - address: "127.0.0.1" - terminated-pod-gc-threshold: "1000" - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - extra_args: - profiling: "false" - address: "127.0.0.1" - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -{{% /accordion %}} diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md deleted file mode 100644 index e6b4582af9..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/_index.md +++ /dev/null @@ 
-1,22 +0,0 @@ ---- -title: Rancher v2.3.5 -weight: 1 -aliases: - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.3.5) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 - -### Hardening Guide - -This hardening [guide](./hardening-2.3.5) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15 \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md deleted file mode 100644 index 3fbb7f27f1..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/_index.md +++ /dev/null @@ -1,2269 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 -weight: 205 -aliases: - - /rancher/v2.0-v2.4/en/security/benchmark-2.3.5 - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/ ---- - -### CIS Kubernetes Benchmark v1.5 - Rancher v2.3.5 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.3.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. 
Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files.
-
-Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing.
-When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with a valid config) tools, which are required for the testing and evaluation of test results.
-
-> NOTE: only scored tests are covered in this guide.
-
-### Controls
-
---
-## 1 Master Node Security Configuration
-### 1.1 Master Node Configuration Files
-
-#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
-
-#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
-
-#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
-
-#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
-
-#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
-
-#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
-
-#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
-
-#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
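-
-As a quick illustration of that model (a sketch, not one of the scored checks), the arguments a given service was started with can be listed directly from its running container using the docker and jq tools referenced in the testing methodology above:
-
-``` bash
-# Example: dump the runtime arguments of the kube-apiserver container.
-# The same pattern works for kube-controller-manager, kube-scheduler, kubelet and etcd.
-docker inspect kube-apiserver | jq -r '.[0].Args[]'
-```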
- -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. 
All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. -`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
- -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. -For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. 
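Because the expected result accepts either an explicit `--service-account-lookup=true` or the flag being absent, it can be useful to see which case applies on a given control plane node. The sketch below reuses the guide's own audit command; the trailing `tr`/`grep` is an illustrative addition, and no output simply means the flag is absent and the Kubernetes default of `true` applies.

``` bash
# Check whether --service-account-lookup was passed explicitly to kube-apiserver.
/bin/ps -ef | grep kube-apiserver | grep -v grep | tr ' ' '\n' | grep -- '--service-account-lookup'
```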
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the client certificate authority file. - -``` bash ---client-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the etcd certificate authority file parameter. - -``` bash ---etcd-cafile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-cafile' is present -``` - -#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. 
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--encryption-provider-config` parameter to the path of that file: - -``` bash ---encryption-provider-config= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--encryption-provider-config' is present -``` - -#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a `EncryptionConfig` file. -In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. - -**Audit Script:** 1.2.34.sh - -``` -#!/bin/bash -e - -check_file=${1} - -grep -q -E 'aescbc|kms|secretbox' ${check_file} -if [ $? -eq 0 ]; then - echo "--pass" - exit 0 -else - echo "fail: encryption provider found in ${check_file}" - exit 1 -fi -``` - -**Audit Execution:** - -``` -./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 1.3 Controller Manager - -#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, -for example: - -``` bash ---terminated-pod-gc-threshold=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--terminated-pod-gc-threshold' is present -``` - -#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. 
- -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. 
- -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. - -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the file permissions of the - -``` bash ---client-ca-file chmod 644 -``` - -**Audit:** - -``` -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Expected result**: - -``` -'644' is equal to '644' OR '640' is present OR '600' is present -``` - -#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the ownership of the `--client-ca-file`. 
- -``` bash -chown root:root -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -### 4.2 Kubelet - -#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to -`false`. -If using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---anonymous-auth=false -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If -using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---authorization-mode=Webhook -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
-If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
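Although this check is Not Applicable for RKE, the same outcome is handled through the cluster configuration rather than on the node. Below is a short sketch of the kubelet settings from the hardened reference `cluster.yml` reproduced later in this guide (the `tls-cipher-suites` argument is omitted here for brevity).

``` yaml
# Kubelet settings from the hardened reference configuration: RKE provisions a
# serving certificate for the kubelet and enables rotation via a feature gate,
# instead of pointing the kubelet at static --tls-cert-file/--tls-private-key-file paths.
services:
  kubelet:
    generate_serving_certificate: true
    extra_args:
      feature-gates: "RotateKubeletServerCertificate=true"
      protect-kernel-defaults: "true"
```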
- -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. 
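The `restricted` PodSecurityPolicy used by the hardening guide, reproduced in full later in this document, meets this control simply by omitting `hostPID`. The fragment below is only an illustrative minimal sketch of such a policy; the name and field values are assumptions chosen to make the object valid, not a replacement for the reference policy.

``` yaml
# Minimal PSP sketch: hostPID is omitted, so it defaults to false and the
# policy is counted by the audit below.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example
spec:
  privileged: false
  allowPrivilegeEscalation: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
    - configMap
    - secret
    - emptyDir
```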
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? 
-gt 0 ]]; then - echo "fail: kubectl failed" - exit 1 -fi - -default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) - -echo "--count=${default_resources}" -``` - -**Audit Execution:** - -``` -./5.6.4.sh -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md deleted file mode 100644 index 7065ae7b97..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/_index.md +++ /dev/null @@ -1,716 +0,0 @@ ---- -title: Hardening Guide v2.3.5 -weight: 100 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.3.5 - - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/ ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15 - - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Hardening_Guide.pdf) - -### Overview - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.5]({{< baseurl >}}/rancher/v2.0-v2.4/en/security/benchmark-2.3.5/). - -#### Known Issues - -- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. -- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. In addition the default service accounts should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -### Configure Kernel Runtime Parameters - -The following `sysctl` configuration is recommended for all nodes type in the cluster. 
Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: - -``` -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxbytes=25000000 -``` - -Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### Configure `etcd` user and group -A user account and group for the **etcd** service is required to be setup before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. - -#### create `etcd` user and group -To create the **etcd** group run the following console commands. - -``` -groupadd --gid 52034 etcd -useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` - -Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: - -``` yaml -services: - etcd: - gid: 52034 - uid: 52034 -``` - -#### Set `automountServiceAccountToken` to `false` for `default` service accounts -Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -For each namespace the **default** service account must include this value: - -``` -automountServiceAccountToken: false -``` - -Save the following yaml to a file called `account_update.yaml` - -``` yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default -automountServiceAccountToken: false -``` - -Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" -done -``` - -### Ensure that all Namespaces have Network Policies defined - -Running different applications on the same Kubernetes cluster creates a risk of one -compromised application attacking a neighboring application. Network segmentation is -important to ensure that containers can communicate only with those they are supposed -to. A network policy is a specification of how selections of pods are allowed to -communicate with each other and other network endpoints. - -Network Policies are namespace scoped. When a network policy is introduced to a given -namespace, all traffic not allowed by the policy is denied. However, if there are no network -policies in a namespace all traffic will be allowed into and out of the pods in that -namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. -This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. -Additional information about CNI providers can be found -[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) - -Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a -**permissive** example is provide below. 
If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. - -### Reference Hardened RKE `cluster.yml` configuration -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: 
ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: 
"" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation) for additional installation and RKE Template details. - -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - 
apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
- -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md deleted file mode 100644 index 137759fe75..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Rancher v2.4 -weight: 2 -aliases: - - /rancher/v2.x/en/security/rancher-2.4/ ---- - -### Self Assessment Guide - -This [guide](./benchmark-2.4) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 - -### Hardening Guide - -This hardening [guide](./hardening-2.4) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md deleted file mode 100644 index f1e0767ab9..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/benchmark-2.4/_index.md +++ /dev/null @@ -1,2269 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.4 -weight: 204 -aliases: - - /rancher/v2.0-v2.4/en/security/benchmark-2.4 - - /rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/ ---- - -### CIS Kubernetes Benchmark v1.5 - Rancher v2.4 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
- -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- -## 1 Master Node Security Configuration -### 1.1 Master Node Configuration Files - -#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. 
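Controls 1.1.1 through 1.1.4 are marked `Not Applicable` because RKE passes all configuration as container arguments. It can be useful to confirm this directly on a node before recording the result. The sketch below is not part of the official benchmark audit, and it assumes the control plane containers carry the usual RKE names (for example `kube-apiserver` and `kube-controller-manager`):

``` bash
# Print the arguments the API server container was started with.
# There is no pod specification file to check; the flags shown here are the
# complete configuration. Repeat for kube-controller-manager, kube-scheduler, etc.
docker inspect kube-apiserver | jq -r '.[0].Args[]'
```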
- -#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. 
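Although 1.1.13 and 1.1.14 are not applicable to the cluster nodes, the same intent can be applied on the machine where RKE was run and where `kube_config_cluster.yml` is generated. A minimal sketch, not part of the benchmark audit (adjust the owner to whichever account operates RKE):

``` bash
# Restrict the generated kubeconfig so only its owner can read or modify it.
chmod 600 kube_config_cluster.yml
chown root:root kube_config_cluster.yml
```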
- -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. 
-`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. 
-For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the client certificate authority file. - -``` bash ---client-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the etcd certificate authority file parameter. - -``` bash ---etcd-cafile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-cafile' is present -``` - -#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--encryption-provider-config` parameter to the path of that file: - -``` bash ---encryption-provider-config= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--encryption-provider-config' is present -``` - -#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure a `EncryptionConfig` file. -In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. - -**Audit Script:** 1.2.34.sh - -``` -#!/bin/bash -e - -check_file=${1} - -grep -q -E 'aescbc|kms|secretbox' ${check_file} -if [ $? -eq 0 ]; then - echo "--pass" - exit 0 -else - echo "fail: encryption provider found in ${check_file}" - exit 1 -fi -``` - -**Audit Execution:** - -``` -./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 1.3 Controller Manager - -#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, -for example: - -``` bash ---terminated-pod-gc-threshold=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--terminated-pod-gc-threshold' is present -``` - -#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. - -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' is present OR '--bind-address' is not present -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. - -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. 
- -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. - -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. 
-For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the file permissions of the - -``` bash ---client-ca-file chmod 644 -``` - -**Audit:** - -``` -stat -c %a /etc/kubernetes/ssl/kube-ca.pem -``` - -**Expected result**: - -``` -'644' is equal to '644' OR '640' is present OR '600' is present -``` - -#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the following command to modify the ownership of the `--client-ca-file`. - -``` bash -chown root:root -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' -``` - -**Expected result**: - -``` -'root:root' is equal to 'root:root' -``` - -#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -### 4.2 Kubelet - -#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to -`false`. -If using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---anonymous-auth=false -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. 
If -using executable arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---authorization-mode=Webhook -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. 
- -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. 
-Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. 
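For illustration only (this is not part of the remediation text or the audit below), a minimal `PodSecurityPolicy` sketch that satisfies the fields checked in 5.2.2 through 5.2.5 could look like the following; everything apart from the `host*` and `allowPrivilegeEscalation` fields is an assumption and should be adapted to your workloads:

``` yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-example
spec:
  privileged: false
  allowPrivilegeEscalation: false
  hostPID: false
  hostIPC: false
  hostNetwork: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
```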
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? -gt 0 ]]; then - echo "fail: kubectl failed" - exit 1 -fi - -default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) - -echo "--count=${default_resources}" -``` - -**Audit Execution:** - -``` -./5.6.4.sh -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md b/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md deleted file mode 100644 index 31f5017aca..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/rancher-2.4/hardening-2.4/_index.md +++ /dev/null @@ -1,723 +0,0 @@ ---- -title: Hardening Guide v2.4 -weight: 99 -aliases: - - /rancher/v2.0-v2.4/en/security/hardening-2.4 - - /rancher/v2.x/en/security/rancher-2.4/hardening-2.4/ ---- - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. 
- -This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version -------------------------|----------------|-----------------------|------------------ -Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 - - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Hardening_Guide.pdf) - -### Overview - -This document provides prescriptive guidance for hardening a production installation of Rancher v2.4 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.4]({{< baseurl >}}/rancher/v2.0-v2.4/en/security/benchmark-2.4/). - -#### Known Issues - -- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. -- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. In addition the default service accounts should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -### Configure Kernel Runtime Parameters - -The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: - -``` -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxbytes=25000000 -``` - -Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### Configure `etcd` user and group -A user account and group for the **etcd** service is required to be setup before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. - -#### create `etcd` user and group -To create the **etcd** group run the following console commands. - -The commands below use `52034` for **uid** and **gid** are for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`. - -``` -groupadd --gid 52034 etcd -useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` - -Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: - -``` yaml -services: - etcd: - gid: 52034 - uid: 52034 -``` - -#### Set `automountServiceAccountToken` to `false` for `default` service accounts -Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. 
The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -For each namespace including **default** and **kube-system** on a standard RKE install the **default** service account must include this value: - -``` -automountServiceAccountToken: false -``` - -Save the following yaml to a file called `account_update.yaml` - -``` yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default -automountServiceAccountToken: false -``` - -Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" -done -``` - -### Ensure that all Namespaces have Network Policies defined - -Running different applications on the same Kubernetes cluster creates a risk of one -compromised application attacking a neighboring application. Network segmentation is -important to ensure that containers can communicate only with those they are supposed -to. A network policy is a specification of how selections of pods are allowed to -communicate with each other and other network endpoints. - -Network Policies are namespace scoped. When a network policy is introduced to a given -namespace, all traffic not allowed by the policy is denied. However, if there are no network -policies in a namespace all traffic will be allowed into and out of the pods in that -namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. -This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. -Additional information about CNI providers can be found -[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) - -Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a -**permissive** example is provide below. If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. 
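
After the script runs, it is worth confirming that every namespace now carries a `NetworkPolicy`. A minimal verification sketch, assuming the same `kubectl` and `jq` tooling the scripts above already rely on:

```
#!/bin/bash -e

# Print each namespace together with its NetworkPolicy count; any namespace
# reporting 0 did not receive the default-allow-all policy.
for namespace in $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'); do
  count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length')
  echo "${namespace}: ${count}"
done
```

This is essentially the same check the CIS 5.3.2 audit script performs, so a clean result here should also satisfy that control.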
- -### Reference Hardened RKE `cluster.yml` configuration -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes - - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - 
name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation) for additional installation and RKE Template details. 
- -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To 
specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
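
How the file is consumed depends on the infrastructure provider. As a purely hypothetical illustration, with the AWS CLI the cloud-config below would be passed as instance user data; the AMI ID, instance type and key name are placeholders for your environment:

```
# Hypothetical example: launch an Ubuntu 18.04 instance with the reference
# cloud-config supplied as user data (values below are placeholders).
aws ec2 run-instances \
  --image-id ami-xxxxxxxx \
  --instance-type t3.large \
  --key-name my-key \
  --user-data file://cloud-config.yaml
```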
- -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md b/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md deleted file mode 100644 index 6cca088dcb..0000000000 --- a/content/rancher/v2.0-v2.4/en/security/security-scan/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Security Scans -weight: 299 -aliases: - - /rancher/v2.x/en/security/security-scan/ ---- - -The documentation about CIS security scans has moved [here.]({{}}/rancher/v2.0-v2.4/en/cis-scans) diff --git a/content/rancher/v2.0-v2.4/en/system-tools/_index.md b/content/rancher/v2.0-v2.4/en/system-tools/_index.md deleted file mode 100644 index b6b97e78ad..0000000000 --- a/content/rancher/v2.0-v2.4/en/system-tools/_index.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: System Tools -weight: 22 ---- - -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) The tasks include: - -* Collect logging and system metrics from nodes. -* Remove Kubernetes resources created by Rancher. - -The following commands are available: - -| Command | Description -|---|--- -| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. -| [stats](#stats) | Stream system metrics from nodes. -| [remove](#remove) | Remove Kubernetes resources created by Rancher. - -# Download System Tools - -You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. - -Operating System | Filename ------------------|----- -MacOS | `system-tools_darwin-amd64` -Linux | `system-tools_linux-amd64` -Windows | `system-tools_windows-amd64.exe` - -After you download the tools, complete the following actions: - -1. Rename the file to `system-tools`. - -1. Give the file executable permissions by running the following command: - - > **Using Windows?** - The file is already an executable, you can skip this step. - - ``` - chmod +x system-tools - ``` - -# Logs - -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). See [Troubleshooting]({{}}//rancher/v2.0-v2.4/en/troubleshooting/) for a list of core Kubernetes cluster components. 
- -System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. - -### Usage - -``` -./system-tools_darwin-amd64 logs --kubeconfig -``` - -The following are the options for the logs command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. -| `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. - -# Stats - -The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/). - -System Tools will deploy a DaemonSet, and run a predefined command based on `sar` (System Activity Report) to show system metrics. - -### Usage - -``` -./system-tools_darwin-amd64 stats --kubeconfig -``` - -The following are the options for the stats command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, logs from all nodes in the cluster will be displayed. -| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the options defaults to `/usr/bin/sar -u -r -F 1 1`. - -# Remove - ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.0-v2.4/en/backups/backups) before executing the command. - -When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: - -- The Rancher deployment namespace (`cattle-system` by default). -- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates as of v2.1.0. -- Labels, annotations, and finalizers. -- Rancher Deployment. -- Machines, clusters, projects, and user custom resource deployments (CRDs). -- All resources create under the `management.cattle.io` API Group. -- All CRDs created by Rancher v2.x. - ->**Using 2.0.8 or Earlier?** -> ->These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. - -### Usage - -When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. - ->**Warning:** This command will remove data from your etcd nodes. 
Make sure you have created a [backup of etcd]({{}}/rancher/v2.0-v2.4/en/backups/backups) before executing the command. - -``` -./system-tools remove --kubeconfig --namespace -``` - -The following are the options for the `remove` command: - -| Option | Description -| ---------------------------------------------- | ------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file -| `--namespace , -n cattle-system` | Rancher 2.x deployment namespace (``). If no namespace is defined, the options defaults to `cattle-system`. -| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompt. diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/_index.md deleted file mode 100644 index fca31a2b07..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Troubleshooting -weight: 26 ---- - -This section contains information to help you troubleshoot issues when using Rancher. - -- [Kubernetes components]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/) - - If you need help troubleshooting core Kubernetes cluster components like: - * `etcd` - * `kube-apiserver` - * `kube-controller-manager` - * `kube-scheduler` - * `kubelet` - * `kube-proxy` - * `nginx-proxy` - -- [Kubernetes resources]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/) - - Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. - -- [Networking]({{}}/rancher/v2.0-v2.4/en/troubleshooting/networking/) - - Steps to troubleshoot networking issues can be found here. - -- [DNS]({{}}/rancher/v2.0-v2.4/en/troubleshooting/dns/) - - When you experience name resolution issues in your cluster. - -- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/rancherha/) - - If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) - -- [Imported clusters]({{}}/rancher/v2.0-v2.4/en/troubleshooting/imported-clusters/) - - If you experience issues when [Importing Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/imported-clusters/) - -- [Logging]({{}}/rancher/v2.0-v2.4/en/troubleshooting/logging/) - - Read more about what log levels can be configured and how to configure a log level. - diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/dns/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/dns/_index.md deleted file mode 100644 index 169b5d8410..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/dns/_index.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: DNS -weight: 103 ---- - -The commands/steps listed on this page can be used to check name resolution issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.0-v2.4/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. 
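
Several of the checks below differ between CoreDNS and kube-dns, so it helps to confirm up front which provider the cluster is running. A small sketch, assuming the standard RKE deployment names and the shared `k8s-app=kube-dns` label:

```
# Prints "deployment.apps/coredns" or "deployment.apps/kube-dns" depending on
# which DNS provider is deployed.
kubectl -n kube-system get deployment -l k8s-app=kube-dns -o name
```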
- -### Check if DNS pods are running - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns -``` - -Example output when using CoreDNS: -``` -NAME READY STATUS RESTARTS AGE -coredns-799dffd9c4-6jhlz 1/1 Running 0 76m -``` - -Example output when using kube-dns: -``` -NAME READY STATUS RESTARTS AGE -kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s -``` - -### Check if the DNS service is present with the correct cluster-ip - -``` -kubectl -n kube-system get svc -l k8s-app=kube-dns -``` - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s -``` - -### Check if domain names are resolving - -Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: kubernetes.default -Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local -pod "busybox" deleted -``` - -Check if external names are resolving (in this example, `www.google.com`) - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: www.google.com -Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net -Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net -pod "busybox" deleted -``` - -If you want to check resolving of domain names on all of the hosts, execute the following steps: - -1. Save the following file as `ds-dnstest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: dnstest - spec: - selector: - matchLabels: - name: dnstest - template: - metadata: - labels: - name: dnstest - spec: - tolerations: - - operator: Exists - containers: - - image: busybox:1.28 - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl create -f ds-dnstest.yml` -3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. -4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). - - ``` - export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start DNS resolve test - => End DNS resolve test - ``` - -If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. - -Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. 
- -``` -=> Start DNS resolve test -command terminated with exit code 1 -209.97.182.150 cannot resolve www.google.com -=> End DNS resolve test -``` - -Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. - -### CoreDNS specific - -#### Check CoreDNS logging - -``` -kubectl -n kube-system logs -l k8s-app=kube-dns -``` - -#### Check configuration - -CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. - -``` -kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} -``` - -#### Check upstream nameservers in resolv.conf - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. - -``` -kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' -``` - -#### Enable query logging - -Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: - -``` -kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - -``` - -All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). - -### kube-dns specific - -#### Check upstream nameservers in kubedns container - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). - -Use the following command to check the upstream nameservers used by the kubedns container: - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done -``` - -Example output: -``` -Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x -nameserver 1.1.1.1 -nameserver 8.8.4.4 -``` - -If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: - -* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. 
-* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): - -``` -services: - kubelet: - extra_args: - resolv-conf: "/run/resolvconf/resolv.conf" -``` - -> **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. - -See [Editing Cluster as YAML]({{}}/rancher/v2.0-v2.4/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: - -``` -kubectl delete pods -n kube-system -l k8s-app=kube-dns -pod "kube-dns-5fd74c7488-6pwsf" deleted -``` - -Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). - -If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: - -``` -kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' -``` - -Example output: -``` -upstreamNameservers:["1.1.1.1"] -``` diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/_index.md deleted file mode 100644 index 5754da979d..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Kubernetes Components -weight: 100 ---- - -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. - -This section includes troubleshooting tips in the following categories: - -- [Troubleshooting etcd Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/etcd) -- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/controlplane) -- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/nginx-proxy) -- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/worker-and-generic) - -# Kubernetes Component Diagram - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/controlplane/_index.md deleted file mode 100644 index 3a3ca045c8..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/controlplane/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Troubleshooting Controlplane Nodes -weight: 2 ---- - -This section applies to nodes with the `controlplane` role. - -# Check if the Controlplane Containers are Running - -There are three specific containers launched on nodes with the `controlplane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -# Controlplane Container Logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/_index.md deleted file mode 100644 index dc50f14d30..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-resources/_index.md +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: Kubernetes resources -weight: 101 ---- - -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/) clusters. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
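
A quick sanity check that the kubeconfig points at the intended cluster before running any of the commands below (the filename is the Rancher HA example used above and may differ in your setup):

```
export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml
# Should print the API server endpoint of the cluster you expect to inspect.
kubectl cluster-info
```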
- -- [Nodes](#nodes) - - [Get nodes](#get-nodes) - - [Get node conditions](#get-node-conditions) -- [Kubernetes leader election](#kubernetes-leader-election) - - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) - - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) -- [Ingress controller](#ingress-controller) - - [Pod details](#pod-details) - - [Pod container logs](#pod-container-logs) - - [Namespace events](#namespace-events) - - [Debug logging](#debug-logging) - - [Check configuration](#check-configuration) -- [Rancher agents](#rancher-agents) - - [cattle-node-agent](#cattle-node-agent) - - [cattle-cluster-agent](#cattle-cluster-agent) -- [Jobs and pods](#jobs-and-pods) - - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) - - [Describe pod](#describe-pod) - - [Pod container logs](#pod-container-logs) - - [Describe job](#describe-job) - - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) - - [Evicted pods](#evicted-pods) - - [Job does not complete](#job-does-not-complete) - -# Nodes - -### Get nodes - -Run the command below and check the following: - -- All nodes in your cluster should be listed, make sure there is not one missing. -- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) -- Check if all nodes report the correct version. -- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) - - -``` -kubectl get nodes -o wide -``` - -Example output: - -``` -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -``` - -### Get node conditions - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' -``` - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' -``` - -Example output: - -``` -worker-0: DiskPressure:True -``` - -# Kubernetes leader election - -### Kubernetes Controller Manager leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). 
- -``` -kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -### Kubernetes Scheduler leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). - -``` -kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -# Ingress Controller - -The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. - -Check if the pods are running on all nodes: - -``` -kubectl -n ingress-nginx get pods -o wide -``` - -Example output: - -``` -kubectl -n ingress-nginx get pods -o wide -NAME READY STATUS RESTARTS AGE IP NODE -default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 -nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 -nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. - -### Pod details - -``` -kubectl -n ingress-nginx describe pods -l app=ingress-nginx -``` - -### Pod container logs - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Namespace events - -``` -kubectl -n ingress-nginx get events -``` - -### Debug logging - -To enable debug logging: - -``` -kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' -``` - -### Check configuration - -Retrieve generated configuration in each pod: - -``` -kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done -``` - -# Rancher agents - -Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. 
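
Both agents run in the `cattle-system` namespace. As a quick overview before drilling into each agent below, a single query can list all agent pods at once (this assumes the default `app=cattle-agent` and `app=cattle-cluster-agent` labels shown in the examples that follow):

```
# Lists cattle-node-agent and cattle-cluster-agent pods in one call using a
# set-based label selector.
kubectl -n cattle-system get pods -l 'app in (cattle-agent,cattle-cluster-agent)' -o wide
```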
- -#### cattle-node-agent - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` - -# Jobs and Pods - -### Check that pods or jobs have status **Running**/**Completed** - -To check, run the command: - -``` -kubectl get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -### Describe pod - -``` -kubectl describe pod POD_NAME -n NAMESPACE -``` - -### Pod container logs - -``` -kubectl logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -### Describe job - -``` -kubectl describe job JOB_NAME -n NAMESPACE -``` - -### Logs from the containers of pods of the job - -``` -kubectl logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Evicted pods - -Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). 
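
Before listing the evicted pods, it can help to see which node-level signal triggered the evictions. A minimal sketch, reusing the node conditions query from the [Get node conditions](#get-node-conditions) section:

```
# Nodes under MemoryPressure, DiskPressure or PIDPressure are the usual source
# of evictions; this lists pressure-type conditions that are currently True.
kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}'
```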
- -Retrieve a list of evicted pods (podname and namespace): - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' -``` - -To delete all evicted pods: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done -``` - -Retrieve a list of evicted pods, scheduled node and the reason: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done -``` - -### Job does not complete - -If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.]({{}}/rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/#excluding-workloads-from-being-injected-with-the-istio-sidecar) - -Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/networking/_index.md b/content/rancher/v2.0-v2.4/en/troubleshooting/networking/_index.md deleted file mode 100644 index 99d67e8846..0000000000 --- a/content/rancher/v2.0-v2.4/en/troubleshooting/networking/_index.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Networking -weight: 102 ---- - -The commands/steps listed on this page can be used to check networking related issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. 
This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. - -> **Note:** This container [does not support ARM nodes](https://github.com/leodotcloud/swiss-army-knife/issues/18), such as a Raspberry Pi. This will be seen in the pod logs as `exec user process caused: exec format error`. - -1. Save the following file as `overlaytest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: overlaytest - spec: - selector: - matchLabels: - name: overlaytest - template: - metadata: - labels: - name: overlaytest - spec: - tolerations: - - operator: Exists - containers: - - image: rancherlabs/swiss-army-knife - imagePullPolicy: Always - name: overlaytest - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - - ``` - -2. Launch it using `kubectl create -f overlaytest.yml` -3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. -4. Run the following script, from the same location. It will have each `overlaytest` container on every host ping each other: - ``` - #!/bin/bash - echo "=> Start network overlay test" - kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read spod shost - do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read tip thost - do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" - RC=$? - if [ $RC -ne 0 ] - then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost - else echo $shost can reach $thost - fi - done - done - echo "=> End network overlay test" - ``` - -5. When this command has finished running, it will output the state of each route: - - ``` - => Start network overlay test - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 - command terminated with exit code 1 - FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 - cp1 can reach cp1 - cp1 can reach wk1 - command terminated with exit code 1 - FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 - wk1 can reach cp1 - wk1 can reach wk1 - => End network overlay test - ``` - If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened for `wk2`. -6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. 
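
If one node keeps failing the test, the CNI pod on that node is usually the quickest place to look for a cause. A hedged sketch, assuming the Canal CNI as deployed by RKE (DaemonSet pods labeled `k8s-app=canal` with flannel running in the `kube-flannel` container); the pod name is a placeholder:

```
# Find the canal pod scheduled on the failing node, then inspect its flannel logs.
kubectl -n kube-system get pods -l k8s-app=canal -o wide
kubectl -n kube-system logs <canal-pod-on-failing-node> -c kube-flannel --tail=50
```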
- - -### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices - -When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: - -* `websocket: bad handshake` -* `Failed to connect to proxy` -* `read tcp: i/o timeout` - -See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. - -### Resolved issues - -#### Overlay network broken when using Canal/Flannel due to missing node annotations - -| | | -|------------|------------| -| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | -| Resolved in | v2.1.2 | - -To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): - -``` -kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' -``` - -If there is no output, the cluster is not affected. - -#### System namespace pods network connectivity broken - -> **Note:** This applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. - -| | | -|------------|------------| -| GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | - -If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration]({{}}/rancher/v2.0-v2.4/en/upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: - -- NGINX ingress controller showing `504 Gateway Time-out` when accessed. -- NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. diff --git a/content/rancher/v2.0-v2.4/en/user-settings/_index.md b/content/rancher/v2.0-v2.4/en/user-settings/_index.md deleted file mode 100644 index 0e15e7008b..0000000000 --- a/content/rancher/v2.0-v2.4/en/user-settings/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: User Settings -weight: 23 -aliases: - - /rancher/v2.0-v2.4/en/tasks/user-settings/ ---- - -Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. - -![User Settings Menu]({{}}/img/rancher/user-settings.png) - -The available user settings are: - -- [API & Keys]({{}}/rancher/v2.0-v2.4/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. 
-- [Cloud Credentials]({{}}/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). Note: Available as of v2.2.0. -- [Node Templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters). -- [Preferences]({{}}/rancher/v2.0-v2.4/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. -- Log Out: Ends your user session. diff --git a/content/rancher/v2.0-v2.4/en/user-settings/api-keys/_index.md b/content/rancher/v2.0-v2.4/en/user-settings/api-keys/_index.md deleted file mode 100644 index e3c72a54a8..0000000000 --- a/content/rancher/v2.0-v2.4/en/user-settings/api-keys/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: API Keys -weight: 7005 -aliases: - - /rancher/v2.0-v2.4/en/concepts/api-keys/ - - /rancher/v2.0-v2.4/en/tasks/user-settings/api-keys/ ---- - -## API Keys and User Authentication - -If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. - -An API key is also required for using Rancher CLI. - -API Keys are composed of four components: - -- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. -- **Access Key:** The token's username. -- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. -- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. - -## Creating an API Key - -1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. - -2. Click **Add Key**. - -3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. - - The API key won't be valid after expiration. Shorter expiration periods are more secure. - - _Available as of v2.4.6_ - Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. - - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.0-v2.4/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. - -4. Click **Create**. - - **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. - - Use the **Bearer Token** to authenticate with Rancher CLI. - -5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. - -## What's Next? 
- -- Enter your API key information into the application that will send requests to the Rancher API. -- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli). - -## Deleting API Keys - -If you need to revoke an API key, delete it. You should delete API keys: - -- That may have been compromised. -- That have expired. - -To delete an API, select the stale key and click **Delete**. diff --git a/content/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/_index.md b/content/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/_index.md deleted file mode 100644 index 3040c1e825..0000000000 --- a/content/rancher/v2.0-v2.4/en/user-settings/cloud-credentials/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Managing Cloud Credentials -weight: 7011 ---- - -_Available as of v2.2.0_ - -When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. - -Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. - -Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers, which are not using them yet. These node drivers will not use cloud credentials. - -You can create cloud credentials in two contexts: - -- [During creation of a node template]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. -- In the **User Settings** - -All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. - -## Creating a Cloud Credential from User Settings - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Click **Add Cloud Credential**. -1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) in Rancher. -1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. -1. Click **Create**. - -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). - -## Updating a Cloud Credential - -When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Choose the cloud credential you want to edit and click the **⋮ > Edit**. -1. Update the credential information and click **Save**. 
- -**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). - -## Deleting a Cloud Credential - -In order to delete cloud credentials, there must not be any node template associated with it. If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.0-v2.4/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. You can either individually delete a cloud credential or bulk delete. - - - To individually delete one, choose the cloud credential you want to edit and click the **⋮ > Delete**. - - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. -1. Confirm that you want to delete these cloud credentials. diff --git a/content/rancher/v2.0-v2.4/en/user-settings/node-templates/_index.md b/content/rancher/v2.0-v2.4/en/user-settings/node-templates/_index.md deleted file mode 100644 index abda0996ab..0000000000 --- a/content/rancher/v2.0-v2.4/en/user-settings/node-templates/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Managing Node Templates -weight: 7010 ---- - -When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: - -- While [provisioning a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). -- At any time, from your [user settings](#creating-a-node-template-from-user-settings). - -When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer user from your user settings. - -## Creating a Node Template from User Settings - -1. From your user settings, select **User Avatar > Node Templates**. -1. Click **Add Template**. -1. Select one of the cloud providers available. Then follow the instructions on screen to configure the template. - -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). - -## Updating a Node Template - -1. From your user settings, select **User Avatar > Node Templates**. -1. Choose the node template that you want to edit and click the **⋮ > Edit**. - - > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.0-v2.4/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -1. 
Edit the required information and click **Save**. - -**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. - -## Cloning Node Templates - -When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Find the template you want to clone. Then select **⋮ > Clone**. -1. Complete the rest of the form. - -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools). - -## Deleting a Node Template - -When you no longer use a node template, you can delete it from your user settings. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md deleted file mode 100644 index 0d26be4f72..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/_index.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Migrating from v1.6 to v2.x -weight: 28 -aliases: - - /rancher/v2.x/en/v1.6-migration/ ---- - -Rancher v2.x has been rearchitected and rewritten with the goal of providing a complete management solution for Kubernetes and Docker. Due to these extensive changes, there is no direct upgrade path from v1.6 to v2.x, but rather a migration of your v1.6 services into v2.x as Kubernetes workloads. In v1.6, the most common orchestration used was Rancher's own engine called Cattle. The following guide explains and educates our Cattle users on running workloads in a Kubernetes environment. - -## Video - -This video demonstrates a complete walk through of migration from Rancher v1.6 to v2.x. - -{{< youtube OIifcqj5Srw >}} - -## Migration Plan - ->**Want to more about Kubernetes before getting started?** Read our [Kubernetes Introduction]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro). - - -- [1. Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started) - - >**Already a Kubernetes user in v1.6?** - > - > _Get Started_ is the only section you need to review for migration to v2.x. You can skip everything else. -- [2. Migrate Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/) -- [3. Expose Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/) -- [4. Configure Health Checks]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps) -- [5. Schedule Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/) -- [6. Service Discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/) -- [7. Load Balancing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/) - - -## Migration Example Files - -Throughout this migration guide, we will reference several example services from Rancher v1.6 that we're migrating to v2.x. These services are: - -- A service named `web`, which runs [Let's Chat](http://sdelements.github.io/lets-chat/), a self-hosted chat for small teams. 
-- A service named `database`, which runs [Mongo DB](https://www.mongodb.com/), an open source document database. -- A service named `webLB`, which runs [HAProxy](http://www.haproxy.org/), an open source load balancer used in Rancher v1.6. - -During migration, we'll export these services from Rancher v1.6. The export generates a unique directory for each Rancher v1.6 environment and stack, and two files are output into each stack's directory: - -- `docker-compose.yml` - - A file that contains standard Docker directives for each service in your stack. We'll be converting these files to Kubernetes manifests that can be read by Rancher v2.x. - -- `rancher-compose.yml` - - A file for Rancher-specific functionality such as health checks and load balancers. These files cannot be read by Rancher v2.x, so don't worry about their contents—we're discarding them and recreating them using the v2.x UI. - - -### [Next: Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md deleted file mode 100644 index 426bf50402..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "6. Service Discovery" -weight: 600 -aliases: - - /rancher/v2.x/en/v1.6-migration/discover-services/ ---- - -Service discovery is one of the core functionalities of any container-based environment. Once you have packaged and launched your application, the next step is making it discoverable to other containers in your environment or the external world. This document will describe how to use the service discovery support provided by Rancher v2.x so that you can find them by name. - -This document will also show you how to link the workloads and services that you migrated into Rancher v2.x. When you parsed your services from v1.6 using migration-tools CLI, it output two files for each service: one deployment manifest and one service manifest. You'll have to link these two files together before the deployment works correctly in v2.x. - -
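In practice, linking the two files means making sure the service manifest's `selector` matches the labels on the pods created by the deployment manifest. A minimal sketch using the `web` example from this guide (the label key, ports, and file contents are assumptions, since the exact output of migration-tools may differ):

```
# web-service.yml (sketch): traffic only reaches the pods if this selector
# matches the labels on the pod template in web-deployment.yml.
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web        # assumed label; use the labels your deployment's pod template actually carries
  ports:
    - port: 8080    # assumed container port for the Let's Chat example
      targetPort: 8080
```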
Resolve the output.txt Link Directive
- -![Resolve Link Directive]({{}}/img/rancher/resolve-links.png) - -## In This Document - - - - -- [Service Discovery: Rancher v1.6 vs. v2.x](#service-discovery-rancher-v1-6-vs-v2-x) -- [Service Discovery Within and Across Namespaces](#service-discovery-within-and-across-namespaces) -- [Container Discovery](#container-discovery) -- [Service Name Alias Creation](#service-name-alias-creation) - - - -## Service Discovery: Rancher v1.6 vs. v2.x - -For Rancher v2.x, we've replaced the Rancher DNS microservice used in v1.6 with native [Kubernetes DNS support](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/), which provides equivalent service discovery for Kubernetes workloads and pods. Former Cattle users can replicate all the service discovery features from Rancher v1.6 in v2.x. There's no loss of functionality. - -Kubernetes schedules a DNS pod and service in the cluster, which is similar to the [Rancher v1.6 DNS microservice]({{}}/rancher/v1.6/en/cattle/internal-dns-service/#internal-dns-service-in-cattle-environments). Kubernetes then configures its kubelets to route all DNS lookups to this DNS service, which is skyDNS, a flavor of the default Kube-DNS implementation. - -The following table displays each service discovery feature available in the two Rancher releases. - -Service Discovery Feature | Rancher v1.6 | Rancher v2.x | Description ---------------------------|--------------|--------------|------------- -[service discovery within and across stack][1] (i.e., clusters) | ✓ | ✓ | All services in the stack are resolvable by `` and by `.` across stacks. -[container discovery][2] | ✓ | ✓ | All containers are resolvable globally by their name. -[service alias name creation][3] | ✓ | ✓ | Adding an alias name to services and linking to other services using aliases. -[discovery of external services][4] | ✓ | ✓ | Pointing to services deployed outside of Rancher using the external IP(s) or a domain name. - -[1]: #service-discovery-within-and-across-stacks -[2]: #container-discovery -[3]: #service-name-alias-creation -[4]: #service-name-alias-creation - -
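To confirm that Kubernetes DNS-based discovery is working in your migrated cluster, you can run a one-off lookup from a temporary pod. This is a hedged sketch rather than part of the official migration steps; `web` and `default` are the example workload and namespace used in this guide:

```
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- \
  nslookup web.default.svc.cluster.local
```

If the lookup returns the service's cluster IP, workloads in other namespaces can reach it by the same fully qualified name.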
- -### Service Discovery Within and Across Namespaces - - -When you create a _new_ workload in v2.x (not migrated, more on that [below](#linking-migrated-workloads-and-services)), Rancher automatically creates a service with an identical name, and then links the service and workload together. If you don't explicitly expose a port, the default port of `42` is used. This practice makes the workload discoverable within and across namespaces by its name. - -### Container Discovery - -Individual pods running in the Kubernetes cluster also get a DNS record assigned, which uses dot notation as well: `..pod.cluster.local`. For example, a pod with an IP of `10.42.2.7` in the namespace `default` with a DNS name of `cluster.local` would have an entry of `10-42-2-7.default.pod.cluster.local`. - -Pods can also be resolved using the `hostname` and `subdomain` fields if set in the pod spec. Details about this resolution is covered in the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). - -### Linking Migrated Workloads and Services - -When you migrate v1.6 services to v2.x, Rancher does not automatically create a Kubernetes service record for each migrated deployment. Instead, you'll have to link the deployment and service together manually, using any of the methods listed below. - -In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/#migration-example-file-output) our [migration example services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) are linked together. - -
Linked Workload and Kubernetes Service
- -![Linked Workload and Kubernetes Service]({{}}/img/rancher/linked-service-workload.png) - - -### Service Name Alias Creation - -Just as you can create an alias for Rancher v1.6 services, you can do the same for Rancher v2.x workloads. Similarly, you can also create DNS records pointing to services running externally, using either their hostname or IP address. These DNS records are Kubernetes service objects. - -Using the v2.x UI, use the context menu to navigate to the `Project` view. Then click **Resources > Workloads > Service Discovery.** (In versions before v2.3.0, click the **Workloads > Service Discovery** tab.) All existing DNS records created for your workloads are listed under each namespace. - -Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods. - -
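Behind the scenes, each record type is a Kubernetes service object. For example, an alias that points to an external hostname corresponds to a service of type `ExternalName`; the sketch below is illustrative only, and the record name and hostname are assumptions:

```
apiVersion: v1
kind: Service
metadata:
  name: external-db              # assumed alias name
  namespace: default
spec:
  type: ExternalName
  externalName: db.example.com   # assumed external hostname
```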
Add Service Discovery Record
-![Add Service Discovery Record]({{}}/img/rancher/add-record.png) - -The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. - -Option | Kubernetes-implemented? | Rancher-implemented? --------|-------------------------|--------------------- -Pointing to an external hostname | ✓ | | -Pointing to a set of pods that match a selector | ✓ | | -Pointing to an external IP address | | ✓ -Pointing to another workload | | ✓ -Create alias for another DNS record | | ✓ - - -### [Next: Load Balancing]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md deleted file mode 100644 index f2bdd08909..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: "3. Expose Your Services" -weight: 400 -aliases: - - /rancher/v2.x/en/v1.6-migration/expose-services/ ---- - -In testing environments, you usually need to route external traffic to your cluster containers by using an unadvertised IP and port number, providing users access to their apps. You can accomplish this goal using port mapping, which exposes a workload (i.e., service) publicly over a specific port, provided you know your node IP address(es). You can either map a port using HostPorts (which exposes a service on a specified port on a single node) or NodePorts (which exposes a service on _all_ nodes on a single port). - -Use this document to correct workloads that list `ports` in `output.txt`. You can correct it by either setting a HostPort or a NodePort. - -
Resolve ports for the web Workload
- -![Resolve Ports]({{}}/img/rancher/resolve-ports.png) - - -## In This Document - - - -- [What's Different About Exposing Services in Rancher v2.x?](#what-s-different-about-exposing-services-in-rancher-v2-x) -- [HostPorts](#hostport) -- [Setting HostPort](#setting-hostport) -- [NodePorts](#nodeport) -- [Setting NodePort](#setting-nodeport) - - - -## What's Different About Exposing Services in Rancher v2.x? - -In Rancher v1.6, we used the term _Port Mapping_ for exposing an IP address and port where your you and your users can access a service. - -In Rancher v2.x, the mechanisms and terms for service exposure have changed and expanded. You now have two port mapping options: _HostPorts_ (which is most synonymous with v1.6 port mapping, allows you to expose your app at a single IP and port) and _NodePorts_ (which allows you to map ports on _all_ of your cluster nodes, not just one). - -Unfortunately, port mapping cannot be parsed by the migration-tools CLI. If the services you're migrating from v1.6 to v2.x have port mappings set, you'll have to either set a [HostPort](#hostport) or [NodePort](#nodeport) as a replacement. - -## HostPort - -A _HostPort_ is a port exposed to the public on a _specific node_ running one or more pod. Traffic to the node and the exposed port (`:`) are routed to the requested container's private port. Using a HostPort for a Kubernetes pod in Rancher v2.x is synonymous with creating a public port mapping for a container in Rancher v1.6. - -In the following diagram, a user is trying to access an instance of Nginx, which is running within a pod on port 80. However, the Nginx deployment is assigned a HostPort of 9890. The user can connect to this pod by browsing to its host IP address, followed by the HostPort in use (9890 in case). - -![HostPort Diagram]({{}}/img/rancher/hostPort.svg) - - -#### HostPort Pros - -- Any port available on the host can be exposed. -- Configuration is simple, and the HostPort is set directly in the Kubernetes pod specifications. Unlike NodePort, no other objects need to be created to expose your app. - -#### HostPort Cons - -- Limits the scheduling options for your pod, as only hosts with vacancies for your chosen port can be used. -- If the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. -- Any two workloads that specify the same HostPort cannot be deployed to the same node. -- If the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods—Kubernetes reschedules them to a different node. - -## Setting HostPort - -You can set a HostPort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a HostPort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to the HostPort exposed on your target node. - -For example, for the web-deployment.yml file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, set the publish the port that the container uses, and then declare a HostPort listening on the port of your choice (`9890`) as shown below. You can then access your workload by clicking the link created in the Rancher UI. - -
Port Mapping: Setting HostPort
- -{{< img "/img/rancher/set-hostport.gif" "Set HostPort">}} - -## NodePort - -A _NodePort_ is a port that's open to the public _on each_ of your cluster nodes. When the NodePort receives a request for any of the cluster hosts' IP address for the set NodePort value, NodePort (which is a Kubernetes service) routes traffic to a specific pod, regardless of what node it's running on. NodePort provides a static endpoint where external requests can reliably reach your pods. - -NodePorts help you circumvent an IP address shortcoming. Although pods can be reached by their IP addresses, they are disposable by nature. Pods are routinely destroyed and recreated, getting a new IP address with each replication. Therefore, IP addresses are not a reliable way to access your pods. NodePorts help you around this issue by providing a static service where they can always be reached. Even if your pods change their IP addresses, external clients dependent on them can continue accessing them without disruption, all without any knowledge of the pod re-creation occurring on the back end. - -In the following diagram, a user is trying to connect to an instance of Nginx running in a Kubernetes cluster managed by Rancher. Although he knows what NodePort Nginx is operating on (30216 in this case), he does not know the IP address of the specific node that the pod is running on. However, with NodePort enabled, he can connect to the pod using the IP address for _any_ node in the cluster. Kubeproxy will forward the request to the correct node and pod. - -![NodePort Diagram]({{}}/img/rancher/nodePort.svg) - -NodePorts are available within your Kubernetes cluster on an internal IP. If you want to expose pods external to the cluster, use NodePorts in conjunction with an external load balancer. Traffic requests from outside your cluster for `:` are directed to the workload. The `` can be the IP address of any node in your Kubernetes cluster. - -#### NodePort Pros - -- Creating a NodePort service provides a static public endpoint to your workload pods. There, even if the pods are destroyed, Kubernetes can deploy the workload anywhere in the cluster without altering the public endpoint. -- The scale of the pods is not limited by the number of nodes in the cluster. NodePort allows decoupling of public access from the number and location of pods. - -#### NodePort Cons - -- When a NodePort is used, that `:` is reserved in your Kubernetes cluster on all nodes, even if the workload is never deployed to the other nodes. -- You can only specify a port from a configurable range (by default, it is `30000-32767`). -- An extra Kubernetes object (a Kubernetes service of type NodePort) is needed to expose your workload. Thus, finding out how your application is exposed is not straightforward. - -## Setting NodePort - -You can set a NodePort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a NodePort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to a NodePort, which you'll be able to access from each cluster node. - -For example, for the `web-deployment.yml` file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, set the publish the port that the container uses, and then declare a NodePort. You can then access your workload by clicking the link created in the Rancher UI. 
- ->**Note:** -> ->- If you set a NodePort without giving it a value, Rancher chooses a port at random from the following range: `30000-32767`. ->- If you manually set a NodePort, you must assign it a value within the `30000-32767` range. - -
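For reference, exposing a workload this way boils down to a Kubernetes service of type `NodePort` that selects the workload's pods. A minimal sketch based on the `web` example (the labels, ports, and chosen NodePort value are assumptions):

```
apiVersion: v1
kind: Service
metadata:
  name: web-nodeport
spec:
  type: NodePort
  selector:
    app: web            # must match the labels on the deployment's pod template
  ports:
    - port: 8080        # service port
      targetPort: 8080  # container port
      nodePort: 30216   # must fall within 30000-32767; omit it to let Kubernetes pick one
```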
Port Mapping: Setting NodePort
- -{{< img "/img/rancher/set-nodeport.gif" "Set NodePort" >}} - -### [Next: Configure Health Checks]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md deleted file mode 100644 index 6a0e7714a9..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/get-started/_index.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "1. Get Started" -weight: 25 -aliases: - - /rancher/v2.x/en/v1.6-migration/get-started/ ---- -Get started with your migration to Rancher v2.x by installing Rancher and configuring your new Rancher environment. - -## Outline - - - -- [A. Install Rancher v2.x](#a-install-rancher-v2-x) -- [B. Configure Authentication](#b-configure-authentication) -- [C. Provision a Cluster and Project](#c-provision-a-cluster-and-project) -- [D. Create Stacks](#d-create-stacks) - - - - -## A. Install Rancher v2.x - -The first step in migrating from v1.6 to v2.x is to install the Rancher v2.x Server side-by-side with your v1.6 Server, as you'll need your old install during the migration process. Due to the architecture changes between v1.6 and v2.x, there is no direct path for upgrade. You'll have to install v2.x independently and then migrate your v1.6 services to v2.x. - -New for v2.x, all communication to Rancher Server is encrypted. The procedures below instruct you not only on installation of Rancher, but also creation and installation of these certificates. - -Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements]({{}}/rancher/v2.0-v2.4/en/installation/requirements/). - -After provisioning your node(s), install Rancher: - -- [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/single-node) - - For development environments, Rancher can be installed on a single node using Docker. This installation procedure deploys a single Rancher container to your host. - -- [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) - - For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. - - >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/) for full requirements. - -## B. Configure Authentication - -After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication). - -
Rancher v2.x Authentication
- -![Rancher v2.x Authentication]({{}}/img/rancher/auth-providers.svg) - -### Local Users - -Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts]({{}}/rancher/v2.0-v2.4/en/admin-settings/authentication/) and assign them access rights. - -As a best practice, you should use a hybrid of external _and_ local authentication. This practice provides access to Rancher should your external authentication experience an interruption, as you can still log in using a local user account. Set up a few local accounts as administrative users of Rancher. - - -### SAML Authentication Providers - -In Rancher v1.6, we encouraged our SAML users to use Shibboleth, as it was the only SAML authentication option we offered. However, to better support their minor differences, we've added more fully tested SAML providers for v2.x: Ping Identity, Microsoft ADFS, and FreeIPA. - -## C. Provision a Cluster and Project - -Begin work in Rancher v2.x by using it to provision a new Kubernetes cluster, which is similar to an environment in v1.6. This cluster will host your application deployments. - -A cluster and project in combined together in Rancher v2.x is equivalent to a v1.6 environment. A _cluster_ is the compute boundary (i.e., your hosts) and a _project_ is an administrative boundary (i.e., a grouping of namespaces used to assign access rights to users). - -There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/). - -### Clusters - -In Rancher v1.6, compute nodes were added to an _environment_. Rancher v2.x eschews the term _environment_ for _cluster_, as Kubernetes uses this term for a team of computers instead of _environment_. - -Rancher v2.x lets you launch a Kubernetes cluster anywhere. Host your cluster using: - -- A [hosted Kubernetes provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/). -- A [pool of nodes from an infrastructure provider]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/). Rancher launches Kubernetes on the nodes. -- Any [custom node(s)]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/custom-nodes/). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. - -### Projects - -Additionally, Rancher v2.x introduces [projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. - -When you create a cluster, two projects are automatically created: - -- The `System` project, which includes system namespaces where important Kubernetes resources are running (like ingress controllers and cluster dns services) -- The `Default` project. 
- -However, for production environments, we recommend [creating your own project]({{}}/rancher/v2.0-v2.4/en/cluster-admin/projects-and-namespaces/#creating-projects) and giving it a descriptive name. - -After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. - -## D. Create Stacks - -In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. In v2.x, you need to [create namespaces]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/), which are the v2.x equivalent of stacks, for the same purpose. - -In Rancher v2.x, namespaces are child objects to projects. When you create a project, a `default` namespace is added to the project, but you can create your own to parallel your stacks from v1.6. - -During migration, if you don't explicitly define which namespace a service should be deployed to, it's deployed to the `default` namespace. - -Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services) soon). - - -### [Next: Migrate Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md deleted file mode 100644 index bf88c6a2b9..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/kub-intro/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Kubernetes Introduction -weight: 1 -aliases: - - /rancher/v2.x/en/v1.6-migration/kub-intro/ ---- - -Rancher v2.x is built on the [Kubernetes](https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational) container orchestrator. This shift in underlying technology for v2.x is a large departure from v1.6, which supported several popular container orchestrators. Since Rancher is now based entirely on Kubernetes, it's helpful to learn the Kubernetes basics. - -The following table introduces and defines some key Kubernetes concepts. - -| **Concept** | **Definition** | -| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Cluster | A collection of machines that run containerized applications managed by Kubernetes. | -| Namespace | A virtual cluster, multiple of which can be supported by a single physical cluster. | -| Node | One of the physical or virtual machines that make up a cluster. | -| Pod | The smallest and simplest Kubernetes object. A pod represents a set of running [containers](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/#why-containers) on your cluster. | -| Deployment | An API object that manages a replicated application. | -| Workload | Workloads are objects that set deployment rules for pods. | - - -## Migration Cheatsheet - -Because Rancher v1.6 defaulted to our Cattle container orchestrator, it primarily used terminology related to Cattle. 
However, because Rancher v2.x uses Kubernetes, it aligns with the Kubernetes naming standard. This shift could be confusing for people unfamiliar with Kubernetes, so we've created a table that maps terms commonly used in Rancher v1.6 to their equivalents in Rancher v2.x. - -| **Rancher v1.6** | **Rancher v2.x** | -| --- | --- | -| Container | Pod | -| Services | Workload | -| Load Balancer | Ingress | -| Stack | Namespace | -| Environment | Project (Administration)/Cluster (Compute) | -| Host | Node | -| Catalog | Helm | -| Port Mapping | HostPort (Single Node)/NodePort (All Nodes) | - -
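If you want to see several of these v2.x objects in a running cluster, the following `kubectl` commands map roughly onto the v1.6 terms above (a sketch; it assumes `kubectl` is already configured for your cluster):

```
kubectl get nodes                           # v1.6 hosts
kubectl get namespaces                      # v1.6 stacks
kubectl get deployments --all-namespaces    # v1.6 services (workloads)
kubectl get pods --all-namespaces           # v1.6 containers
kubectl get ingress --all-namespaces        # v1.6 load balancers
```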
-More detailed information on Kubernetes concepts can be found in the -[Kubernetes Concepts Documentation](https://kubernetes.io/docs/concepts/). - -### [Next: Get Started]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/get-started/) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md deleted file mode 100644 index e740ca3d4a..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/load-balancing/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: "7. Load Balancing" -weight: 700 -aliases: - - /rancher/v2.x/en/v1.6-migration/load-balancing/ ---- - -If your applications are public-facing and consume significant traffic, you should place a load balancer in front of your cluster so that users can always access their apps without service interruption. Typically, you can fulfill a high volume of service requests by [horizontally scaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) your deployment, which spins up additional application containers as traffic ramps up. However, this technique requires routing that distributes traffic across your nodes efficiently. In cases where you need to accommodate public traffic that scales up and down, you'll need a load balancer. - -As outlined in [its documentation]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/), Rancher v1.6 provided rich support for load balancing using its own microservice powered by HAProxy, which supports HTTP, HTTPS, TCP hostname, and path-based routing. Most of these same features are available in v2.x. However, load balancers that you used with v1.6 cannot be migrated to v2.x. You'll have to manually recreate your v1.6 load balancer in v2.x. - -If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x. - -
output.txt Load Balancer Directive
- -![Resolve Load Balancer Directive]({{}}/img/rancher/resolve-load-balancer.png) - -## In This Document - - - -- [Load Balancing Protocol Options](#load-balancing-protocol-options) -- [Load Balancer Deployment](#load-balancer-deployment) -- [Load Balancing Architecture](#load-balancing-architecture) -- [Ingress Caveats](#ingress-caveats) -- [Deploying Ingress](#deploying-ingress) -- [Rancher v2.x Load Balancing Limitations](#rancher-v2-x-load-balancing-limitations) - - - -## Load Balancing Protocol Options - -By default, Rancher v2.x replaces the v1.6 load balancer microservice with the native [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), which is backed by NGINX Ingress Controller for layer 7 load balancing. By default, Kubernetes Ingress only supports the HTTP and HTTPS protocols, not TCP. Load balancing is limited to these two protocols when using Ingress. - -> **TCP Required?** See [TCP Load Balancing Options](#tcp-load-balancing-options) - - -## Load Balancer Deployment - -In Rancher v1.6, you could add port/service rules for configuring your HA proxy to load balance for target services. You could also configure the hostname/path-based routing rules. - -Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. - -By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only. - -RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. - -For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -## Load Balancing Architecture - -Deployment of Ingress Controller in v2.x as a DaemonSet brings some architectural changes that v1.6 users should know about. - -In Rancher v1.6 you could deploy a scalable load balancer service within your stack. If you had four hosts in your Cattle environment, you could deploy one load balancer service with a scale of two and point to your application by appending port 80 to your two host IP Addresses. You could also launch another load balancer on the remaining two hosts to balance a different service again using port 80 because your load balancer is using different host IP Addresses). - - - -
Rancher v1.6 Load Balancing Architecture
- -![Rancher v1.6 Load Balancing]({{}}/img/rancher/cattle-load-balancer.svg) - -The Rancher v2.x Ingress Controller is a DaemonSet; it is deployed globally on all schedulable nodes to serve your entire Kubernetes cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads. - -
Rancher v2.x Load Balancing Architecture
- -![Rancher v2.x Load Balancing]({{}}/img/rancher/kubernetes-load-balancer.svg) - -## Ingress Caveats - -Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balancing, you must use unique host names and paths when configuring your workloads. This limitation derives from: - -- Ingress confinement to ports 80 and 443 (i.e, the ports HTTP[S] uses for routing). -- The load balancer and the Ingress Controller is launched globally for the cluster as a DaemonSet. - -> **TCP Required?** Rancher v2.x still supports TCP. See [TCP Load Balancing Options](#tcp-load-balancing-options) for workarounds. - -## Deploying Ingress - -You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Deploy**. During deployment, you can choose a target project or namespace. - ->**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. -> - -![Workload Scale]({{}}/img/rancher/workload-scale.png) - -For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. This GIF below depicts how to add Ingress to one of your projects. - -
Browsing to Load Balancer Tab and Adding Ingress
- -![Adding Ingress]({{}}/img/rancher/add-ingress.gif) - -Similar to the service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. The sections below demonstrate how to create Ingress rules. - -### Configuring Host- and Path-Based Routing - -Using Rancher v2.x, you can add Ingress rules that are based on host names or a URL path. Based on the rules you create, your NGINX Ingress Controller routes traffic to multiple target workloads or Kubernetes services. - -For example, let's say you have two workloads deployed to a single namespace. You can add an Ingress to route traffic to these two workloads using the same hostname but different paths, as depicted in the image below. URL requests to `foo.com/name.html` will direct users to the `web` workload, and URL requests to `foo.com/login` will direct users to the `chat` workload. - -
Ingress: Path-Based Routing Configuration
- -![Ingress: Path-Based Routing Configuration]({{}}/img/rancher/add-ingress-form.png) - -Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address. - -
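For reference, the host- and path-based rules described above correspond to an Ingress object roughly like the one below. This is a sketch only: it reuses the `foo.com`, `web`, and `chat` names from the example and assumes each workload is backed by a service exposing port 80, so adjust the service names and ports to your own cluster.

```
apiVersion: networking.k8s.io/v1beta1   # use extensions/v1beta1 on older Kubernetes versions
kind: Ingress
metadata:
  name: foo-routing
  namespace: default
spec:
  rules:
    - host: foo.com
      http:
        paths:
          - path: /name.html
            backend:
              serviceName: web    # assumed service name for the web workload
              servicePort: 80     # assumed service port
          - path: /login
            backend:
              serviceName: chat
              servicePort: 80
```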
Workload Links
- -![Load Balancer Links to Workloads]({{}}/img/rancher/load-balancer-links.png) - -The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use `kubectl` command below to see the Ingress address assigned by the controller: - -``` -kubectl get ingress -``` - -### HTTPS/Certificates Option - -Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want to use it, you need to use a valid SSL/TLS certificate. While configuring Ingress rules, use the **SSL/TLS Certificates** section to configure a certificate. - -- We recommend [uploading a certificate]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/certificates/) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. -- If you have configured [NGINX default certificate]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. - -
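Certificates added through the UI are stored as Kubernetes secrets. If you manage certificates outside Rancher, you could also create the TLS secret directly and reference it from your Ingress. A hedged sketch (the namespace, secret name, and file paths are placeholders):

```
kubectl -n default create secret tls foo-com-tls \
  --cert=/path/to/foo.com.crt --key=/path/to/foo.com.key
```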
Load Balancer Configuration: SSL/TLS Certificate Section
- -![SSL/TLS Certificates Section]({{}}/img/rancher/load-balancer-ssl-certs.png) - -### TCP Load Balancing Options - -#### Layer-4 Load Balancer - -For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. - -For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`. - -
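The object Rancher creates here is a standard Kubernetes service of type `LoadBalancer`. A rough sketch of what it looks like for the `myapp` example (the selector label and ports are assumptions; Rancher generates its own workload selector labels, and the cloud provider fills in the external address):

```
apiVersion: v1
kind: Service
metadata:
  name: myapp-loadbalancer
spec:
  type: LoadBalancer
  selector:
    app: myapp      # assumed label; match the labels on your workload's pods
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
```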
Workload Deployment: Layer 4 Load Balancer Creation
- -![Deploy Layer-4 Load Balancer]({{}}/img/rancher/deploy-workload-load-balancer.png) - -Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. - -#### NGINX Ingress Controller TCP Support by ConfigMaps - -Although NGINX supports TCP, Kubernetes Ingress itself does not support the TCP protocol. Therefore, out-of-the-box configuration of NGINX Ingress Controller for TCP balancing isn't possible. - -However, there is a workaround to use NGINX's TCP balancing by creating a Kubernetes ConfigMap, as described in the [Ingress GitHub readme](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md). You can create a ConfigMap object that stores pod configuration parameters as key-value pairs, separate from the pod image, as described in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). - -To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. - -![Layer-4 Load Balancer: ConfigMap Workaround]({{}}/img/rancher/layer-4-lb-config-map.png) - -The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. - -## Rancher v2.x Load Balancing Limitations - -Cattle provided feature-rich load balancer support that is [well documented]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/#load-balancers). Some of these features do not have equivalents in Rancher v2.x. This is the list of such features: - -- No support for SNI in current NGINX Ingress Controller. -- TCP load balancing requires a load balancer appliance enabled by cloud provider within the cluster. There is no Ingress support for TCP on Kubernetes. -- Only ports 80 and 443 can be configured for HTTP/HTTPS routing via Ingress. Also Ingress Controller is deployed globally as a DaemonSet and not launched as a scalable service. Also, users cannot assign random external ports to be used for balancing. Therefore, users need to ensure that they configure unique hostname/path combinations to avoid routing conflicts using the same two ports. -- There is no way to specify port rule priority and ordering. -- Rancher v1.6 added support for draining backend connections and specifying a drain timeout. This is not supported in Rancher v2.x. -- There is no support for specifying a custom stickiness policy and a custom load balancer config to be appended to the default config as of now in Rancher v2.x. 
There is some support, however, available in native Kubernetes for customizing the NGINX configuration as noted in the [NGINX Ingress Controller Custom Configuration Documentation](https://kubernetes.github.io/ingress-nginx/examples/customization/custom-configuration/). - -### Finished! diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md deleted file mode 100644 index da5d465c82..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: "4. Configure Health Checks" -weight: 400 -aliases: - - /rancher/v2.x/en/v1.6-migration/monitor-apps/ ---- - -Rancher v1.6 provided TCP and HTTP health checks on your nodes and services using its own health check microservice. These health checks monitored your containers to confirm they're operating as intended. If a container failed a health check, Rancher would destroy the unhealthy container and then replicates a healthy one to replace it. - -For Rancher v2.x, we've replaced the health check microservice, leveraging instead Kubernetes' native health check support. - -Use this document to correct Rancher v2.x workloads and services that list `health_check` in `output.txt`. You can correct them by configuring a liveness probe (i.e., a health check). - -For example, for the image below, we would configure liveness probes for the `web` and `weblb` workloads (i.e., the Kubernetes manifests output by migration-tools CLI). - -
Resolve health_check for the web and webLB Workloads
- -![Resolve health_check]({{}}/img/rancher/resolve-health-checks.png) - -## In This Document - - - -- [Rancher v1.6 Health Checks](#rancher-v1-6-health-checks) -- [Rancher v2.x Health Checks](#rancher-v2-x-health-checks) -- [Configuring Probes in Rancher v2.x](#configuring-probes-in-rancher-v2-x) - - - -## Rancher v1.6 Health Checks - -In Rancher v1.6, you could add health checks to monitor a particular service's operations. These checks were performed by the Rancher health check microservice, which is launched in a container on a node separate from the node hosting the monitored service (however, Rancher v1.6.20 and later also runs a local health check container as a redundancy for the primary health check container on another node). Health check settings were stored in the `rancher-compose.yml` file for your stack. - -The health check microservice features two types of health checks, which have a variety of options for timeout, check interval, etc.: - -- **TCP health checks**: - - These health checks check if a TCP connection opens at the specified port for the monitored service. For full details, see the [Rancher v1.6 documentation]({{}}/rancher/v1.6/en/cattle/health-checks/). - -- **HTTP health checks**: - - These health checks monitor HTTP requests to a specified path and check whether the response is expected response (which is configured along with the health check). - -The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. - -![Rancher v1.6 Health Checks]({{}}/img/rancher/healthcheck.svg) - -## Rancher v2.x Health Checks - -In Rancher v2.x, the health check microservice is replaced with Kubernetes's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. - -However, probes in Rancher v2.x have some important differences, which are described below. For full details about probes, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes). - - -### Local Health Checks - -Unlike the Rancher v1.6 health checks performed across hosts, probes in Rancher v2.x occur on _same_ host, performed by the kubelet. - - -### Multiple Probe Types - -Kubernetes includes two different _types_ of probes: liveness checks and readiness checks. - -- **Liveness Check**: - - Checks if the monitored container is running. If the probe reports failure, Kubernetes kills the pod, and then restarts it according to the deployment [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). - -- **Readiness Check**: - - Checks if the container is ready to accept and serve requests. If the probe reports failure, the pod is sequestered from the public until it self heals. - -The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one of the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. 
- -![Rancher v2.x Probes]({{}}/img/rancher/probes.svg) - -## Configuring Probes in Rancher v2.x - -The [migration-tool CLI]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/) cannot parse health checks from Compose files to Kubernetes manifest. Therefore, if want you to add health checks to your Rancher v2.x workloads, you'll have to add them manually. - -Using the Rancher v2.x UI, you can add TCP or HTTP health checks to Kubernetes workloads. By default, Rancher asks you to configure a readiness check for your workloads and applies a liveness check using the same configuration. Optionally, you can define a separate liveness check. - -If the probe fails, the container is restarted per the restartPolicy defined in the workload specs. This setting is equivalent to the strategy parameter for health checks in Rancher v1.6. - -Configure probes by using the **Health Check** section while editing deployments called out in `output.txt`. - -
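When you save a health check in the UI, Rancher adds the corresponding `readinessProbe` and `livenessProbe` directives to the deployment's Kubernetes manifest. For reference, the snippet below is a minimal, hand-written sketch of a TCP check on a deployment (it is not output of migration-tools); the workload name, image, port, and timing values are placeholders, and the comments map each field to its approximate Rancher v1.6 equivalent (see [Health Check Parameter Mappings](#health-check-parameter-mappings)).

```
# Minimal sketch only; all names and values are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: web
        image: nginx:1.21            # placeholder image
        ports:
        - containerPort: 80
        readinessProbe:              # failure marks the pod Unready
          tcpSocket:
            port: 80                 # v1.6 `port`
          initialDelaySeconds: 10    # v1.6 `initializing_timeout`
          periodSeconds: 5           # v1.6 `interval`
          timeoutSeconds: 2          # v1.6 `response_timeout`
        livenessProbe:               # failure restarts the container per the restart policy
          tcpSocket:
            port: 80
          initialDelaySeconds: 10
          periodSeconds: 5
          timeoutSeconds: 2
```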
Edit Deployment: Health Check Section
- -![Health Check Section]({{}}/img/rancher/health-check-section.png) - -### Configuring Checks - -While you create a workload using Rancher v2.x, we recommend configuring a check that monitors the health of the deployment's pods. - -{{% tabs %}} - -{{% tab "TCP Check" %}} - -TCP checks monitor your deployment's health by attempting to open a connection to the pod over a specified port. If the probe can open the port, it's considered healthy. Failure to open it is considered unhealthy, which notifies Kubernetes that it should kill the pod and then replace it according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). - -You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). - -![TCP Check]({{}}/img/rancher/readiness-check-tcp.png) - -When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. - - - -{{% /tab %}} - -{{% tab "HTTP Check" %}} - -HTTP checks monitor your deployment's health by sending an HTTP GET request to a specific URL path that you define. If the pod responds with a message range of `200`-`400`, the health check is considered successful. If the pod replies with any other value, the check is considered unsuccessful, so Kubernetes kills and replaces the pod according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). - -You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). - -![HTTP Check]({{}}/img/rancher/readiness-check-http.png) - -When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. - -{{% /tab %}} - -{{% /tabs %}} - -### Configuring Separate Liveness Checks - -While configuring a readiness check for either the TCP or HTTP protocol, you can configure a separate liveness check by clicking the **Define a separate liveness check**. For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). - -![Separate Liveness Check]({{}}/img/rancher/separate-check.png) - -### Additional Probing Options - -Rancher v2.x, like v1.6, lets you perform health checks using the TCP and HTTP protocols. 
However, Rancher v2.x also lets you check the health of a pod by running a command inside of it. If the container exits with a code of `0` after running the command, the pod is considered healthy. - -You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). - -![Healthcheck Execute Command]({{}}/img/rancher/healthcheck-cmd-exec.png) - -#### Health Check Parameter Mappings - -While configuring readiness checks and liveness checks, Rancher prompts you to fill in various timeout and threshold values that determine whether the probe is a success or failure. The reference table below shows you the equivalent health check values from Rancher v1.6. - -Rancher v1.6 Compose Parameter | Rancher v2.x Kubernetes Parameter --------------------------------|----------------------------------- -`port` | `tcpSocket.port` -`response_timeout` | `timeoutSeconds` -`healthy_threshold` | `failureThreshold` -`unhealthy_threshold` | `successThreshold` -`interval` | `periodSeconds` -`initializing_timeout` | `initialDelaySeconds` -`strategy` | `restartPolicy` - -### [Next: Schedule Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md deleted file mode 100644 index c540b32b43..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/_index.md +++ /dev/null @@ -1,313 +0,0 @@ ---- -title: 2. Migrate Your Services -weight: 100 -aliases: - - /rancher/v2.x/en/v1.6-migration/run-migration-tool/ ---- - -Although your services from v1.6 won't work in Rancher v2.x by default, that doesn't mean you have to start again from square one, manually rebuilding your applications in v2.x. To help with migration from v1.6 to v2.x, Rancher has developed a migration tool. The migration-tools CLI is a utility that helps you recreate your applications in Rancher v2.x. This tool exports your Rancher v1.6 services as Compose files and converts them to a Kubernetes manifest that Rancher v2.x can consume. - -Additionally, for each Rancher v1.6-specific Compose directive that cannot be consumed by Kubernetes, migration-tools CLI provides instructions on how to manually recreate them in Rancher v2.x. - -This command line interface tool will: - -- Export Compose files (i.e., `docker-compose.yml` and `rancher-compose.yml`) for each stack in your v1.6 Cattle environment. For every stack, files are exported to a unique folder: `//`. - -- Parse Compose files that you’ve exported from your Rancher v1.6 stacks and converts them to Kubernetes manifests that Rancher v2.x can consume. The tool also outputs a list of directives present in the Compose files that cannot be converted automatically to Rancher v2.x. These are directives that you’ll have to manually configure using the Rancher v2.x UI. - -## Outline - - - -- [A. Download the migration-tools CLI](#a-download-the-migration-tools-cli) -- [B. Configure the migration-tools CLI](#b-configure-the-migration-tools-cli) -- [C. Run the migration-tools CLI](#c-run-the-migration-tools-cli) -- [D. Deploy Services Using Rancher CLI](#d-re-deploy-services-as-kubernetes-manifests) -- [What Now?](#what-now) - - - - - -## A. 
Download the migration-tools CLI - -The migration-tools CLI for your platform can be downloaded from our [GitHub releases page](https://github.com/rancher/migration-tools/releases). The tools are available for Linux, Mac, and Windows platforms. - - -## B. Configure the migration-tools CLI - -After you download migration-tools CLI, rename it and make it executable. - -1. Open a terminal window and change to the directory that contains the migration-tool file. - -1. Rename the file to `migration-tools` so that it no longer includes the platform name. - -1. Enter the following command to make `migration-tools` executable: - - ``` - chmod +x migration-tools - ``` - -## C. Run the migration-tools CLI - -Next, use the migration-tools CLI to export all stacks in all of the Cattle environments into Compose files. Then, for stacks that you want to migrate to Rancher v2.x, convert the Compose files into Kubernetes manifest. - ->**Prerequisite:** Create an [Account API Key]({{}}/rancher/v1.6/en/api/v2-beta/api-keys/#account-api-keys) to authenticate with Rancher v1.6 when using the migration-tools CLI. - -1. Export the Docker Compose files for your Cattle environments and stacks from Rancher v1.6. - - In the terminal window, execute the following command, replacing each placeholder with your values. - - ``` - migration-tools export --url http:// --access-key --secret-key --export-dir --all - ``` - - **Step Result:** migration-tools exports Compose files (`docker-compose.yml` and `rancher-compose.yml`) for each stack in the `--export-dir` directory. If you omitted this option, Compose files are output to your current directory. - - A unique directory is created for each environment and stack. For example, if we export each [environment/stack]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) from Rancher v1.6, the following directory structure is created: - - ``` - export/ # migration-tools --export-dir - |--/ # Rancher v1.6 ENVIRONMENT - |--/ # Rancher v1.6 STACK - |--docker-compose.yml # STANDARD DOCKER DIRECTIVES FOR ALL STACK SERVICES - |--rancher-compose.yml # RANCHER-SPECIFIC DIRECTIVES FOR ALL STACK SERVICES - |--README.md # README OF CHANGES FROM v1.6 to v2.x - ``` - - - -1. Convert the exported Compose files to Kubernetes manifest. - - Execute the following command, replacing each placeholder with the absolute path to your Stack's Compose files. If you want to migrate multiple stacks, you'll have to re-run the command for each pair of Compose files that you exported. - - ``` - migration-tools parse --docker-file --rancher-file - ``` - - >**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, migration-tools uses the current working directory to find Compose files. - ->**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/). - -### migration-tools CLI Output - -After you run the migration-tools parse command, the following files are output to your target directory. 
- -| Output | Description | -| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `output.txt` | This file lists how to recreate your Rancher v1.6-specific functionality in Kubernetes. Each listing links to the relevant blog articles on how to implement it in Rancher v2.x. | -| Kubernetes manifest specs | Migration-tools internally invokes [Kompose](https://github.com/kubernetes/kompose) to generate a Kubernetes manifest for each service you're migrating to v2.x. Each YAML spec file is named for the service you're migrating. - -#### Why are There Separate Deployment and Service Manifests? - -To make an application publicly accessible by URL, a Kubernetes service is required in support of the deployment. A Kubernetes service is a REST object that abstracts access to the pods in the workload. In other words, a service provides a static endpoint to the pods by mapping a URL to pod(s) Therefore, even if the pods change IP address, the public endpoint remains unchanged. A service object points to its corresponding deployment (workload) by using selector labels. - -When a you export a service from Rancher v1.6 that exposes public ports, migration-tools CLI parses those ports to a Kubernetes service spec that links to a deployment YAML spec. - -#### Migration Example File Output - -If we parse the two example files from [Migration Example Files]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: - -File | Description ------|------------ -`web-deployment.yaml` | A file containing Kubernetes container specs for a Let's Chat deployment. -`web-service.yaml` | A file containing specs for the Let's Chat service. -`database-deployment.yaml` | A file containing container specs for the MongoDB deployment in support of Let's Chat. -`webLB-deployment.yaml` | A file containing container specs for an HAProxy deployment that's serving as a load balancer.1 -`webLB-service.yaml` | A file containing specs for the HAProxy service.1 - ->1 Because Rancher v2.x uses Ingress for load balancing, we won't be migrating our Rancher v1.6 load balancer to v2.x. - - - -## D. Re-Deploy Services as Kubernetes Manifests - ->**Note:** Although these instructions deploy your v1.6 services in Rancher v2.x, they will not work correctly until you adjust their Kubernetes manifests. - -{{% tabs %}} -{{% tab "Rancher UI" %}} - -You can deploy the Kubernetes manifests created by migration-tools by importing them into Rancher v2.x. - ->**Receiving an `ImportYaml Error`?** -> ->Delete the YAML directive listed in the error message. These are YAML directives from your v1.6 services that Kubernetes can't read. - -
Deploy Services: Import Kubernetes Manifest
- -![Deploy Services]({{}}/img/rancher/deploy-service.gif) - -{{% /tab %}} -{{% tab "Rancher CLI" %}} - - ->**Prerequisite:** [Install Rancher CLI]({{}}/rancher/v2.0-v2.4/en/cli/) for Rancher v2.x. - -Use the following Rancher CLI commands to deploy your application using Rancher v2.x. For each Kubernetes manifest output by migration-tools CLI, enter one of the commands below to import it into Rancher v2.x. - -``` -./rancher kubectl create -f # DEPLOY THE DEPLOYMENT YAML - -./rancher kubectl create -f # DEPLOY THE SERVICE YAML -``` - -{{% /tab %}} -{{% /tabs %}} - -Following importation, you can view your v1.6 services in the v2.x UI as Kubernetes manifests by using the context menu to select ` > ` that contains your services. The imported manifests will display on the **Resources > Workloads** and on the tab at **Resources > Workloads > Service Discovery.** (In Rancher v2.x before v2.3.0, these are on the **Workloads** and **Service Discovery** tabs in the top navigation bar.) - -
Imported Services
- -![Imported Services]({{}}/img/rancher/imported-workloads.png) - -## What Now? - -Although the migration-tool CLI parses your Rancher v1.6 Compose files to Kubernetes manifests, there are discrepancies between v1.6 and v2.x that you must address by manually editing your parsed [Kubernetes manifests](#output). In other words, you need to edit each workload and service imported into Rancher v2.x, as displayed below. - -
Edit Migrated Services
- -![Edit Migrated Workload]({{}}/img/rancher/edit-migration-workload.gif) - -As mentioned in [Migration Tools CLI Output](#migration-tools-cli-output), the `output.txt` files generated during parsing lists the manual steps you must make for each deployment. Review the upcoming topics for more information on manually editing your Kubernetes specs. - -Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, migration-tools CLI output a manifest for each workload that it creates for Kubernetes. For example, our when our [Migration Example Files]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workloads). Each workload features a list of action items to restore operations for the workload in v2.x. - -
output.txt Example
- -![output.txt]({{}}/img/rancher/output-dot-text.png) - -The following table lists possible directives that may appear in `output.txt`, what they mean, and links on how to resolve them. - -Directive | Instructions -----------|-------------- -[ports][4] | Rancher v1.6 _Port Mappings_ cannot be migrated to v2.x. Instead, you must manually declare either a HostPort or NodePort, which are similar to Port Mappings. -[health_check][1] | The Rancher v1.6 health check microservice has been replaced with native Kubernetes health checks, called _probes_. Recreate your v1.6 health checks in v2.0 using probes. -[labels][2] | Rancher v1.6 uses labels to implement a variety of features in v1.6. In v2.x, Kubernetes uses different mechanisms to implement these features. Click through on the links here for instructions on how to address each label.

[io.rancher.container.pull_image][7]: In v1.6, this label instructed deployed containers to pull a new version of the image upon restart. In v2.x, this functionality is replaced by the `imagePullPolicy` directive.

[io.rancher.scheduler.global][8]: In v1.6, this label scheduled a container replica on every cluster host. In v2.x, this functionality is replaced by [Daemon Sets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/).

[io.rancher.scheduler.affinity][9]: In v2.x, affinity is applied in a different way. -[links][3] | During migration, you must create links between your Kubernetes workloads and services for them to function properly in v2.x. -[scale][5] | In v1.6, scale refers to the number of container replicas running on a single node. In v2.x, this feature is replaced by replica sets. -start_on_create | No Kubernetes equivalent. No action is required from you. - -[1]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x -[2]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[3]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services -[4]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services -[5]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node - - - -[7]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-using-labels -[8]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#scheduling-global-services -[9]:{{}}/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/#label-affinity-antiaffinity - -### [Next: Expose Your Services]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/expose-services/) diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md b/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md deleted file mode 100644 index b993d1eec8..0000000000 --- a/content/rancher/v2.0-v2.4/en/v1.6-migration/schedule-workloads/_index.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: "5. Schedule Your Services" -weight: 500 -aliases: - - /rancher/v2.x/en/v1.6-migration/schedule-workloads/ ---- - -In v1.6, objects called _services_ were used to schedule containers to your cluster hosts. Services included the Docker image for an application, along with configuration settings for a desired state. - -In Rancher v2.x, the equivalent object is known as a _workload_. Rancher v2.x retains all scheduling functionality from v1.6, but because of the change from Cattle to Kubernetes as the default container orchestrator, the terminology and mechanisms for scheduling workloads has changed. - -Workload deployment is one of the more important and complex aspects of container orchestration. Deploying pods to available shared cluster resources helps maximize performance under optimum compute resource use. - -You can schedule your migrated v1.6 services while editing a deployment. Schedule services by using **Workload Type** and **Node Scheduling** sections, which are shown below. - -
Editing Workloads: Workload Type and Node Scheduling Sections
- -![Workload Type and Node Scheduling Sections]({{}}/img/rancher/migrate-schedule-workloads.png) - -## In This Document - - - - - -- [What's Different for Scheduling Services?](#whats-different-for-scheduling-services) -- [Node Scheduling Options](#node-scheduling-options) -- [Scheduling Pods to a Specific Node](#scheduling-pods-to-a-specific-node) -- [Scheduling Using Labels](#scheduling-using-labels) -- [Scheduling Pods Using Resource Constraints](#scheduling-pods-using-resource-constraints) -- [Preventing Scheduling Specific Services to Specific Nodes](#preventing-scheduling-specific-services-to-specific-nodes) -- [Scheduling Global Services](#scheduling-global-services) - - - - -## What's Different for Scheduling Services? - - -Rancher v2.x retains _all_ methods available in v1.6 for scheduling your services. However, because the default container orchestration system has changed from Cattle to Kubernetes, the terminology and implementation for each scheduling option has changed. - -In v1.6, you would schedule a service to a host while adding a service to a Stack. In Rancher v2.x., the equivalent action is to schedule a workload for deployment. The following composite image shows a comparison of the UI used for scheduling in Rancher v2.x versus v1.6. - -![Node Scheduling: Rancher v2.x vs v1.6]({{}}/img/rancher/node-scheduling.png) - -## Node Scheduling Options - -Rancher offers a variety of options when scheduling nodes to host workload pods (i.e., scheduling hosts for containers in Rancher v1.6). - -You can choose a scheduling option as you deploy a workload. The term _workload_ is synonymous with adding a service to a Stack in Rancher v1.6). You can deploy a workload by using the context menu to browse to a cluster project (` > > Workloads`). - -The sections that follow provide information on using each scheduling options, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads]({{}}/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads/). - -Option | v1.6 Feature | v2.x Feature --------|------|------ -[Schedule a certain number of pods?](#schedule-a-certain-number-of-pods) | ✓ | ✓ -[Schedule pods to specific node?](#scheduling-pods-to-a-specific-node) | ✓ | ✓ -[Schedule to nodes using labels?](#applying-labels-to-nodes-and-pods) | ✓ | ✓ -[Schedule to nodes using label affinity/anti-affinity rules?](#label-affinity-antiaffinity) | ✓ | ✓ -[Schedule based on resource constraints?](#scheduling-pods-using-resource-constraints) | ✓ | ✓ -[Preventing scheduling specific services to specific hosts?](#preventing-scheduling-specific-services-to-specific-nodes) | ✓ | ✓ -[Schedule services globally?](#scheduling-global-services) | ✓ | ✓ - - -### Schedule a certain number of pods - -In v1.6, you could control the number of container replicas deployed for a service. You can schedule pods the same way in v2.x, but you'll have to set the scale manually while editing a workload. - -![Resolve Scale]({{}}/img/rancher/resolve-scale.png) - -During migration, you can resolve `scale` entries in `output.txt` by setting a value for the **Workload Type** option **Scalable deployment** depicted below. - -
Scalable Deployment Option
- -![Workload Scale]({{}}/img/rancher/workload-type-option.png) - -### Scheduling Pods to a Specific Node - -Just as you could schedule containers to a single host in Rancher v1.6, you can schedule pods to single node in Rancher v2.x - -As you deploy a workload, use the **Node Scheduling** section to choose a node to run your pods on. The workload below is being scheduled to deploy an Nginx image with a scale of two pods on a specific node. - - -
Rancher v2.x: Workload Deployment
- -![Workload Tab and Group by Node Icon]({{}}/img/rancher/schedule-specific-node.png) - -Rancher schedules pods to the node you select if 1) there are compute resource available for the node and 2) you've configured port mapping to use the HostPort option, that there are no port conflicts. - -If you expose the workload using a NodePort that conflicts with another workload, the deployment gets created successfully, but no NodePort service is created. Therefore, the workload isn't exposed outside of the cluster. - -After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. - -![Pods Scheduled to Same Node]({{}}/img/rancher/scheduled-nodes.png) - - - -### Scheduling Using Labels - -In Rancher v2.x, you can constrain pods for scheduling to specific nodes (referred to as hosts in v1.6). Using [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/), which are key/value pairs that you can attach to different Kubernetes objects, you can configure your workload so that pods you've labeled are assigned to specific nodes (or nodes with specific labels are automatically assigned workload pods). - -
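At the manifest level, the simplest form of this constraint is a `nodeSelector` on the pod spec (Rancher's UI scheduling rules are expressed as affinity directives, covered later in this section). The sketch below assumes a hypothetical `disktype=ssd` node label and a placeholder image.

```
# Minimal sketch: run this pod only on nodes carrying the (hypothetical) label disktype=ssd.
apiVersion: v1
kind: Pod
metadata:
  name: labeled-web
  labels:
    app: labeled-web
spec:
  nodeSelector:
    disktype: ssd        # only nodes labeled disktype=ssd are eligible
  containers:
  - name: web
    image: nginx:1.21    # placeholder image
```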
Label Scheduling Options
- -Label Object | Rancher v1.6 | Rancher v2.x --------------|--------------|--------------- -Schedule by Node? | ✓ | ✓ -Schedule by Pod? | ✓ | ✓ - -#### Applying Labels to Nodes and Pods - -Before you can schedule pods based on labels, you must first apply labels to your pods or nodes. - ->**Hooray!** ->All the labels that you manually applied in Rancher v1.6 (but _not_ the ones automatically created by Rancher) are parsed by migration-tools CLI, meaning you don't have to manually reapply labels. - -To apply labels to pods, make additions to the **Labels and Annotations** section as you configure your workload. After you complete workload configuration, you can view the label by viewing each pod that you've scheduled. To apply labels to nodes, edit your node and make additions to the **Labels** section. - - -#### Label Affinity/AntiAffinity - -Some of the most-used scheduling features in v1.6 were affinity and anti-affinity rules. - -
output.txt Affinity Label
- -![Affinity Label]({{}}/img/rancher/resolve-affinity.png) - -- **Affinity** - - Any pods that share the same label are scheduled to the same node. Affinity can be configured in one of two ways: - - Affinity | Description - ---------|------------ - **Hard** | A hard affinity rule means that the host chosen must satisfy all the scheduling rules. If no such host can be found, the workload will fail to deploy. In the Kubernetes manifest, this rule translates to the `nodeAffinity` directive.

To use hard affinity, configure a rule using the **Require ALL of** section (see figure below). - **Soft** | Rancher v1.6 users are likely familiar with soft affinity rules, which try to schedule the deployment per the rule, but can deploy even if the rule is not satisfied by any host.

To use soft affinity, configure a rule using the **Prefer Any of** section (see figure below). - -
- -
Affinity Rules: Hard and Soft
- - ![Affinity Rules]({{}}/img/rancher/node-scheduling-affinity.png) - -- **AntiAffinity** - - Any pods that share the same label are scheduled to different nodes. In other words, while affinity _attracts_ pods that share a label to the same node, anti-affinity _repels_ them from each other, so that the pods are scheduled to different nodes. - - You can create anti-affinity rules using either hard or soft affinity. However, when creating your rule, you must use either the `is not set` or `not in list` operator. - - For anti-affinity rules, we recommend using operators such as `NotIn` and `DoesNotExist`, as these are more intuitive when applying anti-affinity rules. - 
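As an illustration only, the sketch below shows how hard and soft rules can appear as `nodeAffinity` directives in a pod manifest, including a `NotIn` expression for anti-affinity style behavior. The label keys and values (`region`, `maintenance`, `disktype`) are hypothetical.

```
# Minimal sketch; label keys and values are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: affinity-example
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:    # hard rule: must match, or the pod stays unscheduled
        nodeSelectorTerms:
        - matchExpressions:
          - key: region
            operator: In
            values: ["us-east"]
          - key: maintenance
            operator: NotIn                              # "not in list" style anti-affinity
            values: ["true"]
      preferredDuringSchedulingIgnoredDuringExecution:   # soft rule: preferred, but not required
      - weight: 1
        preference:
          matchExpressions:
          - key: disktype
            operator: In
            values: ["ssd"]
  containers:
  - name: web
    image: nginx:1.21                                    # placeholder image
```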
AntiAffinity Operators
- - ![AntiAffinity ]({{}}/img/rancher/node-schedule-antiaffinity.png) - -Detailed documentation for affinity/anti-affinity is available in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). - -Affinity rules that you create in the UI update your workload, adding pod affinity/anti-affinity directives to the workload Kubernetes manifest specs. - - -### Preventing Scheduling Specific Services to Specific Nodes - -In Rancher v1.6 setups, you could prevent services from being scheduled to specific nodes with the use of labels. In Rancher v2.x, you can reproduce this behavior using native Kubernetes scheduling options. - -In Rancher v2.x, you can prevent pods from being scheduled to specific nodes by applying _taints_ to a node. Pods will not be scheduled to a tainted node unless it has special permission, called a _toleration_. A toleration is a special label that allows a pod to be deployed to a tainted node. While editing a workload, you can apply tolerations using the **Node Scheduling** section. Click **Show advanced options**. - -
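In the pod manifest, a toleration looks like the minimal sketch below. The taint key, value, and effect are hypothetical and must match the taint applied to the node.

```
# Minimal sketch: this pod tolerates a hypothetical dedicated=backend:NoSchedule taint.
apiVersion: v1
kind: Pod
metadata:
  name: tolerant-web
spec:
  tolerations:
  - key: "dedicated"       # must match the taint key on the node
    operator: "Equal"
    value: "backend"       # must match the taint value
    effect: "NoSchedule"   # the taint effect being tolerated
  containers:
  - name: web
    image: nginx:1.21      # placeholder image
```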
Applying Tolerations
- -![Tolerations]({{}}/img/rancher/node-schedule-advanced-options.png) - -For more information, see the Kubernetes documentation on [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). - -### Scheduling Global Services - -Rancher v1.6 included the ability to deploy [global services]({{}}/rancher/v1.6/en/cattle/scheduling/#global-service), which are services that deploy duplicate containers to each host in the environment (i.e., nodes in your cluster using Rancher v2.x terms). If a service has the `io.rancher.scheduler.global: 'true'` label declared, then Rancher v1.6 schedules a service container on each host in the environment. - -
output.txt Global Service Label
- -![Global Service Label]({{}}/img/rancher/resolve-global.png) - -In Rancher v2.x, you can schedule a pod to each node using a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), which is a specific type of workload ). A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. - -To create a daemonset while configuring a workload, choose **Run one pod on each node** from the **Workload Type** options. - -
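For reference, the DaemonSet behind that option might look like the minimal sketch below; the name and image are placeholders, and the commented `nodeSelector` shows how the DaemonSet can be limited to nodes with a specific label.

```
# Minimal sketch of a DaemonSet, the v2.x equivalent of a v1.6 global service.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: global-agent
spec:
  selector:
    matchLabels:
      app: global-agent
  template:
    metadata:
      labels:
        app: global-agent
    spec:
      # nodeSelector:          # optionally limit the DaemonSet to labeled nodes
      #   role: worker         # hypothetical node label
      containers:
      - name: agent
        image: nginx:1.21      # placeholder image
```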
Workload Configuration: Choose "Run one pod on each node" to configure a DaemonSet
- -![choose Run one pod on each node]({{}}/img/rancher/workload-type.png) - -### Scheduling Pods Using Resource Constraints - -While creating a service in the Rancher v1.6 UI, you could schedule its containers to hosts based on hardware requirements that you choose. The containers are then scheduled to hosts based on which ones have bandwidth, memory, and CPU capacity. - -In Rancher v2.x, you can still specify the resources required by your pods. However, these options are unavailable in the UI. Instead, you must edit your workload's manifest file to declare these resource constraints. - -To declare resource constraints, edit your migrated workloads, editing the **Security & Host** sections. - -- To reserve a minimum hardware reservation available for your pod(s), edit the following sections: - - - Memory Reservation - - CPU Reservation - - NVIDIA GPU Reservation - -- To set a maximum hardware limit for your pods, edit: - - - Memory Limit - - CPU Limit - -
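In the manifest itself, these settings map to `resources.requests` (reservations) and `resources.limits` on each container. The sketch below uses placeholder values only.

```
# Minimal sketch: resource reservations (requests) and limits on a container; all values are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: constrained-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: constrained-web
  template:
    metadata:
      labels:
        app: constrained-web
    spec:
      containers:
      - name: web
        image: nginx:1.21        # placeholder image
        resources:
          requests:              # minimum reserved for scheduling decisions
            cpu: "250m"          # CPU Reservation
            memory: "128Mi"      # Memory Reservation
          limits:                # hard caps enforced at runtime
            cpu: "500m"          # CPU Limit
            memory: "256Mi"      # Memory Limit
            # nvidia.com/gpu: 1  # NVIDIA GPU Reservation (requires the NVIDIA device plugin)
```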
Scheduling: Resource Constraint Settings
- -![Resource Constraint Settings]({{}}/img/rancher/resource-constraint-settings.png) - -You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). - -### [Next: Service Discovery]({{}}/rancher/v2.0-v2.4/en/v1.6-migration/discover-services/) diff --git a/content/rancher/v2.5/_index.md b/content/rancher/v2.5/_index.md deleted file mode 100644 index 61f266de70..0000000000 --- a/content/rancher/v2.5/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Rancher 2.5 -weight: 2 -showBreadcrumb: false ---- diff --git a/content/rancher/v2.5/en/admin-settings/_index.md b/content/rancher/v2.5/en/admin-settings/_index.md deleted file mode 100644 index 0ff5e139bf..0000000000 --- a/content/rancher/v2.5/en/admin-settings/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Authentication, Permissions and Global Configuration -weight: 6 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/ - - /rancher/v2.5/en/tasks/global-configuration/ - - /rancher/v2.5/en/concepts/global-configuration/server-url/ - - /rancher/v2.5/en/tasks/global-configuration/server-url/ - - /rancher/v2.5/en/admin-settings/log-in/ - - /rancher/v2.x/en/admin-settings/ ---- - -After installation, the [system administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. - -## First Log In - -After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**.You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. - ->**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. - -## Authentication - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. - -For more information how authentication works and how to configure each provider, see [Authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/). - -## Authorization - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. - -For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)]({{}}/rancher/v2.5/en/admin-settings/rbac/). 
- -## Pod Security Policies - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. - -For more information how to create and use PSPs, see [Pod Security Policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/). - -## Provisioning Drivers - -Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -For more information, see [Provisioning Drivers]({{}}/rancher/v2.5/en/admin-settings/drivers/). - -## Adding Kubernetes Versions into Rancher - -With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. - -The information that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.]({{}}/rancher/v2.5/en/admin-settings/k8s-metadata/) - -Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). - -For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata]({{}}/rancher/v2.5/en/admin-settings/k8s-metadata/). - -## Enabling Experimental Features - -Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.]({{}}/rancher/v2.5/en/installation/options/feature-flags/) diff --git a/content/rancher/v2.5/en/admin-settings/authentication/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/_index.md deleted file mode 100644 index e52fb47a05..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/_index.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: Authentication -weight: 1115 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/authentication/ - - /rancher/v2.5/en/tasks/global-configuration/authentication/ - - /rancher/v2.x/en/admin-settings/authentication/ ---- - -One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. - -This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. - -## External vs. Local Authentication - -The Rancher authentication proxy integrates with the following external authentication services. 
Each entry in the following table links to the configuration instructions for that provider. - -| Auth Service | -| ------------------------------------------------------------------------------------------------ | -| [Microsoft Active Directory]({{}}/rancher/v2.5/en/admin-settings/authentication/ad/) | -| [GitHub]({{}}/rancher/v2.5/en/admin-settings/authentication/github/) | -| [Microsoft Azure AD]({{}}/rancher/v2.5/en/admin-settings/authentication/azure-ad/) | -| [FreeIPA]({{}}/rancher/v2.5/en/admin-settings/authentication/freeipa/) | -| [OpenLDAP]({{}}/rancher/v2.5/en/admin-settings/authentication/openldap/) | -| [Microsoft AD FS]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/) | -| [PingIdentity]({{}}/rancher/v2.5/en/admin-settings/authentication/ping-federate/) | -| [Keycloak]({{}}/rancher/v2.5/en/admin-settings/authentication/keycloak/) | -| [Okta]({{}}/rancher/v2.5/en/admin-settings/authentication/okta/) | -| [Google OAuth]({{}}/rancher/v2.5/en/admin-settings/authentication/google/) | -| [Shibboleth]({{}}/rancher/v2.5/en/admin-settings/authentication/shibboleth) | - 
-However, Rancher also provides [local authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/local/). - -In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. - -## Users and Groups - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.5/en/admin-settings/rbac/). - -> **Note:** Local authentication does not support creating or managing groups. - -For more information, see [Users and Groups]({{}}/rancher/v2.5/en/admin-settings/authentication/user-groups/) - -## Scope of Rancher Authorization - -After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: - -| Access Level | Description | -|----------------------------------------------|-------------| -| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | -| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | -| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | - -To set the Rancher access level for users in the authorization service, follow these steps: - -1. From the **Global** view, click **Security > Authentication.** - -1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. - -1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. - -1. Click **Save.** - -**Result:** The Rancher access configuration settings are applied. - -{{< saml_caveats >}} - -## External Authentication Configuration and Principal Users - -Configuration of external authentication requires: - -- A local user assigned the administrator role, called hereafter the _local principal_. -- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. - -Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. - -1. 
Sign into Rancher as the local principal and complete configuration of external authentication. - - ![Sign In]({{}}/img/rancher/sign-in.png) - -2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. - - ![Principal ID Sharing]({{}}/img/rancher/principal-ID.png) - -3. After you complete configuration, Rancher automatically signs out the local principal. - - ![Sign Out Local Principal]({{}}/img/rancher/sign-out-local.png) - -4. Then, Rancher automatically signs you back in as the external principal. - - ![Sign In External Principal]({{}}/img/rancher/sign-in-external.png) - -5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. - - ![Sign In External Principal]({{}}/img/rancher/users-page.png) - -6. The external principal and the local principal share the same access rights. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md deleted file mode 100644 index 2c9c82a437..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/ad/_index.md +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: Configuring Active Directory (AD) -weight: 1112 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/active-directory/ - - /rancher/v2.x/en/admin-settings/authentication/ad/ ---- - -If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. - -Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/openldap) integration. - -> **Note:** -> -> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -## Prerequisites - -You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. - -Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. - -Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. - -> **Using TLS?** -> -> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. 
- -## Configuration Steps -### Open Active Directory Configuration - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **Active Directory**. The **Configure an AD server** form will be displayed. - -### Configure Active Directory Server Settings - -In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. - -> **Note:** -> -> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). - -**Table 1: AD Server parameters** - -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the AD server | -| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | -| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | -| Service Account Password | The password for the service account. | -| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. | -| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| - ---- - -### Configure User/Group Schema - -In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. - -Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. - -> **Note:** -> -> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. - -#### User Schema - -The table below details the parameters for the user schema section configuration. 
- -**Table 2: User schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | -| User Member Attribute | The attribute containing the groups that a user is a member of. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | -| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | -| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | -| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | - ---- - -#### Group Schema - -The table below details the parameters for the group schema configuration. - -**Table 3: Group schema configuration parameters** - -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | -| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. 
For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing a the user's memberships. See `User Member Attribute`. | -| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (i.e., you have groups that contain other groups as members. We advise avoiding nested groups when possible). | - ---- - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly with the account you test with set as admin. - -> **Note:** -> -> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. - -1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. -2. Click **Authenticate with Active Directory** to finalise the setup. - -**Result:** - -- Active Directory authentication has been enabled. -- You have been signed into Rancher as administrator using the provided AD credentials. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Identify Search Base and Schema using ldapsearch - -In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. - -The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. - -For the purpose of the example commands provided below we will assume: - -- The Active Directory server has a hostname of `ad.acme.com` -- The server is listening for unencrypted connections on port `389` -- The Active Directory domain is `acme` -- You have a valid AD account with the username `jdoe` and password `secret` - -### Identify Search Base - -First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" -``` - -This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountNam=jdoe`), returning the attributes for said user: - -{{< img "/img/rancher/ldapsearch-user.png" "LDAP User">}} - -Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. 
- -Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, i.e., `OU=Groups,DC=acme,DC=com`. - -### Identify User Schema - -The output of the above `ldapsearch` query also allows to determine the correct values to use in the user schema configuration: - -- `Object Class`: **person** [1] -- `Username Attribute`: **name** [2] -- `Login Attribute`: **sAMAccountName** [3] -- `User Member Attribute`: **memberOf** [4] - -> **Note:** -> -> If the AD users in our organization were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. - -We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name. - -### Identify Group Schema - -Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: - -``` -$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ --h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ --s sub "CN=examplegroup" -``` - -This command will inform us on the attributes used for group objects: - -{{< img "/img/rancher/ldapsearch-group.png" "LDAP Group">}} - -Again, this allows us to determine the correct values to enter in the group schema configuration: - -- `Object Class`: **group** [1] -- `Name Attribute`: **name** [2] -- `Group Member Mapping Attribute`: **member** [3] -- `Search Attribute`: **sAMAccountName** [4] - -Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly will have to set the value of the `Group Member User Attribute` parameter to this attribute. - -In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.5/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md deleted file mode 100644 index 33ea20aa3e..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/azure-ad/_index.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: Configuring Azure AD -weight: 1115 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/azure-ad/ - - /rancher/v2.x/en/admin-settings/authentication/azure-ad/ ---- - -If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. 
- ->**Note:** Azure AD integration only supports Service Provider initiated logins. - ->**Prerequisite:** Have an instance of Azure AD configured. - ->**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). - -## Azure Active Directory Configuration Outline - -Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. - - - ->**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. - - - -- [1. Register Rancher with Azure](#1-register-rancher-with-azure) -- [2. Create a new client secret](#2-create-a-new-client-secret) -- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) -- [4. Add a Reply URL](#4-add-a-reply-url) -- [5. Copy Azure Application Data](#5-copy-azure-application-data) -- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) - - - -### 1. Register Rancher with Azure - -Before enabling Azure AD within Rancher, you must register Rancher with Azure. - -1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. - -1. Use search to open the **App registrations** service. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - -1. Click **New registrations** and complete the **Create** form. - - ![New App Registration]({{}}/img/rancher/new-app-registration.png) - - 1. Enter a **Name** (something like `Rancher`). - - 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. - - 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - - 1. Click **Register**. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 2. Create a new client secret - -From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. - -1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. - - ![Open Rancher Registration]({{}}/img/rancher/open-rancher-app.png) - -1. From the navigation pane on left, click **Certificates and Secrets**. - -1. Click **New client secret**. - - ![Create new client secret]({{< baseurl >}}/img/rancher/select-client-secret.png) - - 1. Enter a **Description** (something like `Rancher`). - - 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. - - 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). - - -1. Copy the key value and save it to an [empty text file](#tip). 
- - You'll enter this key into the Rancher UI later as your **Application Secret**. - - You won't be able to access the key value again within the Azure UI. - -### 3. Set Required Permissions for Rancher - -Next, set API permissions for Rancher within Azure. - -1. From the navigation pane on left, select **API permissions**. - - ![Open Required Permissions]({{}}/img/rancher/select-required-permissions.png) - -1. Click **Add a permission**. - -1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: - - ![Select API Permissions]({{< baseurl >}}/img/rancher/select-required-permissions-2.png) - -
-
- - **Access the directory as the signed-in user** - - **Read directory data** - - **Read all groups** - - **Read all users' full profiles** - - **Read all users' basic profiles** - - **Sign in and read user profile** - -1. Click **Add permissions**. - -1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. - - >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. - - -### 4. Add a Reply URL - -To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. - - -1. From the **Setting** blade, select **Reply URLs**. - - ![Azure: Enter Reply URL]({{}}/img/rancher/enter-azure-reply-url.png) - -1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. - - >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). - -1. Click **Save**. - -**Result:** Your reply URL is saved. - ->**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. - -### 5. Copy Azure Application Data - -As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. - -1. Obtain your Rancher **Tenant ID**. - - 1. Use search to open the **Azure Active Directory** service. - - ![Open Azure Active Directory]({{}}/img/rancher/search-azure-ad.png) - - 1. From the left navigation pane, open **Overview**. - - 2. Copy the **Directory ID** and paste it into your [text file](#tip). - - You'll paste this value into Rancher as your **Tenant ID**. - -1. Obtain your Rancher **Application ID**. - - 1. Use search to open **App registrations**. - - ![Open App Registrations]({{}}/img/rancher/search-app-registrations.png) - - 1. Find the entry you created for Rancher. - - 1. Copy the **Application ID** and paste it to your [text file](#tip). - -1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. - - 1. From **App registrations**, click **Endpoints**. - - ![Click Endpoints]({{}}/img/rancher/click-endpoints.png) - - 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). - - - **Microsoft Graph API endpoint** (Graph Endpoint) - - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) - - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) - ->**Note:** Copy the v1 version of the endpoints - -### 6. Configure Azure AD in Rancher - -From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. - -Enter the values that you copied to your [text file](#tip). - -1. Log into Rancher. From the **Global** view, select **Security > Authentication**. - -1. Select **Azure AD**. - -1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). - - >**Important:** When entering your Graph Endpoint, remove the tenant ID from the URL, like below. - > - >https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c - - The following table maps the values you copied in the Azure portal to the fields in Rancher. 
- - | Rancher Field | Azure Value | - | ------------------ | ------------------------------------- | - | Tenant ID | Directory ID | - | Application ID | Application ID | - | Application Secret | Key Value | - | Endpoint | https://login.microsoftonline.com/ | - | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | - | Token Endpoint | OAuth 2.0 Token Endpoint | - | Auth Endpoint | OAuth 2.0 Authorization Endpoint | - -1. Click **Authenticate with Azure**. - -**Result:** Azure Active Directory authentication is configured. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md deleted file mode 100644 index 791d3f5a6b..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/freeipa/_index.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Configuring FreeIPA -weight: 1114 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/freeipa/ - - /rancher/v2.x/en/admin-settings/authentication/freeipa/ ---- - -If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. - ->**Prerequisites:** -> ->- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. ->- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. ->- Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **FreeIPA**. - -4. Complete the **Configure an FreeIPA server** form. - - You may need to log in to your domain controller to find the information requested in the form. - - >**Using TLS?** - >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. -
-
- >**User Search Base vs. Group Search Base** - > - >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. - > - >* If your users and groups are in the same search base, complete only the User Search Base. - >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. - -5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. - - >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. - > - >The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. - > - > * `uid`: User ID - > * `sn`: Last Name - > * `givenName`: First Name - > - > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. - -6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. - -**Result:** - -- FreeIPA authentication is configured. -- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md deleted file mode 100644 index 4ea86b7b44..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/github/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Configuring GitHub -weight: 1116 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/github/ - - /rancher/v2.x/en/admin-settings/authentication/github/ ---- - -In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. - ->**Prerequisites:** Read [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). - -2. From the **Global** view, select **Security > Authentication** from the main menu. - -3. Select **GitHub**. - -4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. - - >**What's an Authorization Callback URL?** - > - >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). - - >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. - -5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. 
- - >**Where do I find the Client ID and Client Secret?** - > - >From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. - -6. Click **Authenticate with GitHub**. - -7. Use the **Site Access** options to configure the scope of user authorization. - - - **Allow any valid Users** - - _Any_ GitHub user can access Rancher. We generally discourage use of this setting! - - - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** - - Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. - - - **Restrict access to only Authorized Users and Organizations** - - Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. -
-8. Click **Save**. - -**Result:** - -- GitHub authentication is configured. -- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md deleted file mode 100644 index 9738dd3b67..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/google/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Configuring Google OAuth -weight: 15 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/google/ ---- - -If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. - -Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. - -Within Rancher, only administrators or users with the **Manage Authentication** [global role]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) can configure authentication. - -# Prerequisites -- You must have a [G Suite admin account](https://admin.google.com) configured. -- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. -- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) - -After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: -![Enable Admin APIs]({{}}/img/rancher/Google-Enable-APIs-Screen.png) - -# Setting up G Suite for OAuth with Rancher -Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: - -1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) -1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) -1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) -1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) - -### 1. Adding Rancher as an Authorized Domain -1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. -1. Select your project and click **OAuth consent screen.** -![OAuth Consent Screen]({{}}/img/rancher/Google-OAuth-consent-screen-tab.png) -1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) -1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. 
- -**Result:** Rancher has been added as an authorized domain for the Admin SDK API. - -### 2. Creating OAuth2 Credentials for the Rancher Server -1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) -![Credentials]({{}}/img/rancher/Google-Credentials-tab.png) -1. On the **Create Credentials** dropdown, select **OAuth client ID.** -1. Click **Web application.** -1. Provide a name. -1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. - - Under **Authorized JavaScript origins,** enter your Rancher server URL. - - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. -1. Click on **Create.** -1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. - -**Result:** Your OAuth credentials have been successfully created. - -### 3. Creating Service Account Credentials -Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. - -Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. - -As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. - -This section describes how to: - -- Create a service account -- Create a key for the service account and download the credentials as JSON - -1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. -1. Click on **Create Service Account.** -1. Enter a name and click **Create.** -![Service account creation Step 1]({{}}/img/rancher/Google-svc-acc-step1.png) -1. Don't provide any roles on the **Service account permissions** page and click **Continue** -![Service account creation Step 2]({{}}/img/rancher/Google-svc-acc-step2.png) -1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. -![Service account creation Step 3]({{}}/img/rancher/Google-svc-acc-step3-key-creation.png) - -**Result:** Your service account is created. - -### 4. Register the Service Account Key as an OAuth Client - -You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. - -Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: - -1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. 
To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** - - ![Service account Unique ID]({{}}/img/rancher/Google-Select-UniqueID-column.png) -1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) -1. Add the Unique ID obtained in the previous step in the **Client Name** field. -1. In the **One or More API Scopes** field, add the following scopes: - ``` - openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly - ``` -1. Click **Authorize.** - -**Result:** The service account is registered as an OAuth client in your G Suite account. - -# Configuring Google OAuth in Rancher -1. Sign into Rancher using a local user assigned the [administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions) role. This user is also called the local principal. -1. From the **Global** view, click **Security > Authentication** from the main menu. -1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. - 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. - 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. - 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. - - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) - - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. - - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. -1. Click **Authenticate with Google**. -1. Click **Save**. - -**Result:** Google authentication is successfully configured. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md deleted file mode 100644 index e4e75f3647..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/keycloak/_index.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Configuring Keycloak (SAML) -description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. 
By the end your users will be able to sign into Rancher using their Keycloak logins -weight: 1200 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/keycloak/ ---- - -If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -## Prerequisites - -- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. -- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - - Setting | Value - ------------|------------ - `Sign Documents` | `ON` 1 - `Sign Assertions` | `ON` 1 - All other `ON/OFF` Settings | `OFF` - `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 - `Client Name` | (e.g. `rancher`) - `Client Protocol` | `SAML` - `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` - - >1: Optionally, you can enable either one or both of these settings. - >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. - - {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} - -- In the new SAML client, create Mappers to expose the users fields - - Add all "Builtin Protocol Mappers" - {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} - - Create a new "Group list" mapper to map the member attribute to a user's groups - {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} - -## Getting the IDP Metadata - -{{% tabs %}} -{{% tab "Keycloak 5 and earlier" %}} -To get the IDP metadata, export a `metadata.xml` file from your Keycloak client. -From the **Installation** tab, choose the **SAML Metadata IDPSSODescriptor** format option and download your file. -{{% /tab %}} -{{% tab "Keycloak 6-13" %}} - -1. From the **Configure** section, click the **Realm Settings** tab. -1. Click the **General** tab. -1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. - -Verify the IDP metadata contains the following attributes: - -``` -xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" -xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" -xmlns:ds="http://www.w3.org/2000/09/xmldsig#" -``` - -Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. - -The following is an example process for Firefox, but will vary slightly for other browsers: - -1. Press **F12** to access the developer console. -1. Click the **Network** tab. -1. From the table, click the row containing `descriptor`. -1. From the details pane, click the **Response** tab. -1. Copy the raw response data. - -The XML obtained contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: - -1. 
Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. -1. Remove the `` tag from the beginning. -1. Remove the `` from the end of the xml. - -You are left with something similar as the example below: - -``` - -.... - -``` - -{{% /tab %}} -{{% tab "Keycloak 14+" %}} - -1. From the **Configure** section, click the **Realm Settings** tab. -1. Click the **General** tab. -1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. - -Verify the IDP metadata contains the following attributes: - -``` -xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" -xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" -xmlns:ds="http://www.w3.org/2000/09/xmldsig#" -``` - -Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. - -The following is an example process for Firefox, but will vary slightly for other browsers: - -1. Press **F12** to access the developer console. -1. Click the **Network** tab. -1. From the table, click the row containing `descriptor`. -1. From the details pane, click the **Response** tab. -1. Copy the raw response data. - -{{% /tab %}} -{{% /tabs %}} - -## Configuring Keycloak in Rancher - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Keycloak**. - -1. Complete the **Configure Keycloak Account** form. For help with filling the form, see the [configuration reference](#configuration-reference). - -1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. - -{{< saml_caveats >}} - -## Configuration Reference - - -| Field | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Display Name Field | The attribute that contains the display name of users.

Example: `givenName` | -| User Name Field | The attribute that contains the user name/given name.

Example: `email` | -| UID Field | An attribute that is unique to every user.

Example: `email` | -| Groups Field | Make entries for managing group memberships.

Example: `member` | -| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.

Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | -| Rancher API Host | The URL for your Rancher Server. | -| Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | -| IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | - ->**Tip:** You can generate a key/certificate pair using an openssl command. For example: -> -> openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert - - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.5/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. - -### You are not redirected to Keycloak - -When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. - - * Verify your Keycloak client configuration. - * Make sure `Force Post Binding` set to `OFF`. - - -### Forbidden message displayed after IdP login - -You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. - - * Check the Rancher debug log. - * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. - -### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata - -This is usually due to the metadata not being created until a SAML provider is configured. -Try configuring and saving keycloak as your SAML provider and then accessing the metadata. - -### Keycloak Error: "We're sorry, failed to process response" - - * Check your Keycloak log. - * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. - -### Keycloak Error: "We're sorry, invalid requester" - - * Check your Keycloak log. - * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md deleted file mode 100644 index 3ea4302552..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Configuring Microsoft Active Directory Federation Service (SAML) -weight: 1205 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/ ---- - -If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. - -## Prerequisites - -You must have Rancher installed. - -- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. -- You must have a global administrator account on your Rancher installation. 
- -You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. - -- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. -- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. - -## Setup Outline - -Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. - -- [1. Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) -- [2. Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup) - -{{< saml_caveats >}} - - -### [Next: Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup) diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md deleted file mode 100644 index 30e6877536..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: 1. Configuring Microsoft AD FS for Rancher -weight: 1205 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/ ---- - -Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. - -1. Log into your AD server as an administrative user. - -1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. - - {{< img "/img/rancher/adfs/adfs-overview.png" "">}} - -1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. - - {{< img "/img/rancher/adfs/adfs-add-rpt-2.png" "">}} - -1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. - - {{< img "/img/rancher/adfs/adfs-add-rpt-3.png" "">}} - -1. Select **AD FS profile** as the configuration profile for your relying party trust. - - {{< img "/img/rancher/adfs/adfs-add-rpt-4.png" "">}} - -1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. - - {{< img "/img/rancher/adfs/adfs-add-rpt-5.png" "">}} - -1. Select **Enable support for the SAML 2.0 WebSSO protocol** - and enter `https:///v1-saml/adfs/saml/acs` for the service URL. - - {{< img "/img/rancher/adfs/adfs-add-rpt-6.png" "">}} - -1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. - - {{< img "/img/rancher/adfs/adfs-add-rpt-7.png" "">}} - -1. 
This tutorial does not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure it.
-
-    {{< img "/img/rancher/adfs/adfs-add-rpt-8.png" "">}}
-
-1. From **Choose Issuance Authorization Rules**, you may select either of the available options, depending on your use case. For the purposes of this guide, select **Permit all users to access this relying party**.
-
-    {{< img "/img/rancher/adfs/adfs-add-rpt-9.png" "">}}
-
-1. After reviewing your settings, select **Next** to add the relying party trust.
-
-    {{< img "/img/rancher/adfs/adfs-add-rpt-10.png" "">}}
-
-1. Select **Open the Edit Claim Rules...** and click **Close**.
-
-    {{< img "/img/rancher/adfs/adfs-add-rpt-11.png" "">}}
-
-1. On the **Issuance Transform Rules** tab, click **Add Rule...**.
-
-    {{< img "/img/rancher/adfs/adfs-edit-cr.png" "">}}
-
-1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**.
-
-    {{< img "/img/rancher/adfs/adfs-add-tcr-1.png" "">}}
-
-1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the mapping shown in the table below:
-
-    | LDAP Attribute                                | Outgoing Claim Type |
-    | --------------------------------------------- | ------------------- |
-    | Given-Name                                    | Given Name          |
-    | User-Principal-Name                           | UPN                 |
-    | Token-Groups - Qualified by Long Domain Name  | Group               |
-    | SAM-Account-Name                              | Name                |
-
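For reference, the **Send LDAP Attributes as Claims** wizard turns the mapping above into a rule written in the AD FS claim rule language. A rule generated from this table looks roughly like the sketch below; the claim-type URIs and query string shown are typical defaults and may differ slightly in your AD FS version, so treat this as illustrative rather than something to paste verbatim:

```
c:[Type == "http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname", Issuer == "AD AUTHORITY"]
 => issue(store = "Active Directory",
          types = ("http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname",
                   "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn",
                   "http://schemas.xmlsoap.org/claims/Group",
                   "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name"),
          query = ";givenName,userPrincipalName,tokenGroups(longDomainQualifiedName),sAMAccountName;{0}",
          param = c.Value);
```

Seeing the rule in this form can be helpful when comparing your configuration against the claim types Rancher expects in the next part of the setup.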
- {{< img "/img/rancher/adfs/adfs-add-tcr-2.png" "">}} - -1. Download the `federationmetadata.xml` from your AD server at: -``` -https:///federationmetadata/2007-06/federationmetadata.xml -``` - -**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. - -### [Next: Configuring Rancher for Microsoft AD FS]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/) diff --git a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md deleted file mode 100644 index 0a7d121412..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 2. Configuring Rancher for Microsoft AD FS -weight: 1205 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/ ---- - -After you complete [Configuring Microsoft AD FS for Rancher]({{}}/rancher/v2.5/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. - ->**Important Notes For Configuring Your AD FS Server:** -> ->- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` ->- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` ->- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` - - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Microsoft Active Directory Federation Services**. - -1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. - - - - - - - - -1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. - - Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. - - >**Note:** You may have to disable your popup blocker to see the AD FS login page. - -**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. - -# Configuration - -| Field | Description | -|---------------------------|-----------------| -| Display Name Field | The AD attribute that contains the display name of users.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` | -| User Name Field | The AD attribute that contains the user name/given name.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` | -| UID Field | An AD attribute that is unique to every user.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` | -| Groups Field | Make entries for managing group memberships.

Example: `http://schemas.xmlsoap.org/claims/Group` | -| Rancher API Host | The URL for your Rancher Server. | -| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL.

[Certificate creation command](#cert-command) | -| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.

You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | - - - - -**Tip:** You can generate a certificate using an openssl command. For example: - -``` -openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" -``` diff --git a/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md deleted file mode 100644 index 0d8c49afe3..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/openldap/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Configuring OpenLDAP -weight: 1113 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/authentication/openldap/ - - /rancher/v2.x/en/admin-settings/authentication/openldap/ ---- - -If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. - -## Prerequisites - -Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -## Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](./openldap-config) - -> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication** -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -### Test Authentication - -Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. - -> **Note:** -> -> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. - -1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. -2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. - -**Result:** - -- OpenLDAP authentication is configured. 
-- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. - -> **Note:** -> -> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.5/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md deleted file mode 100644 index 5a12e5f78c..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: OpenLDAP Configuration Reference -weight: 2 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/ ---- - -This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. - -For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) - -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) -- [OpenLDAP server configuration](#openldap-server-configuration) -- [User/group schema configuration](#user-group-schema-configuration) - - [User schema configuration](#user-schema-configuration) - - [Group schema configuration](#group-schema-configuration) - -## Background: OpenLDAP Authentication Flow - -1. When a user attempts to login with his LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. -2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. -3. Once the user has been found, he is authenticated with another LDAP bind request using the user's DN and provided password. -4. Once authentication succeeded, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. - -# OpenLDAP Server Configuration - -You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. 
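Before saving the configuration, it can save time to confirm that the certificate chain you are about to paste actually validates the LDAPS endpoint. The commands below are a minimal sketch; the hostname `ldap.acme.com`, the bundle `ca-chain.pem`, and the bind DN are placeholders for illustration:

```
# Verify that the presented server certificate chains up to your CA bundle
$ openssl s_client -connect ldap.acme.com:636 -CAfile ca-chain.pem -showcerts </dev/null

# Optionally, confirm that an LDAPS bind works with the same bundle
$ LDAPTLS_CACERT=ca-chain.pem ldapsearch -x -H ldaps://ldap.acme.com:636 \
  -D "cn=rancher-bind,dc=acme,dc=com" -W -b "dc=acme,dc=com" -s base
```

If `openssl` reports `Verify return code: 0 (ok)`, the same PEM contents should be accepted by Rancher.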
- -If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.5/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -
**OpenLDAP Server Parameters**
- -| Parameter | Description | -|:--|:--| -| Hostname | Specify the hostname or IP address of the OpenLDAP server | -| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| -| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | -| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | -| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. | -| Service Account Password | The password for the service account. | -| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| -| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| - -# User/Group Schema Configuration - -If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. - -Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. - -If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch]({{}}/rancher/v2.5/en/admin-settings/authentication/ad/#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. - -### User Schema Configuration - -The table below details the parameters for the user schema configuration. - -
**User Schema Configuration Parameters**
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Username Attribute | The user attribute whose value is suitable as a display name. | -| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | -| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | -| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | -| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | -| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | - -### Group Schema Configuration - -The table below details the parameters for the group schema configuration. - -
Group Schema Configuration Parameters
- -| Parameter | Description | -|:--|:--| -| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | -| Name Attribute | The group attribute whose value is suitable for a display name. | -| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | -| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | -| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | -| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | -| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md deleted file mode 100644 index 3d74de5054..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/_index.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: Configuring Shibboleth (SAML) -weight: 1210 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/shibboleth/ ---- - -If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. - -In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. - -> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](./about) - -This section covers the following topics: - -- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) - - [Shibboleth Prerequisites](#shibboleth-prerequisites) - - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) - - [SAML Provider Caveats](#saml-provider-caveats) -- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) - - [OpenLDAP Prerequisites](#openldap-prerequisites) - - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) - - [Troubleshooting](#troubleshooting) - -# Setting up Shibboleth in Rancher - -### Shibboleth Prerequisites -> ->- You must have a Shibboleth IdP Server configured. ->- Following are the Rancher Service Provider URLs needed for configuration: -Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` -Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` ->- Export a `metadata.xml` file from your IdP Server. 
For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) - -### Configure Shibboleth in Rancher -If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -1. From the **Global** view, select **Security > Authentication** from the main menu. - -1. Select **Shibboleth**. - -1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. - - 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). - - 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). - - 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). - - 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). - - 1. **Rancher API Host**: Enter the URL for your Rancher Server. - - 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. - - You can generate one using an openssl command. For example: - - ``` - openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" - ``` - 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. - - -1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. - -### SAML Provider Caveats - -If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. - -- There is no validation on users or groups when assigning permissions to them in Rancher. -- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. -- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. -- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. - -To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. - -# Setting up OpenLDAP in Rancher - -If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. 
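Before moving on to the configuration steps, it can be useful to confirm outside of Rancher that the directory really exposes the group memberships Shibboleth will pass along. The following `ldapsearch` sketch is illustrative only; the hostname, bind DN, password, search base, and attribute name are placeholders to replace with values from your own directory:

```bash
# Sanity check (optional, outside of Rancher): bind with the service account
# and confirm that a known user's group memberships are returned.
# All values below are placeholders for your own environment.
ldapsearch -x \
  -H ldap://openldap.example.com:389 \
  -D "cn=rancher-bind,dc=example,dc=com" \
  -w 'SERVICE_ACCOUNT_PASSWORD' \
  -b "ou=people,dc=example,dc=com" \
  "(uid=jdoe)" memberOf
```

If the query returns the expected `memberOf` values, the same service account and search bases can be reused in the form described in the following sections.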
- -### OpenLDAP Prerequisites - -Rancher must be configured with an LDAP bind account (also known as a service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended not to use an administrator account or a personal account for this purpose. Instead, create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). - -> **Using TLS?** -> -> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure you have the CA certificate (concatenated with any intermediate certificates) at hand in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. - -### Configure OpenLDAP in Rancher - -Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.]({{}}/rancher/v2.5/en/admin-settings/authentication/openldap/openldap-config) Note that nested group membership is not available for Shibboleth. - -> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -1. Log into the Rancher UI using the initial local `admin` account. -2. From the **Global** view, navigate to **Security** > **Authentication**. -3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. - -# Troubleshooting - -If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.5/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. diff --git a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md deleted file mode 100644 index 7d69442ec8..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/shibboleth/about/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Group Permissions with Shibboleth and OpenLDAP -weight: 1 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/shibboleth/about/ ---- - -This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. - -Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. - -One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. - -### Terminology - -- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships.
-- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. -- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. -- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. - -### Adding OpenLDAP Group Permissions to Rancher Resources - -The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. - -For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions to view most cluster-level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. - -In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning permissions. Without OpenLDAP, the functionality to search for groups would not be supported. - -When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. - -Shibboleth validates her credentials and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources that her groups have permissions for. - -![Adding OpenLDAP Group Permissions to Rancher Resources]({{}}/img/rancher/shibboleth-with-openldap-groups.svg) - \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md b/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md deleted file mode 100644 index 8f708809a2..0000000000 --- a/content/rancher/v2.5/en/admin-settings/authentication/user-groups/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Users and Groups -weight: 1 -aliases: - - /rancher/v2.x/en/admin-settings/authentication/user-groups/ ---- - -Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. - -Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control]({{}}/rancher/v2.5/en/admin-settings/rbac/). - -## Managing Members - -When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered.
Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local]({{}}/rancher/v2.5/en/admin-settings/authentication/local/) user account, you will not be able to search for GitHub users or groups. - -All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. - -{{< saml_caveats >}} - -## User Information - -Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. - -Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. - -### Automatically Refreshing User Information - -Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: - -- **`auth-user-info-max-age-seconds`** - - This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. - -- **`auth-user-info-resync-cron`** - - This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. - - -> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. - -### Manually Refreshing User Information - -If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. - -1. From the **Global** view, click on **Users** in the navigation bar. - -1. Click on **Refresh Group Memberships**. - -**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. - ->**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. - - -## Session Length - -The default length (TTL) of each user session is adjustable. The default session length is 16 hours. - -1. From the **Global** view, click on **Settings**. -1. In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** -1. 
Enter the amount of time in minutes a session length should last and click **Save.** - -**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md b/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md deleted file mode 100644 index d9b0576320..0000000000 --- a/content/rancher/v2.5/en/admin-settings/config-private-registry/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Configuring a Global Default Private Registry -weight: 400 -aliases: - - /rancher/v2.x/en/admin-settings/config-private-registry/ ---- - -You might want to use a private container registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the container images that are used in your clusters. - -There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. - -For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation]({{}}/rancher/v2.5/en/installation/air-gap-single-node) or [air gapped Kubernetes installation]({{}}/rancher/v2.5/en/installation/air-gap-high-availability) instructions. - -If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. - -# Setting a Private Registry with No Credentials as the Default Registry - -1. Log into Rancher and configure the default administrator password. - -1. Go into the **Settings** view. - - {{< img "/img/rancher/airgap/settings.png" "Settings" >}} - -1. Look for the setting called `system-default-registry` and choose **Edit**. - - {{< img "/img/rancher/airgap/edit-system-default-registry.png" "Edit" >}} - -1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. - - {{< img "/img/rancher/airgap/enter-system-default-registry.png" "Save" >}} - -**Result:** Rancher will use your private registry to pull system images. - -# Setting a Private Registry with Credentials when Deploying a Cluster - -You can follow these steps to configure a private registry when you provision a cluster with Rancher: - -1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** -1. In the Enable Private Registries section, click **Enabled.** -1. Enter the registry URL and credentials. -1. 
Click **Save.** - -**Result:** The new cluster will be able to pull images from the private registry. diff --git a/content/rancher/v2.5/en/admin-settings/drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/_index.md deleted file mode 100644 index 471124f634..0000000000 --- a/content/rancher/v2.5/en/admin-settings/drivers/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Provisioning Drivers -weight: 1140 -aliases: - - /rancher/v2.x/en/admin-settings/drivers/ ---- - -Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) or [nodes in an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) to allow Rancher to deploy and manage Kubernetes. - -### Rancher Drivers - -With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. - -There are two types of drivers within Rancher: - -* [Cluster Drivers](#cluster-drivers) -* [Node Drivers](#node-drivers) - -### Cluster Drivers - -Cluster drivers are used to provision [hosted Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. - -By default, Rancher has activated several hosted Kubernetes cloud providers including: - -* [Amazon EKS]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/) -* [Google GKE]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/) -* [Azure AKS]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/) - -There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: - -* [Alibaba ACK]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/) -* [Huawei CCE]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/) -* [Tencent]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/) - -### Node Drivers - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. 
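The activation status of a driver can also be changed through the Rancher API rather than the UI. The sketch below is only an illustration: the collection path, the `deactivate` action, and the token format are assumptions to verify against the API schema of your own Rancher server before relying on them.

```bash
# Illustrative sketch: list node drivers, then deactivate one by ID.
# The endpoint, action name, and token below are placeholders/assumptions.
curl -s -H "Authorization: Bearer token-xxxxx:SECRET" \
  https://rancher.example.com/v3/nodeDrivers

# Deactivate a driver so it no longer appears when creating node templates:
curl -s -X POST -H "Authorization: Bearer token-xxxxx:SECRET" \
  "https://rancher.example.com/v3/nodeDrivers/<driver-id>?action=deactivate"
```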
- -Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: - -* [Amazon EC2]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/) -* [Azure]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/) -* [Digital Ocean]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/) -* [vSphere]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/) diff --git a/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md deleted file mode 100644 index b5ea85f5f9..0000000000 --- a/content/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Cluster Drivers -weight: 1 -aliases: - - /rancher/v2.x/en/admin-settings/drivers/cluster-drivers/ ---- - -Cluster drivers are used to create clusters in a [hosted Kubernetes provider]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. - -If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. - -### Managing Cluster Drivers - ->**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Cluster Drivers]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) role assigned. - -## Activating/Deactivating Cluster Drivers - -By default, Rancher only activates drivers for the most popular cloud providers: Google GKE, Amazon EKS, and Azure AKS. If you want to show or hide any cluster driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page, select the **Cluster Drivers** tab. - -3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Cluster Drivers - -If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using it to create _hosted_ Kubernetes clusters. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. - -2. From the **Drivers** page, select the **Cluster Drivers** tab. - -3. Click **Add Cluster Driver**. - -4. Complete the **Add Cluster Driver** form. Then click **Create**. - - -### Developing your own Cluster Driver - -In order to develop a cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example).
diff --git a/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md b/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md deleted file mode 100644 index 52b2c1efd4..0000000000 --- a/content/rancher/v2.5/en/admin-settings/drivers/node-drivers/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Node Drivers -weight: 2 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/node-drivers/ - - /rancher/v2.5/en/tasks/global-configuration/node-drivers/ - - /rancher/v2.x/en/admin-settings/drivers/node-drivers/ ---- - -Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. - -If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. - -#### Managing Node Drivers - ->**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: -> ->- [Administrator Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) ->- [Custom Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Node Drivers]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) role assigned. - -## Activating/Deactivating Node Drivers - -By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. - -2. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. - -## Adding Custom Node Drivers - -If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. - -1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. - -2. Click **Add Node Driver**. - -3. Complete the **Add Node Driver** form. Then click **Create**. - -### Developing your own node driver - -Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). diff --git a/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md b/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md deleted file mode 100644 index f377c0e626..0000000000 --- a/content/rancher/v2.5/en/admin-settings/k8s-metadata/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Upgrading Kubernetes without Upgrading Rancher -weight: 1120 -aliases: - - /rancher/v2.x/en/admin-settings/k8s-metadata/ ---- - -The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. 
This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. - -> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. - -Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. - -This table below describes the CRDs that are affected by the periodic data sync. - -> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. - -| Resource | Description | Rancher API URL | -|----------|-------------|-----------------| -| System Images | List of system images used to deploy Kubernetes through RKE. | `/v3/rkek8ssystemimages` | -| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | -| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | - -Administrators might configure the RKE metadata settings to do the following: - -- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher -- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub -- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher - -### Refresh Kubernetes Metadata - -The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) - -To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. - -You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. - -### Configuring the Metadata Synchronization - -> Only administrators can change these settings. - -The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. 
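As a rough illustration of the API route, the setting can be read and written with plain HTTP requests. The server URL and token below are placeholders, and the exact JSON shape of the setting's `value` field should be confirmed from the GET response on your own server before sending an update:

```bash
# Read the current RKE metadata configuration (placeholder URL and token).
curl -s -H "Authorization: Bearer token-xxxxx:SECRET" \
  https://rancher.example.com/v3/settings/rke-metadata-config

# Example update: disable the periodic refresh by setting the interval to 0.
# The value is assumed to be a JSON string; mirror whatever the GET returned.
curl -s -X PUT -H "Authorization: Bearer token-xxxxx:SECRET" \
  -H 'Content-Type: application/json' \
  -d '{"value":"{\"refresh-interval-minutes\":\"0\",\"url\":\"https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json\"}"}' \
  https://rancher.example.com/v3/settings/rke-metadata-config
```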
- -The way that the metadata is configured depends on the Rancher version. - -To edit the metadata config in Rancher, - -1. Go to the **Global** view and click the **Settings** tab. -1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** -1. You can optionally fill in the following parameters: - - - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. - - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. - -If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) - -However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. -### Air Gap Setups - -Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) - -If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. - -To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) - -After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. - -1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. -1. Download the OS specific image lists for Linux or Windows. -1. Download `rancher-images.txt`. -1. Prepare the private registry using the same steps during the [air gap install]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. - -**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. 
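As a rough sketch of the mirroring workflow described above, the metadata JSON can be downloaded once, copied to a web server inside the air gapped environment, and the `url` parameter pointed at that copy. The internal hostname and paths below are placeholders, and the source file should be the `data.json` that matches your Rancher version:

```bash
# Download the metadata JSON (use the branch/file matching your Rancher version).
curl -sLo data.json \
  https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json

# Copy it to a web server that the Rancher server nodes can reach.
scp data.json user@mirror.internal.example.com:/var/www/html/rke-metadata/data.json

# Finally, set the `url` parameter of rke-metadata-config to the mirrored copy:
#   http://mirror.internal.example.com/rke-metadata/data.json
```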
diff --git a/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md b/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md deleted file mode 100644 index 16746c823b..0000000000 --- a/content/rancher/v2.5/en/admin-settings/pod-security-policies/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Pod Security Policies -weight: 1135 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/pod-security-policies/ - - /rancher/v2.5/en/tasks/global-configuration/pod-security-policies/ - - /rancher/v2.5/en/tasks/clusters/adding-a-pod-security-policy/ - - /rancher/v2.x/en/admin-settings/pod-security-policies/ ---- - -_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of the pod specification (like root privileges). - -If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. - -- [How PSPs Work](#how-psps-work) -- [Default PSPs](#default-psps) - - [Restricted](#restricted) - - [Unrestricted](#unrestricted) -- [Creating PSPs](#creating-psps) - - [Requirements](#requirements) - - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) -- [Configuration](#configuration) - -# How PSPs Work - -You can assign PSPs at the cluster or project level. - -PSPs work through inheritance: - -- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. -- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. -- You can override the default PSP by assigning a different PSP directly to the project. - -Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked for compliance with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. - -Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). - -# Default PSPs - -Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. - -### Restricted - -This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: - -- Prevents pods from running as a privileged user and prevents escalation of privileges. -- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added). - -### Unrestricted - -This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. - -# Creating PSPs - -In Rancher, you can create a Pod Security Policy from the UI rather than writing a YAML file.
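The policy created through the UI still ends up as a standard Kubernetes `PodSecurityPolicy` object in the downstream cluster, so it can be inspected afterwards with `kubectl` if you want to verify what was applied. A minimal sketch, assuming a kubeconfig for the downstream cluster and a Kubernetes version that still includes the PSP API (it was removed in v1.25); the policy name is an example and may differ in your cluster:

```bash
# List the Pod Security Policies present in the cluster.
kubectl get podsecuritypolicies

# Inspect one of them in detail (the name here is an example; use a name
# from the output of the previous command).
kubectl describe psp restricted
```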
- -### Requirements - -Rancher can only assign PSPs for clusters that are [launched using RKE.]({{< baseurl >}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) - -You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) - -It is a best practice to set PSPs at the cluster level. - -We recommend adding PSPs during cluster and project creation instead of adding them to an existing cluster or project. - -### Creating PSPs in the Rancher UI - -1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. - - **Step Result:** The **Add Policy** form opens. - -2. Name the policy. - -3. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does. - - -# Configuration - -The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) - - - - - -[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems -[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces -[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/content/rancher/v2.5/en/admin-settings/rbac/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/_index.md deleted file mode 100644 index 5fd866fd8b..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rbac/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Role-Based Access Control (RBAC) -weight: 1120 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/users-permissions-roles/ - - /rancher/v2.x/en/admin-settings/rbac/ ---- - -Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/), users can either be local or external. - -After you configure external authentication, the users that display on the **Users** page change. - -- If you are logged in as a local user, only local users display. - -- If you are logged in as an external user, both external and local users display. - -## Users and Roles - -Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_ and _cluster and project roles_. - -- [Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/): - - Define user authorization outside the scope of any particular cluster. - -- [Cluster and Project Roles]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/): - - Define user authorization inside the specific cluster or project where they are assigned the role. - -Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes.
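Because enforcement happens in Kubernetes, the effective permissions granted through Rancher can be checked with ordinary RBAC tooling against the downstream cluster. A small sketch, assuming a kubeconfig for that cluster; the namespace and resource are examples:

```bash
# Show everything the current user is allowed to do in a namespace.
kubectl auth can-i --list --namespace default

# Check a single operation, for example creating Deployments.
kubectl auth can-i create deployments --namespace default

# Inspect the bindings that back those permissions.
kubectl get rolebindings --all-namespaces
kubectl get clusterrolebindings
```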
diff --git a/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md deleted file mode 100644 index 4996f82ab5..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/_index.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Cluster and Project Roles -weight: 1127 -aliases: - - /rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/ ---- - -Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. - -### Membership and Role Assignment - -The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. - -When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. - -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. - -### Cluster Roles - -_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. - -- **Cluster Owner:** - - These users have full control over the cluster and all resources in it. - -- **Cluster Member:** - - These users can view most cluster level resources and create new projects. - -#### Custom Cluster Roles - -Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. - -#### Cluster Role Reference - -The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. - -| Built-in Cluster Role | Owner | Member | -| ---------------------------------- | ------------- | --------------------------------- | -| Create Projects | ✓ | ✓ | -| Manage Cluster Backups             | ✓ | | -| Manage Cluster Catalogs | ✓ | | -| Manage Cluster Members | ✓ | | -| Manage Nodes | ✓ | | -| Manage Storage | ✓ | | -| View All Projects | ✓ | | -| View Cluster Catalogs | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | -| View Nodes | ✓ | ✓ | - -For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Note:** ->When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. 
These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Giving a Custom Cluster Role to a Cluster Member - -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/) cluster owners and admins can then assign those roles to cluster members. - -To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. - -To assign the role to a new cluster member, - -1. Go to the **Cluster** view, then go to the **Members** tab. -1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create.** - -**Result:** The member has the assigned role. - -To assign any custom role to an existing cluster member, - -1. Go to the member you want to give the role to. Click the **⋮ > View in API.** -1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** - -**Result:** The member has the assigned role. - -### Project Roles - -_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. - -- **Project Owner:** - - These users have full control over the project and all resources in it. - -- **Project Member:** - - These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. - - >**Note:** - > - >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. - -- **Read Only:** - - These users can view everything in the project but cannot create, update, or delete anything. - - >**Caveat:** - > - >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - -#### Custom Project Roles - -Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. - -#### Project Role Reference - -The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. 
- -| Built-in Project Role | Owner | Member | Read Only | -| ---------------------------------- | ------------- | ----------------------------- | ------------- | -| Manage Project Members | ✓ | | | -| Create Namespaces | ✓ | ✓ | | -| Manage Config Maps | ✓ | ✓ | | -| Manage Ingress | ✓ | ✓ | | -| Manage Project Catalogs | ✓ | | | -| Manage Secrets | ✓ | ✓ | | -| Manage Service Accounts | ✓ | ✓ | | -| Manage Services | ✓ | ✓ | | -| Manage Volumes | ✓ | ✓ | | -| Manage Workloads | ✓ | ✓ | | -| View Secrets | ✓ | ✓ | | -| View Config Maps | ✓ | ✓ | ✓ | -| View Ingress | ✓ | ✓ | ✓ | -| View Project Members | ✓ | ✓ | ✓ | -| View Project Catalogs | ✓ | ✓ | ✓ | -| View Service Accounts | ✓ | ✓ | ✓ | -| View Services | ✓ | ✓ | ✓ | -| View Volumes | ✓ | ✓ | ✓ | -| View Workloads | ✓ | ✓ | ✓ | - -> **Notes:** -> ->- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. ->- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. - -### Defining Custom Roles -As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. - -When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. - -### Default Cluster and Project Roles - -By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. - -There are two methods for changing default cluster/project roles: - -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - -- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. - - For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). 
- ->**Note:** -> ->- Although you can [lock]({{}}/rancher/v2.5/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. ->- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. - -### Configuring Default Roles for Cluster and Project Creators - -You can change the cluster or project role(s) that are automatically assigned to the creating user. - -1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. - -1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. - -1. Enable the role as default. -{{% accordion id="cluster" label="For Clusters" %}} -1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. -1. Click **Save**. -{{% /accordion %}} -{{% accordion id="project" label="For Projects" %}} -1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. -1. Click **Save**. -{{% /accordion %}} - -1. If you want to remove a default role, edit the permission and select **No** from the default roles option. - -**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. - -### Cluster Membership Revocation Behavior - -When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - -- Access the projects they hold membership in. -- Exercise any [individual project roles](#project-role-reference) they are assigned. - -If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md deleted file mode 100644 index a2b2795e18..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/_index.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Custom Roles -weight: 1128 -aliases: - - /rancher/v2.5/en/tasks/global-configuration/roles/ - - /rancher/v2.x/en/admin-settings/rbac/default-custom-roles/ ---- - -Within Rancher, _roles_ determine what actions a user can make within a cluster or project. - -Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. - -> It is possible for a custom role to enable privilege escalation. 
For details, see [this section.](#privilege-escalation) - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) -- [Creating a custom global role](#creating-a-custom-global-role) -- [Deleting a custom global role](#deleting-a-custom-global-role) -- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) -- [Privilege escalation](#privilege-escalation) - -# Prerequisites - -To complete the tasks on this page, one of the following permissions are required: - - - [Administrator Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/). - - [Custom Global Permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#custom-global-permissions) with the [Manage Roles]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) role assigned. - -# Creating A Custom Role for a Cluster or Project - -While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. - -The steps to add custom roles differ depending on the version of Rancher. - -1. From the **Global** view, select **Security > Roles** from the main menu. - -1. Select a tab to determine the scope of the roles you're adding. The tabs are: - - - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. - - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. - -1. Click **Add Cluster/Project Role.** - -1. **Name** the role. - -1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. - - > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. - -1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. - - > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - - You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. - -1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. - -1. Click **Create**. 
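Behind the UI steps above, Rancher stores each custom cluster or project role as a `RoleTemplate` object. The following is only a rough sketch for orientation, not an excerpt from this documentation: the field names (`context`, `rules`, `roleTemplateNames`, `clusterCreatorDefault`) are assumptions based on the `management.cattle.io/v3` schema and should be checked against your installation, for example with `kubectl get roletemplates -o yaml`.

```yaml
# Illustrative sketch of a custom role as a RoleTemplate object (assumed schema;
# verify against your Rancher version with `kubectl get roletemplates -o yaml`).
apiVersion: management.cattle.io/v3
kind: RoleTemplate
metadata:
  name: view-monitoring-resources      # hypothetical role name
displayName: View Monitoring Resources
context: cluster                       # matches the Cluster/Project tab chosen above
locked: false
clusterCreatorDefault: false           # "Cluster Creator Default" option in the UI
rules:                                 # specific grants ("Grant Resources" in the UI)
  - apiGroups:
      - monitoring.coreos.com
    resources:
      - prometheuses
      - servicemonitors
    verbs:
      - get
      - list
      - watch
roleTemplateNames:                     # roles this custom role inherits from
  - nodes-view                         # hypothetical built-in role ID
```

Because all grants are additive, the inherited role's broader permissions are combined with the specific rules above rather than narrowed by them.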
- -# Creating a Custom Global Role - -### Creating a Custom Global Role that Copies Rules from an Existing Role - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. - -The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. - -To create a custom global role based on an existing role, - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. Click **Save.** - -### Creating a Custom Global Role that Does Not Copy Rules from Another Role - -Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: - -1. Go to the **Global** view and click **Security > Roles.** -1. On the **Global** tab, click **Add Global Role.** -1. Enter a name for the role. -1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** -1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. - - > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. - -1. Click **Save.** - -# Deleting a Custom Global Role - -When deleting a custom global role, all global role bindings with this custom role are deleted. - -If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. - -Custom global roles can be deleted, but built-in roles cannot be deleted. - -To delete a custom global role, - -1. Go to the **Global** view and click **Security > Roles.** -2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** -3. Click **Delete.** - -# Assigning a Custom Global Role to a Group - -If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. - -When a user in the group logs in, they get the built-in Standard User global role by default. 
They will also get the permissions assigned to their groups. - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.5/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Custom** section, choose any custom global role that will be assigned to the group. -1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. - -# Privilege Escalation - -The `Configure Catalogs` custom permission is powerful and should be used with caution. When an admin assigns the `Configure Catalogs` permission to a standard user, it could result in privilege escalation in which the user could give themselves admin access to Rancher provisioned clusters. Anyone with this permission should be considered equivalent to an admin. - -The `Manager Users` role grants the ability to create, update, and delete _any_ user. This presents the risk of privilege escalation as even non-admin users with this role will be able to create, update, and delete admin users. Admins should take caution when assigning this role. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md deleted file mode 100644 index eef72464cf..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rbac/global-permissions/_index.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -title: Global Permissions -weight: 1126 -aliases: - - /rancher/v2.x/en/admin-settings/rbac/global-permissions/ ---- - -_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. - -Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. - -- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. - -- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. - -- **User-Base:** User-Base users have login-access only. - -You cannot update or delete the built-in Global Permissions. 
- -This section covers the following topics: - -- [Restricted Admin](#restricted-admin) -- [Global permission assignment](#global-permission-assignment) - - [Global permissions for new local users](#global-permissions-for-new-local-users) - - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) -- [Custom global permissions](#custom-global-permissions) - - [Custom global permissions reference](#custom-global-permissions-reference) - - [Configuring default global permissions for new users](#configuring-default-global-permissions) - - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) - - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - - [Refreshing group memberships](#refreshing-group-memberships) - -# Restricted Admin - -A new `restricted-admin` role was created in Rancher v2.5 in order to prevent privilege escalation from the local Rancher server Kubernetes cluster. This role has full administrator access to all downstream clusters managed by Rancher, but it does not have permission to alter the local Kubernetes cluster. - -The `restricted-admin` can create other `restricted-admin` users with an equal level of access. - -A new setting was added to Rancher to set the initial bootstrapped administrator to have the `restricted-admin` role. This applies to the first user created when the Rancher server is started for the first time. If the environment variable is set, then no global administrator would be created, and it would be impossible to create the global administrator through Rancher. - -To bootstrap Rancher with the `restricted-admin` as the initial user, the Rancher server should be started with the following environment variable: - -``` -CATTLE_RESTRICTED_DEFAULT_ADMIN=true -``` -### List of `restricted-admin` Permissions - -The permissions for the `restricted-admin` role differ based on the Rancher version. - -{{% tabs %}} -{{% tab "v2.5.7+" %}} - -The `restricted-admin` permissions are as follows: - -- Has full admin access to all downstream clusters managed by Rancher. -- Can add other users and assign them to clusters outside of the local cluster. -- Can create other restricted admins. - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.6" %}} - -The `restricted-admin` permissions are as follows: - -- Has full admin access to all downstream clusters managed by Rancher. -- Has very limited access to the local Kubernetes cluster. Can access Rancher custom resource definitions, but has no access to any Kubernetes native types. -- Can add other users and assign them to clusters outside of the local cluster. -- Can create other restricted admins. -- Cannot grant any permissions in the local cluster they don't currently have. (This is how Kubernetes normally operates) - -{{% /tab %}} -{{% /tabs %}} - -### Upgrading from Rancher with a Hidden Local Cluster - -Before Rancher v2.5, it was possible to run the Rancher server using this flag to hide the local cluster: - -``` ---add-local=false -``` - -You will need to drop this flag when upgrading to Rancher v2.5. Otherwise, Rancher will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. - -### Changing Global Administrators to Restricted Admins - -If Rancher already has a global administrator, they should change all global administrators over to the new `restricted-admin` role. 
- -This can be done through **Security > Users** and moving any Administrator role over to Restricted Administrator. - -Signed-in users can change themselves over to the `restricted-admin` if they wish, but they should only do that as the last step, otherwise they won't have the permissions to do so. - -# Global Permission Assignment - -Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. - -### Global Permissions for New Local Users - -When you create a new local user, you assign them a global permission as you complete the **Add User** form. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) - -### Global Permissions for Users with External Authentication - -When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. - -To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) - -Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) - -You can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. - -# Custom Global Permissions - -Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. - -When a user from an [external authentication source]({{}}/rancher/v2.5/en/admin-settings/authentication/) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. - -However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. - -The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. 
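As a hedged illustration of that modularity (not an excerpt from these docs), a custom set of global permissions can be thought of as a `GlobalRole` object with its own rules and a flag that marks it as a default for new users. The `newUserDefault` field and the rule contents below are assumptions based on the `management.cattle.io/v3` schema; confirm them on your installation, for example with `kubectl get globalroles -o yaml`.

```yaml
# Illustrative sketch only (assumed management.cattle.io/v3 schema); verify with:
#   kubectl get globalroles.management.cattle.io -o yaml
apiVersion: management.cattle.io/v3
kind: GlobalRole
metadata:
  name: clusters-create-only           # hypothetical custom global role
displayName: Create Clusters Only
newUserDefault: false                  # "New User Default" column on the Roles page
rules:                                 # hypothetical subset of global permissions
  - apiGroups:
      - management.cattle.io
    resources:
      - clusters
    verbs:
      - create                         # roughly the "Create Clusters" permission
```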
- -Administrators can enforce custom global permissions in multiple ways: - -- [Changing the default permissions for new users](#configuring-default-global-permissions) -- [Configuring global permissions for individual users](#configuring-global-permissions-for-individual-users) -- [Configuring global permissions for groups](#configuring-global-permissions-for-groups) - -### Custom Global Permissions Reference - -The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. - -| Custom Global Permission | Administrator | Standard User | User-Base | -| ---------------------------------- | ------------- | ------------- |-----------| -| Create Clusters | ✓ | ✓ | | -| Create RKE Templates | ✓ | ✓ | | -| Manage Authentication | ✓ | | | -| Manage Catalogs | ✓ | | | -| Manage Cluster Drivers | ✓ | | | -| Manage Node Drivers | ✓ | | | -| Manage PodSecurityPolicy Templates | ✓ | | | -| Manage Roles | ✓ | | | -| Manage Settings | ✓ | | | -| Manage Users | ✓ | | | -| Use Catalog Templates | ✓ | ✓ | | -| User Base\* (Basic log-in access) | ✓ | ✓ | | - -> \*This role has two names: -> -> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. -> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. - -For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. - -> **Notes:** -> -> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. -> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Configuring Default Global Permissions - -If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. - -> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. - -To change the default global permissions that are assigned to external users upon their first log in, follow these steps: - -1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. - -1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. - -1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. - -1. 
If you want to remove a default permission, edit the permission and select **No** from **New User Default**. - -**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. - -### Configuring Global Permissions for Individual Users - -To configure permission for a user, - -1. Go to the **Users** tab. - -1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** - -1. In the **Global Permissions** section, click **Custom.** - -1. Check the boxes for each subset of permissions you want the user to have access to. - -1. Click **Save.** - -> **Result:** The user's global permissions have been updated. - -### Configuring Global Permissions for Groups - -If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. - -After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. - -For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) - -For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) - -If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. - -> **Prerequisites:** You can only assign a global role to a group if: -> -> * You have set up an [external authentication provider]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-vs-local-authentication) -> * The external authentication provider supports [user groups]({{}}/rancher/v2.5/en/admin-settings/authentication/user-groups/) -> * You have already set up at least one user group with the authentication provider - -To assign a custom global role to a group, follow these steps: - -1. From the **Global** view, go to **Security > Groups.** -1. Click **Assign Global Role.** -1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. -1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. -1. Click **Create.** - -**Result:** The custom global role will take effect when the users in the group log into Rancher. - -### Refreshing Group Memberships - -When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. 
- -To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. - -An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. - -To refresh group memberships, - -1. From the **Global** view, click **Security > Users.** -1. Click **Refresh Group Memberships.** - -**Result:** Any changes to the group members' permissions will take effect. diff --git a/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md b/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md deleted file mode 100644 index 85e3f58662..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rbac/locked-roles/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Locked Roles -weight: 1129 -aliases: - - /rancher/v2.x/en/admin-settings/rbac/locked-roles/ ---- - -You can set roles to a status of `locked`. Locking a role prevents it from being assigned to users in the future. - -Locked roles: - -- Cannot be assigned to users who don't already have it assigned. -- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. -- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. - - **Example:** Let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. - - To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects, and use this new custom role when adding users to a cluster. - -Roles can be locked by the following users: - -- Any user assigned the `Administrator` global permission. -- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. - - -## Locking/Unlocking Roles - -If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. - -You can lock roles in two contexts: - -- When you're [adding a custom role]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/). -- When you're editing an existing role (see below). - -1. From the **Global** view, select **Security** > **Roles**. - -2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. - -3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md deleted file mode 100644 index 7459bcbf9a..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/_index.md +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: RKE Templates -weight: 7010 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/ ---- - -RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. - -RKE is the [Rancher Kubernetes Engine,]({{}}/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. - -With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters.
When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. - -RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. - -Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. - -If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. - -You can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. - -The core features of RKE templates allow DevOps and security teams to: - -- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices -- Prevent less technical users from making uninformed choices when provisioning clusters -- Share different templates with different sets of users and groups -- Delegate ownership of templates to users who are trusted to make changes to them -- Control which users can create templates -- Require users to create clusters from a template - -# Configurable Settings - -RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: - -- Cloud provider options -- Pod security options -- Network providers -- Ingress controllers -- Network security configuration -- Network plugins -- Private registry URL and credentials -- Add-ons -- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services - -The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. - -# Scope of RKE Templates - -RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. - -RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware]({{}}/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware). - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
- -The settings of an existing cluster can be [saved as an RKE template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template), and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. - - -# Example Scenarios -When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. - -These [example scenarios]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios) describe how an organization could use templates to standardize cluster creation. - -Some of the example scenarios include the following: - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/#allowing-other-users-to-control-and-share-a-template) - -# Template Management - -When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. - -Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. - -RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. 
Then a cluster that was created with the older version of the template can be upgraded to the new template revision. - -In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. - -For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. - -The documents in this section explain the details of RKE template management: - -- [Getting permission to create templates]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/) -- [Creating and revising templates]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/) -- [Enforcing template settings](./enforcement/#requiring-new-clusters-to-use-an-rke-template) -- [Overriding template settings]({{}}/rancher/v2.5/en/admin-settings/rke-templates/overrides/) -- [Sharing templates with cluster creators]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) -- [Sharing ownership of a template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -An [example YAML configuration file for a template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-yaml) is provided for reference. - -# Applying Templates - -You can [create a cluster from a template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing) - -If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) - -RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. - -You can [save the configuration of an existing cluster as an RKE template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -# Standardizing Hardware - -RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools]({{}}/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware). - -# YAML Customization - -If you define an RKE template as a YAML file, you can modify this [example RKE template YAML]({{}}/rancher/v2.5/en/admin-settings/rke-templates/example-yaml). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. 
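As a minimal sketch of that nesting (the option values are placeholders, not recommendations), an RKE template revision edited as YAML looks roughly like this, with the usual `cluster.yml` options moved under `rancher_kubernetes_engine_config`:

```yaml
# Minimal sketch of the nesting only; see the RKE cluster configuration
# reference linked below for the real options and values.
docker_root_dir: /var/lib/docker        # Rancher-level cluster option (placeholder)
enable_network_policy: false
rancher_kubernetes_engine_config:       # RKE cluster.yml options are nested here
  kubernetes_version: v1.17.17-rancher1-1   # placeholder version string
  network:
    plugin: canal
  services:
    kube-api:
      pod_security_policy: false
    kubelet:
      extra_args:
        max-pods: "250"                 # example kubelet override
```

The same rule applies to the add-on directives described later on this page: `addons` and `addons_include` also live under `rancher_kubernetes_engine_config`.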
- -The RKE documentation also has [annotated]({{}}/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. - -For guidance on available options, refer to the RKE documentation on [cluster configuration.]({{}}/rke/latest/en/config-options/) - -### Add-ons - -The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file]({{}}/rke/latest/en/config-options/add-ons/). - -The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. - -Some things you could do with add-ons include: - -- Install applications on the Kubernetes cluster after it starts -- Install plugins on nodes that are deployed with a Kubernetes daemonset -- Automatically set up namespaces, service accounts, or role binding - -The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md deleted file mode 100644 index 5a1bd48020..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Applying Templates -weight: 50 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/applying-templates/ ---- - -You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing) - -RKE templates can be applied to new clusters. - -You can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. - -You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -This section covers the following topics: - -- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) -- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) -- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) - -### Creating a Cluster from an RKE Template - -To add a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters) using an RKE template, use these steps: - -1. From the **Global** view, go to the **Clusters** tab. -1. Click **Add Cluster** and choose the infrastructure provider. -1. Provide the cluster name and node template details as usual. -1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** -1. Choose an existing template and revision from the dropdown menu. -1. 
Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. -1. Click **Save** to launch the cluster. - -### Updating a Cluster Created with an RKE Template - -When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. - -- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) -- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. - -If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. - -An existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. - -> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. - -### Converting an Existing Cluster to Use an RKE Template - -This section describes how to create an RKE template from an existing cluster. - -RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#upgrading-a-cluster-to-use-a-new-template-revision) - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. -- New clusters can be [created from the new template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md deleted file mode 100644 index d3966cab1c..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Creating and Revising Templates -weight: 32 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/ ---- - -This section describes how to manage RKE templates and revisions. 
You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** - -Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. - -Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. - -The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. - -This section covers the following topics: - -- [Prerequisites](#prerequisites) -- [Creating a template](#creating-a-template) -- [Updating a template](#updating-a-template) -- [Deleting a template](#deleting-a-template) -- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) -- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) -- [Disabling a template revision](#disabling-a-template-revision) -- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) -- [Setting a template revision as default](#setting-a-template-revision-as-default) -- [Deleting a template revision](#deleting-a-template-revision) -- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) -- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) - -### Prerequisites - -You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions) - -You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -### Creating a Template - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Click **Add Template.** -1. Provide a name for the template. An auto-generated name is already provided for the template's first version, which is created along with this template. -1. Optional: Share the template with other users or groups by [adding them as members.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. -1. Follow the on-screen form to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. - -**Result:** An RKE template with one revision is configured.
You can use this RKE template revision later when you [provision a Rancher-launched cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. - -### Updating a Template - -When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. - -You can't edit individual revisions. Since you can't edit individual revisions of a template, in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) - -When new template revisions are created, clusters using an older revision of the template are unaffected. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to edit and click the **⋮ > Edit.** -1. Edit the required information and click **Save.** -1. Optional: You can change the default revision of this template and also change who it is shared with. - -**Result:** The template is updated. To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) - -### Deleting a Template - -When you no longer use an RKE template for any of your clusters, you can delete it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** -1. Confirm the deletion when prompted. - -**Result:** The template is deleted. - -### Creating a Revision Based on the Default Revision - -You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** -1. Complete the rest of the form to create a new revision. - -**Result:** The RKE template revision is cloned and configured. - -### Creating a Revision Based on a Cloned Revision - -When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** -1. Complete the rest of the form. - -**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. - -### Disabling a Template Revision - -When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. - -You can disable the revision if it is not being used by any cluster. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to disable. Then select **⋮ > Disable.** - -**Result:** The RKE template revision cannot be used to create a new cluster. 
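If you prefer to inspect revisions outside the UI, the sketch below shows roughly how a disabled revision might appear as a Kubernetes object. This assumes revisions are stored as `ClusterTemplateRevision` resources in the `cattle-global-data` namespace with an `enabled` flag; these names are assumptions, so verify the actual schema on your installation before relying on it.

```yaml
# Hedged sketch of a disabled revision (assumed schema); check the real objects with:
#   kubectl -n cattle-global-data get clustertemplaterevisions -o yaml
apiVersion: management.cattle.io/v3
kind: ClusterTemplateRevision
metadata:
  name: ctr-example                     # hypothetical generated name
  namespace: cattle-global-data
spec:
  displayName: hardened-v2              # hypothetical revision name
  clusterTemplateName: cattle-global-data:ct-example   # hypothetical owning template
  enabled: false                        # disabled: cannot be used to create new clusters
```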
- -### Re-enabling a Disabled Template Revision - -If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** - -**Result:** The RKE template revision can be used to create a new cluster. - -### Setting a Template Revision as Default - -When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. - -To set an RKE template revision as default, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** - -**Result:** The RKE template revision will be used as the default option when clusters are created with the template. - -### Deleting a Template Revision - -You can delete all revisions of a template except for the default revision. - -To permanently delete a revision, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** - -**Result:** The RKE template revision is deleted. - -### Upgrading a Cluster to Use a New Template Revision - -> This section assumes that you already have a cluster that [has an RKE template applied.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates) -> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. - -To upgrade a cluster to use a new template revision, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** -1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. -1. Click **Save.** - -**Result:** The cluster is upgraded to use the settings defined in the new template revision. - -### Exporting a Running Cluster to a New RKE Template and Revision - -You can save an existing cluster's settings as an RKE template. - -This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] - -To convert an existing cluster to use an RKE template, - -1. From the **Global** view in Rancher, click the **Clusters** tab. -1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** -1. Enter a name for the template in the form that appears, and click **Create.** - -**Results:** - -- A new RKE template is created. -- The cluster is converted to use the new template. 
-- New clusters can be [created from the new template and revision.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md deleted file mode 100644 index d5f3ea341c..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Template Creator Permissions -weight: 10 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/ ---- - -Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. - -For more information on administrator permissions, refer to the [documentation on global permissions]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/). - -# Giving Users Permission to Create Templates - -Templates can only be created by users who have the global permission **Create RKE Templates.** - -Administrators have the global permission to create templates, and only administrators can give that permission to other users. - -For information on allowing users to modify existing templates, refer to [Sharing Templates.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing) - -Administrators can give users permission to create RKE templates in two ways: - -- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) -- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) - -### Allowing a User to Create Templates - -An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** - -**Result:** The user has permission to create RKE templates. - -### Allowing New Users to Create Templates by Default - -Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. - -1. From the **Global** view, click **Security > Roles.** -1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. -1. Select the option **Yes: Default role for new users** and click **Save.** - -**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. - -### Revoking Permission to Create Templates - -Administrators can remove a user's permission to create templates with the following steps: - -1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** -1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. -1. Click **Save.** - -**Result:** The user cannot create RKE templates. 
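The same grant can also be expressed declaratively as a global role binding. The sketch below is an assumption-heavy illustration: the `GlobalRoleBinding` fields and the `clustertemplates-create` role ID reflect how Rancher typically names them, but you should list the real IDs first (for example with `kubectl get globalroles`) rather than taking these values as given.

```yaml
# Illustrative only: binding the "Create RKE Templates" global role to a user.
# The globalRoleName and userName are assumptions/placeholders; confirm with:
#   kubectl get globalroles.management.cattle.io
apiVersion: management.cattle.io/v3
kind: GlobalRoleBinding
metadata:
  name: grant-create-rke-templates      # hypothetical binding name
globalRoleName: clustertemplates-create # assumed ID of the "Create RKE Templates" role
userName: u-abc123                      # hypothetical Rancher user ID
```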
\ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md deleted file mode 100644 index 99828c0eff..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/enforcement/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Template Enforcement -weight: 32 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/enforcement/ ---- - -This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. - -By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, - -- Only an administrator has the ability to create clusters without a template. -- All standard users must use an RKE template to create a new cluster. -- Standard users cannot create a cluster without using a template. - -Users can only create new templates if the administrator [gives them permission.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creator-permissions/#allowing-a-user-to-create-templates) - -After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision]({{}}/rancher/v2.5/en/admin-settings/rke-templates/applying-templates/#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) - -# Requiring New Clusters to Use an RKE Template - -You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) will use the Kubernetes and/or Rancher settings that are vetted by administrators. - -To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **True** and click **Save.** - -**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. - -# Disabling RKE Template Enforcement - -To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: - -1. From the **Global** view, click the **Settings** tab. -1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** -1. Set the value to **False** and click **Save.** - -**Result:** When clusters are provisioned by Rancher, they don't need to use a template. 
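The `cluster-template-enforcement` value can also be changed outside the UI. The sketch below is a hedged example, not an official procedure: it assumes `kubectl` access to the local cluster where Rancher is installed and that the setting is exposed as a `settings.management.cattle.io` resource, so verify the resource name in your own installation before relying on it.

```bash
# Hypothetical sketch: toggle RKE template enforcement from the CLI.
# Assumes kubectl is pointed at the local (Rancher management) cluster
# and that the setting is exposed as settings.management.cattle.io.

# Inspect the current value
kubectl get settings.management.cattle.io cluster-template-enforcement -o yaml

# Require RKE templates for new clusters ("false" disables enforcement again)
kubectl patch settings.management.cattle.io cluster-template-enforcement \
  --type merge -p '{"value":"true"}'
```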
diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md deleted file mode 100644 index 5bb86bdda4..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/example-scenarios/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Example Scenarios -weight: 5 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/ ---- - -These example scenarios describe how an organization could use templates to standardize cluster creation. - -- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. -- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. -- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. -- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) - - -# Enforcing a Template Setting for Everyone - -Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. - -1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. -1. The administrator makes the template public. -1. The administrator turns on template enforcement. - -**Results:** - -- All Rancher users in the organization have access to the template. -- All new clusters created by [standard users]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. -- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. - -In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. - -# Templates for Basic and Advanced Users - -Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. - -1. 
First, an administrator turns on [RKE template enforcement.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/enforcement/#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) in Rancher will need to use an RKE template when they create a cluster. -1. The administrator then creates two templates: - - - One template for basic users, with almost every option specified except for access keys - - One template for advanced users, which has **Allow User Override** turned on for most or all options - -1. The administrator shares the advanced template with only the advanced users. -1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. - -**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. - -# Updating Templates and Clusters Created with Them - -Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. - -In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. - -The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: - -- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. -- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. -- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but major version upgrades will not be allowed. - -# Allowing Other Users to Control and Share a Template - -Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. - -Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template.
- -To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/#sharing-ownership-of-templates) - -The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: - -- [Revise the template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#updating-a-template) when the best practices change -- [Disable outdated revisions]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#disabling-a-template-revision) of the template so that no new clusters can be created with it -- [Delete the whole template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#deleting-a-template) if the organization wants to go in a different direction -- [Set a certain revision as default]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising/#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. -- [Share the template]({{}}/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md deleted file mode 100644 index 333396989e..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/overrides/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Overriding Template Settings -weight: 33 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/overrides/ ---- - -When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** - -After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. - -When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. 
- -The **Allow User Override** model of the RKE template is useful for situations such as: - -- Administrators know that some settings will need the flexibility to be frequently updated over time -- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md deleted file mode 100644 index 2d16547296..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: RKE Templates and Infrastructure -weight: 90 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/ ---- - -In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. - -Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. - -If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. - -### Node Templates - -[Node templates]({{}}/rancher/v2.5/en/user-settings/node-templates) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. - -### Terraform - -Terraform is a server provisioning tool. It uses an infrastructure-as-code approach that lets you create almost every aspect of your infrastructure with Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. - -This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. - -Terraform allows you to: - -- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates -- Leverage catalog apps and multi-cluster apps -- Codify infrastructure across many platforms, including Rancher and major cloud providers -- Commit infrastructure-as-code to version control -- Easily repeat configuration and setup of infrastructure -- Incorporate infrastructure changes into standard development practices -- Prevent configuration drift, in which some servers become configured differently than others - -# How Does Terraform Work? - -Terraform is written in files with the extension `.tf`.
It is written in HashiCorp Configuration Language, which is a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. - -To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. - -Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. - -When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. When you run `terraform apply`, the changes are deployed. - -# Tips for Working with Terraform - -- There are examples of how to provision most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) - -- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. - -- You can also modify authentication settings in the Terraform provider. - -- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. - -- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. - -# Tip for Creating CIS Benchmark Compliant Clusters - -This section describes one way that you can make security and compliance-related config files standard in your clusters. - -When you create a [CIS benchmark compliant cluster,]({{}}/rancher/v2.5/en/security/) you have an encryption config file and an audit log config file. - -Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. - -Then you would make sure that the `kube-api-server` flags in your RKE template use your CIS-compliant config files. - -In this way, you can create flags that comply with the CIS benchmark.
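As a rough illustration, the `kube-api` section of the cluster configuration behind an RKE template might reference those files along the lines of the sketch below. The file paths and mount locations are assumptions made for illustration; use the locations your provisioning system actually writes and the flags required by your benchmark version.

```yaml
# Hypothetical sketch of the kube-api section of an RKE cluster config,
# assuming the provisioning system wrote the CIS-related files under
# /etc/kubernetes/ on each control plane node.
services:
  kube-api:
    extra_args:
      # Point the kube-apiserver flags at the compliance config files
      encryption-provider-config: /etc/kubernetes/encryption.yaml
      audit-policy-file: /etc/kubernetes/audit-policy.yaml
      audit-log-path: /var/log/kube-audit/audit-log.json
    extra_binds:
      # Mount the host paths into the kube-apiserver container
      - "/etc/kubernetes/encryption.yaml:/etc/kubernetes/encryption.yaml"
      - "/etc/kubernetes/audit-policy.yaml:/etc/kubernetes/audit-policy.yaml"
      - "/var/log/kube-audit:/var/log/kube-audit"
```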
- -# Resources - -- [Terraform documentation](https://www.terraform.io/docs/) -- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) -- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md deleted file mode 100644 index ab995b0fa3..0000000000 --- a/content/rancher/v2.5/en/admin-settings/rke-templates/template-access-and-sharing/_index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -title: Access and Sharing -weight: 31 -aliases: - - /rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/ ---- - -If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. - -Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. - -When you share a template, each user can have one of two access levels: - -- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. -- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. - -If you create a template, you automatically become an owner of that template. - -If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.]({{}}/rancher/v2.5/en/admin-settings/rke-templates/creating-and-revising) - -There are several ways to share templates: - -- Add users to a new RKE template during template creation -- Add users to an existing RKE template -- Make the RKE template public, sharing it with all users in the Rancher setup -- Share template ownership with users who are trusted to modify the template - -### Sharing Templates with Specific Users or Groups - -To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. In the **Share Template** section, click on **Add Member**. -1. Search in the **Name** field for the user or group you want to share the template with. -1. Choose the **User** access type. -1. Click **Save.** - -**Result:** The user or group can create clusters using the template. - -### Sharing Templates with All Users - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** - -**Result:** All users in the Rancher setup can create clusters using the template. 
- -### Sharing Ownership of Templates - -If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. - -In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. - -To give Owner access to a user or group, - -1. From the **Global** view, click **Tools > RKE Templates.** -1. Go to the RKE template that you want to share and click the **⋮ > Edit.** -1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. -1. In the **Access Type** field, click **Owner.** -1. Click **Save.** - -**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/content/rancher/v2.5/en/api/_index.md b/content/rancher/v2.5/en/api/_index.md deleted file mode 100644 index a36f5f9fea..0000000000 --- a/content/rancher/v2.5/en/api/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: API -weight: 24 -aliases: - - /rancher/v2.x/en/api/ ---- - -## How to use the API - -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys]({{}}/rancher/v2.5/en/user-settings/api-keys/). - -## Authentication - -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.5/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.5/en/api/api-tokens). - -## Making requests - -The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). - -- Every type has a Schema which describes: - - The URL to get to the collection of this type of resources - - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. - - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). - - Every field that filtering is allowed on - - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. - - -- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. 
From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. - -- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. - -- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. - -- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. - -- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. - -- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. - -- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). - -## Filtering - -Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. - -## Sorting - -Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. - -## Pagination - -API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. - -## Capturing Rancher API Calls - -You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster: - -1. In the Rancher UI, go to **Cluster Management** and click **Create.** -1. Click one of the cluster types. This example uses Digital Ocean. -1. Fill out the form with a cluster name and node template, but don't click **Create**. -1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.** -1. 
In the developer tools, click the **Network** tab. -1. On the **Network** tab, make sure **Fetch/XHR** is selected. -1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`. -1. Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.** -1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials. \ No newline at end of file diff --git a/content/rancher/v2.5/en/backups/_index.md b/content/rancher/v2.5/en/backups/_index.md deleted file mode 100644 index 27c1c01cda..0000000000 --- a/content/rancher/v2.5/en/backups/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Backups and Disaster Recovery -weight: 5 -aliases: - - /rancher/v2.5/en/backups/v2.5 - - /rancher/v2.x/en/backups/v2.5/ ---- - -In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. - -As of Rancher v2.5, the `rancher-backup` operator is used to backup and restore Rancher. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.5/charts/rancher-backup) - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) - - [Backup and Restore for Rancher v2.5 installed with Docker](#backup-and-restore-for-rancher-v2-5-installed-with-docker) -- [How Backups and Restores Work](#how-backups-and-restores-work) -- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) - - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) - - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) - - [RBAC](#rbac) -- [Backing up Rancher](#backing-up-rancher) -- [Restoring Rancher](#restoring-rancher) -- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) -- [Default Storage Location Configuration](#default-storage-location-configuration) - - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) - -# Changes in Rancher v2.5 - -The new `rancher-backup` operator allows Rancher to be backed up and restored on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. - -Previously, the way that cluster data was backed up depended on the type of Kubernetes cluster that was used. - -In Rancher v2.4, it was only supported to install Rancher on two types of Kubernetes clusters: an RKE cluster, or a K3s cluster with an external database. If Rancher was installed on an RKE cluster, RKE would be used to take a snapshot of the etcd database and restore the cluster. If Rancher was installed on a K3s cluster with an external database, the database would need to be backed up and restored using the upstream documentation for the database. 
- -In Rancher v2.5, it is now supported to install Rancher on hosted Kubernetes clusters, such as Amazon EKS clusters, which do not expose etcd to a degree that would allow snapshots to be created by an external tool. etcd doesn't need to be exposed for `rancher-backup` to work, because the operator gathers resources by making calls to `kube-apiserver`. - -### Backup and Restore for Rancher v2.5 installed with Docker - -For Rancher installed with Docker, refer to the same steps used up to v2.5 for [backups](./docker-installs/docker-backups) and [restores.](./docker-installs/docker-restores) - -# How Backups and Restores Work - -The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. The following cluster-scoped custom resource definitions are added to the cluster: - -- `backups.resources.cattle.io` -- `resourcesets.resources.cattle.io` -- `restores.resources.cattle.io` - -The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. - -When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. - -The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. - -When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. - -The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. - -# Installing the rancher-backup Operator - -The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin-only feature and available only for the **local** cluster. (*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) - ->**NOTE:** There are two known issues in Fleet that occur after performing a restoration using the backup-restore-operator: Fleet agents are inoperable and clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet/#troubleshooting) for workarounds. - -### Installing rancher-backup with the Rancher UI - -1. In the Rancher UI's Cluster Manager, choose the cluster named **local**. -1. In the upper-right corner, click **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-backup` operator. -1. Optional: Configure the default storage location. For help, refer to the [configuration section.](./configuration/storage-config) - -**Result:** The `rancher-backup` operator is installed.
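If you want to confirm the installation from the command line as well, a quick check along these lines can be used. This is only a suggested verification step; it assumes the chart was deployed into the `cattle-resources-system` namespace and that you have `kubectl` access to the local cluster.

```bash
# Optional verification: the operator should appear as a Deployment with a
# running pod. Assumes the default cattle-resources-system namespace.
kubectl get deployments -n cattle-resources-system
kubectl get pods -n cattle-resources-system
```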
- -From the **Cluster Explorer,** you can see the `rancher-backup` operator listed under **Deployments.** - -To configure the backup app in Rancher, click **Cluster Explorer** in the upper left corner and click **Rancher Backups.** - -### Installing rancher-backup with the Helm CLI - -Install the backup app as a Helm chart: - -``` -helm repo add rancher-charts https://charts.rancher.io -helm repo update -helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace -helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system -``` - -### RBAC - -Only the rancher admins and the local cluster’s cluster-owner can: - -* Install the Chart -* See the navigation links for Backup and Restore CRDs -* Perform a backup or restore by creating a Backup CR and Restore CR respectively -* List backups/restores performed so far - -# Backing up Rancher - -A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](./back-up-rancher) - -# Restoring Rancher - -A restore is performed by creating a Restore custom resource. For a tutorial, refer to [this page.](./restoring-rancher) - -# Migrating Rancher to a New Cluster - -A migration is performed by following [these steps.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -# Default Storage Location Configuration - -Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. - -For information on configuring these options, refer to [this page.](./configuration/storage-config) - -### Example values.yaml for the rancher-backup Helm Chart - -The example [values.yaml file](./configuration/storage-config/#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. diff --git a/content/rancher/v2.5/en/backups/back-up-rancher/_index.md b/content/rancher/v2.5/en/backups/back-up-rancher/_index.md deleted file mode 100644 index 47f57c96e2..0000000000 --- a/content/rancher/v2.5/en/backups/back-up-rancher/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Backing up Rancher -weight: 1 -aliases: - - /rancher/v2.5/en/backups/v2.5/back-up-rancher - - /rancher/v2.x/en/backups/ - - /rancher/v2.x/en/backups/v2.5/back-up-rancher/ ---- - -In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer to the instructions for [single node backups]({{}}/rancher/v2.5/en/backups/v2.5/docker-installs/docker-backups). - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -Note that the rancher-backup operator version 1.x.x is for Rancher v2.5.x. - -> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. The Kubernetes version should also be considered when restoring a backup, since the supported apiVersion in the cluster and in the backup file could be different. - -### Prerequisites - -Rancher version must be v2.5.0 and up - -### 1. Install the `rancher-backup` operator - -The backup storage location is an operator-level setting, so it needs to be configured when `rancher-backup` is installed or upgraded. 
- -Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. - -1. In the Rancher UI, go to the **Cluster Explorer** view for the local cluster. -1. Click **Apps.** -1. Click **Rancher Backups.** -1. Configure the default storage location. For help, refer to the [storage configuration section.](../configuration/storage-config) - ->**NOTE:** There are two known issues in Fleet that occur after performing a restoration using the backup-restore-operator: Fleet agents are inoperable and clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet/#troubleshooting) for workarounds. - -### 2. Perform a Backup - -To perform a backup, a custom resource of type Backup must be created. - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** -1. Click **Backup.** -1. Create the Backup with the form, or with the YAML editor. -1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) -1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3. The app uses the `credentialSecretNamespace` value to determine where to look for the S3 backup secret: - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Backup - metadata: - name: s3-recurring-backup - spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 10 - ``` - - > **Note:** When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` - - For help configuring the Backup, refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) - - > **Important:** The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. -1. Click **Create.** - -**Result:** The backup file is created in the storage location configured in the Backup custom resource. The name of this file is used when performing a restore. 
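For comparison with the recurring S3 example above, a minimal sketch of a one-time Backup that relies on the default storage location configured at the operator level is shown below. The resource name is arbitrary; the only required wiring is `resourceSetName: rancher-resource-set`.

```yaml
# Minimal sketch: a one-time, unencrypted backup written to the default
# storage location that was configured when rancher-backup was installed.
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: one-time-backup                     # arbitrary name
spec:
  resourceSetName: rancher-resource-set     # must reference the predefined ResourceSet
```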
- diff --git a/content/rancher/v2.5/en/backups/configuration/_index.md b/content/rancher/v2.5/en/backups/configuration/_index.md deleted file mode 100644 index a7922993be..0000000000 --- a/content/rancher/v2.5/en/backups/configuration/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Rancher Backup Configuration Reference -shortTitle: Configuration -weight: 4 -aliases: - - /rancher/v2.5/en/backups/v2.5/configuration - - /rancher/v2.x/en/backups/v2.5/configuration/ ---- - -- [Backup configuration](./backup-config) -- [Restore configuration](./restore-config) -- [Storage location configuration](./storage-config) -- [Example Backup and Restore Custom Resources](../examples) \ No newline at end of file diff --git a/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md b/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md deleted file mode 100644 index b162250b9e..0000000000 --- a/content/rancher/v2.5/en/backups/configuration/backup-config/_index.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Backup Configuration -shortTitle: Backup -weight: 1 -aliases: - - /rancher/v2.5/en/backups/v2.5/configuration/backup-config - - /rancher/v2.x/en/backups/v2.5/configuration/backup-config/ ---- - -The Backup Create page lets you configure a schedule, enable encryption and specify the storage location for your backups. - -{{< img "/img/rancher/backup_restore/backup/backup.png" "">}} - -- [Schedule](#schedule) -- [Encryption](#encryption) -- [Storage Location](#storage-location) - - [S3](#s3) - - [Example S3 Storage Configuration](#example-s3-storage-configuration) - - [Example MinIO Configuration](#example-minio-configuration) - - [Example credentialSecret](#example-credentialsecret) - - [IAM Permissions for EC2 Nodes to Access S3](#iam-permissions-for-ec2-nodes-to-access-s3) -- [Examples](#examples) - - -# Schedule - -Select the first option to perform a one-time backup, or select the second option to schedule recurring backups. Selecting **Recurring Backups** lets you configure following two fields: - -- **Schedule**: This field accepts - - Standard [cron expressions](https://en.wikipedia.org/wiki/Cron), such as `"0 * * * *"` - - Descriptors, such as `"@midnight"` or `"@every 1h30m"` -- **Retention Count**: This value specifies how many backup files must be retained. If files exceed the given retentionCount, the oldest files will be deleted. The default value is 10. - -{{< img "/img/rancher/backup_restore/backup/schedule.png" "">}} - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `schedule` | Provide the cron string for scheduling recurring backups. | -| `retentionCount` | Provide the number of backup files to be retained. | - -# Encryption - -The rancher-backup gathers resources by making calls to the kube-apiserver. Objects returned by apiserver are decrypted, so even if [encryption At rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) is enabled, even the encrypted objects gathered by the backup will be in plaintext. - -To avoid storing them in plaintext, you can use the same encryptionConfig file that was used for at-rest encryption, to encrypt certain resources in your backup. - -> **Important:** You must save the encryptionConfig file, because it won’t be saved by the rancher-backup operator. -The same encryptionFile needs to be used when performing a restore. 
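For reference, a minimal `EncryptionConfiguration` along the lines of the upstream Kubernetes sample might look like the sketch below. The key shown is a placeholder, not a working value; generate your own base64-encoded 32-byte secret and treat the whole file as sensitive.

```yaml
# Minimal sketch of an EncryptionConfiguration, based on the Kubernetes
# encryption-at-rest example. Replace <BASE64_ENCODED_32_BYTE_KEY> with your
# own key and keep this file safe; it is needed again at restore time.
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <BASE64_ENCODED_32_BYTE_KEY>
      - identity: {}
```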
- -The operator consumes this encryptionConfig as a Kubernetes Secret, and the Secret must be in the operator’s namespace. Rancher installs the `rancher-backup` operator in the `cattle-resources-system` namespace, so create this encryptionConfig secret in that namespace. - -For the `EncryptionConfiguration`, you can use the [sample file provided in the Kubernetes documentation.](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) - -To create the Secret, the encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. - -Save the `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: - -``` -kubectl create secret generic encryptionconfig \ - --from-file=./encryption-provider-config.yaml \ - -n cattle-resources-system -``` - -This will ensure that the secret contains a key named `encryption-provider-config.yaml`, and the operator will use this key to get the encryption configuration. - -The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key - -{{< img "/img/rancher/backup_restore/backup/encryption.png" "">}} - -In the example command above, the name `encryptionconfig` can be changed to anything. - - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `encryptionConfigSecretName` | Provide the name of the Secret from `cattle-resources-system` namespace, that contains the encryption config file. | - -# Storage Location - -{{< img "/img/rancher/backup_restore/backup/storageLocation.png" "">}} - -If the StorageLocation is specified in the Backup, the operator will retrieve the backup location from that particular S3 bucket. If not specified, the operator will try to find this file in the default operator-level S3 store, and in the operator-level PVC store. The default storage location is configured during the deployment of the `rancher-backup` operator. - -Selecting the first option stores this backup in the storage location configured while installing the rancher-backup chart. The second option lets you configure a different S3 compatible storage provider for storing the backup. - -### S3 - -The S3 storage location contains the following configuration fields: - -1. **Credential Secret** (optional): If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) The Credential Secret dropdown lists the secrets in all namespaces. -1. **Bucket Name**: The name of the S3 bucket where backup files will be stored. -1. **Region** (optional): The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. This field isn't needed for configuring MinIO. -1. **Folder** (optional): The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported. -1. 
**Endpoint**: The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket. -1. **Endpoint CA** (optional): This should be the Base64 encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-storage-configuration) -1. **Skip TLS Verifications** (optional): Set to true if you are not using TLS. - - -| YAML Directive Name | Description | Required | -| ---------------- | ---------------- | ------------ | -| `credentialSecretName` | If you need to use the AWS Access keys Secret keys to access s3 bucket, create a secret with your credentials with keys and the directives `accessKey` and `secretKey`. It can be in any namespace as long as you provide that namespace in `credentialSecretNamespace`. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | -| `credentialSecretNamespace` | The namespace of the secret containing the credentials to access S3. This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | | -| `bucketName` | The name of the S3 bucket where backup files will be stored. | ✓ | -| `folder` | The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported. | | -| `region` | The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | ✓ | -| `endpoint` | The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket. | ✓ | -| `endpointCA` | This should be the Base64 encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-storage-configuration) | | -| `insecureTLSSkipVerify` | Set to true if you are not using TLS. 
| | - -### Example S3 Storage Configuration - -```yaml -s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - -### Example MinIO Configuration - -```yaml -s3: - credentialSecretName: minio-creds - bucketName: rancherbackups - endpoint: minio.35.202.130.254.sslip.io - endpointCA: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t -``` -### Example credentialSecret - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: creds -type: Opaque -data: - accessKey: - secretKey: -``` - -### IAM Permissions for EC2 Nodes to Access S3 - -There are two ways to set up the `rancher-backup` operator to use S3 as the backup storage location. - -One way is to configure the `credentialSecretName` in the Backup custom resource, which refers to AWS credentials that have access to S3. - -If the cluster nodes are in Amazon EC2, the S3 access can also be set up by assigning IAM permissions to the EC2 nodes so that they can access S3. - -To allow a node to access S3, follow the instructions in the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-instance-access-s3-bucket/) to create an IAM role for EC2. When you add a custom policy to the role, add the following permissions, and replace the `Resource` with your bucket name: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::rancher-backups" - ] - }, - { - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:DeleteObject", - "s3:PutObjectAcl" - ], - "Resource": [ - "arn:aws:s3:::rancher-backups/*" - ] - } - ] -} -``` - -After the role is created, and you have attached the corresponding instance profile to your EC2 instance(s), the `credentialSecretName` directive can be left empty in the Backup custom resource. 
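If you are not relying on EC2 instance profiles, the credential Secret referenced by `credentialSecretName` can be created ahead of time with `kubectl`. The command below is a hedged sketch; the Secret name, namespace, and placeholder values are illustrative and must match whatever you reference in the Backup custom resource.

```bash
# Hypothetical example: create the S3 credential Secret that the Backup
# resource points to via credentialSecretName / credentialSecretNamespace.
# The data keys must be named accessKey and secretKey.
kubectl create secret generic s3-creds \
  --from-literal=accessKey=<AWS_ACCESS_KEY_ID> \
  --from-literal=secretKey=<AWS_SECRET_ACCESS_KEY> \
  -n default
```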
- -# Examples - -For example Backup custom resources, refer to [this page.](../../examples/#backup) diff --git a/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md b/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md deleted file mode 100644 index aa837a810a..0000000000 --- a/content/rancher/v2.5/en/backups/configuration/restore-config/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Restore Configuration -shortTitle: Restore -weight: 2 -aliases: - - /rancher/v2.5/en/backups/v2.5/configuration/restore-config - - /rancher/v2.x/en/backups/v2.5/configuration/restore-config/ ---- - -The Restore Create page lets you provide details of the backup to restore from - -{{< img "/img/rancher/backup_restore/restore/restore.png" "">}} - -- [Backup Source](#backup-source) - - [An Existing Backup Config](#an-existing-backup-config) - - [The default storage target](#the-default-storage-target) - - [An S3-compatible object store](#an-s3-compatible-object-store) -- [Encryption](#encryption) -- [Prune during restore](#prune-during-restore) -- [Getting the Backup Filename from S3](#getting-the-backup-filename-from-s3) - -# Backup Source -Provide details of the backup file and its storage location, which the operator will then use to perform the restore. Select from the following options to provide these details - - - - -### An existing backup config - -Selecting this option will populate the **Target Backup** dropdown with the Backups available in this cluster. Select the Backup from the dropdown, and that will fill out the **Backup Filename** field for you, and will also pass the backup source information from the selected Backup to the operator. - -{{< img "/img/rancher/backup_restore/restore/existing.png" "">}} - -If the Backup custom resource does not exist in the cluster, you need to get the exact filename and provide the backup source details with the default storage target or an S3-compatible object store. - - -### The default storage target - -Select this option if you are restoring from a backup file that exists in the default storage location configured at the operator-level. The operator-level configuration is the storage location that was configured when the `rancher-backup` operator was installed or upgraded. Provide the exact filename in the **Backup Filename** field. - -{{< img "/img/rancher/backup_restore/restore/default.png" "">}} - -### An S3-compatible object store - -Select this option if no default storage location is configured at the operator-level, OR if the backup file exists in a different S3 bucket than the one configured as the default storage location. Provide the exact filename in the **Backup Filename** field. Refer to [this section](#getting-the-backup-filename-from-s3) for exact steps on getting the backup filename from s3. Fill in all the details for the S3 compatible object store. Its fields are exactly same as ones for the `backup.StorageLocation` configuration in the [Backup custom resource.](../../configuration/backup-config/#storage-location) - -{{< img "/img/rancher/backup_restore/restore/s3store.png" "">}} - -# Encryption - -If the backup was created with encryption enabled, its file will have `.enc` suffix. Choosing such a Backup, or providing a backup filename with `.enc` suffix will display another dropdown named **Encryption Config Secret**. 
- -{{< img "/img/rancher/backup_restore/restore/encryption.png" "">}} - -The Secret selected from this dropdown must have the same contents as the one used for the Backup custom resource while performing the backup. If the encryption configuration doesn't match, the restore will fail - -The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `encryptionConfigSecretName` | Provide the name of the Secret from `cattle-resources-system` namespace, that contains the encryption config file. | - -> **Important** -This field should only be set if the backup was created with encryption enabled. Providing the incorrect encryption config will cause the restore to fail. - -# Prune During Restore - -* **Prune**: In order to fully restore Rancher from a backup, and to go back to the exact state it was at when the backup was performed, we need to delete any additional resources that were created by Rancher after the backup was taken. The operator does so if the **Prune** flag is enabled. Prune is enabled by default and it is recommended to keep it enabled. -* **Delete Timeout**: This is the amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `prune` | Delete the resources managed by Rancher that are not present in the backup (Recommended). | -| `deleteTimeoutSeconds` | Amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. | - -# Getting the Backup Filename from S3 - -This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. - -To obtain this file name from S3, go to your S3 bucket (and folder if it was specified while performing backup). - -Copy the filename and store it in your Restore custom resource. So assuming the name of your backup file is `backupfile`, - -- If your bucket name is `s3bucket` and no folder was specified, then the `backupFilename` to use will be `backupfile`. -- If your bucket name is `s3bucket` and the base folder is`s3folder`, the `backupFilename` to use is only `backupfile` . -- If there is a subfolder inside `s3Folder` called `s3sub`, and that has your backup file, then the `backupFilename` to use is `s3sub/backupfile`. - -| YAML Directive Name | Description | -| ---------------- | ---------------- | -| `backupFilename` | This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. | diff --git a/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md b/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md deleted file mode 100644 index 8b0b33f02d..0000000000 --- a/content/rancher/v2.5/en/backups/configuration/storage-config/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Backup Storage Location Configuration -shortTitle: Storage -weight: 3 -aliases: - - /rancher/v2.5/en/backups/v2.5/configuration/storage-config - - /rancher/v2.x/en/backups/v2.5/configuration/storage-config/ ---- - -Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible object store. - -Only one storage location can be configured at the operator level. 
- -- [Storage Location Configuration](#storage-location-configuration) - - [No Default Storage Location](#no-default-storage-location) - - [S3-compatible Object Store](#s3-compatible-object-store) - - [Use an existing StorageClass](#existing-storageclass) - - [Use an existing PersistentVolume](#existing-persistent-volume) -- [Encryption](#encryption) -- [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) - -# Storage Location Configuration - -### No Default Storage Location - -You can choose to not have any operator-level storage location configured. If you select this option, you must configure an S3-compatible object store as the storage location for each individual backup. - -### S3-compatible Object Store - -| Parameter | Description | -| -------------- | -------------- | -| Credential Secret | Choose the credentials for S3 from your secrets in Rancher. [Example]({{}}/rancher/v2.5/en/backups/v2.5/examples/#example-credential-secret-for-storing-backups-in-s3). | -| Bucket Name | Enter the name of the [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html) where the backups will be stored. Default: `rancherbackups`. | -| Region | The [AWS region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | -| Folder | The [folder in the S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html) where the backups will be stored. | -| Endpoint | The [S3 endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) For example, `s3.us-west-2.amazonaws.com`. | -| Endpoint CA | The CA cert used to for the S3 endpoint. Default: base64 encoded CA cert | -| insecureTLSSkipVerify | Set to true if you are not using TLS. | - -### Existing StorageClass - -Installing the `rancher-backup` chart by selecting the StorageClass option will create a Persistent Volume Claim (PVC), and Kubernetes will in turn dynamically provision a Persistent Volume (PV) where all the backups will be saved by default. - -For information about creating storage classes refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/) - -> **Important** -It is highly recommended to use a StorageClass with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. -If no such StorageClass is available, after the PV is provisioned, make sure to edit its reclaim policy and set it to "Retain" before storing backups in it. - -### Existing Persistent Volume - -Select an existing Persistent Volume (PV) that will be used to store your backups. For information about creating PersistentVolumes in Rancher, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) - -> **Important** -It is highly recommended to use a Persistent Volume with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. 
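As a point of reference, a PersistentVolume with the recommended reclaim policy might look like the sketch below. The NFS server, export path and capacity are placeholders for illustration; use whatever backing storage your environment provides.

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: rancher-backup-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  # "Retain" keeps the volume (and the backups on it) even if the PVC is deleted
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: nfs.example.com
    path: /exports/rancher-backups
```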
- - -# Example values.yaml for the rancher-backup Helm Chart - -The documented `values.yaml` file that can be used to configure `rancher-backup` operator when the Helm CLI is used can be found in the [backup-restore-operator repository.](https://github.com/rancher/backup-restore-operator/blob/release/v1.0/charts/rancher-backup/values.yaml) - -For more information about `values.yaml` files and configuring Helm charts during installation, refer to the [Helm documentation.](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing) diff --git a/content/rancher/v2.5/en/backups/docker-installs/_index.md b/content/rancher/v2.5/en/backups/docker-installs/_index.md deleted file mode 100644 index 2d6c076122..0000000000 --- a/content/rancher/v2.5/en/backups/docker-installs/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Backup and Restore for Rancher Installed with Docker -shortTitle: Docker Installs -weight: 10 -aliases: - - /rancher/v2.5/en/installation/backups-and-restoration/single-node-backup-and-restoration/ - - /rancher/v2.5/en/backups/v2.5/docker-installs - - /rancher/v2.x/en/backups/v2.5/docker-installs/ ---- - -- [Backups](./docker-backups) -- [Restores](./docker-restores) \ No newline at end of file diff --git a/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md b/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md deleted file mode 100644 index 8ceabd8db8..0000000000 --- a/content/rancher/v2.5/en/backups/docker-installs/docker-backups/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -shortTitle: Backups -weight: 3 -aliases: - - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.5/en/backups/backups/single-node-backups/ - - /rancher/v2.5/en/backups/legacy/backup/single-node-backups/ - - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-backups/ - - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/ ---- - - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. - -## Before You Start - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. 
| -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -## Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each placeholder: - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your Rancher container: - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.5/en/backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md b/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md deleted file mode 100644 index fd4ddac530..0000000000 --- a/content/rancher/v2.5/en/backups/docker-installs/docker-restores/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Restoring Backups—Docker Installs -shortTitle: Restores -weight: 3 -aliases: - - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ - - /rancher/v2.5/en/backups/restorations/single-node-restoration - - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-restores - - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/ ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup--" -``` - -In this command, `` and `-` are environment variables for your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. 
- -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version number for your Rancher backup. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.5/en/backups/docker-installs/docker-backups/) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container: - - ``` - docker stop - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.5/en/backups/docker-installs/docker-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.5/en/backups/docker-installs/docker-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup--.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. - - ``` - docker start - ``` - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/content/rancher/v2.5/en/backups/examples/_index.md b/content/rancher/v2.5/en/backups/examples/_index.md deleted file mode 100644 index 83496c18f9..0000000000 --- a/content/rancher/v2.5/en/backups/examples/_index.md +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: Examples -weight: 5 -aliases: - - /rancher/v2.5/en/backups/v2.5/examples - - /rancher/v2.x/en/backups/v2.5/examples/ ---- - -This section contains examples of Backup and Restore custom resources. - -The default backup storage location is configured when the `rancher-backup` operator is installed or upgraded. - -Encrypted backups can only be restored if the Restore custom resource uses the same encryption configuration secret that was used to create the backup. 
- -- [Backup](#backup) - - [Backup in the default location with encryption](#backup-in-the-default-location-with-encryption) - - [Recurring backup in the default location](#recurring-backup-in-the-default-location) - - [Encrypted recurring backup in the default location](#encrypted-recurring-backup-in-the-default-location) - - [Encrypted backup in Minio](#encrypted-backup-in-minio) - - [Backup in S3 using AWS credential secret](#backup-in-s3-using-aws-credential-secret) - - [Recurring backup in S3 using AWS credential secret](#recurring-backup-in-s3-using-aws-credential-secret) - - [Backup from EC2 nodes with IAM permission to access S3](#backup-from-ec2-nodes-with-iam-permission-to-access-s3) -- [Restore](#restore) - - [Restore using the default backup file location](#restore-using-the-default-backup-file-location) - - [Restore for Rancher migration](#restore-for-rancher-migration) - - [Restore from encrypted backup](#restore-from-encrypted-backup) - - [Restore an encrypted backup from Minio](#restore-an-encrypted-backup-from-minio) - - [Restore from backup using an AWS credential secret to access S3](#restore-from-backup-using-an-aws-credential-secret-to-access-s3) - - [Restore from EC2 nodes with IAM permissions to access S3](#restore-from-ec2-nodes-with-iam-permissions-to-access-s3) -- [Example Credential Secret for Storing Backups in S3](#example-credential-secret-for-storing-backups-in-s3) -- [Example EncryptionConfiguration](#example-encryptionconfiguration) - -# Backup - -This section contains example Backup custom resources. - -### Backup in the Default Location with Encryption - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-location-encrypted-backup -spec: - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Recurring Backup in the Default Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-location-recurring-backup -spec: - resourceSetName: rancher-resource-set - schedule: "@every 1h" - retentionCount: 10 -``` - -### Encrypted Recurring Backup in the Default Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: default-enc-recurring-backup -spec: - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 3 -``` - -### Encrypted Backup in Minio - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: minio-backup -spec: - storageLocation: - s3: - credentialSecretName: minio-creds - credentialSecretNamespace: default - bucketName: rancherbackups - endpoint: minio.sslip.io - endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Backup in S3 Using AWS Credential Secret - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-backup -spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -### Recurring Backup in S3 Using AWS Credential Secret - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-recurring-backup -spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: 
rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 10 -``` - -### Backup from EC2 Nodes with IAM Permission to Access S3 - -This example shows that the AWS credential secret does not have to be provided to create a backup if the nodes running `rancher-backup` have [these permissions for access to S3.](../configuration/backup-config/#iam-permissions-for-ec2-nodes-to-access-s3) - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Backup -metadata: - name: s3-iam-backup -spec: - storageLocation: - s3: - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig -``` - -# Restore - -This section contains example Restore custom resources. - -### Restore Using the Default Backup File Location - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-default -spec: - backupFilename: default-location-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-29-54-07-00.tar.gz -# encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore for Rancher Migration -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-migration -spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - prune: false - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - -### Restore from Encrypted Backup - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-encrypted -spec: - backupFilename: default-test-s3-def-backup-c583d8f2-6daf-4648-8ead-ed826c591471-2020-08-24T20-47-05Z.tar.gz - encryptionConfigSecretName: encryptionconfig -``` - -### Restore an Encrypted Backup from Minio - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-minio -spec: - backupFilename: default-minio-backup-demo-aa5c04b7-4dba-4c48-9ac4-ab7916812eaa-2020-08-30T13-18-17-07-00.tar.gz - storageLocation: - s3: - credentialSecretName: minio-creds - credentialSecretNamespace: default - bucketName: rancherbackups - endpoint: minio.sslip.io - endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t - encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore from Backup Using an AWS Credential Secret to Access S3 - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-s3-demo -spec: - backupFilename: test-s3-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-49-34-07-00.tar.gz.enc - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - encryptionConfigSecretName: test-encryptionconfig -``` - -### Restore from EC2 Nodes with IAM Permissions to Access S3 - -This example shows that the AWS credential secret does not have to be provided to restore from backup if the nodes running `rancher-backup` have [these permissions for access to S3.](../configuration/backup-config/#iam-permissions-for-ec2-nodes-to-access-s3) - -```yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-s3-demo -spec: - 
backupFilename: default-test-s3-recurring-backup-84bf8dd8-0ef3-4240-8ad1-fc7ec308e216-2020-08-24T10#52#44-07#00.tar.gz - storageLocation: - s3: - bucketName: rajashree-backup-test - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - encryptionConfigSecretName: test-encryptionconfig -``` - -# Example Credential Secret for Storing Backups in S3 - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: creds -type: Opaque -data: - accessKey: - secretKey: -``` - -# Example EncryptionConfiguration - -```yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aesgcm: - keys: - - name: key1 - secret: c2VjcmV0IGlzIHNlY3VyZQ== - - name: key2 - secret: dGhpcyBpcyBwYXNzd29yZA== - - aescbc: - keys: - - name: key1 - secret: c2VjcmV0IGlzIHNlY3VyZQ== - - name: key2 - secret: dGhpcyBpcyBwYXNzd29yZA== - - secretbox: - keys: - - name: key1 - secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= -``` diff --git a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md b/content/rancher/v2.5/en/backups/migrating-rancher/_index.md deleted file mode 100644 index e84fe21de1..0000000000 --- a/content/rancher/v2.5/en/backups/migrating-rancher/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Migrating Rancher to a New Cluster -weight: 3 -aliases: - - /rancher/v2.x/en/backups/v2.5/migrating-rancher/ ---- - -If you are migrating Rancher to a new Kubernetes cluster, you don't need to install Rancher on the new cluster first. If Rancher is restored to a new cluster with Rancher already installed, it can cause problems. - -### Prerequisites - -These instructions assume you have [created a backup](../back-up-rancher) and you have already installed a new Kubernetes cluster where Rancher will be deployed. - -It is required to use the same hostname that was set as the server URL in the first cluster. - -Rancher version must be v2.5.0 and up - -Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes clusters such as Amazon EKS clusters. For help installing Kubernetes, refer to the documentation of the Kubernetes distribution. One of Rancher's Kubernetes distributions may also be used: - -- [RKE Kubernetes installation docs]({{}}/rke/latest/en/installation/) -- [K3s Kubernetes installation docs]({{}}/k3s/latest/en/installation/) - -### 1. Install the rancher-backup Helm chart -Install version 1.x.x of the rancher-backup chart. The following assumes a connected environment with access to DockerHub: - -``` -helm repo add rancher-charts https://charts.rancher.io -helm repo update -helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace --version $CHART_VERSION -helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system --version $CHART_VERSION -``` -
-For an **air-gapped environment**, use the option below to pull the `backup-restore-operator` image from your private registry when installing the rancher-backup-crd helm chart. -``` ---set image.repository $REGISTRY/rancher/backup-restore-operator -``` - -### 2. Restore from backup using a Restore custom resource - -If you are using an S3 store as the backup source, and need to use your S3 credentials for restore, create a secret in this cluster using your S3 credentials. The Secret data must have two keys, `accessKey` and `secretKey` containing the s3 credentials like this: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: s3-creds -type: Opaque -stringData: - accessKey: - secretKey: -``` - -This secret can be created in any namespace, with the above example it will get created in the default namespace - -In the Restore custom resource, `prune` must be set to false. - -Create a Restore custom resource like the example below: - -```yaml -# migrationResource.yaml -apiVersion: resources.cattle.io/v1 -kind: Restore -metadata: - name: restore-migration -spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - prune: false - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: backup-test - folder: ecm1 - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com -``` - ->**Important:** The field `encryptionConfigSecretName` must be set only if your backup was created with encryption enabled. Provide the name of the Secret containing the encryption config file. If you only have the encryption config file, but don't have a secret created with it in this cluster, use the following steps to create the secret: - -1. The encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. So save your `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: - ``` - kubectl create secret generic encryptionconfig \ - --from-file=./encryption-provider-config.yaml \ - -n cattle-resources-system - ``` - -1. Then apply the resource: - ``` - kubectl apply -f migrationResource.yaml - ``` - -### 3. Install cert-manager - -Follow the steps to [install cert-manager]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#5-install-cert-manager) in the documentation about installing cert-manager on Kubernetes. - -### 4. Bring up Rancher with Helm - -Use the same version of Helm to install Rancher, that was used on the first cluster. - -``` -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname= \ -``` diff --git a/content/rancher/v2.5/en/backups/restoring-rancher/_index.md b/content/rancher/v2.5/en/backups/restoring-rancher/_index.md deleted file mode 100644 index d6ab3d801e..0000000000 --- a/content/rancher/v2.5/en/backups/restoring-rancher/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Restoring Rancher -weight: 2 -aliases: - - /rancher/v2.x/en/installation/backups/restores - - /rancher/v2.x/en/backups/restoring-rancher - - /rancher/v2.x/en/backups/v2.5/restoring-rancher/ ---- - -A restore is performed by creating a Restore custom resource. - -> **Important** -> -> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. 
In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.]({{}}/rancher/v2.5/en/backups/migrating-rancher) -> * While restoring rancher on the same setup, the operator will scale down the rancher deployment when restore starts, and it will scale back up the deployment once restore completes. So Rancher will be unavailable during the restore. -> * When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. - -### Create the Restore Custom Resource - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** -1. Click **Restore.** -1. Create the Restore with the form, or with YAML. For creating the Restore resource using form, refer to the [configuration reference]({{}}/rancher/v2.5/en/backups/configuration/restore-config) and to the [examples.]({{}}/rancher/v2.5/en/backups/examples) -1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Restore YAML. - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Restore - metadata: - name: restore-migration - spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - ``` - - For help configuring the Restore, refer to the [configuration reference]({{}}/rancher/v2.5/en/backups/configuration/restore-config) and to the [examples.]({{}}/rancher/v2.5/en/backups/examples) - -1. Click **Create.** - -**Result:** The rancher-operator scales down the rancher deployment during restore, and scales it back up once the restore completes. The resources are restored in this order: - -1. Custom Resource Definitions (CRDs) -2. Cluster-scoped resources -3. Namespaced resources - -### Logs - -To check how the restore is progressing, you can check the logs of the operator. Run this command to follow the logs: - -``` -kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f -``` - -### Cleanup - -If you created the restore resource with kubectl, remove the resource to prevent a naming conflict with future restores. - -### Known Issues -In some cases, after restoring the backup, Rancher logs will show errors similar to the following: -``` -2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing -``` -This happens because one of the resources that was just restored has finalizers but the related resources have been deleted so the handler cannot find it. - -To eliminate the errors, we need to find and delete the resource that causes the error. 
See more information [here](https://github.com/rancher/rancher/issues/35050#issuecomment-937968556) diff --git a/content/rancher/v2.5/en/best-practices/_index.md b/content/rancher/v2.5/en/best-practices/_index.md deleted file mode 100644 index 77a1ec66e8..0000000000 --- a/content/rancher/v2.5/en/best-practices/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Best Practices Guide -weight: 4 -aliases: - - /rancher/v2.5/en/best-practices/v2.5 - - /rancher/v2.x/en/best-practices/ - - /rancher/v2.x/en/best-practices/v2.5/ ---- - -The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. - -If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. - -Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. - -For more guidance on best practices, you can consult these resources: - -- [Security]({{}}/rancher/v2.5/en/security/) -- [Rancher Blog](https://rancher.com/blog/) - - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) - - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) -- [Rancher Forum](https://forums.rancher.com/) -- [Rancher Users Slack](https://slack.rancher.io/) -- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md deleted file mode 100644 index 58c57134e0..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Best Practices for Rancher Managed Clusters -shortTitle: Rancher Managed Clusters -weight: 2 -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-managed - - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/ ---- - -### Logging - -Refer to [this guide](./logging) for our recommendations for cluster-level logging and application logging. - -### Monitoring - -Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. Refer to this [guide](./monitoring) for our recommendations. - -### Tips for Setting Up Containers - -Running well-built containers can greatly impact the overall performance and security of your environment. Refer to this [guide](./containers) for tips. - -### Best Practices for Rancher Managed vSphere Clusters - -This [guide](./managed-vsphere) outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. 
diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md deleted file mode 100644 index 5405e8d0f0..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/managed-vsphere/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Best Practices for Rancher Managed vSphere Clusters -shortTitle: Rancher Managed Clusters in vSphere -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/managed-vsphere - - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/ ---- - -This guide outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. - -- [1. VM Considerations](#1-vm-considerations) -- [2. Network Considerations](#2-network-considerations) -- [3. Storage Considerations](#3-storage-considerations) -- [4. Backups and Disaster Recovery](#4-backups-and-disaster-recovery) - -
Solution Overview
- -![Solution Overview]({{}}/img/rancher/solution_overview.drawio.svg) - -# 1. VM Considerations - -### Leverage VM Templates to Construct the Environment - -To facilitate consistency across the deployed Virtual Machines across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customisation options. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across ESXi Hosts - -Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across Datastores - -Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. - -### Configure VM's as Appropriate for Kubernetes - -It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. - -# 2. Network Considerations - -### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes - -Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. - -### Consistent IP Addressing for VM's - -Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -# 3. Storage Considerations - -### Leverage SSD Drives for ETCD Nodes - -ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. - -# 4. Backups and Disaster Recovery - -### Perform Regular Downstream Cluster Backups - -Kubernetes uses etcd to store all its data - from configuration, state and metadata. Backing this up is crucial in the event of disaster recovery. - -### Back up Downstream Node VMs - -Incorporate the Rancher downstream node VM's within a standard VM backup policy. \ No newline at end of file diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md b/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md deleted file mode 100644 index 62bf249217..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-managed/monitoring/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Monitoring Best Practices -weight: 2 -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/monitoring - - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/ ---- - -Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. This is not different when using Kubernetes and Rancher. Fortunately the integrated monitoring and alerting functionality makes this whole process a lot easier. - -The [Rancher monitoring documentation]({{}}/rancher/v2.5/en/monitoring-alerting/) describes how you can set up a complete Prometheus and Grafana stack. Out of the box this will scrape monitoring data from all system and Kubernetes components in your cluster and provide sensible dashboards and alerts for them to get started. 
But for a reliable setup, you also need to monitor your own workloads and adapt Prometheus and Grafana to your own specific use cases and cluster sizes. This document aims to give you best practices for this. - -- [What to Monitor](#what-to-monitor) -- [Configuring Prometheus Resource Usage](#configuring-prometheus-resource-usage) -- [Scraping Custom Workloads](#scraping-custom-workloads) -- [Monitoring in a (Micro)Service Architecture](#monitoring-in-a-micro-service-architecture) -- [Real User Monitoring](#real-user-monitoring) -- [Security Monitoring](#security-monitoring) -- [Setting up Alerts](#setting-up-alerts) - -# What to Monitor - -Kubernetes itself, as well as applications running inside of it, form a distributed system where different components interact with each other. For the whole system and each individual component, you have to ensure performance, availability, reliability and scalability. A good resource with more details and information is Google's free [Site Reliability Engineering Book](https://landing.google.com/sre/sre-book/), especially the chapter about [Monitoring distributed systems](https://landing.google.com/sre/sre-book/chapters/monitoring-distributed-systems/). - -# Configuring Prometheus Resource Usage - -When installing the integrated monitoring stack, Rancher allows to configure several settings that are dependent on the size of your cluster and the workloads running in it. This chapter covers these in more detail. - -### Storage and Data Retention - -The amount of storage needed for Prometheus directly correlates to the amount of time series and labels that you store and the data retention you have configured. It is important to note that Prometheus is not meant to be used as a long-term metrics storage. Data retention time is usually only a couple of days and not weeks or months. The reason for this is that Prometheus does not perform any aggregation on its stored metrics. This is great because aggregation can dilute data, but it also means that the needed storage grows linearly over time without retention. - -One way to calculate the necessary storage is to look at the average size of a storage chunk in Prometheus with this query - -``` -rate(prometheus_tsdb_compaction_chunk_size_bytes_sum[1h]) / rate(prometheus_tsdb_compaction_chunk_samples_sum[1h]) -``` - -Next, find out your data ingestion rate per second: - -``` -rate(prometheus_tsdb_head_samples_appended_total[1h]) -``` - -and then multiply this with the retention time, adding a few percentage points as buffer: - -``` -average chunk size in bytes * ingestion rate per second * retention time in seconds * 1.1 = necessary storage in bytes -``` - -You can find more information about how to calculate the necessary storage in this [blog post](https://www.robustperception.io/how-much-disk-space-do-prometheus-blocks-use). - -You can read more about the Prometheus storage concept in the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/storage). - -### CPU and Memory Requests and Limits - -In larger Kubernetes clusters Prometheus can consume quite a bit of memory. The amount of memory Prometheus needs directly correlates to the amount of time series and amount of labels it stores and the scrape interval in which these are filled. 
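To make this concrete, the sketch below shows how requests and limits might be tuned through the monitoring chart's values. It assumes a kube-prometheus-stack style values layout; the key names and numbers are illustrative only and should be adjusted to your cluster size.

```yaml
# Illustrative values fragment; verify the exact keys against the chart you
# are installing and adjust the numbers to your own cluster.
prometheus:
  prometheusSpec:
    scrapeInterval: 60s   # longer scrape intervals reduce ingestion rate and memory usage
    retention: 72h        # short-term storage only, as described above
    resources:
      requests:
        cpu: "1"
        memory: 4Gi
      limits:
        memory: 6Gi
```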
- -You can find more information about how to calculate the necessary memory in this [blog post](https://www.robustperception.io/how-much-ram-does-prometheus-2-x-need-for-cardinality-and-ingestion). - -The amount of necessary CPUs correlate with the amount of queries you are performing. - -### Federation and Long-term Storage - -Prometheus is not meant to store metrics for a long amount of time, but should only be used for short term storage. - -In order to store some, or all metrics for a long time, you can leverage Prometheus' [remote read/write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) capabilities to connect it to storage systems like [Thanos](https://thanos.io/), [InfluxDB](https://www.influxdata.com/), [M3DB](https://www.m3db.io/), or others. You can find an example setup in this [blog post](https://rancher.com/blog/2020/prometheus-metric-federation). - -# Scraping Custom Workloads - -While the integrated Rancher Monitoring already scrapes system metrics from a cluster's nodes and system components, the custom workloads that you deploy on Kubernetes should also be scraped for data. For that you can configure Prometheus to do an HTTP request to an endpoint of your applications in a certain interval. These endpoints should then return their metrics in a Prometheus format. - -In general, you want to scrape data from all the workloads running in your cluster so that you can use them for alerts or debugging issues. Often, you recognize that you need some data only when you actually need the metrics during an incident. It is good, if it is already scraped and stored. Since Prometheus is only meant to be a short-term metrics storage, scraping and keeping lots of data is usually not that expensive. If you are using a long-term storage solution with Prometheus, you can then still decide which data you are actually persisting and keeping there. - -### About Prometheus Exporters - -A lot of 3rd party workloads like databases, queues or web-servers either already support exposing metrics in a Prometheus format, or there are so called exporters available that translate between the tool's metrics and the format that Prometheus understands. Usually you can add these exporters as additional sidecar containers to the workload's Pods. A lot of helm charts already include options to deploy the correct exporter. Additionally you can find a curated list of exports by SysDig on [promcat.io](https://promcat.io/) and on [ExporterHub](https://exporterhub.io/). - -### Prometheus support in Programming Languages and Frameworks - -To get your own custom application metrics into Prometheus, you have to collect and expose these metrics directly from your application's code. Fortunately, there are already libraries and integrations available to help with this for most popular programming languages and frameworks. One example for this is the Prometheus support in the [Spring Framework](https://docs.spring.io/spring-metrics/docs/current/public/prometheus). - -### ServiceMonitors and PodMonitors - -Once all your workloads expose metrics in a Prometheus format, you have to configure Prometheus to scrape it. 
Under the hood Rancher is using the [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator). This makes it easy to add additional scraping targets with ServiceMonitors and PodMonitors. A lot of helm charts already include an option to create these monitors directly. You can also find more information in the Rancher documentation. - -### Prometheus Push Gateway - -There are some workloads that are traditionally hard to scrape by Prometheus. Examples for these are short lived workloads like Jobs and CronJobs, or applications that do not allow sharing data between individual handled incoming requests, like PHP applications. - -To still get metrics for these use cases, you can set up [prometheus-pushgateways](https://github.com/prometheus/pushgateway). The CronJob or PHP application would push metric updates to the pushgateway. The pushgateway aggregates and exposes them through an HTTP endpoint, which then can be scraped by Prometheus. - -### Prometheus Blackbox Monitor - -Sometimes it is useful to monitor workloads from the outside. For this, you can use the [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter) which allows probing any kind of endpoint over HTTP, HTTPS, DNS, TCP and ICMP. - -# Monitoring in a (Micro)Service Architecture - -If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic to understand how all these workloads are communicating with each other and where a problem or bottleneck may be. - -Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click]({{}}/rancher/v2.5/en/istio/) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. - -# Real User Monitoring - -Monitoring the availability and performance of all your internal workloads is vitally important to run stable, reliable and fast applications. But these metrics only show you parts of the picture. To get a complete view it is also necessary to know how your end users are actually perceiving it. For this you can look into various [Real user monitoring solutions](https://en.wikipedia.org/wiki/Real_user_monitoring). - -# Security Monitoring - -In addition to monitoring workloads to detect performance, availability or scalability problems, the cluster and the workloads running into it should also be monitored for potential security problems. A good starting point is to frequently run and alert on [CIS Scans]({{}}/rancher/v2.5/en/cis-scans/v2.5/) which check if the cluster is configured according to security best practices. - -For the workloads, you can have a look at Kubernetes and Container security solutions like [Falco](https://falco.org/), [Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/), [SysDig](https://sysdig.com/). 
- -# Setting up Alerts - -Getting all the metrics into a monitoring systems and visualizing them in dashboards is great, but you also want to be pro-actively alerted if something goes wrong. - -The integrated Rancher monitoring already configures a sensible set of alerts that make sense in any Kubernetes cluster. You should extend these to cover your specific workloads and use cases. - -When setting up alerts, configure them for all the workloads that are critical to the availability of your applications. But also make sure that they are not too noisy. Ideally every alert you are receiving should be because of a problem that needs your attention and needs to be fixed. If you have alerts that are firing all the time but are not that critical, there is a danger that you start ignoring your alerts all together and then miss the real important ones. Less may be more here. Start to focus on the real important metrics first, for example alert if your application is offline. Fix all the problems that start to pop up and then start to create more detailed alerts. - -If an alert starts firing, but there is nothing you can do about it at the moment, it's also fine to silence the alert for a certain amount of time, so that you can look at it later. - -You can find more information on how to set up alerts and notification channels in the [Rancher Documentation]({{}}/rancher/v2.5/en/monitoring-alerting). diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/_index.md deleted file mode 100644 index 32606b0dbe..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-server/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Best Practices for the Rancher Server -shortTitle: Rancher Server -weight: 1 -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-server - - /rancher/v2.x/en/best-practices/v2.5/rancher-server/ ---- - -This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. - -### Recommended Architecture and Infrastructure - -Refer to this [guide](./deployment-types) for our general advice for setting up the Rancher server on a high-availability Kubernetes cluster. - -### Deployment Strategies - -This [guide](./deployment-strategies) is designed to help you choose whether a regional deployment strategy or a hub-and-spoke deployment strategy is better for a Rancher server that manages downstream Kubernetes clusters. - -### Installing Rancher in a vSphere Environment - -This [guide](./rancher-in-vsphere) outlines a reference architecture for installing Rancher in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. \ No newline at end of file diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md deleted file mode 100644 index c745f5dc14..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-strategies/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Rancher Deployment Strategy -weight: 100 -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-strategies - - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/ ---- - -There are two recommended deployment strategies for a Rancher server that manages downstream Kubernetes clusters. 
Each one has its own pros and cons. Read more about which one would fit best for your use case: - -* [Hub and Spoke](#hub-and-spoke-strategy) -* [Regional](#regional-strategy) - -# Hub & Spoke Strategy ---- - -In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. - -{{< img "/img/rancher/bpg/hub-and-spoke.png" "Hub and Spoke Deployment">}} - -### Pros - -* Environments could have nodes and network connectivity across regions. -* Single control plane interface to view/see all regions and environments. -* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. - -### Cons - -* Subject to network latencies. -* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. - -# Regional Strategy ---- -In the regional deployment model a control plane is deployed in close proximity to the compute nodes. - -{{< img "/img/rancher/bpg/regional.png" "Regional Deployment">}} - -### Pros - -* Rancher functionality in regions stay operational if a control plane in another region goes down. -* Network latency is greatly reduced, improving the performance of functionality in Rancher. -* Upgrades of the Rancher control plane can be done independently per region. - -### Cons - -* Overhead of managing multiple Rancher installations. -* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. -* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md deleted file mode 100644 index f7ba5e5f02..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-server/deployment-types/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Tips for Running Rancher -weight: 100 -aliases: - - /rancher/v2.5/en/best-practices/deployment-types - - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-types - - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/ ---- - -This guide is geared toward use cases where Rancher is used to manage downstream Kubernetes clusters. The high-availability setup is intended to prevent losing access to downstream clusters if the Rancher server is not available. - -A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. - -If you are installing Rancher in a vSphere environment, refer to the best practices documented [here.](../rancher-in-vsphere) - -When you set up your high-availability Rancher installation, consider the following: - -### Run Rancher on a Separate Cluster -Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. 
- -### Make sure nodes are configured correctly for Kubernetes ### -It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://etcd.io/docs/v3.4/op-guide/performance/). - -### When using RKE: Back up the Statefile -RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. - -### Run All Nodes in the Cluster in the Same Datacenter -For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. - -### Development and Production Environments Should be Similar -It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. - -### Monitor Your Clusters to Plan Capacity -The Rancher server's Kubernetes cluster should run within the [system and hardware requirements]({{}}/rancher/v2.5/en/installation/requirements/) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. - -However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. - -After you [enable monitoring]({{}}/rancher/v2.5/en/monitoring-alerting) in the cluster, you can set up [a notification channel]({{}}/rancher/v2.5/en/monitoring-alerting/) and alerts to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. 
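
Between dashboard reviews, a quick spot check of node capacity can be done with `kubectl`. This is a minimal sketch that assumes the cluster's metrics API is available (for example, through the monitoring stack); it is not a replacement for Prometheus-based capacity planning:

```bash
# Spot-check current CPU/memory usage on the nodes running the Rancher server.
# Requires a working metrics API (e.g. provided by the monitoring stack).
kubectl top nodes

# Compare allocatable capacity against the resources already requested per node.
kubectl describe nodes | grep -A 8 "Allocated resources"
```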
\ No newline at end of file diff --git a/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md b/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md deleted file mode 100644 index d3db604508..0000000000 --- a/content/rancher/v2.5/en/best-practices/rancher-server/rancher-in-vsphere/_index.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: Installing Rancher in a vSphere Environment -shortTitle: On-Premises Rancher in vSphere -weight: 3 -aliases: - - /rancher/v2.5/en/best-practices/v2.5/rancher-server/rancher-in-vsphere - - /rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/ ---- - -This guide outlines a reference architecture for installing Rancher on an RKE Kubernetes cluster in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. - -- [1. Load Balancer Considerations](#1-load-balancer-considerations) -- [2. VM Considerations](#2-vm-considerations) -- [3. Network Considerations](#3-network-considerations) -- [4. Storage Considerations](#4-storage-considerations) -- [5. Backups and Disaster Recovery](#5-backups-and-disaster-recovery) - -
Solution Overview
- -![Solution Overview](/docs/img/rancher/rancher-on-prem-vsphere.svg) - -# 1. Load Balancer Considerations - -A load balancer is required to direct traffic to the Rancher workloads residing on the RKE nodes. - -### Leverage Fault Tolerance and High Availability - -Leverage the use of an external (hardware or software) load balancer that has inherit high-availability functionality (F5, NSX-T, Keepalived, etc). - -### Back Up Load Balancer Configuration - -In the event of a Disaster Recovery activity, availability of the Load balancer configuration will expedite the recovery process. - -### Configure Health Checks - -Configure the Load balancer to automatically mark nodes as unavailable if a health check is failed. For example, NGINX can facilitate this with: - -`max_fails=3 fail_timeout=5s` - -### Leverage an External Load Balancer - -Avoid implementing a software load balancer within the management cluster. - -### Secure Access to Rancher - -Configure appropriate Firewall / ACL rules to only expose access to Rancher - -# 2. VM Considerations - -### Size the VM's According to Rancher Documentation - -https://rancher.com/docs/rancher/v2.5/en/installation/requirements/ - -### Leverage VM Templates to Construct the Environment - -To facilitate the consistency of Virtual Machines deployed across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customization options. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across ESXi Hosts - -Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. - -### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across Datastores - -Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. - -### Configure VM's as Appropriate for Kubernetes - -It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. - -# 3. Network Considerations - -### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes - -Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. - -### Consistent IP Addressing for VM's - -Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -# 4. Storage Considerations - -### Leverage SSD Drives for ETCD Nodes - -ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. - -# 5. Backups and Disaster Recovery - -### Perform Regular Management Cluster Backups - -Rancher stores its data in the ETCD datastore of the Kubernetes cluster it resides on. Like with any Kubernetes cluster, perform frequent, tested backups of this cluster. - -### Back up Rancher Cluster Node VMs - -Incorporate the Rancher management node VM's within a standard VM backup policy. 
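
In addition to VM-level backups, a one-time etcd snapshot of the management cluster can be taken with the RKE CLI before maintenance or upgrades. A minimal sketch, run from the workstation that holds the cluster's `cluster.yml` (the snapshot name is only an example):

```bash
# Take a one-off etcd snapshot of the RKE cluster that runs Rancher.
# Run from the directory containing cluster.yml; the name below is an example.
rke etcd snapshot-save --config cluster.yml --name pre-maintenance-snapshot

# Snapshots are written to /opt/rke/etcd-snapshots on the etcd nodes; copy them
# (together with an encrypted cluster.rkestate) to storage outside the cluster.
```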
diff --git a/content/rancher/v2.5/en/cis-scans/_index.md b/content/rancher/v2.5/en/cis-scans/_index.md deleted file mode 100644 index 6f72660c12..0000000000 --- a/content/rancher/v2.5/en/cis-scans/_index.md +++ /dev/null @@ -1,353 +0,0 @@ ---- -title: CIS Scans -weight: 17 -aliases: - - /rancher/v2.5/en/cis-scans/v2.5 - - /rancher/v2.x/en/cis-scans/ - - /rancher/v2.x/en/cis-scans/v2.5/ ---- - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. - -The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. - -- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) -- [About the CIS Benchmark](#about-the-cis-benchmark) -- [About the Generated Report](#about-the-generated-report) -- [Test Profiles](#test-profiles) -- [About Skipped and Not Applicable Tests](#about-skipped-and-not-applicable-tests) -- [Roles-based Access Control](./rbac) -- [Configuration](./configuration) -- [How-to Guides](#how-to-guides) - - [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) - - [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) - - [Running a Scan](#running-a-scan) - - [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) - - [Skipping Tests](#skipping-tests) - - [Viewing Reports](#viewing-reports) - - [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) - - [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) - - [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) - -# Changes in Rancher v2.5 - -We now support running CIS scans on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. Previously it was only supported to run CIS scans on RKE Kubernetes clusters. - -In Rancher v2.4, the CIS scan tool was available from the **cluster manager** in the Rancher UI. Now it is available in the **Cluster Explorer** and it can be enabled and deployed using a Helm chart. It can be installed from the Rancher UI, but it can also be installed independently of Rancher. It deploys a CIS scan operator for the cluster, and deploys Kubernetes custom resources for cluster scans. The custom resources can be managed directly from the **Cluster Explorer.** - -In v1 of the CIS scan tool, which was available in Rancher v2.4 through the cluster manager, recurring scans could be scheduled. The ability to schedule recurring scans is now also available for CIS v2 from Rancher v2.5.4. - -Support for alerting for the cluster scan results is now also available from Rancher v2.5.4. - -In Rancher v2.4, permissive and hardened profiles were included. In Rancher v2.5.0 and in v2.5.4, more profiles were included. - -{{% tabs %}} -{{% tab "Profiles in v2.5.4" %}} -- Generic CIS 1.5 -- Generic CIS 1.6 -- RKE permissive 1.5 -- RKE hardened 1.5 -- RKE permissive 1.6 -- RKE hardened 1.6 -- EKS -- GKE -- RKE2 permissive 1.5 -- RKE2 permissive 1.5 -{{% /tab %}} -{{% tab "Profiles in v2.5.0-v2.5.3" %}} -- Generic CIS 1.5 -- RKE permissive -- RKE hardened -- EKS -- GKE -{{% /tab %}} -{{% /tabs %}} -
- - -The default profile and the supported CIS benchmark version depends on the type of cluster that will be scanned and the Rancher version: - -{{% tabs %}} -{{% tab "v2.5.4" %}} - -The `rancher-cis-benchmark` supports the CIS 1.6 Benchmark version. - -- For RKE Kubernetes clusters, the RKE Permissive 1.6 profile is the default. -- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. -- For RKE2 Kubernetes clusters, the RKE2 Permissive 1.5 profile is the default. -- For cluster types other than RKE, RKE2, EKS and GKE, the Generic CIS 1.5 profile will be used by default. - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.3" %}} - -The `rancher-cis-benchmark` supports the CIS 1.5 Benchmark version. - -- For RKE Kubernetes clusters, the RKE permissive profile is the default. -- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. -- For cluster types other than RKE, EKS and GKE, the Generic CIS 1.5 profile will be used by default. - -{{% /tab %}} -{{% /tabs %}} - -> **Note:** CIS v1 cannot run on a cluster when CIS v2 is deployed. In other words, after `rancher-cis-benchmark` is installed, you can't run scans by going to the Cluster Manager view in the Rancher UI and clicking Tools > CIS Scans. - -# About the CIS Benchmark - -The Center for Internet Security is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is -here. - -# About the Generated Report - -Each scan generates a report can be viewed in the Rancher UI and can be downloaded in CSV format. - -From Rancher v2.5.4, the scan uses the CIS Benchmark v1.6 by default. In Rancher v2.5.0-2.5.3, the CIS Benchmark v1.5. is used. - -The Benchmark version is included in the generated report. - -The Benchmark provides recommendations of two types: Automated and Manual. Recommendations marked as Manual in the Benchmark are not included in the generated report. - -Some tests are designated as "Not Applicable." These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated to be not applicable, refer to Rancher's self-assessment guide for the corresponding Kubernetes version. - -The report contains the following information: - -| Column in Report | Description | -|------------------|-------------| -| `id` | The ID number of the CIS Benchmark. | -| `description` | The description of the CIS Benchmark test. | -| `remediation` | What needs to be fixed in order to pass the test. | -| `state` | Indicates if the test passed, failed, was skipped, or was not applicable. 
| -| `node_type` | The node role, which affects which tests are run on the node. Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | -| `audit` | This is the audit check that `kube-bench` runs for this test. | -| `audit_config` | Any configuration applicable to the audit script. | -| `test_info` | Test-related info as reported by `kube-bench`, if any. | -| `commands` | Test-related commands as reported by `kube-bench`, if any. | -| `config_commands` | Test-related configuration data as reported by `kube-bench`, if any. | -| `actual_value` | The test's actual value, present if reported by `kube-bench`. | -| `expected_result` | The test's expected result, present if reported by `kube-bench`. | - -Refer to the table in the cluster hardening guide for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. - -# Test Profiles - -The following profiles are available: - -{{% tabs %}} -{{% tab "Profiles in v2.5.4" %}} -- Generic CIS 1.5 -- Generic CIS 1.6 -- RKE permissive 1.5 -- RKE hardened 1.5 -- RKE permissive 1.6 -- RKE hardened 1.6 -- EKS -- GKE -- RKE2 permissive 1.5 -- RKE2 permissive 1.5 -{{% /tab %}} -{{% tab "Profiles in v2.5.0-v2.5.3" %}} -- Generic CIS 1.5 -- RKE permissive -- RKE hardened -- EKS -- GKE -{{% /tab %}} -{{% /tabs %}} - -You also have the ability to customize a profile by saving a set of tests to skip. - -All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how a RKE cluster manages Kubernetes. - -There are two types of RKE cluster scan profiles: - -- **Permissive:** This profile has a set of tests that have been will be skipped as these tests will fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. -- **Hardened:** This profile will not skip any tests, except for the non-applicable tests. - -The EKS and GKE cluster scan profiles are based on CIS Benchmark versions that are specific to those types of clusters. - -In order to pass the "Hardened" profile, you will need to follow the steps on the hardening guide and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. - -# About Skipped and Not Applicable Tests - -For a list of skipped and not applicable tests, refer to this page. - -For now, only user-defined skipped tests are marked as skipped in the generated report. - -Any skipped tests that are defined as being skipped by one of the default profiles are marked as not applicable. - -# Roles-based Access Control - -For information about permissions, refer to this page. - -# Configuration - -For more information about configuring the custom resources for the scans, profiles, and benchmark versions, refer to this page. 
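
Because scans, profiles, and benchmark versions are implemented as cluster-scoped custom resources, they can also be inspected with `kubectl` once the chart is installed. A minimal sketch — the resource names below are assumptions based on the CRDs shipped with the `rancher-cis-benchmark` chart and may differ between chart versions:

```bash
# List the CIS Benchmark custom resources managed by rancher-cis-benchmark.
# Resource names are assumed from the chart's CRDs and may vary by version.
kubectl get clusterscans,clusterscanprofiles,clusterscanbenchmarks

# Inspect one profile to see its benchmarkVersion and any skipTests entries
# (the profile name is a placeholder).
kubectl get clusterscanprofiles <profile-name> -o yaml
```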
- -# How-to Guides - -- [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) -- [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) -- [Running a Scan](#running-a-scan) -- [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) -- [Skipping Tests](#skipping-tests) -- [Viewing Reports](#viewing-reports) -- [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) -- [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) -- [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) -### Installing rancher-cis-benchmark - -1. In the Rancher UI, go to the **Cluster Explorer.** -1. Click **Apps.** -1. Click `rancher-cis-benchmark`. -1. Click **Install.** - -**Result:** The CIS scan application is deployed on the Kubernetes cluster. - -### Uninstalling rancher-cis-benchmark - -1. From the **Cluster Explorer,** go to the top left dropdown menu and click **Apps & Marketplace.** -1. Click **Installed Apps.** -1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. -1. Click **Delete** and confirm **Delete.** - -**Result:** The `rancher-cis-benchmark` application is uninstalled. - -### Running a Scan - -When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. - -Note: There is currently a limitation of running only one CIS scan at a time for a cluster. If you create multiple ClusterScan custom resources, they will be run one after the other by the operator, and until one scan finishes, the rest of the ClusterScan custom resources will be in the "Pending" state. - -To run a scan, - -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Click **Create.** - -**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. -### Running a Scan Periodically on a Schedule -_Available as of v2.5.4_ - -To run a ClusterScan on a schedule, - -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Choose the option **Run scan on a schedule.** -1. Enter a valid cron schedule expression in the field **Schedule.** -1. Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. -1. Click **Create.** - -**Result:** The scan runs and reschedules to run according to the cron schedule provided. The **Next Scan** value indicates the next time this scan will run again. 
- -A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. - -You can also see the previous reports by choosing the report from the **Reports** dropdown on the scan detail page. - -### Skipping Tests - -CIS scans can be run using test profiles with user-defined skips. - -To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. - -1. In the **Cluster Explorer,** go to the top-left dropdown menu and click **CIS Benchmark.** -1. Click **Profiles.** -1. From here, you can create a profile in multiple ways. To make a new profile, click **Create** and fill out the form in the UI. To make a new profile based on an existing profile, go to the existing profile, click the three vertical dots, and click **Clone as YAML.** If you are filling out the form, add the tests to skip using the test IDs, using the relevant CIS Benchmark as a reference. If you are creating the new test profile as YAML, you will add the IDs of the tests to skip in the `skipTests` directive. You will also give the profile a name: - - ```yaml - apiVersion: cis.cattle.io/v1 - kind: ClusterScanProfile - metadata: - annotations: - meta.helm.sh/release-name: clusterscan-operator - meta.helm.sh/release-namespace: cis-operator-system - labels: - app.kubernetes.io/managed-by: Helm - name: "" - spec: - benchmarkVersion: cis-1.5 - skipTests: - - "1.1.20" - - "1.1.21" - ``` -1. Click **Create.** - -**Result:** A new CIS scan profile is created. - -When you [run a scan](#running-a-scan) that uses this profile, the defined tests will be skipped during the scan. The skipped tests will be marked in the generated report as `Skip`. - -### Viewing Reports - -To view the generated CIS scan reports, - -1. In the **Cluster Explorer,** go to the top left dropdown menu and click **Cluster Explorer > CIS Benchmark.** -1. The **Scans** page will show the generated reports. To see a detailed report, go to a scan report and click the name. - -One can download the report from the Scans list or from the scan detail page. - -### Enabling Alerting for rancher-cis-benchmark -_Available as of v2.5.4_ - -Alerts can be configured to be sent out for a scan that runs on a schedule. - -> **Prerequisite:** -> -> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/alertmanager/) -> -> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/alertmanager/#example-route-config-for-cis-scan-alerts) - -While installing or upgrading the `rancher-cis-benchmark` application, set the following flag to `true` in the `values.yaml`: - -```yaml -alerts: - enabled: true -``` - -### Configuring Alerts for a Periodic Scan on a Schedule -_Available as of v2.5.4_ - -From Rancher v2.5.4, it is possible to run a ClusterScan on a schedule. - -A scheduled scan can also specify if you should receive alerts when the scan completes. - -Alerts are supported only for a scan that runs on a schedule. 
- -The `rancher-cis-benchmark` application supports two types of alerts: - -- Alert on scan completion: This alert is sent out when the scan run finishes. The alert includes details including the ClusterScan's name and the ClusterScanProfile name. -- Alert on scan failure: This alert is sent out if there are some test failures in the scan run or if the scan is in a `Fail` state. - -> **Prerequisite:** -> -> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/alertmanager/) -> -> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/alertmanager/#example-route-config-for-cis-scan-alerts) - -To configure alerts for a scan that runs on a schedule, - -1. Please enable alerts on the `rancher-cis-benchmark` application (#enabling-alerting-for-rancher-cis-benchmark) -1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** -1. In the **Scans** section, click **Create.** -1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. -1. Choose the option **Run scan on a schedule.** -1. Enter a valid [cron schedule expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) in the field **Schedule.** -1. Check the boxes next to the Alert types under **Alerting.** -1. Optional: Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. -1. Click **Create.** - -**Result:** The scan runs and reschedules to run according to the cron schedule provided. Alerts are sent out when the scan finishes if routes and receiver are configured under `rancher-monitoring` application. - -A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. - -### Creating a Custom Benchmark Version for Running a Cluster Scan -_Available as of v2.5.4_ - -There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. - -It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. - -For details, see [this page.](./custom-benchmark) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cli/_index.md b/content/rancher/v2.5/en/cli/_index.md deleted file mode 100644 index 6996b6bdf3..0000000000 --- a/content/rancher/v2.5/en/cli/_index.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: CLI with Rancher -description: Interact with Rancher using command line interface (CLI) tools from your workstation. 
-weight: 21 ---- - -- [Rancher CLI](#rancher-cli) - - [Download Rancher CLI](#download-rancher-cli) - - [Requirements](#requirements) - - [CLI Authentication](#cli-authentication) - - [Project Selection](#project-selection) - - [Commands](#commands) - - [Rancher CLI Help](#rancher-cli-help) - - [Limitations](#limitations) -- [kubectl](#kubectl) - - [kubectl Utility](#kubectl-utility) - - [Authentication with kubectl and kubeconfig Tokens with TTL](#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) - -# Rancher CLI - -The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. - -### Download Rancher CLI - -The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -### Requirements - -After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - -- Your Rancher Server URL, which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.5/en/user-settings/api-keys/). - -### CLI Authentication - -Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): - -```bash -$ ./rancher login https:// --token -``` - -If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. - -### Project Selection - -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. - -**Example: `./rancher context switch` Output** -``` -User:rancher-cli-directory user$ ./rancher context switch -NUMBER CLUSTER NAME PROJECT ID PROJECT NAME -1 cluster-2 c-7q96s:p-h4tmb project-2 -2 cluster-2 c-7q96s:project-j6z6d Default -3 cluster-1 c-lchzv:p-xbpdt project-1 -4 cluster-1 c-lchzv:project-s2mch Default -Select a Project: -``` - -After you enter a number, the console displays a message that you've changed projects. - -``` -INFO[0005] Setting new context to project project-1 -INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json -``` - -Ensure you can run `rancher kubectl get pods` successfully. - -### Commands - -The following commands are available for use in Rancher CLI. - -| Command | Result | -|---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e., individual [Helm charts](https://docs.helm.sh/developing_charts/)) or Rancher charts. | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.5/en/helm-charts/). | -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.5/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). 
| -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | -| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | -| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | -| `namespaces, [namespace]` |Performs operations on namespaces. | -| `nodes, [node]` |Performs operations on nodes. | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/). | -| `ps` | Displays [workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads) in a project. | -| `settings, [setting]` | Shows the current settings for your Rancher Server. | -| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | -| `help, [h]` | Shows a list of commands or help for one command. | - - -### Rancher CLI Help - -Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. - -All commands accept the `--help` flag, which documents each command's usage. - -### Limitations - -The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../helm-charts/). - -# kubectl - -Interact with Rancher using kubectl. - -### kubectl Utility - -Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -Configure kubectl by visiting your cluster in the Rancher Web UI, clicking on `Kubeconfig`, copying contents, and putting them into your `~/.kube/config` file. - -Run `kubectl cluster-info` or `kubectl get pods` successfully. - -### Authentication with kubectl and kubeconfig Tokens with TTL - -_Requirements_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.5/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher CLI](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like: -`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. - -This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: - -1. Local -2. Active Directory (LDAP only) -3. FreeIPA -4. OpenLDAP -5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth - -When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. -The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../api/api-tokens/#deleting-tokens). -Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. 
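
To confirm this flow is set up correctly, it helps to verify that the Rancher CLI can be found on your `PATH` before running `kubectl`. A minimal sketch, assuming a kubeconfig file downloaded from the Rancher UI (the path is an example):

```bash
# When kubeconfig token TTL is enforced, the kubeconfig shells out to the
# "rancher" binary to obtain a fresh token, so it must be resolvable on PATH.
command -v rancher || echo "rancher CLI not found on PATH"

# Point kubectl at the downloaded kubeconfig (example path) and run a command;
# if the cached token has expired, this triggers the login prompt again.
export KUBECONFIG=$HOME/.kube/rancher-cluster.yaml
kubectl get pods
```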
diff --git a/content/rancher/v2.5/en/cluster-admin/_index.md b/content/rancher/v2.5/en/cluster-admin/_index.md deleted file mode 100644 index 26a02781cb..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Cluster Administration -weight: 8 -aliases: - - /rancher/v2.x/en/cluster-admin/ ---- - -After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. - -This page covers the following topics: - -- [Switching between clusters](#switching-between-clusters) -- [Managing clusters in Rancher](#managing-clusters-in-rancher) -- [Configuring tools](#configuring-tools) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.5/en/overview/concepts) page. - -## Switching between Clusters - -To switch between clusters, use the drop-down available in the navigation bar. - -Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. - -## Managing Clusters in Rancher - -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.5/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. - -{{% include file="/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Configuring Tools - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - -- Alerts -- Notifiers -- Logging -- Monitoring -- Istio Service Mesh -- OPA Gatekeeper - diff --git a/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md b/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md deleted file mode 100644 index 0f6be35af5..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/backing-up-etcd/_index.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Backing up a Cluster -weight: 2045 -aliases: - - /rancher/v2.x/en/cluster-admin/backing-up-etcd/ ---- - -In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) can be easily performed. - -Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. - -Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
- -This section covers the following topics: - -- [How snapshots work](#how-snapshots-work) -- [Configuring recurring snapshots](#configuring-recurring-snapshots) -- [One-time snapshots](#one-time-snapshots) -- [Snapshot backup targets](#snapshot-backup-targets) - - [Local backup target](#local-backup-target) - - [S3 backup target](#s3-backup-target) - - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) - - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) -- [Viewing available snapshots](#viewing-available-snapshots) -- [Safe timestamps](#safe-timestamps) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -# How Snapshots Work - -### Snapshot Components - -When Rancher creates a snapshot, it includes three components: - -- The cluster data in etcd -- The Kubernetes version -- The cluster configuration in the form of the `cluster.yml` - -Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. - -The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -It's always recommended to take a new snapshot before any upgrades. - -### Generating the Snapshot from etcd Nodes - -For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -### Snapshot Naming Conventions - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: - -- `m` stands for manual -- `r` stands for recurring -- `l` stands for local -- `s` stands for S3 - -Some example snapshot names are: - -- c-9dmxz-rl-8b2cx -- c-9dmxz-ml-kr56m -- c-9dmxz-ms-t6bjb -- c-9dmxz-rs-8gxc8 - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. The snapshot is retrieved from S3, if S3 is configured. -2. The snapshot is unzipped (if zipped). -3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -4. 
The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -5. The cluster is restored and post-restore actions will be done in the cluster. - -# Configuring Recurring Snapshots - -Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. - -By default, [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. - -During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. - -In the **Advanced Cluster Options** section, there are several options available to configure: - -| Option | Description | Default Value| -| --- | ---| --- | -| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. Options are either local or in S3 | local| -|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| -| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | -| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | - -# One-Time Snapshots - -In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. - -1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot. - -2. Click the **⋮ > Snapshot Now**. - -**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. - -# Snapshot Backup Targets - -Rancher supports two different backup targets: - -* [Local Target](#local-backup-target) -* [S3 Target](#s3-backup-target) - -### Local Backup Target - -By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. - -### S3 Backup Target - -The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. 
- -| Option | Description | Required| -|---|---|---| -|S3 Bucket Name| S3 bucket name where backups will be stored| *| -|S3 Region|S3 region for the backup bucket| | -|S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | -|S3 Access Key|S3 access key with permission to access the backup bucket|*| -|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| -| Custom CA Certificate | A custom certificate used to access private S3 backends || - -### Using a custom CA certificate for S3 - -The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. - -### IAM Support for Storing Snapshots in S3 - -The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: - - - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. - - The cluster etcd nodes must have network access to the specified S3 endpoint. - - The Rancher Server worker node(s) must have an instance role that has read/write to the designated backup bucket. - - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. - - To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -# Viewing Available Snapshots - -The list of all available snapshots for the cluster is available in the Rancher UI. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -# Safe Timestamps - -Snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. - -The option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. - -This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. - -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.5/en/cluster-admin/restoring-etcd/). 
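
Besides checking the list of snapshots in the Rancher UI, the backup targets can be verified directly. A minimal sketch — the bucket name and endpoint are placeholders, and the AWS CLI is only one of several ways to query an S3-compatible target:

```bash
# On an etcd node: list snapshots stored at the default local location.
ls -lh /opt/rke/etcd-snapshots

# From a workstation with credentials for the backup target: list uploaded
# snapshots (bucket name and endpoint are placeholders for your own target).
aws s3 ls s3://my-etcd-backups/ --endpoint-url https://s3.example.com
```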
diff --git a/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md b/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md deleted file mode 100644 index 168a7e4094..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/certificate-rotation/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Certificate Rotation -weight: 2040 -aliases: - - /rancher/v2.x/en/cluster-admin/certificate-rotation/ ---- - -> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. - -By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. - -Certificates can be rotated for the following services: - -- etcd -- kubelet (node certificate) -- kubelet (serving certificate, if [enabled]({{}}/rke/latest/en/config-options/services/#kubelet-options)) -- kube-apiserver -- kube-proxy -- kube-scheduler -- kube-controller-manager - - -### Certificate Rotation - -Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. - -1. In the **Global** view, navigate to the cluster that you want to rotate certificates. - -2. Select **⋮ > Rotate Certificates**. - -3. Select which certificates that you want to rotate. - - * Rotate all Service certificates (keep the same CA) - * Rotate an individual service and choose one of the services from the drop-down menu - -4. Click **Save**. - -**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. - -> **Note:** Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher launched Kubernetes clusters. diff --git a/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md deleted file mode 100644 index 00f1055351..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: Removing Kubernetes Components from Nodes -description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually -weight: 2055 -aliases: - - /rancher/v2.5/en/faq/cleaning-cluster-nodes/ - - /rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/ ---- - -This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. - -When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. - -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. 
When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. - -## What Gets Removed? - -When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. - -| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Registered Nodes][4] | -| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | -| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | -| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | -| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | -| Rancher Deployment | ✓ | ✓ | ✓ | | -| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | -| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | -| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | - -[1]: {{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/ - -## Removing a Node from a Cluster by Rancher UI - -When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -## Removing Rancher Components from a Cluster Manually - -When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. - ->**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. - -### Removing Rancher Components from Registered Clusters - -For registered clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. - -After the registered cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. - -{{% tabs %}} -{{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. - -After you initiate the removal of a registered cluster using the Rancher UI (or API), the following events occur. - -1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. 
This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. - -1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. - -1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. - -**Result:** All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% tab "By Script" %}} -Rather than cleaning registered cluster nodes using the Rancher UI, you can run a script instead. - ->**Prerequisite:** -> ->Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. - -1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: - - ``` - chmod +x user-cluster.sh - ``` - -1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. - - If you don't have an air gap environment, skip this step. - -1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): - - >**Tip:** - > - >Add the `-dry-run` flag to preview the script's outcome without making changes. - ``` - ./user-cluster.sh rancher/rancher-agent: - ``` - -**Result:** The script runs. All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% /tabs %}} - -### Windows Nodes - -To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. - -To run the script, you can use this command in the PowerShell: - -``` -pushd c:\etc\rancher -.\cleanup.ps1 -popd -``` - -**Result:** The node is reset and can be re-added to a Kubernetes cluster. - -### Docker Containers, Images, and Volumes - -Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) - -**To clean all Docker containers, images and volumes:** - -``` -docker rm -f $(docker ps -qa) -docker rmi -f $(docker images -q) -docker volume rm $(docker volume ls -q) -``` - -### Mounts - -Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. 
- -Mounts | ---------| -`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | -`/var/lib/kubelet` | -`/var/lib/rancher` | - -**To unmount all mounts:** - -``` -for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done -``` - -### Directories and Files - -The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. - ->**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. - -Directories | ---------| -`/etc/ceph` | -`/etc/cni` | -`/etc/kubernetes` | -`/opt/cni` | -`/opt/rke` | -`/run/secrets/kubernetes.io` | -`/run/calico` | -`/run/flannel` | -`/var/lib/calico` | -`/var/lib/etcd` | -`/var/lib/cni` | -`/var/lib/kubelet` | -`/var/lib/rancher/rke/log` | -`/var/log/containers` | -`/var/log/kube-audit` | -`/var/log/pods` | -`/var/run/calico` | - -**To clean the directories:** - -``` -rm -rf /etc/ceph \ - /etc/cni \ - /etc/kubernetes \ - /opt/cni \ - /opt/rke \ - /run/secrets/kubernetes.io \ - /run/calico \ - /run/flannel \ - /var/lib/calico \ - /var/lib/etcd \ - /var/lib/cni \ - /var/lib/kubelet \ - /var/lib/rancher/rke/log \ - /var/log/containers \ - /var/log/kube-audit \ - /var/log/pods \ - /var/run/calico -``` - -### Network Interfaces and Iptables - -The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. - -### Network Interfaces - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. - -Interfaces | ---------| -`flannel.1` | -`cni0` | -`tunl0` | -`caliXXXXXXXXXXX` (random interface names) | -`vethXXXXXXXX` (random interface names) | - -**To list all interfaces:** - -``` -# Using ip -ip address show - -# Using ifconfig -ifconfig -a -``` - -**To remove an interface:** - -``` -ip link delete interface_name -``` - -### Iptables - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. - -Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
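If restarting the node is not possible, the rules can also be flushed by hand. Keep in mind that this is a broad operation: it removes all iptables rules and user-defined chains on the host, not only the ones listed below, so only use it on a node you are decommissioning:

```
iptables -F && iptables -X
iptables -t nat -F && iptables -t nat -X
iptables -t mangle -F && iptables -t mangle -X
```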
- -Chains | ---------| -`cali-failsafe-in` | -`cali-failsafe-out` | -`cali-fip-dnat` | -`cali-fip-snat` | -`cali-from-hep-forward` | -`cali-from-host-endpoint` | -`cali-from-wl-dispatch` | -`cali-fw-caliXXXXXXXXXXX` (random chain names) | -`cali-nat-outgoing` | -`cali-pri-kns.NAMESPACE` (chain per namespace) | -`cali-pro-kns.NAMESPACE` (chain per namespace) | -`cali-to-hep-forward` | -`cali-to-host-endpoint` | -`cali-to-wl-dispatch` | -`cali-tw-caliXXXXXXXXXXX` (random chain names) | -`cali-wl-to-host` | -`KUBE-EXTERNAL-SERVICES` | -`KUBE-FIREWALL` | -`KUBE-MARK-DROP` | -`KUBE-MARK-MASQ` | -`KUBE-NODEPORTS` | -`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | -`KUBE-SERVICES` | -`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | - -**To list all iptables rules:** - -``` -iptables -L -t nat -iptables -L -t mangle -iptables -L -``` diff --git a/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md deleted file mode 100644 index 78853f23b7..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cloning-clusters/_index.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Cloning Clusters -weight: 2035 -aliases: - - /rancher/v2.5/en/cluster-provisioning/cloning-clusters/ - - /rancher/v2.x/en/cluster-admin/cloning-clusters/ ---- - -If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. - -Duplication of registered clusters is not supported. - -| Cluster Type | Cloneable? | -|----------------------------------|---------------| -| [Nodes Hosted by Infrastructure Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) | ✓ | -| [Hosted Kubernetes Providers]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) | ✓ | -| [Custom Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) | ✓ | -| [Registered Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/) | | - -> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. - -## Prerequisites - -Download and install [Rancher CLI]({{}}/rancher/v2.5/en/cli). Remember to [create an API bearer token]({{}}/rancher/v2.5/en/user-settings/api-keys) if necessary. - - -## 1. Export Cluster Config - -Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. - -1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. - -1. Enter the following command to list the clusters managed by Rancher. - - - ./rancher cluster ls - - -1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. - -1. Enter the following command to export the configuration for your cluster. - - - ./rancher clusters export - - - **Step Result:** The YAML for a cloned cluster prints to Terminal. - -1. Copy the YAML to your clipboard and paste it in a new file. 
Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). - -## 2. Modify Cluster Config - -Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. - -> **Note:** Cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. - - >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. - - -1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. - - ```yml - Version: v3 - clusters: - : # ENTER UNIQUE NAME - dockerRootDir: /var/lib/docker - enableNetworkPolicy: false - rancherKubernetesEngineConfig: - addonJobTimeout: 30 - authentication: - strategy: x509 - authorization: {} - bastionHost: {} - cloudProvider: {} - ignoreDockerVersion: true - ``` - -1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. - - ```yml - nodePools: - : - clusterId: do - controlPlane: true - etcd: true - hostnamePrefix: mark-do - nodeTemplateId: do - quantity: 1 - worker: true - ``` - -1. When you're done, save and close the configuration. - -## 3. Launch Cloned Cluster - -Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: - - ./rancher up --file cluster-template.yml - -**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md deleted file mode 100644 index 5b39cd7712..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: Cluster Access -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/cluster-access/ ---- - -This section is about what tools can be used to access clusters managed by Rancher. - -For information on how to give users permission to access a cluster, see the section on [adding users to clusters.]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/) - -For more information on roles-based access control, see [this section.]({{}}/rancher/v2.5/en/admin-settings/rbac/) - -For information on how to set up an authentication system, see [this section.]({{}}/rancher/v2.5/en/admin-settings/authentication/) - - -### Rancher UI - -Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. 
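For example, the cluster list shown in the UI can also be retrieved with the Rancher CLI or from the API directly. A minimal sketch, with the server URL and API bearer token as placeholders you substitute with your own values:

```
# Rancher CLI (after authenticating with `rancher login`)
rancher cluster ls

# Rancher API, using an API bearer token
curl -s -H "Authorization: Bearer token-xxxxx:SECRET" https://RANCHER_SERVER/v3/clusters
```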
- -### kubectl - -You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: - -- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/). -- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/). - -### Rancher CLI - -You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI]({{}}/rancher/v2.5/en/cli/). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. - -### Rancher API - -Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key]({{}}/rancher/v2.5/en/user-settings/api-keys/). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md deleted file mode 100644 index d463b7c049..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/ace/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: How the Authorized Cluster Endpoint Works -weight: 2015 -aliases: - - /rancher/v2.x/en/cluster-admin/cluster-access/ace/ ---- - -This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -### About the kubeconfig File - -The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). - -This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. - -After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.5/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](../cli) to be present in your PATH. 
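As a quick sanity check after downloading a kubeconfig file, you can point kubectl at it and list the contexts it contains. The file path below is only an example:

```
export KUBECONFIG=$HOME/.kube/my-cluster.yaml
kubectl config get-contexts
kubectl get nodes
```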
- - -### Two Authentication Methods for RKE Clusters - -If the cluster is not an [RKE cluster,]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. - -For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: - -- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. -- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. - -This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. - -To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](../kubectl/#authenticating-directly-with-a-downstream-cluster) - -These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page]({{}}/rancher/v2.5/en/overview/architecture/#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. - -### About the kube-api-auth Authentication Webhook - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) which is only available for [RKE clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. - -The scheduling rules for `kube-api-auth` are listed below: - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md deleted file mode 100644 index 202c7c773b..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/_index.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: Adding Users to Clusters -weight: 2020 -aliases: - - /rancher/v2.5/en/tasks/clusters/adding-managing-cluster-members/ - - /rancher/v2.5/en/k8s-in-rancher/cluster-members/ - - /rancher/v2.5/en/cluster-admin/cluster-members - - /rancher/v2.5/en/cluster-provisioning/cluster-members/ - - /rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/ ---- - -If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. - ->**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) instead. - -There are two contexts where you can add cluster members: - -- Adding Members to a New Cluster - - You can add members to a cluster as you create it (recommended if possible). - -- [Adding Members to an Existing Cluster](#editing-cluster-membership) - - You can always add members to a cluster after a cluster is provisioned. - -## Editing Cluster Membership - -Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. - -1. From the **Global** view, open the cluster that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the cluster. - - If external authentication is configured: - - - Rancher returns users from your [external authentication]({{}}/rancher/v2.5/en/admin-settings/authentication/) source as you type. - - >**Using AD but can't find your users?** - >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5]({{}}/rancher/v2.5/en/admin-settings/authentication/ad/). - - - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users]({{}}/rancher/v2.5/en/admin-settings/authentication/#external-authentication-configuration-and-principal-users). - -4. Assign the user or group **Cluster** roles. - - [What are Cluster Roles?]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) - - >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.5/en/admin-settings/rbac/locked-roles). - -**Result:** The chosen users are added to the cluster. - -- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. 
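If you prefer to review memberships outside the UI, Rancher stores them as `ClusterRoleTemplateBinding` resources on the local (Rancher management) cluster, in a namespace named after the downstream cluster ID. A hedged example, assuming kubectl access to the local cluster and `c-xxxxx` as a placeholder cluster ID:

```
kubectl get clusterroletemplatebindings.management.cattle.io -n c-xxxxx
```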
diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md deleted file mode 100644 index 5b78038e54..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Access a Cluster with Kubectl and kubeconfig" -description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." -weight: 2010 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/kubectl/ - - /rancher/v2.5/en/cluster-admin/kubectl - - /rancher/v2.5/en/concepts/clusters/kubeconfig-files/ - - /rancher/v2.5/en/k8s-in-rancher/kubeconfig/ - - /rancher/2.x/en/cluster-admin/kubeconfig - - /rancher/v2.x/en/cluster-admin/cluster-access/kubectl/ ---- - -This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. - -For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). - -- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) -- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) -- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) -- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) - - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) - - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) - - -### Accessing Clusters with kubectl Shell in the Rancher UI - -You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. - -1. From the **Global** view, open the cluster that you want to access with kubectl. - -2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. - -### Accessing Clusters with kubectl from Your Workstation - -This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. - -This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. - -> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. -1. Click **Kubeconfig File**. -1. Copy the contents displayed to your clipboard. -1. Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. 
Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: - ``` - kubectl --kubeconfig /custom/path/kube.config get pods - ``` -1. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. - - -### Note on Resources Created Using kubectl - -Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. - -# Authenticating Directly with a Downstream Cluster - -This section intended to help you set up an alternative method to access an [RKE cluster.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters) - -This method is only available for RKE clusters that have the [authorized cluster endpoint]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](../ace) - -We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. - -> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) - -To find the name of the context(s) in your downloaded kubeconfig file, run: - -``` -kubectl config get-contexts --kubeconfig /custom/path/kube.config -CURRENT NAME CLUSTER AUTHINFO NAMESPACE -* my-cluster my-cluster user-46tmn - my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn -``` - -In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. - -With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with an downstream RKE cluster directly. - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. - -When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. - -### Connecting Directly to Clusters with FQDN Defined - -If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `-fqdn`. 
When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: - -``` -kubectl --context -fqdn get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods -``` - -### Connecting Directly to Clusters without FQDN Defined - -If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. - -Assuming the kubeconfig file is located at `~/.kube/config`: -``` -kubectl --context - get nodes -``` -Directly referencing the location of the kubeconfig file: -``` -kubectl --kubeconfig /custom/path/kube.config --context - get pods -``` diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md deleted file mode 100644 index 77a6f82013..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Cluster Autoscaler -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/ ---- - -In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. - -To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. - -Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. - -It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. - -# Cloud Providers - -Cluster Autoscaler provides support to distinct cloud providers. 
For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) - -### Setting up Cluster Autoscaler on Amazon Cloud Provider - -For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon) diff --git a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md deleted file mode 100644 index 940ea54882..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/cluster-autoscaler/amazon/_index.md +++ /dev/null @@ -1,582 +0,0 @@ ---- -title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/ ---- - -This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. - -- [Prerequisites](#prerequisites) -- [1. Create a Custom Cluster](#1-create-a-custom-cluster) -- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) -- [3. Deploy Nodes](#3-deploy-nodes) -- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) - - [Parameters](#parameters) - - [Deployment](#deployment) -- [Testing](#testing) - - [Generating Load](#generating-load) - - [Checking Scale](#checking-scale) - -# Prerequisites - -These elements are required to follow this guide: - -* The Rancher server is up and running -* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles - -### 1. Create a Custom Cluster - -On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: - -* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag -* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag -* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster - - ```sh - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum - ``` - -### 2. Configure the Cloud Provider - -On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. - -1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. - * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. 
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:DescribeTags", - "autoscaling:DescribeLaunchConfigurations", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. - * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - - * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: 
`K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.5/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--etcd --controlplane" - - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. - * IAM profile: Provides cloud_provider worker integration. - This profile is called `K8sWorkerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] - } - ``` - - * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.5/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--worker" - - 
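    # Register this instance with the Rancher server as a worker node; the agent image tag, server URL, token, and CA checksum are taken from the cluster's registration command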
sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -More info is at [RKE clusters on AWS]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - -### 3. Deploy Nodes - -Once we've configured AWS, let's create VMs to bootstrap our cluster: - -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/production/) - * IAM role: `K8sMasterRole` - * Security group: `K8sMasterSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` - -* worker: Define an ASG on EC2 with the following settings: - * Name: `K8sWorkerAsg` - * IAM role: `K8sWorkerRole` - * Security group: `K8sWorkerSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` - * Instances: - * minimum: 2 - * desired: 2 - * maximum: 10 - -Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. - -### 4. Install Cluster-autoscaler - -At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. - -#### Parameters - -This table shows cluster-autoscaler parameters for fine tuning: - -| Parameter | Default | Description | -|---|---|---| -|cluster-name|-|Autoscaled cluster name, if available| -|address|:8085|The address to expose Prometheus metrics| -|kubernetes|-|Kubernetes master location. Leave blank for default| -|kubeconfig|-|Path to kubeconfig file with authorization and master location information| -|cloud-config|-|The path to the cloud provider configuration file. 
Empty string for no configuration file| -|namespace|"kube-system"|Namespace in which cluster-autoscaler run| -|scale-down-enabled|true|Should CA scale down the cluster| -|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| -|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| -|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| -|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| -|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| -|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| -|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| -|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| -|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| -|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| -|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| -|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -cloud-provider|-|Cloud provider type| -|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| -|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| -|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| -|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| -|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| -|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| -|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| -|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| -|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: ::| -|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. 
A definition is expressed `:[[=]]`| -|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| -|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| -|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| -|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| -|write-status-configmap|true|Should CA write status information to a configmap| -|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| -|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| -|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| -|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| -|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| -|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| -|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| -|regional|false|Cluster is regional| -|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| -|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| -|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| -|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| -|profiling|false|Is debug/pprof endpoint enabled| - -#### Deployment - -Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: - - -```yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler 
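# Bind the namespaced Role above to the cluster-autoscaler ServiceAccount in kube-system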
-subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler -spec: - replicas: 1 - selector: - matchLabels: - app: cluster-autoscaler - template: - metadata: - labels: - app: cluster-autoscaler - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8085' - spec: - serviceAccountName: cluster-autoscaler - tolerations: - - effect: NoSchedule - operator: "Equal" - value: "true" - key: node-role.kubernetes.io/controlplane - nodeSelector: - node-role.kubernetes.io/controlplane: "true" - containers: - - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --v=4 - - --stderrthreshold=info - - --cloud-provider=aws - - --skip-nodes-with-local-storage=false - - --expander=least-waste - - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs/ca-certificates.crt - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - -``` - -Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): - -```sh -kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml -``` - -**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) - -# Testing - -At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. - -### Generating Load - -We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 3 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 1000m - memory: 1024Mi - requests: - cpu: 1000m - memory: 1024Mi -``` - -Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): - -``` -kubectl -n default apply -f test-deployment.yaml -``` - -### Checking Scale - -Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. - -Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md deleted file mode 100644 index e98acf0367..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Cluster Configuration -weight: 2025 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/editing-clusters - - /rancher/v2.x/en/cluster-admin/editing-clusters/ ---- - -After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. - -For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members) - -### Cluster Configuration References - -The cluster configuration options depend on the type of Kubernetes cluster: - -- [RKE Cluster Configuration](./rke-config-reference) -- [EKS Cluster Configuration](./eks-config-reference) -- [GKE Cluster Configuration](./gke-config-reference) - -### Cluster Management Capabilities by Cluster Type - -The options and settings available for an existing cluster change based on the method that you used to provision it. - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table" %}} - diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md deleted file mode 100644 index 0ea3ab8ad8..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/_index.md +++ /dev/null @@ -1,421 +0,0 @@ ---- -title: EKS Cluster Configuration Reference -shortTitle: EKS Cluster Configuration -weight: 2 ---- - -{{% tabs %}} -{{% tab "Rancher v2.5.6+" %}} - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. 
- -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). - -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
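
As a rough illustration of how the public CIDR restriction and private access work together, the same settings can also be applied directly with the AWS CLI. This is only a sketch; the region, cluster name, and CIDR block below are placeholders, and for Rancher-provisioned EKS clusters it is generally better to make this change through Rancher so that the synced cluster configuration does not overwrite it:

```sh
# Restrict the public endpoint to a trusted CIDR block while keeping the
# private endpoint enabled, so traffic inside the VPC is unaffected.
aws eks update-cluster-config \
  --region us-west-2 \
  --name my-eks-cluster \
  --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.0/24",endpointPrivateAccess=true
```
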
- -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -#### Bring your own launch template - -A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. 
Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. - -| Option | Description | Required/Optional | -| ------ | ----------- | ----------------- | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | -| Image ID | Specify a custom AMI for the nodes. Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | -| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | -| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | -| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | -| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | - -#### Rancher-managed launch templates - -If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. - -#### Custom AMIs - -If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. - -If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. 
- ->**Note** ->The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. - -#### Spot instances - -Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. - ->**Note** ->Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. - -#### Node Group Settings - -The following settings are also configurable. All of these except for the "Node Group Name" are editable after the node group is created. - -| Option | Description | -| ------- | ------------ | -| Node Group Name | The name of the node group. | -| Desired ASG Size | The desired number of instances. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Labels | Kubernetes labels applied to the nodes in the managed node group. Note: Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) | -| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. | - - -{{% /tab %}} -{{% tab "Rancher v2.5.0-v2.5.5" %}} - -### Changes in Rancher v2.5 - -More EKS options can be configured when you create an EKS cluster in Rancher, including the following: - -- Managed node groups -- Desired size, minimum size, maximum size (requires the Cluster Autoscaler to be installed) -- Control plane logging -- Secrets encryption with KMS - -The following capabilities have been added for configuring EKS clusters in Rancher: - -- GPU support -- Exclusively use managed nodegroups that come with the most up-to-date AMIs -- Add new nodes -- Upgrade nodes -- Add and remove node groups -- Disable and enable private access -- Add restrictions to public access -- Use your cloud credentials to create the EKS cluster instead of passing in your access key and secret key - -Due to the way that the cluster data is synced with EKS, if the cluster is modified from another source, such as in the EKS console, and in Rancher within five minutes, it could cause some changes to be overwritten. For information about how the sync works and how to configure it, refer to [this section](#syncing). - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. 
- -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/) | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Secrets Encryption - - - -Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) - -### API Server Endpoint Access - - - -Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Private-only API Endpoints - -If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. - -There are two ways to avoid this extra manual step: -- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. -- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). - -### Public Access Endpoints - - - -Optionally limit access to the public endpoint via explicit CIDR blocks. - -If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
- -One of the following is required to enable private access: -- Rancher's IP must be part of an allowed CIDR block -- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group - -For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) - -### Subnet - - - -| Option | Description | -| ------- | ------------ | -| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | -| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Logging - - - -Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. - -Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. - -For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) - -### Managed Node Groups - - - -Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. - -For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - -Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version. You can configure whether the AMI has GPU enabled. 
- -| Option | Description | -| ------- | ------------ | -| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | -| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | -| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | - -{{% /tab %}} -{{% tab "Rancher prior to v2.5" %}} - - -### Account Access - - - -Complete each drop-down and field using the information obtained for your IAM policy. - -| Setting | Description | -| ---------- | -------------------------------------------------------------------------------------------------------------------- | -| Region | From the drop-down choose the geographical region in which to build your cluster. | -| Access Key | Enter the access key that you created for your IAM policy. | -| Secret Key | Enter the secret key that you created for your IAM policy. | - -### Service Role - - - -Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). - -Service Role | Description --------------|--------------------------- -Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. -Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). - -### Public IP for Worker Nodes - - - -Your selection for this option determines what options are available for **VPC & Subnet**. - -Option | Description --------|------------ -Yes | When your cluster nodes are provisioned, they're assigned a both a private and public IP address. -No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. - -### VPC & Subnet - - - -The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) - -Option | Description - -------|------------ - Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. - Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. - - For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. - -- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) -- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) - - -If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. - -{{% accordion id="yes" label="Click to expand" %}} - -If you're using **Custom: Choose from your existing VPC and Subnets**: - -(If you're using **Standard**, skip to the [instance options.)](#select-instance-options-2-4) - -1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. - -1. Click **Next: Select Security Group**. -{{% /accordion %}} - -If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. -{{% accordion id="no" label="Click to expand" %}} -Follow the steps below. - ->**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). - -1. From the drop-down that displays, choose a VPC. - -1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. 
- -{{% /accordion %}} - -### Security Group - - - -Amazon Documentation: - -- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) -- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) - -### Instance Options - - - -Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. - -Option | Description --------|------------ -Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. -Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. -Desired ASG Size | The number of instances that your cluster will provision. -User Data | Custom commands can to be passed to perform automated configuration tasks **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ - -{{% /tab %}} -{{% /tabs %}} - - - -### Configuring the Refresh Interval - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -The `eks-refresh-cron` setting is deprecated. It has been migrated to the `eks-refresh` setting, which is an integer representing seconds. - -The default value is 300 seconds. - -The syncing interval can be changed by running `kubectl edit setting eks-refresh`. - -If the `eks-refresh-cron` setting was previously set, the migration will happen automatically. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. - -{{% /tab %}} -{{% tab "Before v2.5.8" %}} - -It is possible to change the refresh interval through the setting `eks-refresh-cron`. This setting accepts values in the Cron format. The default is `*/5 * * * *`. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md deleted file mode 100644 index 67b0c60360..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/_index.md +++ /dev/null @@ -1,453 +0,0 @@ ---- -title: GKE Cluster Configuration Reference -shortTitle: GKE Cluster Configuration -weight: 3 ---- - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -# Changes in v2.5.8 - -- We now support private GKE clusters. Note: This advanced setup can require more steps during the cluster provisioning process. 
For details, see [this section.](./private-clusters) -- [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc) are now supported. -- We now support more configuration options for Rancher managed GKE clusters: - - Project - - Network policy - - Network policy config - - Node pools and node configuration options: - - More image types are available for the nodes - - The maximum number of pods per node can be configured - - Node pools can be added while configuring the GKE cluster -- When provisioning a GKE cluster, you can now use reusable cloud credentials instead of using a service account token directly to create the cluster. - -# Cluster Location - -| Value | Description | -|--------|--------------| -| Location Type | Zonal or Regional. With GKE, you can create a cluster tailored to the availability requirements of your workload and your budget. By default, a cluster's nodes run in a single compute zone. When multiple zones are selected, the cluster's nodes will span multiple compute zones, while the controlplane is located in a single zone. Regional clusters increase the availability of the controlplane as well. For help choosing the type of cluster availability, refer to [these docs.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#choosing_a_regional_or_zonal_control_plane) | -| Zone | Each region in Compute engine contains a number of zones. For more information about available regions and zones, refer to [these docs.](https://cloud.google.com/compute/docs/regions-zones#available) | -| Additional Zones | For zonal clusters, you can select additional zones to create a [multi-zone cluster.](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#multi-zonal_clusters) | -| Region | For [regional clusters,](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#regional_clusters) you can select a region. For more information about available regions and zones, refer to [this section](https://cloud.google.com/compute/docs/regions-zones#available). The first part of each zone name is the name of the region. | - -# Cluster Options - -### Kubernetes Version - -_Mutable: yes_ - -For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) - -### Container Address Range - -_Mutable: no_ - -The IP address range for pods in the cluster. Must be a valid CIDR range, e.g. 10.42.0.0/16. If not specified, a random range is automatically chosen from 10.0.0.0/8 and will exclude ranges already allocated to VMs, other clusters, or routes. Automatically chosen ranges may conflict with reserved IP addresses, dynamic routes, or routes within VPCs peering with the cluster. - -### Network - -_Mutable: no_ - -The Compute Engine Network that the cluster connects to. Routes and firewalls will be created using this network. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC networks that are shared to your project will appear here. will be available to select in this field. For more information, refer to [this page](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets). 
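
If you are not sure which VPC networks are visible to the project before filling in this field, they can be listed with the `gcloud` CLI. This is a sketch only; the project, network, and region names below are placeholders:

```sh
# List the VPC networks visible to the project, then the subnetworks of one
# network in the region where the cluster will run.
gcloud compute networks list --project my-gcp-project
gcloud compute networks subnets list \
  --project my-gcp-project \
  --network my-vpc-network \
  --regions us-central1
```

The network chosen here must also match the subnetwork selected in the **Node Subnet / Subnet** field described next.
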
- -### Node Subnet / Subnet - -_Mutable: no_ - -The Compute Engine subnetwork that the cluster connects to. This subnetwork must belong to the network specified in the **Network** field. Select an existing subnetwork, or select "Auto Create Subnetwork" to have one automatically created. If not using an existing network, **Subnetwork Name** is required to generate one. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC subnets that are shared to your project will appear here. If using a Shared VPC network, you cannot select "Auto Create Subnetwork". For more information, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) - -### Subnetwork Name - -_Mutable: no_ - -Automatically create a subnetwork with the provided name. Required if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. For more information on subnetworks, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) - -### Ip Aliases - -_Mutable: no_ - -Enable [alias IPs](https://cloud.google.com/vpc/docs/alias-ip). This enables VPC-native traffic routing. Required if using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc). - -### Network Policy - -_Mutable: yes_ - -Enable network policy enforcement on the cluster. A network policy defines the level of communication that can occur between pods and services in the cluster. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy) - -### Node Ipv4 CIDR Block - -_Mutable: no_ - -The IP address range of the instance IPs in this cluster. Can be set if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. Must be a valid CIDR range, e.g. 10.96.0.0/14. For more information on how to determine the IP address range, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) - -### Cluster Secondary Range Name - -_Mutable: no_ - -The name of an existing secondary range for Pod IP addresses. If selected, **Cluster Pod Address Range** will automatically be populated. Required if using a Shared VPC network. - -### Cluster Pod Address Range - -_Mutable: no_ - -The IP address range assigned to pods in the cluster. Must be a valid CIDR range, e.g. 10.96.0.0/11. If not provided, will be created automatically. Must be provided if using a Shared VPC network. For more information on how to determine the IP address range for your pods, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_pods) - -### Services Secondary Range Name - -_Mutable: no_ - -The name of an existing secondary range for service IP addresses. If selected, **Service Address Range** will be automatically populated. Required if using a Shared VPC network. - -### Service Address Range - -_Mutable: no_ - -The address range assigned to the services in the cluster. Must be a valid CIDR range, e.g. 10.94.0.0/18. If not provided, will be created automatically. Must be provided if using a Shared VPC network. 
For more information on how to determine the IP address range for your services, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_svcs) - -### Private Cluster - -_Mutable: no_ - -> Warning: private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/). - -Assign nodes only internal IP addresses. Private cluster nodes cannot access the public internet unless additional networking steps are taken in GCP. - -### Enable Private Endpoint - -> Warning: private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/). - -_Mutable: no_ - -Locks down external access to the control plane endpoint. Only available if **Private Cluster** is also selected. If selected, and if Rancher does not have direct access to the Virtual Private Cloud network the cluster is running in, Rancher will provide a registration command to run on the cluster to enable Rancher to connect to it. - -### Master IPV4 CIDR Block - -_Mutable: no_ - -The IP range for the control plane VPC. - -### Master Authorized Network - -_Mutable: yes_ - -Enable control plane authorized networks to block untrusted non-GCP source IPs from accessing the Kubernetes master through HTTPS. If selected, additional authorized networks may be added. If the cluster is created with a public endpoint, this option is useful for locking down access to the public endpoint to only certain networks, such as the network where your Rancher service is running. If the cluster only has a private endpoint, this setting is required. - -# Additional Options - -### Cluster Addons - -Additional Kubernetes cluster components. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.AddonsConfig) - -#### Horizontal Pod Autoscaling - -_Mutable: yes_ - -The Horizontal Pod Autoscaler changes the shape of your Kubernetes workload by automatically increasing or decreasing the number of Pods in response to the workload's CPU or memory consumption, or in response to custom metrics reported from within Kubernetes or external metrics from sources outside of your cluster. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler) - -#### HTTP (L7) Load Balancing - -_Mutable: yes_ - -HTTP (L7) Load Balancing distributes HTTP and HTTPS traffic to backends hosted on GKE. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer) - -#### Network Policy Config (master only) - -_Mutable: yes_ - -Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the master, it does not track whether network policy is enabled for the nodes. - -### Cluster Features (Alpha Features) - -_Mutable: no_ - -Turns on all Kubernetes alpha API groups and features for the cluster. When enabled, the cluster cannot be upgraded and will be deleted automatically after 30 days. 
Alpha clusters are not recommended for production use as they are not covered by the GKE SLA. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alpha-clusters) - -### Logging Service - -_Mutable: yes_ - -The logging service the cluster uses to write logs. Use either [Cloud Logging](https://cloud.google.com/logging) or no logging service in which case no logs are exported from the cluster. - -### Monitoring Service - -_Mutable: yes_ - -The monitoring service the cluster uses to write metrics. Use either [Cloud Monitoring](https://cloud.google.com/monitoring) or monitoring service in which case no metrics are exported from the cluster. - - -### Maintenance Window - -_Mutable: yes_ - -Set the start time for a 4 hour maintenance window. The time is specified in the UTC time zone using the HH:MM format. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) - -# Node Pools - -In this section, enter details describing the configuration of each node in the node pool. - -### Kubernetes Version - -_Mutable: yes_ - -The Kubernetes version for each node in the node pool. For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) - -### Image Type - -_Mutable: yes_ - -The node operating system image. For more information for the node image options that GKE offers for each OS, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/node-images#available_node_images) - -> Note: the default option is "Container-Optimized OS with Docker". The read-only filesystem on GCP's Container-Optimized OS is not compatible with the [legacy logging]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/logging) implementation in Rancher. If you need to use the legacy logging feature, select "Ubuntu with Docker" or "Ubuntu with Containerd". The [logging feature as of v2.5]({{}}/rancher/v2.5/en/logging) is compatible with the Container-Optimized OS image. - -> Note: if selecting "Windows Long Term Service Channel" or "Windows Semi-Annual Channel" for the node pool image type, you must also add at least one Container-Optimized OS or Ubuntu node pool. - -### Machine Type - -_Mutable: no_ - -The virtualized hardware resources available to node instances. For more information on Google Cloud machine types, refer to [this page.](https://cloud.google.com/compute/docs/machine-types#machine_types) - -### Root Disk Type - -_Mutable: no_ - -Standard persistent disks are backed by standard hard disk drives (HDD), while SSD persistent disks are backed by solid state drives (SSD). For more information, refer to [this section.](https://cloud.google.com/compute/docs/disks) - -### Local SSD Disks - -_Mutable: no_ - -Configure each node's local SSD disk storage in GB. Local SSDs are physically attached to the server that hosts your VM instance. Local SSDs have higher throughput and lower latency than standard persistent disks or SSD persistent disks. The data that you store on a local SSD persists only until the instance is stopped or deleted. 
For more information, see [this section.](https://cloud.google.com/compute/docs/disks#localssds) - -### Preemptible nodes (beta) - -_Mutable: no_ - -Preemptible nodes, also called preemptible VMs, are Compute Engine VM instances that last a maximum of 24 hours in general, and provide no availability guarantees. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms) - -### Taints - -_Mutable: no_ - -When you apply a taint to a node, only Pods that tolerate the taint are allowed to run on the node. In a GKE cluster, you can apply a taint to a node pool, which applies the taint to all nodes in the pool. - -### Node Labels - -_Mutable: no_ - -You can apply labels to the node pool, which applies the labels to all nodes in the pool. - -Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) - -# Group Details - -In this section, enter details describing the node pool. - -### Name - -_Mutable: no_ - -Enter a name for the node pool. - -### Initial Node Count - -_Mutable: yes_ - -Integer for the starting number of nodes in the node pool. - -### Max Pod Per Node - -_Mutable: no_ - -GKE has a hard limit of 110 Pods per node. For more information on the Kubernetes limits, see [this section.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#dimension_limits) - -### Autoscaling - -_Mutable: yes_ - -Node pool autoscaling dynamically creates or deletes nodes based on the demands of your workload. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler) - -### Auto Repair - -_Mutable: yes_ - -GKE's node auto-repair feature helps you keep the nodes in your cluster in a healthy, running state. When enabled, GKE makes periodic checks on the health state of each node in your cluster. If a node fails consecutive health checks over an extended time period, GKE initiates a repair process for that node. For more information, see the section on [auto-repairing nodes.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair) - -### Auto Upgrade - -_Mutable: yes_ - -When enabled, the auto-upgrade feature keeps the nodes in your cluster up-to-date with the cluster control plane (master) version when your control plane is [updated on your behalf.](https://cloud.google.com/kubernetes-engine/upgrades#automatic_cp_upgrades) For more information about auto-upgrading nodes, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades) - -### Access Scopes - -_Mutable: no_ - -Access scopes are the legacy method of specifying permissions for your nodes. 
- -- **Allow default access:** The default access for new clusters is the [Compute Engine default service account.](https://cloud.google.com/compute/docs/access/service-accounts?hl=en_US#default_service_account) -- **Allow full access to all Cloud APIs:** Generally, you can just set the cloud-platform access scope to allow full access to all Cloud APIs, then grant the service account only relevant IAM roles. The combination of access scopes granted to the virtual machine instance and the IAM roles granted to the service account determines the amount of access the service account has for that instance. -- **Set access for each API:** Alternatively, you can choose to set specific scopes that permit access to the particular API methods that the service will call. - -For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) - - -### Configuring the Refresh Interval - -The refresh interval can be configured through the setting "gke-refresh", which is an integer representing seconds. - -The default value is 300 seconds. - -The syncing interval can be changed by running `kubectl edit setting gke-refresh`. - -The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for GCP APIs. - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - - -# Labels & Annotations - -Add Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to the cluster. - -Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) - -# Kubernetes Options - -### Location Type -Zonal or Regional. With GKE, you can create a cluster tailored to the availability requirements of your workload and your budget. By default, a cluster's nodes run in a single compute zone. When multiple zones are selected, the cluster's nodes will span multiple compute zones, while the controlplane is located in a single zone. Regional clusters increase the availability of the controlplane as well. For help choosing the type of cluster availability, refer to [these docs.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#choosing_a_regional_or_zonal_control_plane) - -For [regional clusters,](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#regional_clusters) you can select a region. For more information about available regions and zones, refer to [this section](https://cloud.google.com/compute/docs/regions-zones#available). The first part of each zone name is the name of the region. - -The location type can't be changed after the cluster is created. - -### Zone -Each region in Compute engine contains a number of zones. 
- -For more information about available regions and zones, refer to [these docs.](https://cloud.google.com/compute/docs/regions-zones#available) - -### Additional Zones -For zonal clusters, you can select additional zones to create a [multi-zone cluster.](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#multi-zonal_clusters) - -### Kubernetes Version -Link to list of GKE kubernetes versions - -### Container Address Range - -The IP address range for pods in the cluster. Must be a valid CIDR range, e.g. 10.42.0.0/16. If not specified, a random range is automatically chosen from 10.0.0.0/8 and will exclude ranges already allocated to VMs, other clusters, or routes. Automatically chosen ranges may conflict with reserved IP addresses, dynamic routes, or routes within VPCs peering with the cluster. - -### Alpha Features - -Turns on all Kubernetes alpha API groups and features for the cluster. When enabled, the cluster cannot be upgraded and will be deleted automatically after 30 days. Alpha clusters are not recommended for production use as they are not covered by the GKE SLA. For more information, refer to [this page](https://cloud.google.com/kubernetes-engine/docs/concepts/alpha-clusters). - -### Legacy Authorization - -This option is deprecated and it is recommended to leave it disabled. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#leave_abac_disabled) -### Stackdriver Logging - -Enable logging with Google Cloud's Operations Suite, formerly called Stackdriver. For details, see the [documentation.](https://cloud.google.com/logging/docs/basic-concepts) -### Stackdriver Monitoring - -Enable monitoring with Google Cloud's Operations Suite, formerly called Stackdriver. For details, see the [documentation.](https://cloud.google.com/monitoring/docs/monitoring-overview) -### Kubernetes Dashboard - -Enable the [Kubernetes dashboard add-on.](https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards#kubernetes_dashboard) Starting with GKE v1.15, you will no longer be able to enable the Kubernetes Dashboard by using the add-on API. -### Http Load Balancing - -Set up [HTTP(S) load balancing.](https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer) To use Ingress, you must have the HTTP(S) Load Balancing add-on enabled. -### Horizontal Pod Autoscaling - -The Horizontal Pod Autoscaler changes the shape of your Kubernetes workload by automatically increasing or decreasing the number of Pods in response to the workload's CPU or memory consumption, or in response to custom metrics reported from within Kubernetes or external metrics from sources outside of your cluster. For more information, see the [documentation.](https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler) -### Maintenance Window - -Set the start time for a 4 hour maintenance window. The time is specified in the UTC time zone using the HH:MM format. 
For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) - -### Network - -The Compute Engine Network that the cluster connects to. Routes and firewalls will be created using this network. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC networks that are shared to your project will appear here. will be available to select in this field. For more information, refer to [this page](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets). - -### Node Subnet / Subnet - -The Compute Engine subnetwork that the cluster connects to. This subnetwork must belong to the network specified in the **Network** field. Select an existing subnetwork, or select "Auto Create Subnetwork" to have one automatically created. If not using an existing network, **Subnetwork Name** is required to generate one. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC subnets that are shared to your project will appear here. If using a Shared VPC network, you cannot select "Auto Create Subnetwork". For more information, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) -### Ip Aliases - -Enable [alias IPs](https://cloud.google.com/vpc/docs/alias-ip). This enables VPC-native traffic routing. Required if using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc). - -### Pod address range - -When you create a VPC-native cluster, you specify a subnet in a VPC network. The cluster uses three unique subnet IP address ranges for nodes, pods, and services. For more information on IP address ranges, see [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) - -### Service address range - -When you create a VPC-native cluster, you specify a subnet in a VPC network. The cluster uses three unique subnet IP address ranges for nodes, pods, and services. For more information on IP address ranges, see [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) -### Cluster Labels - -A [cluster label](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels) is a key-value pair that helps you organize your Google Cloud clusters. You can attach a label to each resource, then filter the resources based on their labels. Information about labels is forwarded to the billing system, so you can break down your billing charges by label. - -## Node Options - -### Node Count -Integer for the starting number of nodes in the node pool. - -### Machine Type -For more information on Google Cloud machine types, refer to [this page.](https://cloud.google.com/compute/docs/machine-types#machine_types) - -### Image Type -Ubuntu or Container-Optimized OS images are available. 
- -For more information about GKE node image options, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/node-images#available_node_images) - -### Root Disk Type - -Standard persistent disks are backed by standard hard disk drives (HDD), while SSD persistent disks are backed by solid state drives (SSD). For more information, refer to [this section.](https://cloud.google.com/compute/docs/disks) - -### Root Disk Size -The size in GB of the [root disk.](https://cloud.google.com/compute/docs/disks) - -### Local SSD disks -Configure each node's local SSD disk storage in GB. - -Local SSDs are physically attached to the server that hosts your VM instance. Local SSDs have higher throughput and lower latency than standard persistent disks or SSD persistent disks. The data that you store on a local SSD persists only until the instance is stopped or deleted. For more information, see [this section.](https://cloud.google.com/compute/docs/disks#localssds) - -### Preemptible nodes (beta) - -Preemptible nodes, also called preemptible VMs, are Compute Engine VM instances that last a maximum of 24 hours in general, and provide no availability guarantees. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms) - -### Auto Upgrade - -> Note: Enabling the Auto Upgrade feature for Nodes is not recommended. - -When enabled, the auto-upgrade feature keeps the nodes in your cluster up-to-date with the cluster control plane (master) version when your control plane is [updated on your behalf.](https://cloud.google.com/kubernetes-engine/upgrades#automatic_cp_upgrades) For more information about auto-upgrading nodes, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades) - -### Auto Repair - -GKE's node auto-repair feature helps you keep the nodes in your cluster in a healthy, running state. When enabled, GKE makes periodic checks on the health state of each node in your cluster. If a node fails consecutive health checks over an extended time period, GKE initiates a repair process for that node. For more information, see the section on [auto-repairing nodes.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair) - -### Node Pool Autoscaling - -Enable node pool autoscaling based on cluster load. For more information, see the documentation on [adding a node pool with autoscaling.](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler#adding_a_node_pool_with_autoscaling) - -### Taints -When you apply a taint to a node, only Pods that tolerate the taint are allowed to run on the node. In a GKE cluster, you can apply a taint to a node pool, which applies the taint to all nodes in the pool. -### Node Labels -You can apply labels to the node pool, which applies the labels to all nodes in the pool. - -Invalid labels can prevent upgrades or can prevent Rancher from starting. 
For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) - -## Security Options - -### Service Account - -Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) with a JSON private key and provide the JSON here. See [Google Cloud docs](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) for more info about creating a service account. These IAM roles are required: Compute Viewer (`roles/compute.viewer`), (Project) Viewer (`roles/viewer`), Kubernetes Engine Admin (`roles/container.admin`), Service Account User (`roles/iam.serviceAccountUser`). More info on roles can be found [here.](https://cloud.google.com/kubernetes-engine/docs/how-to/iam-integration) - -### Access Scopes - -Access scopes are the legacy method of specifying permissions for your nodes. - -- **Allow default access:** The default access for new clusters is the [Compute Engine default service account.](https://cloud.google.com/compute/docs/access/service-accounts?hl=en_US#default_service_account) -- **Allow full access to all Cloud APIs:** Generally, you can just set the cloud-platform access scope to allow full access to all Cloud APIs, then grant the service account only relevant IAM roles. The combination of access scopes granted to the virtual machine instance and the IAM roles granted to the service account determines the amount of access the service account has for that instance. -- **Set access for each API:** Alternatively, you can choose to set specific scopes that permit access to the particular API methods that the service will call. - -For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md b/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md deleted file mode 100644 index 6d89a799f1..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/editing-clusters/rke-config-reference/_index.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: RKE Cluster Configuration -weight: 1 ---- - -In [clusters launched by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - -- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) -- [Editing Clusters with YAML](#editing-clusters-with-yaml) -- [Updating ingress-nginx](#updating-ingress-nginx) - -# Configuration Options in the Rancher UI - -To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. - -Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -### Kubernetes Version - -The version of Kubernetes installed on each cluster node. 
For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes). - -### Network Provider - -The Container Network Interface (CNI) that powers networking for your cluster.

**Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. - -### Project Network Isolation - -If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. - -Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. - -In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. - -### Nginx Ingress - -If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. - -### Metrics Server Monitoring - -Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. - -### Pod Security Policy Support - -Enables [pod security policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. - -### Docker version on nodes - -Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. - -### Docker Root Directory - -The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. - -### Default Pod Security Policy - -If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. - -### Cloud Provider - -If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. - -# Editing Clusters with YAML - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. -- To read from an existing RKE file, click **Read from File**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -# Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. - -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. 
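As a rough command-line sketch (assuming the default names RKE uses for the ingress controller, namely a `nginx-ingress-controller` DaemonSet in the `ingress-nginx` namespace whose pods carry the `app: ingress-nginx` label; verify these names in your own cluster first):

```
# Check which update strategy the ingress controller DaemonSet is using
kubectl -n ingress-nginx get daemonset nginx-ingress-controller \
  -o jsonpath='{.spec.updateStrategy.type}{"\n"}'

# If the strategy is OnDelete, delete the existing pods so that the DaemonSet
# recreates them with the currently deployed version
kubectl -n ingress-nginx delete pod -l app=ingress-nginx
```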
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/nodes/_index.md b/content/rancher/v2.5/en/cluster-admin/nodes/_index.md deleted file mode 100644 index af48e6fa95..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/nodes/_index.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Nodes and Node Pools -weight: 2030 -aliases: - - /rancher/v2.x/en/cluster-admin/nodes/ ---- - -After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used]({{}}/rancher/v2.5/en/cluster-provisioning/) to provision the cluster, there are different node options available. - -> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters]({{< baseurl >}}/rancher/v2.5/en/k8s-in-rancher/editing-clusters). - -This section covers the following topics: - -- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) - - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) - - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) - - [Registered nodes](#registered-nodes) -- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) -- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) -- [Deleting a node](#deleting-a-node) -- [Scaling nodes](#scaling-nodes) -- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) -- [Cordoning a node](#cordoning-a-node) -- [Draining a node](#draining-a-node) - - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) - - [Grace period](#grace-period) - - [Timeout](#timeout) - - [Drained and cordoned state](#drained-and-cordoned-state) -- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) - -# Node Options Available for Each Cluster Creation Option - -The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. - -| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Registered EKS Nodes][4] | [All Other Registered Nodes][5] | Description | -| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | -------------------| ------------------------------------------------------------------ | -| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable. | -| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable _and_ evicts all pods. | -| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | ✓ | ✓ | Enter a custom name, description, label, or taints for a node. | -| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | ✓ | ✓ | View API data. | -| [Delete](#deleting-a-node) | ✓ | ✓ | | * | * | Deletes defective nodes from the cluster. | -| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | | Download SSH key in order to SSH into the node. | -| [Node Scaling](#scaling-nodes) | ✓ | | | ✓ | | Scale the number of nodes in the node pool up or down. 
| - -[1]: {{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/ -[5]: {{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/ - -\* Delete option accessible via View API - - -### Nodes Hosted by an Infrastructure Provider - -Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) - -Clusters provisioned using [one of the node pool options]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) can be scaled up or down if the node pool is edited. - -A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. - -Rancher uses [node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. - -### Nodes Provisioned by Hosted Kubernetes Providers - -Options for managing nodes [hosted by a Kubernetes provider]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. - -### Registered Nodes - -Although you can deploy workloads to a [registered cluster]({{< baseurl >}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. - -# Managing and Editing Individual Nodes - -Editing a node lets you: - -* Change its name -* Change its description -* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) - -To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). - -# Viewing a Node in the Rancher API - -Select this option to view the node's [API endpoints]({{< baseurl >}}/rancher/v2.5/en/api/). - -# Deleting a Node - -Use **Delete** to remove defective nodes from the cloud provider. - -When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) - ->**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. 
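The name, label, and taint edits described above end up on the underlying Kubernetes node objects, so they can also be inspected or applied with `kubectl`. A small sketch; the node name `worker-1` and the label and taint keys are made-up examples:

```
# List nodes together with the labels that are already set
kubectl get nodes --show-labels

# Add or change a label on a node
kubectl label node worker-1 environment=production --overwrite

# Add a taint so that only pods tolerating it are scheduled on the node
kubectl taint node worker-1 dedicated=gpu:NoSchedule

# Remove the same taint again (note the trailing dash)
kubectl taint node worker-1 dedicated=gpu:NoSchedule-
```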
- -# Scaling Nodes - -For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) by using the scale controls. This option isn't available for other cluster types. - -# SSH into a Node Hosted by an Infrastructure Provider - -For [nodes hosted by an infrastructure provider]({{< baseurl >}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. - -1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. - -1. Find the node that you want to remote into. Select **⋮ > Download Keys**. - - **Step Result:** A ZIP file containing files used for SSH is downloaded. - -1. Extract the ZIP file to any location. - -1. Open Terminal. Change your location to the extracted ZIP file. - -1. Enter the following command: - - ``` - ssh -i id_rsa root@ - ``` - -# Cordoning a Node - -_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. - -# Draining a Node - -_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. - -- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. - -- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. - -You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. - -However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. - -### Aggressive and Safe Draining Options - -There are two drain modes: aggressive and safe. - -- **Aggressive Mode** - - In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. - - Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. - -- **Safe Mode** - - If a node has standalone pods or ephemeral data it will be cordoned but not drained. -### Grace Period - -The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. 
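For reference, the same cordon, drain, and uncordon cycle can be run directly with `kubectl`; the flags map onto the Rancher options described in this section, and the node name and values below are only illustrative:

```
# Cordon: mark the node unschedulable without touching running pods
kubectl cordon worker-1

# Drain: cordon the node (if it isn't already) and evict its pods.
# --grace-period and --timeout correspond to the Grace Period and Timeout
# options; --delete-emptydir-data mirrors the aggressive handling of emptyDir
# data (older kubectl releases call this flag --delete-local-data).
kubectl drain worker-1 \
  --ignore-daemonsets \
  --delete-emptydir-data \
  --grace-period=60 \
  --timeout=120s

# Uncordon: make the node schedulable again once maintenance is finished
kubectl uncordon worker-1
```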
- -### Timeout - -The amount of time drain should continue to wait before giving up. - ->**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12. - -### Drained and Cordoned State - -If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. - -If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. - -Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. - ->**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). - -# Labeling a Node to be Ignored by Rancher - -Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. - -Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. - -In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. - -You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. - -> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. - -### Labeling Nodes to be Ignored with kubectl - -To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: - -``` -cattle.rancher.io/node-status: ignore -``` - -**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. - -If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. - -If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. - -If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings in the Rancher API under `v3/settings/ignore-node-name`. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md b/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md deleted file mode 100644 index 3614a7b6e9..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/pod-security-policy/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Adding a Pod Security Policy -weight: 80 -aliases: - - /rancher/v2.x/en/cluster-admin/pod-security-policy/ ---- - -> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) - -When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/), which is a set of rules that monitors the conditions and settings in your pods. 
If a pod doesn't meet the rules specified in your policy, the policy stops it from running. - -You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. - -1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**. - -2. Expand **Cluster Options**. - -3. From **Pod Security Policy Support**, select **Enabled**. - - >**Note:** This option is only available for clusters [provisioned by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). - -4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. - - Rancher ships with [policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/#default-pod-security-policies) as well. - -5. Click **Save**. - -**Result:** The pod security policy is applied to the cluster and any projects within the cluster. - ->**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. -> ->To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md b/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md deleted file mode 100644 index 493331bc93..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/projects-and-namespaces/_index.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Projects and Kubernetes Namespaces with Rancher -description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces -weight: 2032 -aliases: - - /rancher/v2.5/en/concepts/projects/ - - /rancher/v2.5/en/tasks/projects/ - - /rancher/v2.5/en/tasks/projects/create-project/ - - /rancher/v2.5/en/tasks/projects/create-project/ - - /rancher/v2.x/en/cluster-admin/projects-and-namespaces/ ---- - -A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. - -A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -This section describes how projects and namespaces work with Rancher. It covers the following topics: - -- [About namespaces](#about-namespaces) -- [About projects](#about-projects) - - [The cluster's default project](#the-cluster-s-default-project) - - [The system project](#the-system-project) -- [Project authorization](#project-authorization) -- [Pod security policies](#pod-security-policies) -- [Creating projects](#creating-projects) -- [Switching between clusters and projects](#switching-between-clusters-and-projects) - -# About Namespaces - -A namespace is a concept introduced by Kubernetes. 
According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) - -> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. - -Namespaces provide the following functionality: - -- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. -- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. - -You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. - -You can assign the following resources directly to namespaces: - -- [Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/) -- [Load Balancers/Ingress]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/) -- [Service Discovery Records]({{}}/rancher/v2.5/en/k8s-in-rancher/service-discovery/) -- [Persistent Volume Claims]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) -- [Certificates]({{}}/rancher/v2.5/en/k8s-in-rancher/certificates/) -- [ConfigMaps]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps/) -- [Registries]({{}}/rancher/v2.5/en/k8s-in-rancher/registries/) -- [Secrets]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/) - -To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. - -For more information on creating and moving namespaces, see [Namespaces]({{}}/rancher/v2.5/en/project-admin/namespaces/). - -### Role-based access control issues with namespaces and kubectl - -Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. - -This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. - -If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.5/en/project-admin/namespaces/) to ensure that you will have permission to access the namespace. - -If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. - -# About Projects - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. - -In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. 
A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. - -You can use projects to perform actions such as: - -- Assign users to a group of namespaces (i.e., [project membership]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/project-members)). -- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/). -- Assign resources to the project. -- Assign Pod Security Policies. - -When you create a cluster, two projects are automatically created within it: - -- [Default Project](#the-cluster-s-default-project) -- [System Project](#the-system-project) - -### The Cluster's Default Project - -When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. - -If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. - -If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. - -### The System Project - -When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. - -To open it, open the **Global** menu, and then select the `system` project for your cluster. - -The `system` project: - -- Is automatically created when you provision a cluster. -- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. -- Allows you to add more namespaces or move its namespaces to other projects. -- Cannot be deleted because it's required for cluster operations. - ->**Note:** In RKE clusters where the project network isolation option is enabled, the `system` project overrides the project network isolation option so that it can communicate with other projects, collect logs, and check health. - -# Project Authorization - -Standard users are only authorized for project access in two situations: - -- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. -- Standard users can access projects that they create themselves. - -# Pod Security Policies - -Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the [project level]({{}}/rancher/v2.5/en/project-admin/pod-security-policies) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. - -# Creating Projects - -This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. - -1. [Name a new project.](#1-name-a-new-project) -2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) -3. [Recommended: Add project members.](#3-recommended-add-project-members) -4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) - -### 1. Name a New Project - -1. From the **Global** view, choose **Clusters** from the main menu. 
From the **Clusters** page, open the cluster from which you want to create a project. - -1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. - -1. Enter a **Project Name**. - -### 2. Optional: Select a Pod Security Policy - -This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/). - -Assigning a PSP to a project will: - -- Override the cluster's default PSP. -- Apply the PSP to the project. -- Apply the PSP to any namespaces you add to the project later. - -### 3. Recommended: Add Project Members - -Use the **Members** section to provide other users with project access and roles. - -By default, your user is added as the project `Owner`. - ->**Notes on Permissions:** -> ->- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. -> ->- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. -> ->- Choose `Custom` to create a custom role on the fly: [Custom Project Roles]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#custom-project-roles). - -To add members: - -1. Click **Add Member**. -1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. -1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) - -### 4. Optional: Add Resource Quotas - -Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas). - -To add a resource quota, - -1. Click **Add Quota**. -1. Select a Resource Type. For more information, see [Resource Quotas.]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas/). -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. -1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit]({{}}/rancher/v2.5/en/project-admin/resource-quotas/) -1. Click **Create**. - -**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. 
- -| Field | Description | -| ----------------------- | -------------------------------------------------------------------------------------------------------- | -| Project Limit | The overall resource limit for the project. | -| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. | - -# Switching between Clusters and Projects - -To switch between clusters and projects, use the **Global** drop-down available in the main menu. - -![Global Menu]({{}}/img/rancher/global-menu.png) - -Alternatively, you can switch between projects and clusters using the main menu. - -- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. -- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md b/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md deleted file mode 100644 index 9cc546c05a..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/restoring-etcd/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Restoring a Cluster from Backup -weight: 2050 -aliases: - - /rancher/v2.x/en/cluster-admin/restoring-etcd/ ---- - -etcd backup and recovery for [Rancher launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. - -Rancher recommends enabling the [ability to set up recurring snapshots of etcd]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots), but [one-time snapshots]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). - -Clusters can also be restored to a prior Kubernetes version and cluster configuration. - -This section covers the following topics: - -- [Viewing Available Snapshots](#viewing-available-snapshots) -- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) -- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) -- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) - -## Viewing Available Snapshots - -The list of all available snapshots for the cluster is available. - -1. In the **Global** view, navigate to the cluster that you want to view snapshots. - -2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. - -## Restoring a Cluster from a Snapshot - -If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. 
- -Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: - -- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. -- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. -- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. - -When rolling back to a prior Kubernetes version, the [upgrade strategy options]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. - -> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/#configuring-recurring-snapshots) - -1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. - -2. Click the **⋮ > Restore Snapshot**. - -3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. - -4. In the **Restoration Type** field, choose one of the restore options described above. - -5. Click **Save**. - -**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. - -## Recovering etcd without a Snapshot - -If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: - -1. Keep only one etcd node in the cluster by removing all other etcd nodes. - -2. On the single remaining etcd node, run the following command: - - ``` - $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd - ``` - - This command outputs the running command for etcd, save this command to use later. - -3. Stop the etcd container that you launched in the previous step and rename it to `etcd-old`. - - ``` - $ docker stop etcd - $ docker rename etcd etcd-old - ``` - -4. Take the saved command from Step 2 and revise it: - - - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. - - Add `--force-new-cluster` to the end of the command. - -5. Run the revised command. - -6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) and you want to reuse an old node, you are required to [clean up the nodes]({{}}/rancher/v2.5/en/faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. 
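To make step 4 of the recovery procedure more concrete, the sketch below marks where the two edits go. Every value in it (container name, image tag, mounts, member name, and IP address) is hypothetical; the real command is the `runlike` output from step 2, with all of its original flags kept:

```
# HYPOTHETICAL example only -- start from your own `runlike` output.
# The only two changes are the trimmed --initial-cluster value (keep just the
# surviving member) and the --force-new-cluster flag appended at the end.
docker run -d --name=etcd --restart=unless-stopped --net=host \
  -v /var/lib/etcd:/var/lib/rancher/etcd/:z \
  rancher/coreos-etcd:v3.4.3-rancher1 /usr/local/bin/etcd \
  --name=etcd-203-0-113-10 \
  --data-dir=/var/lib/rancher/etcd/ \
  --initial-cluster=etcd-203-0-113-10=https://203.0.113.10:2380 \
  --force-new-cluster
```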
- -# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 - -If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI]({{}}/rancher/v2.5/en/cluster-admin/restoring-etcd/). diff --git a/content/rancher/v2.5/en/cluster-admin/tools/_index.md b/content/rancher/v2.5/en/cluster-admin/tools/_index.md deleted file mode 100644 index 39835e7e07..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/tools/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Tools for Logging, Monitoring, and Visibility -weight: 2033 -aliases: - - /rancher/v2.5/en/tools/notifiers-and-alerts/ - - /rancher/v2.x/en/cluster-admin/tools/ ---- - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - - - -- [Logging](#logging) -- [Monitoring and Alerts](#monitoring-and-alerts) -- [Istio](#istio) -- [OPA Gatekeeper](#opa-gatekeeper) -- [CIS Scans](#cis-scans) - - - - -# Logging - -Logging is helpful because it allows you to: - -- Capture and analyze the state of your cluster -- Look for trends in your environment -- Save your logs to a safe location outside of your cluster -- Stay informed of events like a container crashing, a pod eviction, or a node dying -- More easily debugg and troubleshoot problems - -Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. - -For more information, refer to the logging documentation [here.]({{}}/rancher/v2.5/en/logging/) -# Monitoring and Alerts - -Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. - -After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. - -Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. - -Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. - -For more information, refer to the monitoring documentation [here.]({{}}/rancher/v2.5/en/monitoring-alerting/) - -# Istio - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. - -Rancher's integration with Istio was improved in Rancher v2.5. 
- -For more information, refer to the Istio documentation [here.]({{}}/rancher/v2.5/en/istio) -# OPA Gatekeeper - -[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.]({{}}/rancher/v2.5/en/opa-gatekeper) - -# CIS Scans - -Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. - -For more information, refer to the CIS scan documentation [here.]({{}}/rancher/v2.5/en/cis-scans) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md b/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md deleted file mode 100644 index 8bc7af3ef1..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/_index.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Upgrading and Rolling Back Kubernetes -weight: 70 -aliases: - - /rancher/v2.x/en/cluster-admin/upgrading-kubernetes/ ---- - -Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. - -Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation]({{}}/rke/latest/en/). - -This section covers the following topics: - -- [New Features](#new-features) -- [Tested Kubernetes Versions](#tested-kubernetes-versions) -- [How Upgrades Work](#how-upgrades-work) -- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) -- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) -- [Rolling Back](#rolling-back) -- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) - - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) - - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) - - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) - - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) -- [Troubleshooting](#troubleshooting) - -# Tested Kubernetes Versions - -Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.5.9/) - -# How Upgrades Work - -RKE v1.1.0 changed the way that clusters are upgraded. - -In this section of the [RKE documentation,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. - - -# Recommended Best Practice for Upgrades - -When upgrading the Kubernetes version of a cluster, we recommend that you: - -1. Take a snapshot. -1. Initiate a Kubernetes upgrade. -1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. 
This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade kubernetes version before restoring the etcd snapshot. - -The restore operation will work on a cluster that is not in a healthy or active state. - -# Upgrading the Kubernetes Version - -> **Prerequisites:** -> -> - The options below are available only for [Rancher-launched RKE Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) and [Registered K3s Kubernetes clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/#additional-features-for-registered-k3s-clusters) -> - Before upgrading Kubernetes, [back up your cluster.]({{}}/rancher/v2.5/en/backups) - -1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. - -1. Expand **Cluster Options**. - -1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. - -1. Click **Save**. - -**Result:** Kubernetes begins upgrading for the cluster. - -# Rolling Back - -A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: - -- [Backing up a cluster]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/#how-snapshots-work) -- [Restoring a cluster from backup]({{}}/rancher/v2.5/en/cluster-admin/restoring-etcd/#restoring-a-cluster-from-a-snapshot) - -# Configuring the Upgrade Strategy - -As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements]({{}}/rke/latest/en/upgrades/maintaining-availability) are met. - -The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. - -### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI - -From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. - -By default, the maximum number of unavailable worker is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. - -To change the default number or percentage of worker nodes, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Maxiumum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -### Enabling Draining Nodes During Upgrades from the Rancher UI - -By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. 
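For clusters managed with the RKE CLI, the equivalent settings live under `upgrade_strategy` in `cluster.yml`; the same YAML can also be pasted into the cluster configuration through **Edit as YAML** in Rancher. This is a sketch only, using the key names documented for RKE v1.1.0 and later (verify them against your RKE version) and assuming `cluster.yml` does not already contain an `upgrade_strategy` block:

```
# Append an example upgrade strategy to cluster.yml (merge it by hand instead
# if the file already has an upgrade_strategy block).
cat <<'EOF' >> cluster.yml
upgrade_strategy:
  max_unavailable_worker: 10%
  max_unavailable_controlplane: 1
  drain: true
  node_drain_input:
    force: false
    ignore_daemonsets: true
    delete_local_data: false
    grace_period: 60
    timeout: 120
EOF
```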
- -To enable draining each node during a cluster upgrade, - -1. Go to the cluster view in the Rancher UI. -1. Click **⋮ > Edit.** -1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** -1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/nodes/#aggressive-and-safe-draining-options) -1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. -1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. -1. Click **Save.** - -**Result:** The cluster is updated to use the new upgrade strategy. - -> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show state of etcd and controlplane as drained, even though they are being drained. - -### Maintaining Availability for Applications During Upgrades - -_Available as of RKE v1.1.0_ - -In [this section of the RKE documentation,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. - -### Configuring the Upgrade Strategy in the cluster.yml - -More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. - -For details, refer to [Configuring the Upgrade Strategy]({{}}/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. - -# Troubleshooting - -If a node doesn't come up after an upgrade, the `rke up` command errors out. - -No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. - -If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. - -A failed node could be in many different states: - -- Powered off -- Unavailable -- User drains a node while upgrade is in process, so there are no kubelets on the node -- The upgrade itself failed - -If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. 
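A few standard `kubectl` commands can help identify which node is holding up a stalled upgrade (the node name below is an example):

```
# Show node status and versions; NotReady nodes, or nodes still reporting the
# old kubelet version, are the usual suspects during a stalled upgrade.
kubectl get nodes -o wide

# Inspect a suspect node's conditions and recent events for the reason it is
# unavailable (stopped kubelet, disk pressure, network problems, and so on).
kubectl describe node worker-1

# Look for pods that failed to be evicted or rescheduled while draining.
kubectl get pods --all-namespaces --field-selector=status.phase!=Running
```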
diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md deleted file mode 100644 index 18273041b1..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: "Kubernetes Persistent Storage: Volumes and Storage Classes" -description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" -weight: 2031 -aliases: - - /rancher/v2.5/en/tasks/clusters/adding-storage/ - - /rancher/v2.5/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ ---- -When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. - -The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](./how-storage-works) - -### Prerequisites - -To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. - -If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) - -For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. - -### Setting up Existing Storage - -The overall workflow for setting up existing storage is as follows: - -1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. -2. Add a persistent volume (PV) that refers to the persistent storage. -3. Add a persistent volume claim (PVC) that refers to the PV. -4. Mount the PVC as a volume in your workload. - -For details and prerequisites, refer to [this page.](./attaching-existing-storage) - -### Dynamically Provisioning New Storage in Rancher - -The overall workflow for provisioning new storage is as follows: - -1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. -2. Add a persistent volume claim (PVC) that refers to the storage class. -3. Mount the PVC as a volume for your workload. - -For details and prerequisites, refer to [this page.](./provisioning-new-storage) - -### Longhorn Storage - -[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. - -Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. - -If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. 
For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/1.0.2/what-is-longhorn/) - -Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. For more information, see [this page.]({{}}/rancher/v2.5/en/longhorn) - -### Provisioning Storage Examples - -We provide examples of how to provision storage with [NFS,](./examples/nfs) [vSphere,](./examples/vsphere) and [Amazon's EBS.](./examples/ebs) - -### GlusterFS Volumes - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](./glusterfs-volumes) - -### iSCSI Volumes - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](./iscsi-volumes) - -### hostPath Volumes -Before you create a hostPath volume, you need to set up an [extra_bind]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. - -### Migrating vSphere Cloud Provider from In-tree to Out-of-tree - -Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. - -For instructions on how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree) - -### Related Links - -- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md deleted file mode 100644 index 619c89baec..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: Setting up Existing Storage -weight: 1 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/ ---- - -This section describes how to set up existing persistent storage for workloads in Rancher. - -> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -To set up storage, follow these steps: - -1. [Set up persistent storage.](#1-set-up-persistent-storage) -2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) -3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) -4. 
[Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-volume-claim-as-a-volume-in-your-workload) - -### Prerequisites - -- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. - -### 1. Set up persistent storage - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../examples/vsphere) [NFS,](../examples/nfs) or Amazon's [EBS.](../examples/ebs) - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.]({{}}/rancher/v2.5/en/longhorn) - -### 2. Add a persistent volume that refers to the persistent storage - -These steps describe how to set up a persistent volume at the cluster level in Kubernetes. - -1. From the cluster view, select **Storage > Persistent Volumes**. - -1. Click **Add Volume**. - -1. Enter a **Name** for the persistent volume. - -1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. - -1. Enter the **Capacity** of your volume in gigabytes. - -1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. - -1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. - -1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. - -1. Click **Save**. - -**Result:** Your new persistent volume is created. - -### 3. Add a persistent volume claim that refers to the persistent volume - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the project containing a workload that you want to add a persistent volume claim to. - -1. Then click the **Volumes** tab and click **Add Volume**. - -1. 
Enter a **Name** for the volume claim. - -1. Select the namespace of the workload that you want to add the persistent storage to. - -1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. - -1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 4. Mount the persistent volume claim as a volume in your workload - -Mount PVCs to stateful workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -The following steps describe how to assign existing storage to a new workload that is a stateful set: - -1. From the **Project** view, go to the **Workloads** tab. -1. Click **Deploy.** -1. Enter a name for the workload. -1. Next to the **Workload Type** field, click **More Options.** -1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. -1. Choose the namespace where the workload will be deployed. -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -The following steps describe how to assign persistent storage to an existing workload: - -1. From the **Project** view, go to the **Workloads** tab. -1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. -1. In the **Persistent Volume Claim** field, select the PVC that you created. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md deleted file mode 100644 index 43d9327a6a..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/ceph/_index.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -title: Using an External Ceph Driver -weight: 10 -aliases: - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/ ---- - -These instructions are about using the external Ceph driver in an RKE2 cluster. If you are using RKE, additional steps are required. 
For details, refer to [this section.](#using-the-ceph-driver-with-rke) - -- [Requirements](#requirements) -- [Using the Ceph Driver with RKE](#using-the-ceph-driver-with-rke) -- [Installing the ceph-csi driver on an RKE2 cluster](#installing-the-ceph-csi-driver-on-an-rke2-cluster) -- [Install the ceph-csi driver using Helm](#install-the-ceph-csi-driver-using-helm) -- [Creating RBD Ceph Resources](#creating-rbd-ceph-resources) -- [Configure RBD Ceph Access Secrets](#configure-rbd-ceph-access-secrets) - - [User Account](#user-account) - - [Admin Account](#admin-account) -- [Create RBD Testing Resources](#create-rbd-testing-resources) - - [Using RBD in Pods](#using-rbd-in-pods) - - [Using RBD in Persistent Volumes](#using-rbd-in-persistent-volumes) - - [Using RBD in Storage Classes](#using-rbd-in-storage-classes) - - [RKE2 Server/Master Provisioning](#rke2-server-master-provisioning) - - [RKE2 Agent/Worker provisioning](#rke2-agent-worker-provisioning) -- [Tested Versions](#tested-versions) -- [Troubleshooting](#troubleshooting) - -# Requirements - -Make sure ceph-common and xfsprogs packages are installed on SLE worker nodes. - -# Using the Ceph Driver with RKE - -The resources below are fully compatible with RKE based clusters, but there is a need to do an additional kubelet configuration for RKE. - -On RKE clusters, the kubelet component is running in a Docker container and doesn't have access to the host's kernel modules as rbd and libceph by default. - -To solve this limitation, you can either run `modprobe rbd` on worker nodes, or configure the kubelet containers to automatically mount the `/lib/modules` directory from the host into the container. - -For the kubelet configuration, put the following lines into the `cluster.yml` file prior to RKE cluster provisioning. You can also modify the `cluster.yml` later in the Rancher UI by clicking on **Edit Cluster > Edit as YAML** and restarting the worker nodes. - -```yaml -services: - kubelet: - extra_binds: - - '/lib/modules:/lib/modules:ro' -``` - -For more information about the `extra_binds` directive, refer to [this section.]({{}}/rke/latest/en/config-options/services/services-extras/#extra-binds) - -# Installing the ceph-csi driver on an RKE2 cluster - -> **Note:** These steps are needed for dynamic RBD provisioning only. - -For more information about the `ceph-csi-rbd` chart, refer to [this page.](https://github.com/ceph/ceph-csi/blob/devel/charts/ceph-csi-rbd/README.md) - -To get details about your SES cluster, run: - -``` -ceph mon dump -``` - -Read its output: - -``` -dumped monmap epoch 3 -epoch 3 -fsid 79179d9d-98d8-4976-ab2e-58635caa7235 -last_changed 2021-02-11T10:56:42.110184+0000 -created 2021-02-11T10:56:22.913321+0000 -min_mon_release 15 (octopus) -0: [v2:10.85.8.118:3300/0,v1:10.85.8.118:6789/0] mon.a -1: [v2:10.85.8.123:3300/0,v1:10.85.8.123:6789/0] mon.b -2: [v2:10.85.8.124:3300/0,v1:10.85.8.124:6789/0] mon.c -``` - -Later you'll need the fsid and mon addresses values. 
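If you prefer to pull those values out with the shell, the following works against the plain-text output shown above (a convenience sketch using standard grep/awk; nothing Ceph-specific is assumed beyond that output format):

```
ceph mon dump 2>/dev/null | awk '/^fsid/ {print $2}'                   # cluster fsid
ceph mon dump 2>/dev/null | grep -oE 'v1:[0-9.]+:6789' | cut -d: -f2-  # monitor v1 addresses
```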
- -# Install the ceph-csi Driver Using Helm - -Run these commands: - -``` -helm repo add ceph-csi https://ceph.github.io/csi-charts -helm repo update -helm search repo ceph-csi -l -helm inspect values ceph-csi/ceph-csi-rbd > ceph-csi-rbd-values.yaml -``` - -Modify the `ceph-csi-rbd-values.yaml` file and keep there only the required changes: - -```yaml -# ceph-csi-rbd-values.yaml -csiConfig: - - clusterID: "79179d9d-98d8-4976-ab2e-58635caa7235" - monitors: - - "10.85.8.118:6789" - - "10.85.8.123:6789" - - "10.85.8.124:6789" -provisioner: - name: provisioner - replicaCount: 2 -``` - -Make sure the ceph monitors are reachable from the RKE2 cluster, for example, by ping. - -``` -kubectl create namespace ceph-csi-rbd -helm install --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml -kubectl rollout status deployment ceph-csi-rbd-provisioner -n ceph-csi-rbd -helm status ceph-csi-rbd -n ceph-csi-rbd -``` - -in case you'd like to modify the configuration directly via Helm, you may adapt the `ceph-csi-rbd-values.yaml` file and call: - -``` -helm upgrade \ - --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml -``` - -# Creating RBD Ceph Resources - -``` -# Create a ceph pool: -ceph osd pool create myPool 64 64 - -# Create a block device pool: -rbd pool init myPool - -# Create a block device image: -rbd create -s 2G myPool/image - -# Create a block device user and record the key: -ceph auth get-or-create-key client.myPoolUser mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=myPool" | tr -d '\n' | base64 -QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== - -# Encode the ceph user myPoolUser into a bash64 hash: -echo "myPoolUser" | tr -d '\n' | base64 -bXlQb29sVXNlcg== - -# Create a block device admin user and record the key: -ceph auth get-or-create-key client.myPoolAdmin mds 'allow *' mgr 'allow *' mon 'allow *' osd 'allow * pool=myPool' | tr -d '\n' | base64 -QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== - -# Encode the ceph user myPoolAdmin into a bash64 hash: -echo "myPoolAdmin" | tr -d '\n' | base64 -bXlQb29sQWRtaW4= -``` -# Configure RBD Ceph Access Secrets - -### User Account - -For static RBD provisioning (the image within the ceph pool must exist), run these commands: - -``` -cat > ceph-user-secret.yaml << EOF -apiVersion: v1 -kind: Secret -metadata: - name: ceph-user - namespace: default -type: kubernetes.io/rbd -data: - userID: bXlQb29sVXNlcg== - userKey: QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== -EOF - -kubectl apply -f ceph-user-secret.yaml -``` - -### Admin Account - -For dynamic RBD provisioning (used for automatic image creation within a given ceph pool), run these commands: - -``` -cat > ceph-admin-secret.yaml << EOF -apiVersion: v1 -kind: Secret -metadata: - name: ceph-admin - namespace: default -type: kubernetes.io/rbd -data: - userID: bXlQb29sQWRtaW4= - userKey: QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== -EOF - -kubectl apply -f ceph-admin-secret.yaml -``` - -# Create RBD Testing Resources - -### Using RBD in Pods - -``` -# pod -cat > ceph-rbd-pod-inline.yaml << EOF -apiVersion: v1 -kind: Pod -metadata: - name: ceph-rbd-pod-inline -spec: - containers: - - name: ceph-rbd-pod-inline - image: busybox - command: ["sleep", "infinity"] - volumeMounts: - - mountPath: /mnt/ceph_rbd - name: volume - volumes: - - name: volume - rbd: - monitors: - - 10.85.8.118:6789 - - 10.85.8.123:6789 - - 
10.85.8.124:6789 - pool: myPool - image: image - user: myPoolUser - secretRef: - name: ceph-user - fsType: ext4 - readOnly: false -EOF - -kubectl apply -f ceph-rbd-pod-inline.yaml -kubectl get pod -kubectl exec pod/ceph-rbd-pod-inline -- df -k | grep rbd -``` - -### Using RBD in Persistent Volumes - -``` -# pod-pvc-pv -cat > ceph-rbd-pod-pvc-pv-allinone.yaml << EOF -apiVersion: v1 -kind: PersistentVolume -metadata: - name: ceph-rbd-pv -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - rbd: - monitors: - - 10.85.8.118:6789 - - 10.85.8.123:6789 - - 10.85.8.124:6789 - pool: myPool - image: image - user: myPoolUser - secretRef: - name: ceph-user - fsType: ext4 - readOnly: false ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: ceph-rbd-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi ---- -apiVersion: v1 -kind: Pod -metadata: - name: ceph-rbd-pod-pvc-pv -spec: - containers: - - name: ceph-rbd-pod-pvc-pv - image: busybox - command: ["sleep", "infinity"] - volumeMounts: - - mountPath: /mnt/ceph_rbd - name: volume - volumes: - - name: volume - persistentVolumeClaim: - claimName: ceph-rbd-pvc -EOF - -kubectl apply -f ceph-rbd-pod-pvc-pv-allinone.yaml -kubectl get pv,pvc,pod -kubectl exec pod/ceph-rbd-pod-pvc-pv -- df -k | grep rbd -``` - -### Using RBD in Storage Classes - -This example is for dynamic provisioning. The ceph-csi driver is needed. - -``` -# pod-pvc-sc -cat > ceph-rbd-pod-pvc-sc-allinone.yaml < /root/.bashrc << EOF -export PATH=$PATH:/var/lib/rancher/rke2/bin/ -export KUBECONFIG=/etc/rancher/rke2/rke2.yaml -EOF - -cat /var/lib/rancher/rke2/server/node-token -token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED -``` - -### RKE2 Agent/Worker provisioning - -``` -mkdir -p /etc/rancher/rke2/ - -cat > /etc/rancher/rke2/config.yaml << EOF -server: https://10.100.103.23:9345 -token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED -EOF - -curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - -systemctl enable --now rke2-agent.service -``` - -The cluster can be imported into Rancher from the Rancher UI by clicking **Global/Add Cluster > Other Cluster.** Then run the provided kubectl command on the server/master node. - -# Tested Versions - -OS for running RKE2 nodes: JeOS SLE15-SP2 with installed kernel-default-5.3.18-24.49 - -``` -kubectl version -Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-22T12:00:00Z", GoVersion:"go1.13.11", Compiler:"gc", Platform:"linux/amd64"} -Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7+rke2r1", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-20T01:50:52Z", GoVersion:"go1.15.5b5", Compiler:"gc", Platform:"linux/amd64"} - -helm version -version.BuildInfo{Version:"3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.12"} -``` - -Kubernetes version on RKE2 cluster: v1.19.7+rke2r1 - -# Troubleshooting - -In case you are using SUSE's ceph-rook based on SES7, it might be useful to expose the monitors on hostNetwork by editing `rook-1.4.5/ceph/cluster.yaml` and setting `spec.network.hostNetwork=true`. 
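Expressed in YAML, that edit is a small excerpt of the CephCluster spec (shown here only as a sketch of the single setting mentioned above):

```yaml
# rook-1.4.5/ceph/cluster.yaml (excerpt)
spec:
  network:
    hostNetwork: true
```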
- -Also for operating the ceph-rook cluster, it is useful to deploy a toolbox on the Kubernetes cluster where ceph-rook is provisioned by `kubectl apply -f rook-1.4.5/ceph/toolbox.yaml` Then all the ceph related commands can be executed in the toolbox pod, for example, by running `kubectl exec -it -n rook-ceph rook-ceph-tools-686d8b8bfb-2nvqp -- bash` - -Operating with the ceph - basic commands: - -``` -ceph osd pool stats -ceph osd pool delete myPool myPool --yes-i-really-really-mean-it -rbd list -p myPool -> csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -> image -``` - -Delete the image: `rbd rm csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -p myPool` - -CephFS commands in rook toolbox: - -``` -ceph -s -ceph fs ls -ceph fs fail cephfs -ceph fs rm cephfs --yes-i-really-mean-it -ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it -ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it -``` - -To prepare a cephfs filesystem, you can run this command on a rook cluster: - -``` -kubectl apply -f rook-1.4.5/ceph/filesystem.yaml -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md deleted file mode 100644 index 5227586f10..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Provisioning Storage Examples -weight: 3053 -aliases: - - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/ - - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/examples/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ ---- - -Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. - -For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: - -- [NFS](./nfs) -- [vSphere](./vsphere) -- [EBS](./ebs) diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md deleted file mode 100644 index 3a33a7369e..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Creating Persistent Storage in Amazon's EBS -weight: 3053 -aliases: - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/ ---- - -This section describes how to set up Amazon's Elastic Block Store in EC2. - -1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** -1. Click **Create Volume.** -1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. -1. Click **Create Volume.** -1. Click **Close.** - -**Result:** Persistent storage has been created. 
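The same volume can also be created from the AWS CLI instead of the console. This is a sketch only; the availability zone, size, and volume type are placeholders and should match the instance the volume will be attached to:

```
aws ec2 create-volume \
  --availability-zone us-east-1a \
  --size 20 \
  --volume-type gp2
```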
- -For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/attaching-existing-storage/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md deleted file mode 100644 index 395c2b516b..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: NFS Storage -weight: 3054 -aliases: - - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/ ---- - -Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. - ->**Note:** -> ->- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/). -> ->- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. - ->**Recommended:** To simplify the process of managing firewall rules, use NFSv4. - -1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. - -1. Enter the following command: - - ``` - sudo apt-get install nfs-kernel-server - ``` - -1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. - - ``` - mkdir -p /nfs && chown nobody:nogroup /nfs - ``` - - The `-p /nfs` parameter creates a directory named `nfs` at root. - - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. - -1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. - - 1. Open `/etc/exports` using your text editor of choice. - 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster. Follow each address and its accompanying parameters with a single space that is a delimiter. - - ``` - /nfs (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) - ``` - - **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` - - 1. Update the NFS table by entering the following command: - - ``` - exportfs -ra - ``` - -1. Open the ports used by NFS. - - 1. To find out what ports NFS is using, enter the following command: - - ``` - rpcinfo -p | grep nfs - ``` - 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: - - ``` - sudo ufw allow 2049 - ``` - -**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. - -## What's Next? - -Within Rancher, add the NFS server as a storage volume and/or storage class. 
After adding the server, you can use it for storage for your deployments. diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md deleted file mode 100644 index 8893877c3f..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: vSphere Storage -weight: 3055 -aliases: - - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/ ---- - -To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a persistent volume claim. - -In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -- [Prerequisites](#prerequisites) -- [Creating a StorageClass](#creating-a-storageclass) -- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) -- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) -- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) - -### Prerequisites - -In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)]({{< baseurl>}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/), the [vSphere cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/). - -### Creating a StorageClass - -> **Note:** -> -> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. - -1. From the Global view, open the cluster where you want to provide vSphere storage. -2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. -3. Enter a **Name** for the class. -4. Under **Provisioner**, select **VMWare vSphere Volume**. - - {{< img "/img/rancher/vsphere-storage-class.png" "vsphere-storage-class">}} - -5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. -5. Click **Save**. - -### Creating a Workload with a vSphere Volume - -1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/). -2. For **Workload Type**, select **Stateful set of 1 pod**. -3. Expand the **Volumes** section and click **Add Volume**. -4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. -5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. -6. Enter the required **Capacity** for the volume. Then click **Define**. - - {{< img "/img/rancher/workload-add-volume.png" "workload-add-volume">}} - -7. 
Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. -8. Click **Launch** to create the workload. - -### Verifying Persistence of the Volume - -1. From the context menu of the workload you just created, click **Execute Shell**. -2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). -3. Create a file in the volume by executing the command `touch //data.txt`. -4. **Close** the shell window. -5. Click on the name of the workload to reveal detail information. -6. Open the context menu next to the Pod in the *Running* state. -7. Delete the Pod by selecting **Delete**. -8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. -9. Once the replacement pod is running, click **Execute Shell**. -10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. - - ![workload-persistent-data]({{}}/img/rancher/workload-persistent-data.png) - -### Why to Use StatefulSets Instead of Deployments - -You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. - -Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. - -Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. - -### Related Links - -- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) -- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md deleted file mode 100644 index 81249a85c9..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: GlusterFS Volumes -weight: 5000 -aliases: - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/ ---- - -> This section only applies to [RKE clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) - -In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. 
To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. There are two requirements before you can change the cluster configuration: - -- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) -- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) - -``` -docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version -``` - ->**Note:** -> ->Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. - -``` -services: - kubelet: - extra_binds: - - "/usr/bin/systemd-run:/usr/bin/systemd-run" -``` - -After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: - -``` -Detected OS with systemd -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md deleted file mode 100644 index 11279b1b89..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: How Persistent Storage Works -weight: 1 -aliases: - - /rancher/v2.5/en/tasks/workloads/add-persistent-volume-claim - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/ ---- - -A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. - -There are two ways to use persistent storage in Kubernetes: - -- Use an existing persistent volume -- Dynamically provision new persistent volumes - -To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. - -For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. 
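As a concrete sketch of the dynamic case, the PVC names a storage class rather than an existing volume. The provisioner, parameters, and names below are illustrative placeholders, not values required by Rancher:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-sc
provisioner: kubernetes.io/aws-ebs   # any supported provisioner works here
parameters:
  type: gp2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-dynamic-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: example-sc
  resources:
    requests:
      storage: 10Gi
```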
- -![Setting Up New and Existing Persistent Storage]({{}}/img/rancher/rancher-storage.svg) - -For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) - -This section covers the following topics: - -- [About persistent volume claims](#about-persistent-volume-claims) - - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) -- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) - - [Binding PVs to PVCs](#binding-pvs-to-pvcs) -- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) - -# About Persistent Volume Claims - -Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workloads as a volume so that the workload can claim its specified share of the persistent storage. - -To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. - -Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** You can reuse these PVCs when creating deployments in the future. - -### PVCs are Required for Both New and Existing Persistent Storage - -A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. - -If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. - -If a workload should request new storage, the workload mounts PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. - -Rancher lets you create as many PVCs within a project as you'd like. - -You can mount PVCs to a deployment as you create it, or later, after the deployment is running. - -# Setting up Existing Storage with a PVC and PV - -Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. - -PVs can represent a physical disk or file system that you host on premise, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. - -Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. - -> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. 
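A minimal sketch of this existing-storage pattern as Kubernetes manifests (resource names, sizes, and the NFS server address are placeholders chosen for illustration; the PV maps to storage that already exists, and the PVC claims it):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-existing-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:                       # any supported volume plugin can back the PV
    server: nfs.example.com
    path: /exports/data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-existing-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```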
- -### Binding PVs to PVCs - -When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) - -> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. - -In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PVs that has at least the amount of disk space required by the PVC. - -To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. - -# Provisioning New Storage with a PVC and Storage Class - -Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. - -For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. - -The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. - diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md deleted file mode 100644 index deaaaac4fc..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: iSCSI Volumes -weight: 6000 -aliases: - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/ ---- - -In [Rancher Launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. - -Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. - -If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. 
You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: - -| Platform | Package Name | Install Command | -| ------------- | ----------------------- | -------------------------------------- | -| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | -| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | - - -After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. - ->**Notes:** -> ->- Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed.
->
-> ->- The example YAML below does not apply to K3s, but only to RKE clusters. Since the K3s kubelet does not run in a container, adding extra binds is not necessary. However, all iSCSI tools must still be installed on your K3s nodes. - -``` -services: - kubelet: - extra_binds: - - "/etc/iscsi:/etc/iscsi" - - "/sbin/iscsiadm:/sbin/iscsiadm" -``` diff --git a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md deleted file mode 100644 index 5edb8a7d77..0000000000 --- a/content/rancher/v2.5/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Dynamically Provisioning New Storage in Rancher -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/ ---- - -This section describes how to provision new persistent storage for workloads in Rancher. - -This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](../how-storage-works) - -New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. - -If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.]({{}}/rancher/v2.5/en/longhorn) - -To provision new storage for your workloads, follow these steps: - -1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) -2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) -3. [Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) - -### Prerequisites - -- To set up persistent storage, the `Manage Volumes` [role]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-role-reference) is required. -- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. -- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- Make sure your storage provisioner is available to be enabled. - -The following storage provisioners are enabled by default: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.]({{}}/rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers/) - -### 1. Add a storage class and configure it to use your storage - -These steps describe how to set up a storage class at the cluster level. - -1. Go to the **Cluster Explorer** of the cluster for which you want to dynamically provision persistent storage volumes. - -1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. 
- -1. Enter a `Name` for your storage class. - -1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. - -1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. - -1. Click `Save`. - -**Result:** The storage class is available to be consumed by a PVC. - -For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). - -### 2. Add a persistent volume claim that refers to the storage class - -These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. - -1. Go to the **Cluster Manager** to the project containing a workload that you want to add a PVC to. - -1. From the main navigation bar, choose **Resources > Workloads.** Then select the **Volumes** tab. Click **Add Volume**. - -1. Enter a **Name** for the volume claim. - -1. Select the namespace of the volume claim. - -1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** - -1. Go to the **Storage Class** drop-down and select the storage class that you created. - -1. Enter a volume **Capacity**. - -1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. - -1. Click **Create.** - -**Result:** Your PVC is created. You can now attach it to any workload in the project. - -### 3. Mount the persistent volume claim as a volume for your workload - -Mount PVCs to workloads so that your applications can store their data. - -You can mount PVCs during the deployment of a workload, or following workload creation. - -To attach the PVC to a new workload, - -1. Create a workload as you would in [Deploying Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/). -1. For **Workload Type**, select **Stateful set of 1 pod**. -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Launch.** - -**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. - -To attach the PVC to an existing workload, - -1. Go to the project that has the workload that will have the PVC attached. -1. Go to the workload that will have persistent storage and click **⋮ > Edit.** -1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** -1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. -1. 
In the **Mount Point** field, enter the path that the workload will use to access the volume. -1. Click **Save.** - -**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/_index.md b/content/rancher/v2.5/en/cluster-provisioning/_index.md deleted file mode 100644 index 8fe1bc1c85..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/_index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Setting up Kubernetes Clusters in Rancher -description: Provisioning Kubernetes Clusters -weight: 7 -aliases: - - /rancher/v2.5/en/concepts/clusters/ - - /rancher/v2.5/en/concepts/clusters/cluster-providers/ - - /rancher/v2.5/en/tasks/clusters/ - - /rancher/v2.x/en/cluster-provisioning/ ---- - -Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.5/en/overview/concepts) page. - -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.5/en/overview/architecture/) page. - -This section covers the following topics: - - - -- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) -- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) -- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) - - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) - - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) -- [Registering Existing Clusters](#registering-existing-clusters) - - - -### Cluster Management Capabilities by Cluster Type - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table" %}} - -# Setting up Clusters in a Hosted Kubernetes Provider - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters) - -# Launching Kubernetes with Rancher - -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. - -In RKE clusters, Rancher manages the deployment of Kubernetes. 
These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. - -These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. - -If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. - -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) - -### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider - -Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. - -One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. - -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. - -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. - -You can bring any nodes you want to Rancher and use them to create a cluster. - -These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. - -# Registering Existing Clusters - -The cluster registration feature replaces the feature to import clusters. - -Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. 
- -For more information, see [this page.](./registered-clusters) diff --git a/content/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table/index.md b/content/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table/index.md deleted file mode 100644 index 16ee4674be..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table/index.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -headless: true ---- - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -| Action | Rancher Launched Kubernetes Clusters | EKS and GKE Clusters1 | Other Hosted Kubernetes Clusters | Non-EKS or GKE Registered Clusters | -| --- | --- | ---| ---|----| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) | ✓ | ✓ | ✓ | ✓2 | -| [Managing Nodes]({{}}/rancher/v2.5/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | ✓3 | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.5/en/catalog/) | ✓ | ✓ | ✓ | ✓ | -| Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio) | ✓ | ✓ | ✓ | ✓ | -| [Running Security Scans]({{}}/rancher/v2.5/en/security/security-scan/) | ✓ | ✓ | ✓ | ✓ | -| [Use existing configuration to create additional clusters]({{}}/rancher/v2.5/en/cluster-admin/cloning-clusters/)| ✓ | ✓ |✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.5/en/cluster-admin/certificate-rotation/) | ✓ | ✓ | | | -| Ability to [backup]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/) and [restore]({{}}/rancher/v2.5/en/cluster-admin/restoring-etcd/) Rancher-launched clusters | ✓ | ✓ | | ✓4 | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.5/en/cluster-admin/pod-security-policy/) | ✓ | ✓ | | | -| [Authorized Cluster Endpoint]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | | - -1. Registered GKE and EKS clusters have the same options available as GKE and EKS clusters created from the Rancher UI. The difference is that when a registered cluster is deleted from the Rancher UI, [it is not destroyed.]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/#additional-features-for-registered-eks-and-gke-clusters) - -2. Cluster configuration options can't be edited for registered clusters, except for [K3s and RKE2 clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/) - -3. For registered cluster nodes, the Rancher UI exposes the ability to cordon, drain, and edit the node. - -4. For registered clusters using etcd as a control plane, snapshots must be taken manually outside of the Rancher UI to use for backup and recovery. 
- -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -| Action | Rancher Launched Kubernetes Clusters | Hosted Kubernetes Clusters | Registered EKS Clusters | All Other Registered Clusters | -| --- | --- | ---| ---|----| -| [Using kubectl and a kubeconfig file to Access a Cluster]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/) | ✓ | ✓ | ✓ | ✓ | -| [Managing Cluster Members]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/cluster-members/) | ✓ | ✓ | ✓ | ✓ | -| [Editing and Upgrading Clusters]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) | ✓ | ✓ | ✓ | ✓1 | -| [Managing Nodes]({{}}/rancher/v2.5/en/cluster-admin/nodes) | ✓ | ✓ | ✓ | ✓2 | -| [Managing Persistent Volumes and Storage Classes]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) | ✓ | ✓ | ✓ | ✓ | -| [Managing Projects, Namespaces and Workloads]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) | ✓ | ✓ | ✓ | ✓ | -| [Using App Catalogs]({{}}/rancher/v2.5/en/catalog/) | ✓ | ✓ | ✓ | ✓ | -| Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio) | ✓ | ✓ | ✓ | ✓ | -| [Running Security Scans]({{}}/rancher/v2.5/en/security/security-scan/) | ✓ | ✓ | ✓ | ✓ | -| [Use existing configuration to create additional clusters]({{}}/rancher/v2.5/en/cluster-admin/cloning-clusters/)| ✓ | ✓ |✓ | | -| [Ability to rotate certificates]({{}}/rancher/v2.5/en/cluster-admin/certificate-rotation/) | ✓ | | ✓ | | -| Ability to [backup]({{}}/rancher/v2.5/en/cluster-admin/backing-up-etcd/) and [restore]({{}}/rancher/v2.5/en/cluster-admin/restoring-etcd/) Rancher-launched clusters | ✓ | ✓ | | ✓3 | -| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher]({{}}/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/) | ✓ | | | | -| [Configuring Pod Security Policies]({{}}/rancher/v2.5/en/cluster-admin/pod-security-policy/) | ✓ | | ✓ | | -| [Authorized Cluster Endpoint]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | - -1. Cluster configuration options can't be edited for registered clusters, except for [K3s and RKE2 clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/) - -2. For registered cluster nodes, the Rancher UI exposes the ability to cordon, drain, and edit the node. - -3. For registered clusters using etcd as a control plane, snapshots must be taken manually outside of the Rancher UI to use for backup and recovery. - - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md deleted file mode 100644 index 114b75e9cf..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Setting up Clusters from Hosted Kubernetes Providers -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ ---- - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. 
When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider. - -Rancher supports the following Kubernetes providers: - -- [Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) -- [Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) -- [Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) -- [Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) -- [Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) -- [Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) - -## Hosted Kubernetes Provider Authentication - -When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: - -- [Creating a GKE Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke) -- [Creating an EKS Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks) -- [Creating an AKS Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks) -- [Creating an ACK Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack) -- [Creating a TKE Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke) -- [Creating a CCE Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce) diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md deleted file mode 100644 index 0bda69520c..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Creating an Aliyun ACK Cluster -shortTitle: Alibaba Cloud Container Service for Kubernetes -weight: 2120 -aliases: - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/ ---- - -You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. - -## Prerequisites - ->**Note** ->Deploying to ACK will incur charges. - -1. In Aliyun, activate the following services in their respective consoles. - - - [Container Service](https://cs.console.aliyun.com) - - [Resource Orchestration Service](https://ros.console.aliyun.com) - - [RAM](https://ram.console.aliyun.com) - -2. 
Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Refer to the official Alibaba Cloud documentation on [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. - -3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). - -4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. - -## Create an ACK Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Alibaba ACK**. - -1. Enter a **Cluster Name**. - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and enter the access key that was created as part of the prerequisite steps. - -1. Click **Next: Configure Cluster**, then choose the cluster type, the Kubernetes version, and the availability zone. - -1. If you choose **Kubernetes** as the cluster type, click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. - -1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. - -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md deleted file mode 100644 index eace4d7ca0..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Creating a Huawei CCE Cluster -shortTitle: Huawei Cloud Kubernetes Service -weight: 2130 -aliases: - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/ ---- - -You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. - -## Prerequisites in Huawei - ->**Note** ->Deploying to CCE will incur charges. - -1. Find your project ID in the Huawei CCE portal. 
See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). - -2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). - -## Limitations - -The Huawei CCE service doesn't support creating clusters with public access through its API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. - -## Create the CCE Cluster - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Huawei CCE**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Enter your **Project Id**, your Access Key ID as **Access Key**, and your Secret Access Key as **Secret Key**. Then click **Next: Configure cluster**. Fill in the cluster configuration. For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) -1. Fill in the node configuration for the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) -1. Click **Create** to create the CCE cluster. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -# Huawei CCE Configuration - -|Settings|Description| -|---|---| -| Cluster Type | The type of node you want to include in the cluster, `VirtualMachine` or `BareMetal`. | -| Description | The description of the cluster. | -| Master Version | The Kubernetes version. | -| Management Scale Count | The maximum node count of the cluster. The options are 50, 200, and 1000. The larger the scale count, the higher the cost. | -| High Availability | Enable master node high availability. A cluster with high availability enabled costs more. | -| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` are supported for the `VirtualMachine` type, and `underlay_ipvlan` is supported for the `BareMetal` type. | -| Container Network CIDR | Network CIDR for the cluster. | -| VPC Name | The name of the VPC that the cluster will be deployed into. Rancher will create one if this is blank. | -| Subnet Name | The name of the subnet that the cluster will be deployed into. Rancher will create one if this is blank. | -| External Server | This option is reserved for the future, when CCE cluster public access can be enabled via the API. For now, it is always disabled. | -| Cluster Label | The labels for the cluster. | -| Highway Subnet | This option is only supported for the `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | - -**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. 
For more information, refer to the section on [the config file structure.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -# Node Configuration - -|Settings|Description| -|---|---| -| Zone | The availability zone where the cluster node(s) are deployed. | -| Billing Mode | The billing mode for the cluster node(s). For the `VirtualMachine` type, only `Pay-per-use` is supported. For `BareMetal`, you can choose `Pay-per-use` or `Yearly/Monthly`. | -| Validity Period | This option only shows in `Yearly/Monthly` billing mode. It specifies how long you want to pay for the cluster node(s). | -| Auto Renew | This option only shows in `Yearly/Monthly` billing mode. It specifies whether the `Yearly/Monthly` payment for the cluster node(s) is renewed automatically. | -| Data Volume Type | Data volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | -| Data Volume Size | Data volume size for the cluster node(s). | -| Root Volume Type | Root volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | -| Root Volume Size | Root volume size for the cluster node(s). | -| Node Flavor | The node flavor of the cluster node(s). The flavor list in the Rancher UI is fetched from Huawei Cloud and includes all the supported node flavors. | -| Node Count | The node count of the cluster. | -| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | -| SSH Key Name | The SSH key for the cluster node(s). | -| EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) will not bind a public IP. `Create EIP` means that the cluster node(s) will bind one or more newly created EIPs after they are provisioned, and more options will be shown in the UI to set the parameters of the EIPs to create. `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | -| EIP Count | This option is only shown when `Create EIP` is selected. It specifies how many EIPs you want to create for the node(s). | -| EIP Type | This option is only shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | -| EIP Share Type | This option is only shown when `Create EIP` is selected. The only option is `PER`. | -| EIP Charge Mode | This option is only shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | -| EIP Bandwidth Size | This option is only shown when `Create EIP` is selected. The bandwidth of the EIPs. | -| Authentication Mode | Whether to enable `RBAC` only or also enable `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate used for the authenticating proxy is also required. | -| Node Label | The labels for the cluster node(s). Invalid labels can prevent upgrades or can prevent Rancher from starting. 
For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) | \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md deleted file mode 100644 index c1da61125b..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: Creating an EKS Cluster -shortTitle: Amazon EKS -weight: 2110 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/ ---- -Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). - -- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) - - [Amazon VPC](#amazon-vpc) - - [IAM Policies](#iam-policies) -- [Create the EKS Cluster](#create-the-eks-cluster) -- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) -- [Architecture](#architecture) -- [AWS Service Events](#aws-service-events) -- [Security and Compliance](#security-and-compliance) -- [Tutorial](#tutorial) -- [Minimum EKS Permissions](#minimum-eks-permissions) -- [Syncing](#syncing) -- [Troubleshooting](#troubleshooting) -# Prerequisites in Amazon Web Services - ->**Note** ->Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). - -To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). - -### Amazon VPC - -An Amazon VPC is required to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. You can set one up yourself and provide it during cluster creation in Rancher. If you do not provide one during creation, Rancher will create one. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). - -### IAM Policies - -Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. - -1. 
Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - -2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. - -3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. - -> **Note:** It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. - -For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). - - -# Create the EKS Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Amazon EKS**. - -1. Enter a **Cluster Name.** - -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) - -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -# EKS Cluster Configuration Reference - -For the full list of EKS cluster configuration options, see [this page.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference) - -# Architecture - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -# AWS Service Events - -To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). - -# Security and Compliance - -By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not register in Rancher unless the credentials used to register the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). - -For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). - -# Tutorial - -This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. - -# Minimum EKS Permissions - -See [this page](./permissions) for the minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. - -# Syncing - -The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/) - -For information on configuring the refresh interval, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/eks-config-reference/#configuring-the-refresh-interval) - -# Troubleshooting - -If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) - -If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) - -For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). 
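As an illustration of the access model described in the Security and Compliance section above, the following is a minimal sketch of an `aws-auth` ConfigMap entry that authorizes an additional IAM user to access an EKS cluster. The account ID, user name, and group are placeholders, not values from this guide; follow the linked AWS documentation for the authoritative procedure.

```yaml
# Hypothetical example only: the account ID and user name are placeholders.
# Adding an entry like this to the aws-auth ConfigMap in the kube-system namespace
# authorizes an additional IAM user to access the EKS cluster.
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapUsers: |
    - userarn: arn:aws:iam::123456789012:user/rancher-operator
      username: rancher-operator
      groups:
        - system:masters
```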
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md deleted file mode 100644 index 68fec4a4f2..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Managing GKE Clusters -shortTitle: Google Kubernetes Engine -weight: 2105 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-gke/ - - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/ ---- - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -- [Prerequisites](#prerequisites) -- [Provisioning a GKE Cluster](#provisioning-a-gke-cluster) -- [Private Clusters](#private-clusters) -- [Configuration Reference](#configuration-reference) -- [Updating Kubernetes Version](#updating-kubernetes-version) -- [Syncing](#syncing) - -# Prerequisites - -Some setup in Google Kubernetes Engine is required. - -### Service Account Token - -Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. - -The service account requires the following roles: - -- **Compute Viewer:** `roles/compute.viewer` -- **Project Viewer:** `roles/viewer` -- **Kubernetes Engine Admin:** `roles/container.admin` -- **Service Account User:** `roles/iam.serviceAccountUser` - -[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) - -For help obtaining a private key for your service account, refer to the Google cloud documentation [here.](https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys) You will need to save the key in JSON format. - -### Google Project ID - -Your cluster will need to be part of a Google Project. - -To create a new project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) - -To get the project ID of an existing project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) - -# Provisioning a GKE Cluster - ->**Note** ->Deploying to GKE will incur charges. - -### 1. Create a Cloud Credential - -1. In the upper right corner, click the user profile dropdown menu and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for your Google cloud credentials. -1. In the **Cloud Credential Type** field, select **Google.** -1. In the **Service Account** text box, paste your service account private key JSON, or upload the JSON file. -1. Click **Create.** - -**Result:** You have created credentials that Rancher will use to provision the new GKE cluster. - -### 2. Create the GKE Cluster -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. -1. Under **With a hosted Kubernetes provider,** click **Google GKE**. -1. 
Enter a **Cluster Name**. -1. Optional: Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Optional: Add Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to the cluster. -1. Enter your Google project ID and your Google cloud credentials. -1. Fill out the rest of the form. For help, refer to the [GKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference) -1. Click **Create.** - -**Result:** You have successfully deployed a GKE cluster. - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -# Private Clusters - -Private GKE clusters are supported. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/) - -# Configuration Reference - -For details on configuring GKE clusters in Rancher, see [this page.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference) -# Updating Kubernetes Version - -The Kubernetes version of a cluster can be upgraded to any version available in the region or zone fo the GKE cluster. Upgrading the master Kubernetes version does not automatically upgrade worker nodes. Nodes can be upgraded independently. - ->**Note** ->GKE has removed basic authentication in 1.19+. In order to upgrade a cluster to 1.19+, basic authentication must be disabled in the Google Cloud. Otherwise, an error will appear in Rancher when an upgrade to 1.19+ is attempted. You can follow the [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication#disabling_authentication_with_a_static_password). After this, the Kubernetes version can be updated to 1.19+ via Rancher. - -# Syncing - -The GKE provisioner can synchronize the state of a GKE cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/syncing) - -For information on configuring the refresh interval, see [this section.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/#configuring-the-refresh-interval) - - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -# Prerequisites - -Some setup in Google Kubernetes Engine is required. - -### Service Account Token - -Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. 
- -The service account requires the following roles: - -- **Compute Viewer:** `roles/compute.viewer` -- **Project Viewer:** `roles/viewer` -- **Kubernetes Engine Admin:** `roles/container.admin` -- **Service Account User:** `roles/iam.serviceAccountUser` - -[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) - - ->**Note** ->Deploying to GKE will incur charges. - -# Create the GKE Cluster - -Use Rancher to set up and configure your Kubernetes cluster. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Google Kubernetes Engine**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Either paste your service account private key in the **Service Account** text box or **Read from a file**. Then click **Next: Configure Nodes**. - - >**Note:** After submitting your private key, you may have to enable the Google Kubernetes Engine API. If prompted, browse to the URL displayed in the Rancher UI to enable the API. - -6. Select your cluster options, node options and security options. For help, refer to the [GKE Cluster Configuration Reference.](#gke-before-v2-5-8) -9. Review your options to confirm they're correct. Then click **Create**. - -**Result:** You have successfully deployed a GKE cluster. - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md deleted file mode 100644 index 6fdb9597d1..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Creating a Tencent TKE Cluster -shortTitle: Tencent Kubernetes Engine -weight: 2125 -aliases: - - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/ ---- - -You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver]({{}}/rancher/v2.5/en/admin-settings/drivers/cluster-drivers/#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. - -## Prerequisites in Tencent - ->**Note** ->Deploying to TKE will incur charges. - -1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. - -2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). - -3. 
Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region where you want to deploy your Kubernetes cluster. - -4. Create an [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. - -## Create a TKE Cluster - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Tencent TKE**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Region | From the drop-down, choose the geographical region in which to build your cluster. | - | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | - | Secret Key | Enter the Secret Key that you obtained from the Tencent Cloud Console. | - -6. Click `Next: Configure Cluster` to set your TKE cluster configurations. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Kubernetes Version | TKE currently supports only Kubernetes version 1.10.5. | - | Node Count | Enter the number of worker nodes you want to purchase for your Kubernetes cluster, up to 100. | - | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | - | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster. You can check the available CIDR ranges in the VPC service of the Tencent Cloud Console. Defaults to 172.16.0.0/16. | - - **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) - -7. Click `Next: Select Instance Type` to choose the instance type that will be used for your TKE cluster. - - | Option | Description | - | ---------- | -------------------------------------------------------------------------------------------------------------------- | - | Availability Zone | Choose the availability zone of the VPC region. | - | Subnet | Select the subnet that you have created within the VPC, and add a new one if you don't have one in the chosen availability zone. | - | Instance Type | From the drop-down, choose the VM instance type that you want to use for the TKE cluster. Defaults to S2.MEDIUM4 (2 CPUs, 4 GiB memory). | - -8. Click `Next: Configure Instance` to configure the VM instances that will be used for your TKE cluster. - - Option | Description - -------|------------ - Operating System | The operating system. Currently, Centos7.2x86_64 and ubuntu16.04.1 LTSx86_64 are supported. - Security Group | Security group ID. By default, no security groups are bound. - Root Disk Type | System disk type. 
System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). - Root Disk Size | System disk size. For Linux systems, the adjustable range is 20 - 50 GB, in steps of 1 GB. - Data Disk Type | Data disk type. Defaults to the SSD cloud drive. - Data Disk Size | Data disk size (GB), in steps of 10 GB. - Band Width Type | Type of bandwidth, PayByTraffic or PayByHour. - Band Width | Public network bandwidth (Mbps). - Key Pair | Key ID. After it is associated, the key can be used to log in to the VM node. - -9. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md deleted file mode 100644 index 3daf9c0cb8..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/node-requirements/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Node Requirements for Rancher Managed Clusters -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/node-requirements/ ---- - -This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. - -> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.5/en/installation/requirements/) - -Make sure the nodes in Rancher-managed clusters fulfill the following requirements: - -- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) -- [Networking Requirements](#networking-requirements) -- [Optional: Security Considerations](#optional-security-considerations) - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.5/en/installation/options/arm64-platform/) - -For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) - -### Oracle Linux and RHEL Derived Linux Nodes - -Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. 
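For example, on a systemd-based distribution, firewalld can typically be stopped and disabled with the standard systemd tooling; this is a sketch only, so verify the exact procedure for your distribution:

```
systemctl disable --now firewalld
```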
- ->**Note:** In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: -> -> ``` - systemctl disable nm-cloud-setup.service nm-cloud-setup.timer - reboot - ``` - -### SUSE Linux Nodes - -SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.5/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. - -### Flatcar Container Linux Nodes - -When [Launching Kubernetes with Rancher]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) - -{{% tabs %}} -{{% tab "Canal"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: canal - options: - canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} - -{{% tab "Calico"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: calico - options: - calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} -{{% /tabs %}} - -It is also required to enable the Docker service, you can enable the Docker service using the following command: - -``` -systemctl enable docker.service -``` - -The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.5/en/admin-settings/drivers/#node-drivers). - -### Windows Nodes - -Nodes with Windows Server must run Docker Enterprise Edition. - -Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/) - -# Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. - -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) - -# Networking Requirements - -For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. - -IPv6 should be disabled at the OS level. Unless you specifically intend to utilize IPv6, you should disable it on your nodes. 
IPv6 is not yet fully supported, and often it is not enough to disable IPv6 on the NICs to avoid complications. - -The ports required to be open are different depending on how the user cluster is launched. Each of the sections below lists the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.5/en/cluster-provisioning/). - -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) - -Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.5/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). - -# Optional: Security Considerations - -If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend following our hardening guide to configure your nodes before installing Kubernetes. - -For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.5/en/security/#rancher-hardening-guide) diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/_index.md deleted file mode 100644 index c765e2ed37..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/production/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Checklist for Production-Ready Clusters -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-provisioning/production/ ---- - -In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. - -For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements) - -This is a shortlist of best practices that we strongly recommend for all production clusters. - -For a full list of all the best practices that we recommend, refer to the [best practices section.]({{}}/rancher/v2.5/en/best-practices) - -### Node Requirements - -* Make sure your nodes fulfill all of the [node requirements,]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/) including the port requirements. - -### Back up etcd - -* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure recurring snapshots of etcd for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. - -### Cluster Architecture - -* Nodes should have one of the following role configurations: - * `etcd` - * `controlplane` - * `etcd` and `controlplane` - * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) -* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher fault tolerance, and spread them across (availability) zones to provide even better fault tolerance. -* Assign two or more nodes the `controlplane` role for master component high availability. 
-* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles) - -For more information about the -number of nodes for each Kubernetes role, refer to the section on [recommended architecture.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/) - -### Logging and Monitoring - -* Configure alerts/notifiers for Kubernetes components (System Service). -* Configure logging for cluster analysis and post-mortems. - -### Reliability - -* Perform load tests on your cluster to verify that its hardware can support your workloads. - -### Networking - -* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). -* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md deleted file mode 100644 index 108741dec0..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Roles for Nodes in Kubernetes -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/ ---- - -This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. - -This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid - -# etcd - -Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. - ->**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -# controlplane - -Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. - ->**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. - -### kube-apiserver - -The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. - -### kube-controller-manager - -The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -### kube-scheduler - -The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). - -# worker - -Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. - -# References - -* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md b/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md deleted file mode 100644 index c874c1596d..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/production/recommended-architecture/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Recommended Cluster Architecture -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/ ---- - -There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. - -# Separating Worker Nodes from Nodes with Other Roles - -When designing your cluster(s), you have two options: - -* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements). 
-* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. - -In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. - -Therefore, each node should have one of the following role configurations: - - * `etcd` - * `controlplane` - * Both `etcd` and `controlplane` - * `worker` - -# Recommended Number of Nodes with Each Role - -The cluster should have: - -- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. -- At least two nodes with the role `controlplane` for master component high availability. -- At least two nodes with the role `worker` for workload rescheduling upon node failure. - -For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.]({{}}/rancher/v2.5/en/cluster-provisioning/production/nodes-and-roles) - - -### Number of Controlplane Nodes - -Adding more than one node with the `controlplane` role makes every master component highly available. - -### Number of etcd Nodes - -The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. - -| Nodes with `etcd` role | Majority | Failure Tolerance | -|--------------|------------|-------------------| -| 1 | 1 | 0 | -| 2 | 2 | 0 | -| 3 | 2 | **1** | -| 4 | 3 | 1 | -| 5 | 3 | **2** | -| 6 | 4 | 2 | -| 7 | 4 | **3** | -| 8 | 5 | 3 | -| 9 | 5 | **4** | - -References: - -* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) -* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) - -### Number of Worker Nodes - -Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. - -### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications - -You may have noticed that our [Kubernetes Install]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. 
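To make the recommended role layout concrete, here is a minimal sketch of how dedicated role assignments might be expressed in an RKE `cluster.yml` for a downstream production cluster (the addresses, SSH user, and exact counts are placeholder assumptions, not requirements):

```yaml
nodes:
  # Three dedicated etcd nodes: a quorum of 2 out of 3 survives the loss of one node
  - address: 172.16.0.11
    user: ubuntu
    role: [etcd]
  - address: 172.16.0.12
    user: ubuntu
    role: [etcd]
  - address: 172.16.0.13
    user: ubuntu
    role: [etcd]
  # Two controlplane nodes keep the Kubernetes master components highly available
  - address: 172.16.0.21
    user: ubuntu
    role: [controlplane]
  - address: 172.16.0.22
    user: ubuntu
    role: [controlplane]
  # Two worker nodes allow workloads to be rescheduled if one node fails
  - address: 172.16.0.31
    user: ubuntu
    role: [worker]
  - address: 172.16.0.32
    user: ubuntu
    role: [worker]
```

The same separation applies when the nodes are created through the Rancher UI; in that case each role maps to its own node pool rather than to entries in `cluster.yml`.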
- -# References - -* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md deleted file mode 100644 index 0e1ee65c00..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/registered-clusters/_index.md +++ /dev/null @@ -1,329 +0,0 @@ ---- -title: Registering Existing Clusters -weight: 6 -aliases: - - /rancher/v2.5/en/cluster-provisioning/imported-clusters - - /rancher/v2.x/en/cluster-provisioning/imported-clusters/ - - /rancher/v2.x/en/cluster-provisioning/registered-clusters/ ---- - -The cluster registration feature replaced the feature to import clusters. - -The control that Rancher has to manage a registered cluster depends on the type of cluster. For details, see [Management Capabilities for Registered Clusters.](#management-capabilities-for-registered-clusters) - -- [Prerequisites](#prerequisites) -- [Registering a Cluster](#registering-a-cluster) -- [Management Capabilities for Registered Clusters](#management-capabilities-for-registered-clusters) -- [Configuring K3s Cluster Upgrades](#configuring-k3s-cluster-upgrades) -- [Debug Logging and Troubleshooting for Registered K3s Clusters](#debug-logging-and-troubleshooting-for-registered-k3s-clusters) -- [Annotating Registered Clusters](#annotating-registered-clusters) - -# Prerequisites - -{{% tabs %}} -{{% tab "v2.5.9+" %}} - -### Kubernetes Node Roles - -Registered RKE Kubernetes clusters must have all three node roles - etcd, controlplane and worker. A cluster with only controlplane components cannot be registered in Rancher. - -For more information on RKE node roles, see the [best practices.]({{}}/rancher/v2.5/en/cluster-provisioning/production/#cluster-architecture) - -### Permissions - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. - -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to register the cluster. - -By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) - -### EKS Clusters - -EKS clusters must have at least one managed node group to be imported into Rancher or provisioned from Rancher successfully. - -{{% /tab %}} -{{% tab "Rancher before v2.5.9" %}} - -### Permissions - -If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. 
- -In order to apply the privilege, you need to run: - -```plain -kubectl create clusterrolebinding cluster-admin-binding \ - --clusterrole cluster-admin \ - --user [USER_ACCOUNT] -``` - -before running the `kubectl` command to register the cluster. - -By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). - -If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) - -### EKS Clusters - -EKS clusters must have at least one managed node group to be imported into Rancher or provisioned from Rancher successfully. - -{{% /tab %}} -{{% /tabs %}} - -# Registering a Cluster - -1. From the **Clusters** page, click **Add Cluster**. -2. Under **Register an existing Kubernetes cluster**, click the type of Kubernetes cluster you want to register. -3. Enter a **Cluster Name**. -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -5. For Rancher v2.5.6+, use **Agent Environment Variables** under **Cluster Options** to set environment variables for [rancher cluster agent]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. -6. Click **Create**. -7. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. -8. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. -9. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. -10. When you finish running the command(s) on your node, click **Done**. - - -**Result:** - -- Your cluster is registered and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. -- You can access your cluster after its state is updated to **Active.** -- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). - - -> **Note:** -> You can not re-register a cluster that is currently active in a Rancher setup. - -### Configuring a K3s Cluster to Enable Registration in Rancher - -The K3s server needs to be configured to allow writing to the kubeconfig file. 
- -This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: - -``` -$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 -``` - -The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: - -``` -$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - -``` - -### Configuring an Imported EKS Cluster with Terraform - -You should define **only** the minimum fields that Rancher requires when importing an EKS cluster with Terraform. This is important as Rancher will overwrite what was in the EKS cluster with any config that the user has provided. - ->**Warning:** Even a small difference between the current EKS cluster and a user-provided config could have unexpected results. - -The minimum config fields required by Rancher to import EKS clusters with Terraform using `eks_config_v2` are as follows: - -- cloud_credential_id -- name -- region -- imported (this field should always be set to `true` for imported clusters) - -Example YAML configuration for imported EKS clusters: - -``` -resource "rancher2_cluster" "my-eks-to-import" { - name = "my-eks-to-import" - description = "Terraform EKS Cluster" - eks_config_v2 { - cloud_credential_id = rancher2_cloud_credential.aws.id - name = var.aws_eks_name - region = var.aws_region - imported = true - } -} -``` - -# Management Capabilities for Registered Clusters - -The control that Rancher has to manage a registered cluster depends on the type of cluster. - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -- [Changes in v2.5.8](#changes-in-v2-5-8) -- [Features for All Registered Clusters](#2-5-8-features-for-all-registered-clusters) -- [Additional Features for Registered K3s Clusters](#2-5-8-additional-features-for-registered-k3s-clusters) -- [Additional Features for Registered EKS and GKE Clusters](#additional-features-for-registered-eks-and-gke-clusters) - -### Changes in v2.5.8 - -Greater management capabilities are now available for [registered GKE clusters.](#additional-features-for-registered-eks-and-gke-clusters) The same configuration options are available for registered GKE clusters as for the GKE clusters created through the Rancher UI. - - -### Features for All Registered Clusters - -After registering a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring, alerts and notifiers]({{}}/rancher/v2.5/en/monitoring-alerting/) -- Enable [logging]({{}}/rancher/v2.5/en/logging/v2.5/) -- Enable [Istio]({{}}/rancher/v2.5/en/istio/) -- Use [pipelines]({{}}/rancher/v2.5/en/project-admin/pipelines/) -- Manage projects and workloads - - -### Additional Features for Registered K3s Clusters - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. - -When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. 
The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: - -- The ability to [upgrade the K3s version]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/) -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster - -### Additional Features for Registered EKS and GKE Clusters - -Registering an Amazon EKS cluster or GKE cluster allows Rancher to treat it as though it were created in Rancher. - -Amazon EKS clusters and GKE clusters can now be registered in Rancher. For the most part, these registered clusters are treated the same way as clusters created in the Rancher UI, except for deletion. - -When you delete an EKS cluster or GKE cluster that was created in Rancher, the cluster is destroyed. When you delete a cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. - -The capabilities for registered clusters are listed in the table on [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/) - - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -- [Features for All Registered Clusters](#before-2-5-8-features-for-all-registered-clusters) -- [Additional Features for Registered K3s Clusters](#before-2-5-8-additional-features-for-registered-k3s-clusters) -- [Additional Features for Registered EKS Clusters](#additional-features-for-registered-eks-clusters) - - -### Features for All Registered Clusters - -After registering a cluster, the cluster owner can: - -- [Manage cluster access]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) through role-based access control -- Enable [monitoring, alerts and notifiers]({{}}/rancher/v2.5/en/monitoring-alerting/) -- Enable [logging]({{}}/rancher/v2.5/en/logging/v2.5/) -- Enable [Istio]({{}}/rancher/v2.5/en/istio/) -- Use [pipelines]({{}}/rancher/v2.5/en/project-admin/pipelines/) -- Manage projects and workloads - - -### Additional Features for Registered K3s Clusters - -[K3s]({{}}/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. - -When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: - -- The ability to [upgrade the K3s version]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes/) -- The ability to configure the maximum number of nodes that will be upgraded concurrently -- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster - -### Additional Features for Registered EKS Clusters - -Registering an Amazon EKS cluster allows Rancher to treat it as though it were created in Rancher. - -Amazon EKS clusters can now be registered in Rancher. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. 
When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. - -The capabilities for registered EKS clusters are listed in the table on [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/) -{{% /tab %}} -{{% /tabs %}} - - - -# Configuring K3s Cluster Upgrades - -> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. - -The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. - -- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes -- **Worker concurrency:** The maximum number worker nodes to upgrade at the same time; also the maximum unavailable worker nodes - -In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. - -Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. - -# Debug Logging and Troubleshooting for Registered K3s Clusters - -Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. - -To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. - -Logs created by the `system-upgrade-controller` can be viewed by running this command: - -``` -kubectl logs -n cattle-system system-upgrade-controller -``` - -The current status of the plans can be viewed with this command: - -``` -kubectl get plans -A -o yaml -``` - -If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. - -To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. - - - - -# Annotating Registered Clusters - -For all types of registered Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. - -Therefore, when Rancher registers a cluster, it assumes that several capabilities are disabled by default. 
Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the registered cluster. - -However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. - -By annotating a registered cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. - -This example annotation indicates that a pod security policy is enabled: - -``` -"capabilities.cattle.io/pspEnabled": "true" -``` - -The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. - -``` -"capabilities.cattle.io/ingressCapabilities": "[ - { - "customDefaultBackend":true, - "ingressProvider":"asdf" - } -]" -``` - -These capabilities can be annotated for the cluster: - -- `ingressCapabilities` -- `loadBalancerCapabilities` -- `nodePoolScalingSupported` -- `nodePortRange` -- `pspEnabled` -- `taintSupport` - -All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. - -To annotate a registered cluster, - -1. Go to the cluster view in Rancher and select **⋮ > Edit.** -1. Expand the **Labels & Annotations** section. -1. Click **Add Annotation.** -1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. -1. Click **Save.** - -**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. - diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md deleted file mode 100644 index d1aa119059..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Launching Kubernetes with Rancher -weight: 4 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/ ---- - -You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine]({{}}/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: - -- Bare-metal servers -- On-premise virtual machines -- Virtual machines hosted by an infrastructure provider - -Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. - -RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. - -### Requirements - -If you use RKE to set up a cluster, your nodes must meet the [requirements]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements) for nodes in downstream user clusters. 
- -### Launching Kubernetes on New Nodes in an Infrastructure Provider - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. - -For more information, refer to the section on [launching Kubernetes on new nodes.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.5/en/cluster-admin/cleaning-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -For more information, refer to the section on [custom nodes.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md deleted file mode 100644 index 2679bbe634..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Setting up Cloud Providers -weight: 2300 -aliases: - - /rancher/v2.5/en/concepts/clusters/cloud-providers/ - - /rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/ ---- -A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. - -Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. - -By default, the **Cloud Provider** option is set to `None`. 
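Whichever provider you select, Rancher records the choice as a `cloud_provider` block in the cluster's underlying RKE configuration, which you can inspect or adjust via **Edit as YAML**. A minimal sketch, assuming the Amazon provider (which often needs no keys beyond its name), looks roughly like this:

```yaml
rancher_kubernetes_engine_config:
  cloud_provider:
    name: aws
```

Other providers, such as vSphere, take additional configuration under the same block, as covered in the provider-specific pages referenced below.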
- -The following cloud providers can be enabled: - -* Amazon -* Azure -* GCE (Google Compute Engine) -* vSphere - -### Setting up the Amazon Cloud Provider - -For details on enabling the Amazon cloud provider, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon) - -### Setting up the Azure Cloud Provider - -For details on enabling the Azure cloud provider, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure) - -### Setting up the GCE Cloud Provider - -For details on enabling the Google Compute Engine cloud provider, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce) - -### Setting up the vSphere Cloud Provider - -For details on enabling the vSphere cloud provider, refer to [this page.](./vsphere) - -### Setting up a Custom Cloud Provider - -The `Custom` cloud provider is available if you want to configure any Kubernetes cloud provider. - -For the custom cloud provider option, you can refer to the [RKE docs]({{}}/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. There are specific cloud providers that have more detailed configuration: - -* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/) -* [OpenStack]({{}}/rke/latest/en/config-options/cloud-providers/openstack/) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md deleted file mode 100644 index d473d1e72c..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Setting up the Amazon Cloud Provider -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/ ---- - -When using the `Amazon` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. -- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. - -See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider. - -To set up the Amazon cloud provider, - -1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) -2. [Configure the ClusterID](#2-configure-the-clusterid) - -### 1. Create an IAM Role and attach to the instances - -All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: - -* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. -* The second policy is for the nodes with the `etcd` or `worker` role. 
These nodes only have to be able to retrieve information from EC2. - -While creating an [Amazon EC2 cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. - -While creating a [Custom cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes), you must manually attach the IAM role to the instance(s). - -IAM Policy for nodes with the `controlplane` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } -] -} -``` - -IAM policy for nodes with the `etcd` or `worker` role: - -```json -{ -"Version": "2012-10-17", -"Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } -] -} -``` - -### 2. Configure the ClusterID - -The following resources need to tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster. -- **Security Group**: The security group used for your cluster. - ->**Note:** Do not tag multiple security groups. 
Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). - -When you create an [Amazon EC2 Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. - -Use the following tag: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` - -`CLUSTERID` can be any string you like, as long as it is equal across all tags set. - -Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: - -**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. - -### Using Amazon Elastic Container Registry (ECR) - -The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md deleted file mode 100644 index 0e9498bc52..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Setting up the Azure Cloud Provider -weight: 2 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/ ---- - -When using the `Azure` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. - -- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. - -- **Network Storage:** Support Azure Files via CIFS mounts. - -The following account types are not supported for Azure Subscriptions: - -- Single tenant accounts (i.e. accounts with no subscriptions). -- Multi-subscription accounts. - -To set up the Azure cloud provider following credentials need to be configured: - -1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) -2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) -3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) -4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) - -### 1. Set up the Azure Tenant ID - -Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). - -If you want to use the Azure CLI, you can run the command `az account show` to get the information. - -### 2. 
Set up the Azure Client ID and Azure Client Secret - -Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). - -1. Select **Azure Active Directory**. -1. Select **App registrations**. -1. Select **New application registration**. -1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. -1. Select **Create**. - -In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. - -The next step is to generate the **Azure Client Secret**: - -1. Open your created App registration. -1. In the **Settings** view, open **Keys**. -1. Enter a **Key description**, select an expiration time and select **Save**. -1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. - -### 3. Configure App Registration Permissions - -The last thing you will need to do, is assign the appropriate permissions to your App registration. - -1. Go to **More services**, search for **Subscriptions** and open it. -1. Open **Access control (IAM)**. -1. Select **Add**. -1. For **Role**, select `Contributor`. -1. For **Select**, select your created App registration name. -1. Select **Save**. - -### 4. Set up Azure Network Security Group Name - -A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. - -If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. - -You should already assign custom hosts to this Network Security Group during provisioning. - -Only hosts expected to be load balancer back ends need to be in this group. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md deleted file mode 100644 index 4e63d522e8..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Setting up the vSphere Cloud Provider -weight: 4 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/ ---- - -In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. - -# In-tree Cloud Provider - -To use the in-tree vSphere cloud provider, you will need to use an RKE configuration option. For details, refer to [this page.](./in-tree) - -# Out-of-tree Cloud Provider - -_Available as of v2.5+_ - -To set up the out-of-tree vSphere cloud provider, you will need to install Helm charts from the Rancher marketplace. 
For details, refer to [this page.](./out-of-tree) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md deleted file mode 100644 index 6902e487f5..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: How to Configure In-tree vSphere Cloud Provider -shortTitle: In-tree Cloud Provider -weight: 10 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/ ---- - -To set up the in-tree vSphere cloud provider, follow these steps while creating the vSphere cluster in Rancher: - -1. Set **Cloud Provider** option to `Custom` or `Custom (In-Tree)`. - - {{< img "/img/rancher/vsphere-node-driver-cloudprovider.png" "vsphere-node-driver-cloudprovider">}} - -1. Click on **Edit as YAML** -1. Insert the following structure to the pre-populated cluster YAML. This structure must be placed under `rancher_kubernetes_engine_config`. Note that the `name` *must* be set to `vsphere`. - - ```yaml - rancher_kubernetes_engine_config: - cloud_provider: - name: vsphere - vsphereCloudProvider: - [Insert provider configuration] - ``` - -Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md deleted file mode 100644 index f0b3e4bd00..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: Launching Kubernetes on Existing Custom Nodes -description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements -metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" -weight: 2225 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ - - /rancher/v2.5/en/cluster-provisioning/custom-clusters/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ ---- - -When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. - -To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements), which includes some hardware specifications and Docker. After you install Docker on each server, you willl also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. - -This section describes how to set up a custom cluster. - -# Creating a Cluster with Custom Nodes - ->**Want to use Windows hosts as Kubernetes workers?** -> ->See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/) before you start. - - - -- [1. 
Provision a Linux Host](#1-provision-a-linux-host) -- [2. Create the Custom Cluster](#2-create-the-custom-cluster) -- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) - - - -### 1. Provision a Linux Host - -Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-prem VM -- A bare-metal server - -If you want to reuse a node from a previous custom cluster, [clean the node]({{}}/rancher/v2.5/en/admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. - -Provision the host according to the [installation requirements]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements) and the [checklist for production-ready clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/production) - -### 2. Create the Custom Cluster - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -1. From the **Clusters** page, click **Add Cluster**. - -2. Choose **Custom**. - -3. Enter a **Cluster Name**. - -4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. - -5. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** - - >**Using Windows nodes as Kubernetes workers?** - > - >- See [Enable the Windows Support Option]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/). - >- The only Network Provider available for clusters with Windows support is Flannel. -6. Click **Next**. - -7. From **Node Role**, choose the roles that you want filled by a cluster node. You must provision at least one node for each role: `etcd`, `worker`, and `control plane`. All three roles are required for a custom cluster to finish provisioning. For more information on roles, see [this section.]({{}}/rancher/v2.5/en/overview/concepts/#roles-for-nodes-in-kubernetes-clusters) - - >**Notes:** - > - >- Using Windows nodes as Kubernetes workers? See [this section]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/). - >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). - -8. **Optional**: Click **[Show advanced options]({{}}/rancher/v2.5/en/admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. - -9. Copy the command displayed on screen to your clipboard. - -10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - - >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. - -11. When you finish running the command(s) on your Linux host(s), click **Done**. 
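For reference, the command copied in step 9 generally has a shape like the following; the image tag, server URL, token, and checksum here are placeholders, and the trailing role flags correspond to the roles chosen in step 7 (they are described on the Rancher Agent Options page):

```
sudo docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:v2.5.12 \
  --server https://rancher.example.com \
  --token <REGISTRATION_TOKEN> \
  --ca-checksum <CA_CHECKSUM> \
  --etcd --controlplane --worker
```

Always use the exact command shown in your own Rancher UI, since it embeds the registration token and CA checksum for your installation.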
- -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -### 3. Amazon Only: Tag Resources - -If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. - -[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) - ->**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) - - -The following resources need to be tagged with a `ClusterID`: - -- **Nodes**: All hosts added in Rancher. -- **Subnet**: The subnet used for your cluster -- **Security Group**: The security group used for your cluster. - - >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating Elastic Load Balancer. - -The tag that should be used is: - -``` -Key=kubernetes.io/cluster/, Value=owned -``` - -`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster. - -If you share resources between clusters, you can change the tag to: - -``` -Key=kubernetes.io/cluster/CLUSTERID, Value=shared -``` - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
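As a quick sanity check for either access method, you can download the cluster's kubeconfig from the Rancher UI and run a couple of read-only commands against it (the file path below is only an example):

```plain
kubectl --kubeconfig ~/Downloads/my-cluster.yaml get nodes
kubectl --kubeconfig ~/Downloads/my-cluster.yaml get pods --all-namespaces
```

If both commands return results, kubectl access to the cluster is working independently of the Rancher UI.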
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md deleted file mode 100644 index 0e12cb24eb..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Rancher Agent Options -weight: 2500 -aliases: - - /rancher/v2.5/en/admin-settings/agent-options/ - - /rancher/v2.5/en/cluster-provisioning/custom-clusters/agent-options - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/ ---- - -Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) and add the options to the generated `docker run` command when adding a node. - -For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.]({{}}/rancher/v2.5/en/overview/architecture/#3-node-agents) - -## General options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | -| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | -| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | -| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | -| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | -| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. (`--taints key=value:effect`) | - -## Role options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | -| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | -| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | -| `--worker` | `WORKER=true` | Apply the role `worker` to the node | - -## IP address options - -| Parameter | Environment variable | Description | -| ---------- | -------------------- | ----------- | -| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | -| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | - -### Dynamic IP address options - -For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. 
- -| Value | Example | Description | -| ---------- | -------------------- | ----------- | -| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | -| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | -| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | -| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | -| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | -| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | -| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | -| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | -| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | -| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | -| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | -| `packetpublic` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md deleted file mode 100644 index 14b4d11ba5..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/_index.md +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: Launching Kubernetes on New Nodes in an Infrastructure Provider -weight: 2205 -aliases: - - /rancher/v2.5/en/concepts/global-configuration/node-templates/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ ---- - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. - -One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. 
- -The available cloud providers to create a node template are decided based on active [node drivers]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers). - -This section covers the following topics: - -- [Node templates](#node-templates) - - [Node labels](#node-labels) - - [Node taints](#node-taints) - - [Administrator control of node templates](#administrator-control-of-node-templates) -- [Node pools](#node-pools) - - [Node pool taints](#node-pool-taints) - - [About node auto-replace](#about-node-auto-replace) - - [Enabling node auto-replace](#enabling-node-auto-replace) - - [Disabling node auto-replace](#disabling-node-auto-replace) -- [Cloud credentials](#cloud-credentials) -- [Node drivers](#node-drivers) - -# Node Templates - -A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. - -After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. - -### Node Labels - -You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. - -Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) - -### Node Taints - -You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. - -Since taints can be added at a node template and node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### Administrator Control of Node Templates - -Administrators can control all node templates. Admins can now maintain all the node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained. - -To access all node templates, an administrator will need to do the following: - -1. In the Rancher UI, click the user profile icon in the upper right corner. -1. Click **Node Templates.** - -**Result:** All node templates are listed and grouped by owner. The templates can be edited or cloned by clicking the **⋮.** - -# Node Pools - -Using Rancher, you can create pools of nodes based on a [node template](#node-templates). - -A node template defines the configuration of a node, like what operating system to use, number of CPUs and amount of memory. 
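To sanity-check that the labels and taints discussed above actually ended up on a node provisioned from a template, you can inspect the registered node with `kubectl`. This is a minimal sketch; the node name is hypothetical.

```bash
# Hypothetical node name; shows the labels and taints applied to a provisioned node.
kubectl get node mycluster-pool1-node1 --show-labels
kubectl get node mycluster-pool1-node1 -o jsonpath='{.spec.taints}{"\n"}'
```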
- -The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. - -Each node pool must have one or more node roles assigned. - -Each node role (i.e. etcd, control plane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters. - -The recommended setup is to have: - -- a node pool with the etcd node role and a count of three -- a node pool with the control plane node role and a count of at least two -- a node pool with the worker node role and a count of at least two - -### Node Pool Taints - -If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than at the node template level is that you can swap out node templates without worrying about whether the taint is carried on the node template. - -Each taint is automatically added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to the existing nodes in the node pool, but any new node added into the node pool will get the taint. - -When there are taints on the node pool and node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. - -### About Node Auto-replace - -If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. - -> **Important:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated as ephemeral. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller-manager`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. - -### Enabling Node Auto-replace - -When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. - -1. In the form for creating a cluster, go to the **Node Pools** section. -1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. 
-1. Fill out the rest of the form for creating a cluster. - -**Result:** Node auto-replace is enabled for the node pool. - -You can also enable node auto-replace after the cluster is created with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. -1. Click **Save.** - -**Result:** Node auto-replace is enabled for the node pool. - -### Disabling Node Auto-replace - -You can disable node auto-replace from the Rancher UI with the following steps: - -1. From the Global view, click the Clusters tab. -1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** -1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter 0. -1. Click **Save.** - -**Result:** Node auto-replace is disabled for the node pool. - -# Cloud Credentials - -Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: - -- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. - -- After the cloud credential is created, it can be re-used to create additional node templates. - -- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. - -After cloud credentials are created, the user can start [managing the cloud credentials that they created]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/). - -# Node Drivers - -If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it]({{}}/rancher/v2.5/en/admin-settings/drivers/node-drivers/#activating-deactivating-node-drivers), or you can [add your own custom node driver]({{}}/rancher/v2.5/en/admin-settings/drivers/node-drivers/#adding-custom-node-drivers). diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md deleted file mode 100644 index eab3d5a8d2..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Creating an Azure Cluster -shortTitle: Azure -weight: 2220 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/ ---- - -In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. - -First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. - -Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. 
Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - ->**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: - -> - Terminate the SSL/TLS on the internal load balancer -> - Use the L7 load balancer - -> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). - -For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) - -For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) - -- [Preparation in Azure](#preparation-in-azure) -- [Creating an Azure Cluster](#creating-an-azure-cluster) - -# Preparation in Azure - -Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. - -To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. - -The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: - -``` -az ad sp create-for-rbac \ - --name="" \ - --role="Contributor" \ - --scopes="/subscriptions/" -``` - -The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, *The client secret*, and *The tenant ID*. This information will be used when you create a node template for Azure. - -# Creating an Azure Cluster - - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Azure**. -1. Enter your Azure credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for Azure. 
For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in Azure. - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Azure**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
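For reference, a filled-in version of the service principal script shown earlier in this page might look like the following. The name and subscription ID are hypothetical placeholders; the command prints the client ID, client secret, and tenant ID that you enter as your Azure cloud credential.

```bash
# Hypothetical values for illustration only; substitute your own name and subscription ID.
az ad sp create-for-rbac \
  --name="rancher-node-driver" \
  --role="Contributor" \
  --scopes="/subscriptions/00000000-0000-0000-0000-000000000000"
```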
\ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md deleted file mode 100644 index 4b32a480ae..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Creating a DigitalOcean Cluster -shortTitle: DigitalOcean -weight: 2215 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/ ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. - -First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. - -Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **DigitalOcean**. -1. Enter your Digital Ocean credentials. -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. 
To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) -1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md deleted file mode 100644 index 35c1f99753..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ /dev/null @@ -1,233 +0,0 @@ ---- -title: Creating an Amazon EC2 Cluster -shortTitle: Amazon EC2 -description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher -weight: 2210 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. - -First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. - -Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. 
- -### Prerequisites - -- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: - - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) - - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) -- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. - -# Creating an EC2 Cluster - -The steps to create a cluster differ based on your Rancher version. - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **Amazon.** -1. In the **Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key.** -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials and information from EC2 - -Creating a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools) - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -1. From the **Clusters** page, click **Add Cluster**. -1. Choose **Amazon EC2**. -1. 
Enter a **Cluster Name**. -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools) -1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) -1. Click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
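As a rough sketch of the kubectl-based access described above: the kubeconfig downloaded from the Rancher UI for a cluster with the authorized cluster endpoint enabled contains more than one context, and you can switch between the Rancher-proxied and direct contexts. The file name and context names below are hypothetical.

```bash
# Hypothetical file and context names; the real ones come from the kubeconfig
# downloaded from the Rancher UI for your cluster.
export KUBECONFIG=~/Downloads/my-ec2-cluster.yaml
kubectl config get-contexts           # one context proxied through Rancher, plus direct one(s)
kubectl config use-context my-ec2-cluster-direct
kubectl get nodes
```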
- -# IAM Policies - -### Example IAM Policy - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` - -### Example IAM Policy with PassRole - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", - "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` -### Example IAM Policy to allow encrypted EBS volumes -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "kms:Decrypt", - "kms:GenerateDataKeyWithoutPlaintext", - "kms:Encrypt", - "kms:DescribeKey", - "kms:CreateGrant", - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:DeleteSnapshot", - "ec2:DeleteTags", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:CreateSnapshot" - ], - "Resource": [ - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", - "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" - ] - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md deleted file mode 100644 index e4a50144aa..0000000000 
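If you prefer the AWS CLI over the console for the prerequisite IAM setup, a hedged equivalent of creating one of the example policies above and attaching it to the access key's user might look like this. The file, policy, and user names are hypothetical.

```bash
# Hypothetical names; rancher-ec2-policy.json holds one of the JSON documents above.
aws iam create-policy \
  --policy-name RancherEC2NodeDriver \
  --policy-document file://rancher-ec2-policy.json

aws iam attach-user-policy \
  --user-name rancher-node-driver \
  --policy-arn arn:aws:iam::AWS_ACCOUNT_ID:policy/RancherEC2NodeDriver
```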
--- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: EC2 Node Template Configuration -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ ---- - -For more details about EC2 nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). -### Region - -In the **Region** field, select the same region that you used when creating your cloud credentials. - -### Cloud Credentials - -Your AWS account access information, stored in a [cloud credential.]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/) - -See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) for how to create an Access Key and Secret Key. - -See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) for how to create an IAM policy. - -See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) for how to attach an IAM policy to a user. - -See our three example JSON policies: - -- [Example IAM Policy]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy) -- [Example IAM Policy with PassRole]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) -- [Example IAM Policy to allow encrypted EBS volumes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/#example-iam-policy-to-allow-encrypted-ebs-volumes) - -### Authenticate & Configure Nodes - -Choose an availability zone and network settings for your cluster. - -### Security Group - -Choose the default security group or configure a security group. - -Please refer to [Amazon EC2 security group when using Node Driver]({{}}/rancher/v2.5/en/installation/requirements/ports/#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. - -### Instance Options - -Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. It is possible that a selected region does not support the default instance type. In this scenario, you must select an instance type that the region does support; otherwise an error will occur stating that the requested configuration is not supported. - -If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. - -### Engine Options - -In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the Docker version or a Docker registry mirror. 
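For context, a Docker registry mirror set through the Engine Options corresponds roughly to the following daemon configuration on the provisioned node. This is a sketch under the assumption that the mirror is reachable from the nodes; the URL is hypothetical.

```bash
# Roughly what a registry mirror setting translates to on the node (hypothetical URL).
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
  "registry-mirrors": ["https://mirror.example.internal"]
}
EOF
sudo systemctl restart docker
```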
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md deleted file mode 100644 index 0fc3708e0d..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Creating a vSphere Cluster -shortTitle: vSphere -description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. -weight: 2225 -aliases: - - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/ ---- - -By using Rancher with vSphere, you can bring cloud operations on-premises. - -Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. - -A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. - -- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) -- [Provisioning Storage](#provisioning-storage) -- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider) - -# vSphere Enhancements in Rancher v2.3 - -The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: - -### Self-healing Node Pools - -One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. - -> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. - -### Dynamically Populated Options for Instances and Scheduling - -Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. - -For the fields to be populated, your setup needs to fulfill the [prerequisites.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) - -### More Supported Operating Systems - -You can provision VMs with any operating system that supports `cloud-init`. 
Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) - -### Video Walkthrough of v2.3.3 Node Template Features - -In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. - -{{< youtube id="dPIwg6x1AlU">}} - -# Creating a vSphere Cluster - -In [this section,](./provisioning-vsphere-clusters) you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. - -# Provisioning Storage - -For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) - -# Enabling the vSphere Cloud Provider - -When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. - -For details, refer to the section on [enabling the vSphere cloud provider.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md deleted file mode 100644 index cf2082a1da..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Creating Credentials in the vSphere Console -weight: 3 -aliases: - - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/ ---- - -This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. - -The following table lists the permissions required for the vSphere user account: - -| Privilege Group | Operations | -|:----------------------|:-----------------------------------------------------------------------| -| Datastore | AllocateSpace
Browse
FileManagement (Low level file operations)
UpdateVirtualMachineFiles
UpdateVirtualMachineMetadata | -| Network | Assign | -| Resource | AssignVMToPool | -| Virtual Machine | Config (All)
GuestOperations (All)
Interact (All)
Inventory (All)
Provisioning (All) | - -The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: - -1. From the **vSphere** console, go to the **Administration** page. - -2. Go to the **Roles** tab. - -3. Create a new role. Give it a name and select the privileges listed in the permissions table above. - - {{< img "/img/rancher/rancherroles1.png" "image" >}} - -4. Go to the **Users and Groups** tab. - -5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. - - {{< img "/img/rancher/rancheruser.png" "image" >}} - -6. Go to the **Global Permissions** tab. - -7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. - - {{< img "/img/rancher/globalpermissionuser.png" "image" >}} - - {{< img "/img/rancher/globalpermissionrole.png" "image" >}} - -**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md deleted file mode 100644 index 6e0a9b5df8..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Provisioning Kubernetes Clusters in vSphere -weight: 1 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/ ---- - -In this section, you'll learn how to use Rancher to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in vSphere. - -First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. - -Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) - -For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) - -- [Preparation in vSphere](#preparation-in-vsphere) -- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) - -# Preparation in vSphere - -This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. - -The node templates are documented and tested with the vSphere Web Services API version 6.5. - -### Create Credentials in vSphere - -Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. 
- -Refer to this [how-to guide]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. - -### Network Permissions - -Ensure that the hosts running the Rancher server can establish the following network connections: - -- To the vSphere API on the vCenter server (usually port 443/TCP). -- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required when using the ISO creation method*). -- To port 22/TCP and 2376/TCP on the created VMs. - -See [Node Networking Requirements]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. - -### Valid ESXi License for vSphere API Access - -The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. - -### VM-VM Affinity Rules for Clusters with DRS - -If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. - -# Creating a vSphere Cluster - -The way a vSphere cluster is created in Rancher depends on the Rancher version. - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** -1. Click **Add Cloud Credential.** -1. Enter a name for the cloud credential. -1. In the **Cloud Credential Type** field, select **VMware vSphere**. -1. Enter your vSphere credentials. For help, refer to **Account Access** in the [node template configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/) -1. Click **Create.** - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. - -1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** -1. Click **Add Template.** -1. Fill out a node template for vSphere. 
For help filling out the form, refer to the vSphere node template [configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/). - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in vSphere. - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -1. Navigate to **Clusters** in the **Global** view. -1. Click **Add Cluster** and select the **vSphere** infrastructure provider. -1. Enter a **Cluster Name.** -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options) -1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-pools) -1. Review your options to confirm they're correct. Then click **Create**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
-- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/examples/vsphere) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md deleted file mode 100644 index 9271841fad..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: VSphere Node Template Configuration -weight: 2 -aliases: - - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference - - /rancher/v2.5/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/ ---- - -The following node template configuration reference applies to Rancher v2.3.3+. - -- [Account Access](#account-access) -- [Scheduling](#scheduling) -- [Instance Options](#instance-options) -- [Networks](#networks) -- [Node tags and custom attributes](#node-tags-and-custom-attributes) -- [cloud-init](#cloud-init) - -# Account Access - -| Parameter | Required | Description | -|:----------------------|:--------:|:-----| -| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/) | - -Your cloud credential has these fields: - -| Credential Field | Description | -|-----------------|--------------| -| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | -| Port | Optional: configure configure the port of the vCenter or ESXi server. | -| Username and password | Enter your vSphere login username and password. | - -# Scheduling - -Choose what hypervisor the virtual machine will be scheduled to. - -The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. - -| Field | Required | Explanation | -|---------|---------------|-----------| -| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. | -| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | -| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | -| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. 
The folder name should be prefaced with `vm/` in your vSphere config file. | -| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | - -# Instance Options - -In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. - -| Parameter | Required | Description | -|:----------------|:--------:|:-----------| -| CPUs | * | Number of vCPUS to assign to VMs. | -| Memory | * | Amount of memory to assign to VMs. | -| Disk | * | Size of the disk (in MB) to attach to the VMs. | -| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | -| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | -| Networks | | Name(s) of the network to attach the VM to. | -| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{< baseurl >}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | - - -### About VM Creation Methods - -In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). - -The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). - -Choose the way that the VM will be created: - -- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. -- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates.** -- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. -- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). 
Note that this URL must be accessible from the nodes running your Rancher server installation. - -# Networks - -The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. - -# Node Tags and Custom Attributes - -Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. - -For tags, all your vSphere tags will show up as options to select from in your node template. - -In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. - -> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. - -# cloud-init - -[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. - -To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. - -Note that cloud-init is not supported when using the ISO creation method. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md deleted file mode 100644 index 63b3bfddcb..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/_index.md +++ /dev/null @@ -1,323 +0,0 @@ ---- -title: RKE Cluster Configuration Reference -weight: 2250 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/ ---- - -When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) as the Kubernetes distribution. - -This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. - -You can configure the Kubernetes options one of two ways: - -- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -The RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
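For orientation, here is a minimal sketch of that nesting. The values are illustrative placeholders drawn from the full example later in this reference, not a complete or recommended configuration:

```yaml
# Rancher-level (non-RKE) options sit at the top level of the cluster config
enable_network_policy: false
local_cluster_auth_endpoint:
  enabled: true
# RKE options are nested under this directive
rancher_kubernetes_engine_config:
  kubernetes_version: v1.15.3-rancher3-1
  network:
    plugin: canal
```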
For more information, see the section about the [cluster config file.](#cluster-config-file) - -This section is a cluster configuration reference, covering the following topics: - -- [Rancher UI Options](#rancher-ui-options) - - [Kubernetes version](#kubernetes-version) - - [Network provider](#network-provider) - - [Project network isolation](#project-network-isolation) - - [Kubernetes cloud providers](#kubernetes-cloud-providers) - - [Private registries](#private-registries) - - [Authorized cluster endpoint](#authorized-cluster-endpoint) - - [Node pools](#node-pools) -- [Advanced Options](#advanced-options) - - [NGINX Ingress](#nginx-ingress) - - [Node port range](#node-port-range) - - [Metrics server monitoring](#metrics-server-monitoring) - - [Pod security policy support](#pod-security-policy-support) - - [Docker version on nodes](#docker-version-on-nodes) - - [Docker root directory](#docker-root-directory) - - [Recurring etcd snapshots](#recurring-etcd-snapshots) - - [Agent Environment Variables](#agent-environment-variables) -- [Cluster config file](#cluster-config-file) - - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0) - - [Default DNS provider](#default-dns-provider) -- [Rancher specific parameters](#rancher-specific-parameters) - -# Rancher UI Options - -When creating a cluster using one of the options described in [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters), you can configure basic Kubernetes options using the **Cluster Options** section. - -### Kubernetes Version - -The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). - -### Network Provider - -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.5/en/faq/networking/cni-providers/). - ->**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. - -Out of the box, Rancher is compatible with the following network providers: - -- [Canal](https://github.com/projectcalico/canal) -- [Flannel](https://github.com/coreos/flannel#flannel) -- [Calico](https://docs.projectcalico.org/v3.11/introduction/) -- [Weave](https://github.com/weaveworks/weave) - - -**Notes on Weave:** - -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). - -### Project Network Isolation - -Project network isolation is used to enable or disable communication between pods in different projects. 
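In the cluster config file, this setting corresponds to the top-level `enable_network_policy` flag. A minimal, illustrative sketch is shown below; see the `enable_network_policy` parameter and the example cluster config file later on this page:

```yaml
# Rancher-level option; not nested under rancher_kubernetes_engine_config
enable_network_policy: true
rancher_kubernetes_engine_config:
  network:
    # a plugin that enforces Kubernetes network policies, such as Canal
    plugin: canal
```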
- -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -To enable project network isolation as a cluster option, you will need to use any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -To enable project network isolation as a cluster option, you will need to use Canal as the CNI. - -{{% /tab %}} -{{% /tabs %}} - -### Kubernetes Cloud Providers - -You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. - ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. - -If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: - -### Private registries - -The cluster-level private registry configuration is only used for provisioning clusters. - -There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.5/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. - -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - -- **System images** are components needed to maintain the Kubernetes cluster. -- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. - -See the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. - -### Authorized Cluster Endpoint - -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. - -> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE]({{}}/rancher/v2.5/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. 
Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are registered with Rancher; it is available only on Rancher-launched Kubernetes clusters. - -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. - -For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -### Node Pools - -For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools) - -# Advanced Options - -The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** - -### NGINX Ingress - -Option to enable or disable the [NGINX ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/). - -### Node Port Range - -Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. - -### Metrics Server Monitoring - -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). - -### Pod Security Policy Support - -Option to enable and select a default [Pod Security Policy]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies). You must have an existing Pod Security Policy configured before you can use this option. - -### Docker Version on Nodes - -Option to require [a supported Docker version]({{}}/rancher/v2.5/en/installation/requirements/) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. - -### Docker Root Directory - -If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. - -### Recurring etcd Snapshots - -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). - -### Agent Environment Variables - -_Available as of v2.5.6_ - -Option to set environment variables for [rancher agents]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. - - -# Cluster Config File - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - -- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. 
-- To read from an existing RKE file, click **Read from a file**. - -![image]({{}}/img/rancher/cluster-options-yaml.png) - -### Config File Structure in Rancher v2.3.0+ - -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. - -{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File" %}} - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. -# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` -{{% /accordion %}} - -### Default DNS provider - -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. 
- -| Rancher version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v2.2.5 and higher | v1.13.x and lower | kube-dns | -| v2.2.4 and lower | any | kube-dns | - -# Rancher specific parameters - -Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): - -### docker_root_dir - -See [Docker Root Directory](#docker-root-directory). - -### enable_cluster_monitoring - -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.5/en/monitoring-alerting/). - -### enable_network_policy - -Option to enable or disable Project Network Isolation. - -Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. - -In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. - -### local_cluster_auth_endpoint - -See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). - -Example: - -```yaml -local_cluster_auth_endpoint: - enabled: true - fqdn: "FQDN" - ca_certs: "BASE64_CACERT" -``` - -### Custom Network Plug-in - -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. - -There are two ways that you can specify an add-on: - -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) - -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md deleted file mode 100644 index d06de7da09..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/pod-security-policies/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Assigning Pod Security Policies -weight: 2260 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/ ---- - -_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). - -## Adding a Default Pod Security Policy - -When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. - ->**Prerequisite:** ->Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/). ->**Note:** ->For security purposes, we recommend assigning a PSP as you create your clusters. 
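For illustration only, a policy created ahead of time might look like the minimal, permissive sketch below; the name and rules are placeholders, not a Rancher default:

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-psp            # hypothetical name
spec:
  privileged: false            # disallow privileged containers
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
    - '*'
```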
- -To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. - -When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md deleted file mode 100644 index 39b6571012..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Rancher Agents -weight: 2400 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/ ---- - -There are two different agent resources deployed on Rancher managed clusters: - -- [cattle-cluster-agent](#cattle-cluster-agent) -- [cattle-node-agent](#cattle-node-agent) - -For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture]({{}}/rancher/v2.5/en/overview/architecture/) - -### cattle-cluster-agent - -The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. - -### cattle-node-agent - -The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters when `cattle-cluster-agent` is unavailable. - -### Scheduling rules - -_Applies to v2.5.4 and higher_ - -Starting with Rancher v2.5.4, the tolerations for the `cattle-cluster-agent` changed from `operator:Exists` (allowing all taints) to a fixed set of tolerations (listed below, if no controlplane nodes are visible in the cluster) or dynamically added tolerations based on taints applied to the controlplane nodes. This change was made to allow [Taint based Evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions) to work properly for `cattle-cluster-agent`. The default tolerations are described below. If controlplane nodes are present the cluster, the tolerations will be replaced with tolerations matching the taints on the controlplane nodes. - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **Note:** These are the default tolerations, and will be replaced by tolerations matching taints applied to controlplane nodes.

`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/controlplane`<br/>`value:true`<br/><br/>`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/control-plane`<br/>`operator:Exists`<br/><br/>`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/master`<br/>
`operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. When there are no controlplane nodes visible in the cluster (this is usually the case when using [Clusters from Hosted Kubernetes Providers]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/)), you can add the label `cattle.io/cluster-agent=true` on a node to prefer scheduling the `cattle-cluster-agent` pod to that node. - -See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. - -The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 100 | `node-role.kubernetes.io/control-plane:In:"true"` | -| 100 | `node-role.kubernetes.io/master:In:"true"` | -| 1 | `cattle.io/cluster-agent:In:"true"` | - -_Applies to v2.3.0 up to v2.5.3_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | -| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | -| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | - -The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. - -The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: - -| Weight | Expression | -| ------ | ------------------------------------------------ | -| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | -| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md deleted file mode 100644 index 16bf34656c..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: Launching Kubernetes on Windows Clusters -weight: 2240 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/ ---- - -When provisioning a [custom cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. - -In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. 
- -Some other requirements for Windows clusters include: - -- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. -- Kubernetes 1.15+ is required. -- The Flannel network provider must be used. -- Windows nodes must have 50 GB of disk space. - -For the full list of requirements, see [this section.](#requirements-for-windows-clusters) - -For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). - -This guide covers the following topics: - - - -- [Requirements](#requirements-for-windows-clusters) -- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) -- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) - - -# Requirements for Windows Clusters - -The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation]({{}}/rancher/v2.5/en/installation/requirements/). - -### OS and Docker Requirements - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -Our support for Windows Server and Windows containers match the Microsoft official lifecycle for LTSC (Long-Term Servicing Channel) and SAC (Semi-Annual Channel). - -For the support lifecycle dates for Windows Server, see the [Microsoft Documentation.](https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info) -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} -In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): - -- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. -- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. - -> **Notes:** -> -> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). -> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. -{{% /tab %}} -{{% /tabs %}} - -### Kubernetes Version - -Kubernetes v1.15+ is required. - -### Node Requirements - -The hosts in the cluster need to have at least: - -- 2 core CPUs -- 5 GB memory -- 50 GB disk space - -Rancher will not provision the node if the node does not meet these requirements. - -### Networking Requirements - -Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation]({{}}/rancher/v2.5/en/installation/) before proceeding with this guide. - -Rancher only supports Windows using Flannel as the network provider. 
- -There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. - -For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. - -If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) - -> Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name. - -### Rancher on vSphere with ESXi 6.7u2 and above - -If you are using Rancher on VMware vSphere with ESXi 6.7u2 or later with Red Hat Enterprise Linux 8.3, CentOS 8.3, or SUSE Enterprise Linux 15 SP2 or later, it is necessary to disable the `vmxnet3` virtual network adapter hardware offloading feature. Failure to do so will result in all network connections between pods on different cluster nodes to fail with timeout errors. All connections from Windows pods to critical services running on Linux nodes, such as CoreDNS, will fail as well. It is also possible that external connections may fail. This issue is the result of Linux distributions enabling the hardware offloading feature in `vmxnet3` and a bug in the `vmxnet3` hardware offloading feature that results in the discarding of packets for guest overlay traffic. To address this issue, it is necessary disable the `vmxnet3` hardware offloading feature. This setting does not survive reboot, so it is necessary to disable on every boot. The recommended course of action is to create a systemd unit file at `/etc/systemd/system/disable_hw_offloading.service`, which disables the `vmxnet3` hardware offloading feature on boot. A sample systemd unit file which disables the `vmxnet3` hardware offloading feature is as follows. 
Note that `` must be customized to the host `vmxnet3` network interface, e.g., `ens192`: - -``` -[Unit] -Description=Disable vmxnet3 hardware offloading feature - -[Service] -Type=oneshot -ExecStart=ethtool -K tx-udp_tnl-segmentation off -ExecStart=ethtool -K tx-udp_tnl-csum-segmentation off -StandardOutput=journal - -[Install] -WantedBy=multi-user.target -``` -Then set the appropriate permissions on the systemd unit file: -``` -chmod 0644 /etc/systemd/system/disable_hw_offloading.service -``` -Finally, enable the systemd service: -``` -systemctl enable disable_hw_offloading.service -``` - -### Architecture Requirements - -The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. - -The `worker` nodes, which is where your workloads will be deployed on, will typically be Windows nodes, but there must be at least one `worker` node that is run on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers. - -Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. - -We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: - - - -| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | -| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | Control plane, etcd, worker | Manage the Kubernetes cluster | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | Worker | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | -| Node 3 | Windows (Windows Server core version 1809 or above) | Worker | Run your Windows containers | - -### Container Requirements - -Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. - -### Cloud Provider Specific Requirements - -If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider cluster of nodes that meet the prerequisites. - -If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: - -- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce) -- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. 
- -# Tutorial: How to Create a Cluster with Windows Support - -This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) - -When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. - -To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below. - - - -1. [Provision Hosts](#1-provision-hosts) -1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes) -1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) -1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files) - - -# 1. Provision Hosts - -To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts. - -Your hosts can be: - -- Cloud-hosted VMs -- VMs from virtualization clusters -- Bare-metal servers - -You will provision three nodes: - -- One Linux node, which manages the Kubernetes control plane and stores your `etcd` -- A second Linux node, which will be another worker node -- The Windows node, which will run your Windows containers as a worker node - -| Node | Operating System | -| ------ | ------------------------------------------------------------ | -| Node 1 | Linux (Ubuntu Server 18.04 recommended) | -| Node 2 | Linux (Ubuntu Server 18.04 recommended) | -| Node 3 | Windows (Windows Server core version 1809 or above required) | - -If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers) - -# 2. Create the Cluster on Existing Nodes - -The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/) with some Windows-specific requirements. - -1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. -1. Click **From existing nodes (Custom)**. -1. Enter a name for your cluster in the **Cluster Name** text box. -1. In the **Kubernetes Version** dropdown menu, select v1.15 or above. -1. In the **Network Provider** field, select **Flannel.** -1. In the **Windows Support** section, click **Enable.** -1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. -1. Click **Next**. - -> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. 
For details, refer to the [documentation on configuring cloud-hosted VM routes.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. - -# 3. Add Nodes to the Cluster - -This section describes how to register your Linux and Worker nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node. - -### Add Linux Master Node - -In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. - -The first node in your cluster should be a Linux host has both the **Control Plane** and **etcd** roles. At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. - -1. In the **Node Operating System** section, click **Linux**. -1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. -1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent]({{}}/rancher/v2.5/en/admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) -1. Copy the command displayed on the screen to your clipboard. -1. SSH into your Linux host and run the command that you copied to your clipboard. -1. When you are finished provisioning your Linux node(s), select **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -It may take a few minutes for the node to be registered in your cluster. - -### Add Linux Worker Node - -In this section, we run a command to register the Linux worker node to the cluster. - -After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Linux**. -1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. -1. Copy the command displayed on screen to your clipboard. -1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. -1. From **Rancher**, click **Save**. - -**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. 
- -> **Note:** Taints on Linux Worker Nodes -> -> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the Windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. - -> | Taint Key | Taint Value | Taint Effect | -> | -------------- | ----------- | ------------ | -> | `cattle.io/os` | `linux` | `NoSchedule` | - -### Add a Windows Worker Node - -In this section, we run a command to register the Windows worker node to the cluster. - -You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option. - -1. From the **Global** view, click **Clusters.** -1. Go to the cluster that you created and click **⋮ > Edit.** -1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. -1. Copy the command displayed on screen to your clipboard. -1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. -1. From Rancher, click **Save**. -1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. - -**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# Configuration for Storage Classes in Azure - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. 
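As a rough sketch, a StorageClass backed by the in-tree Azure file provisioner typically looks like the following; the name and `skuName` are illustrative, and the Azure cloud provider must already be configured for the cluster:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: azurefile              # illustrative name
provisioner: kubernetes.io/azure-file
parameters:
  skuName: Standard_LRS        # illustrative Azure storage account SKU
```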
For details, refer to [this section.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass) diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md b/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md deleted file mode 100644 index c67aaa96cb..0000000000 --- a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Configuration for Storage Classes in Azure -weight: 3 -aliases: - - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/ ---- - -If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. - -In order to have the Azure platform create the required storage resources, follow these steps: - -1. [Configure the Azure cloud provider.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/azure) -1. Configure `kubectl` to connect to your cluster. -1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: - - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: system:azure-cloud-provider - rules: - - apiGroups: [''] - resources: ['secrets'] - verbs: ['get','create'] - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: system:azure-cloud-provider - roleRef: - kind: ClusterRole - apiGroup: rbac.authorization.k8s.io - name: system:azure-cloud-provider - subjects: - - kind: ServiceAccount - name: persistent-volume-binder - namespace: kube-system - -1. Create these in your cluster using one of the follow command. - - ``` - # kubectl create -f - ``` diff --git a/content/rancher/v2.5/en/contributing/_index.md b/content/rancher/v2.5/en/contributing/_index.md deleted file mode 100644 index f7db188002..0000000000 --- a/content/rancher/v2.5/en/contributing/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Contributing to Rancher -weight: 27 -aliases: - - /rancher/v2.5/en/faq/contributing/ - - /rancher/v2.x/en/contributing/ ---- - -This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. - -For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: - -- How to set up the Rancher development environment and run tests -- The typical flow of an issue through the development lifecycle -- Coding guidelines and development best practices -- Debugging and troubleshooting -- Developing the Rancher API - -On the Rancher Users Slack, the channel for developers is **#developer**. - -# Repositories - -All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. - -Repository | URL | Description ------------|-----|------------- -Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. 
-Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. -API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. -User Interface | https://github.com/rancher/ui | This repository is the source of the UI. -(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. -machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. -kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. -RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. -CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. -(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. -Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. -loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. - -To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. - -![Rancher diagram]({{}}/img/rancher/ranchercomponentsdiagram.svg)
-Rancher components used for provisioning/managing Kubernetes clusters. - -# Building - -Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. - -The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. - -# Bugs, Issues or Questions - -If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. - -If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). - -### Checklist for Filing Issues - -Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. - ->**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. ->**Important:** Please remove any sensitive data as it will be publicly viewable. - -- **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: - - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce - - **Operating System:** What operating system are you using? Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used - - **Docker:** What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info` - - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer - - **Rancher:** What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host - - **Clusters:** What kind of cluster did you create, how did you create it, what did you specify when you were creating it -- **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. 
- - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. -- **Logs:** Provide data/logs from the used resources. - - Rancher - - Docker install - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') - ``` - - Kubernetes install using `kubectl` - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - -l app=rancher \ - --timestamps=true - ``` - - Docker install using `docker` on each of the nodes in the RKE cluster - - ``` - docker logs \ - --timestamps \ - $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') - ``` - - Kubernetes Install with RKE Add-On - - > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. - - ``` - kubectl -n cattle-system \ - logs \ - --timestamps=true \ - -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') - ``` - - System logging (these might not all exist, depending on operating system) - - `/var/log/messages` - - `/var/log/syslog` - - `/var/log/kern.log` - - Docker daemon logging (these might not all exist, depending on operating system) - - `/var/log/docker.log` -- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. - -# Docs - -If you have any updates to our documentation, please make any pull request to our docs repo. - -- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. - -- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. diff --git a/content/rancher/v2.5/en/deploy-across-clusters/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/_index.md deleted file mode 100644 index fe47c83262..0000000000 --- a/content/rancher/v2.5/en/deploy-across-clusters/_index.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Deploying Applications across Clusters -weight: 12 -aliases: - - /rancher/v2.x/en/deploy-across-clusters/ ---- -### Fleet - -Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. - -Fleet is GitOps at scale. For more information, refer to the [Fleet section.](./fleet) - -### Multi-cluster Apps - -In Rancher before v2.5, the multi-cluster apps feature was used to deploy applications across clusters. The multi-cluster apps feature is deprecated, but still available in Rancher v2.5. 
- -Refer to the documentation [here.](./multi-cluster-apps) \ No newline at end of file diff --git a/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md deleted file mode 100644 index 9774589340..0000000000 --- a/content/rancher/v2.5/en/deploy-across-clusters/fleet/_index.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Fleet - GitOps at Scale -weight: 1 -aliases: - - /rancher/v2.x/en/deploy-across-clusters/fleet/ ---- - -_Available as of Rancher v2.5_ - -Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. - -Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. - -- [Architecture](#architecture) -- [Accessing Fleet in the Rancher UI](#accessing-fleet-in-the-rancher-ui) -- [Windows Support](#windows-support) -- [GitHub Repository](#github-repository) -- [Using Fleet Behind a Proxy](#using-fleet-behind-a-proxy) -- [Helm Chart Dependencies](#helm-chart-dependencies) -- [Troubleshooting](#troubleshooting) -- [Documentation](#documentation) - -# Architecture - -For information about how Fleet works, see [this page.](./architecture) - -# Accessing Fleet in the Rancher UI - -Fleet comes preinstalled in Rancher v2.5. Users can leverage continuous delivery to deploy their applications to the Kubernetes clusters in the git repository without any manual operation by following **gitops** practice. For additional information on Continuous Delivery and other Fleet troubleshooting tips, refer [here](https://fleet.rancher.io/troubleshooting/). - -Follow the steps below to access Continuous Delivery in the Rancher UI: - -1. Click **Cluster Explorer** in the Rancher UI. - -1. In the top left dropdown menu, click **Cluster Explorer > Continuous Delivery.** - -1. Select your namespace at the top of the menu, noting the following: - - By default,`fleet-default` is selected which includes all downstream clusters that are registered through Rancher. - - You may switch to `fleet-local`, which only contains the `local` cluster, or you may create your own workspace to which you may assign and move clusters. - - You can then manage clusters by clicking on **Clusters** on the left navigation bar. - -1. Click on **Gitrepos** on the left navigation bar to deploy the gitrepo into your clusters in the current workspace. - -1. Select your [git repository](https://fleet.rancher.io/gitrepo-add/) and [target clusters/cluster group](https://fleet.rancher.io/gitrepo-structure/). You can also create the cluster group in the UI by clicking on **Cluster Groups** from the left navigation bar. - -1. Once the gitrepo is deployed, you can monitor the application through the Rancher UI. 
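The GitRepo entries created through the UI in the steps above are ordinary custom resources, so they can also be managed declaratively. The following is a minimal sketch only; the repository URL, paths, and cluster labels are placeholders, and the exact fields should be verified against the Fleet documentation for your version:

```
# Create a GitRepo in the fleet-default workspace that deploys the manifests
# under ./manifests to every cluster labeled env=dev (illustrative values).
kubectl apply -f - <<'EOF'
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: example-app
  namespace: fleet-default
spec:
  repo: https://github.com/example/fleet-examples
  branch: main
  paths:
    - manifests
  targets:
    - clusterSelector:
        matchLabels:
          env: dev
EOF

# Check the status of the GitRepo after it has been created.
kubectl -n fleet-default get gitrepo example-app
```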
- -# Windows Support - -_Available as of v2.5.6_ - -For details on support for clusters with Windows nodes, see [this page.](./windows) - - -# GitHub Repository - -The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) - - -# Using Fleet Behind a Proxy - -_Available as of v2.5.8_ - -For details on using Fleet behind a proxy, see [this page.](./proxy) - -# Helm Chart Dependencies - -In order for Helm charts with dependencies to deploy successfully, you must run a manual command (as listed below), as it is up to the user to fulfill the dependency list. If you do not do this and proceed to clone your repository and run `helm install`, your installation will fail because the dependencies will be missing. - -The Helm chart in the git repository must include its dependencies in the charts subdirectory. You must either manually run `helm dependencies update $chart` OR run `helm dependencies build $chart` locally, then commit the complete charts directory to your git repository. Note that you will update your commands with the applicable parameters. - -# Troubleshooting ---- -* **Known Issue:** Fleet becomes inoperable after a restore using the [backup-restore-operator]({{}}/rancher/v2.5/en/backups/back-up-rancher/#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. - -* **Temporary Workaround:**
 1. Find the two service account tokens listed in the fleet-controller and the fleet-controller-bootstrap service accounts. Both are in the `fleet-system` namespace of the local cluster.
 2. Remove the token secret entry that no longer exists, so that each service account references only the token secret that is actually present.
 3. Delete the fleet-controller Pod in the `fleet-system` namespace so that it is rescheduled.
 4. After the service account token issue is resolved, you can force redeployment of the fleet-agents: in the Rancher UI, go to **☰ > Cluster Management**, open the **Clusters** page, then click **Force Update**.
 5. If the fleet-agent bundles remain in a `Modified` state after Step 4, update the field `spec.forceSyncGeneration` for the fleet-agent bundle to force re-creation. - --- -* **Known Issue:** `clientSecretName` and `helmSecretName` secrets for Fleet gitrepos are not included in the backup or restore created by the [backup-restore-operator]({{}}/rancher/v2.5/en/backups/back-up-rancher/#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. - -* **Temporary Workaround:**
-By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify resourceSet to include extra resources you want to backup, refer to docs [here](https://github.com/rancher/backup-restore-operator#user-flow). - ---- - -# Documentation - -The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/) diff --git a/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md b/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md deleted file mode 100644 index 862daaacaf..0000000000 --- a/content/rancher/v2.5/en/deploy-across-clusters/multi-cluster-apps/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Multi-cluster Apps -weight: 2 -aliases: - - /rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/ ---- - -> As of Rancher v2.5, we now recommend using [Fleet]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet) for deploying apps across clusters. - -Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. - -Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. - -After creating a multi-cluster application, you can program a global DNS entry to make it easier to access the application. 
- -- [Prerequisites](#prerequisites) -- [Launching a multi-cluster app](#launching-a-multi-cluster-app) -- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) - - [Targets](#targets) - - [Upgrades](#upgrades) - - [Roles](#roles) -- [Application configuration options](#application-configuration-options) - - [Using a questions.yml file](#using-a-questions-yml-file) - - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) - - [Members](#members) - - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) -- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) -- [Multi-cluster application management](#multi-cluster-application-management) -- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) - -# Prerequisites - -To create a multi-cluster app in Rancher, you must have at least one of the following permissions: - -- A [project-member role]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads -- A [cluster owner role]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) for the clusters(s) that include the target project(s) - -# Launching a Multi-Cluster App - -1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. - -2. Find the application that you want to launch, and then click **View Details**. - -3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. - -4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. - -5. Select a **Template Version**. - -6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). - -7. Select the **Members** who can [interact with the multi-cluster application](#members). - -8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. - -7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. - -**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: - -# Multi-cluster App Configuration Options - -Rancher has divided the configuration option for the multi-cluster application into several sections. - -### Targets - -In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. - -### Upgrades - -In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. 
- -* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. - -* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. - -### Roles - -In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications]({{}}/rancher/v2.5/en/catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. - -For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. - -Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. - -- **Project** - This is the equivalent of a [project member]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/), a [cluster owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or a [project owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles), then the user is considered to have the appropriate level of permissions. - -- **Cluster** - This is the equivalent of a [cluster owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/), then the user is considered to have the appropriate level of permissions. - -When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. - -> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. - -# Application Configuration Options - -For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. 
When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. - -> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). - -### Using a questions.yml file - -If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. - -### Key Value Pairs for Native Helm Charts - -For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a custom Helm chart repository, answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. - -### Members - -By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. - -1. Find the user that you want to add by typing in the member's name in the **Member** search box. - -2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. - - - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). Only trusted users should be provided with this access type. - - - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. - - > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. 
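As context for the `--set` formatting rules mentioned earlier in this section, the sketch below is a purely illustrative equivalent of what Rancher does with your answers behind the scenes; the release name, repository, chart, and value names are made up:

```
# Hypothetical answers rendered as Helm --set flags.
# On the CLI a literal comma inside a single value is escaped with a backslash;
# in the Rancher answers form you wrap the value in double quotes instead.
helm upgrade --install my-mcapp example-repo/example-chart \
  --set replicaCount=3 \
  --set allowedOrigins="abc\,bcd"
```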
- -### Overriding Application Configuration Options for Specific Projects - -The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. - -1. In the **Answer Overrides** section, click **Add Override**. - -2. For each override, you can select the following: - - - **Scope**: Select which target projects you want to override the answer in the configuration option. - - - **Question**: Select which question you want to override. - - - **Answer**: Enter the answer that you want to be used instead. - -# Upgrading Multi-Cluster App Roles and Projects - -- **Changing Roles on an existing Multi-Cluster app** -The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. - -- **Adding/Removing target projects** -1. The creator and any users added with access-type "owner" to a multi-cluster app, can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on multi-cluster app, in the new projects they want to add. The roles checks are again relaxed for global admins, cluster-owners and project-owners. -2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from targets list. - - -# Multi-Cluster Application Management - -One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options: - - * **Clone**: Creates another multi-cluster application with the same configuration. By using this option, you can easily duplicate a multi-cluster application. - * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members). - * **Rollback**: Rollback your application to a specific version. If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the targets(s) affected by the upgrade issue. - -# Deleting a Multi-Cluster Application - -1. From the **Global** view, choose **Apps** in the navigation bar. - -2. 
Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. - - > **Note:** The applications in the target projects, that are created for a multi-cluster application, cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/content/rancher/v2.5/en/faq/_index.md b/content/rancher/v2.5/en/faq/_index.md deleted file mode 100644 index 9e0ab2b648..0000000000 --- a/content/rancher/v2.5/en/faq/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: FAQ -weight: 25 -aliases: - - /rancher/v2.5/en/about/ - - /rancher/v2.x/en/faq/ ---- - -This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. - -See [Technical FAQ]({{}}/rancher/v2.5/en/faq/technical/), for frequently asked technical questions. - -
- -**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** - -When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. - -
 - -**Is it possible to manage Azure Kubernetes Service (AKS) with Rancher v2.x?** - -Yes.
- -**Does Rancher support Windows?** - -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/) - -
 - -**Does Rancher support Istio?** - -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.5/en/istio/) - -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/).
- -**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** - -There is no built-in integration of Rancher and Hashicorp's Vault. Rancher manages Kubernetes and integrates with secrets via the Kubernetes API. Thus in any downstream (managed) cluster, you can use a secret vault of your choice provided it integrates with Kubernetes, including [Vault](https://www.vaultproject.io/docs/platform/k8s). - -
- -**Does Rancher v2.x support RKT containers as well?** - -At this time, we only support Docker. - -
- -**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and registered Kubernetes?** - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. - -
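If you are unsure which of these providers a given cluster is running, a quick, non-authoritative check is to look for the provider's pods in `kube-system` (pod names and labels vary by provider and version):

```
# List kube-system pods and filter for the common CNI provider names.
kubectl -n kube-system get pods -o wide | grep -Ei 'canal|calico|flannel|weave'
```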
- -**Are you planning on supporting Traefik for existing setups?** - -We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. - -
 - -**Can I import OpenShift Kubernetes clusters into v2.x?** - -Our goal is to run any upstream Kubernetes cluster. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet.
- -**Are you going to integrate Longhorn?** - -Yes. Longhorn was integrated into Rancher v2.5+. diff --git a/content/rancher/v2.5/en/faq/networking/_index.md b/content/rancher/v2.5/en/faq/networking/_index.md deleted file mode 100644 index 092cb4d1b1..0000000000 --- a/content/rancher/v2.5/en/faq/networking/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Networking -weight: 8005 -aliases: - - /rancher/v2.x/en/faq/networking/ ---- - -Networking FAQ's - -- [CNI Providers]({{}}/rancher/v2.5/en/faq/networking/cni-providers/) - diff --git a/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md deleted file mode 100644 index 6e19619b22..0000000000 --- a/content/rancher/v2.5/en/faq/networking/cni-providers/_index.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Container Network Interface (CNI) Providers -description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you -weight: 2300 -aliases: - - /rancher/v2.x/en/faq/networking/cni-providers/ ---- - -## What is CNI? - -CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. - -Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. - -![CNI Logo]({{}}/img/rancher/cni-logo.png) - -For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). - -### What Network Models are Used in CNI? - -CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). - -#### What is an Encapsulated Network? - -This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. - -In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. - -This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. 
- -CNI network providers using this network model include Flannel, Canal, and Weave. - -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) - -#### What is an Unencapsulated Network? - -This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). - -In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. - -This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. - -CNI network providers using this network model include Calico and Romana. - -![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) - -### What CNI Providers are Provided by Rancher? - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Canal - -![Canal Logo]({{}}/img/rancher/canal-logo.png) - -Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. - -In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/) - -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} - -For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) - -#### Flannel - -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) - -Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). - -Encapsulated traffic is unencrypted by default. 
Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) - -For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). - -#### Calico - -![Calico Logo]({{}}/img/rancher/calico-logo.png) - -Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. - -Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. - -Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) - -For more information, see the following pages: - -- [Project Calico Official Site](https://www.projectcalico.org/) -- [Project Calico GitHub Page](https://github.com/projectcalico/calico) - - -#### Weave - -![Weave Logo]({{}}/img/rancher/weave-logo.png) - -Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. - -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -For more information, see the following pages: - -- [Weave Net Official Site](https://www.weave.works/) - -### CNI Features by Provider - -The following table summarizes the different features available for each CNI network provider provided by Rancher. - -| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | -| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | -| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | -| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | -| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | - -- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) - -- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. 
This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. - -- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. - -- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. - -- External Datastore: CNI network providers with this feature need an external datastore for its data. - -- Encryption: This feature allows cyphered and secure network control and data planes. - -- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. - -#### CNI Community Popularity - -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. - -| Provider | Project | Stars | Forks | Contributors | -| ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | -| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | -| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | -| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | - -
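Whichever provider you choose, the node-to-node ports listed in the provider sections above must be reachable. The following is a rough connectivity sketch using `nc`; `<other-node-ip>` is a placeholder for another worker's address, flag syntax differs between netcat implementations, and UDP probes with `nc` are indicative only:

```
# Canal / Flannel
nc -zvu <other-node-ip> 8472   # UDP 8472, VXLAN overlay between workers
nc -zv  127.0.0.1 9099         # TCP 9099, healthcheck (checked locally on each node)

# Calico (BGP)
nc -zv  <other-node-ip> 179    # TCP 179

# Weave
nc -zv  <other-node-ip> 6783   # TCP 6783, control
nc -zvu <other-node-ip> 6783   # UDP 6783, data
nc -zvu <other-node-ip> 6784   # UDP 6784, data
```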
- -### Which CNI Provider Should I Use? - -It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. - -Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. - -### How can I configure a CNI network provider? - -Please see [Cluster Options]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.5/en/faq/removing-rancher/_index.md b/content/rancher/v2.5/en/faq/removing-rancher/_index.md deleted file mode 100644 index e05744e740..0000000000 --- a/content/rancher/v2.5/en/faq/removing-rancher/_index.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Rancher is No Longer Needed -weight: 8010 -aliases: - - /rancher/v2.5/en/installation/removing-rancher/cleaning-cluster-nodes/ - - /rancher/v2.5/en/installation/removing-rancher/ - - /rancher/v2.5/en/admin-settings/removing-rancher/ - - /rancher/v2.5/en/admin-settings/removing-rancher/rancher-cluster-nodes/ - - /rancher/v2.x/en/faq/removing-rancher/ ---- - -This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. - -- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) -- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) -- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) -- [What if I don't want my registered cluster managed by Rancher?](#what-if-i-don-t-want-my-registered-cluster-managed-by-rancher) -- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) - -### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? - -If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. - -### If the Rancher server is deleted, how do I access my downstream clusters? - -The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: - -- **Registered clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. -- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. 
-- **RKE clusters:** Please note that you will no longer be able to manage the individual Kubernetes components or perform any upgrades on them after the deletion of the Rancher server. However, you can still access the cluster to manage your workloads. To access an [RKE cluster,]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) the cluster must have the [authorized cluster endpoint]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.]({{}}/rancher/v2.5/en/overview/architecture/#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. - -### What if I don't want Rancher anymore? - -If you [installed Rancher on a Kubernetes cluster,]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) remove Rancher by using the [System Tools]({{}}/rancher/v2.5/en/system-tools/) with the `remove` subcommand. - -As of Rancher v2.5.8, uninstalling Rancher in high-availability (HA) mode will also remove all `helm-operation-*` pods and the following apps: - -- fleet -- fleet-agent -- rancher-operator -- rancher-webhook - -Custom resources (CRDs) and custom namespaces will still need to be manually removed. - -If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. - -Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) - -### What if I don't want my registered cluster managed by Rancher? - -If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. - -To detach the cluster, - -1. From the **Global** view in Rancher, go to the **Clusters** tab. -2. Go to the registered cluster that should be detached from Rancher and click **⋮ > Delete.** -3. Click **Delete.** - -**Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. - -### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? - -At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. 
- -The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) - -For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/content/rancher/v2.5/en/faq/security/_index.md b/content/rancher/v2.5/en/faq/security/_index.md deleted file mode 100644 index c0162f779a..0000000000 --- a/content/rancher/v2.5/en/faq/security/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Security -weight: 8007 -aliases: - - /rancher/v2.x/en/faq/security/ ---- - -**Is there a Hardening Guide?** - -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.5/en/security/) section. - -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** - -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.5/en/security/) section. diff --git a/content/rancher/v2.5/en/faq/technical/_index.md b/content/rancher/v2.5/en/faq/technical/_index.md deleted file mode 100644 index 9ea91ea6d9..0000000000 --- a/content/rancher/v2.5/en/faq/technical/_index.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Technical -weight: 8006 -aliases: - - /rancher/v2.x/en/faq/technical/ ---- - -### How can I reset the administrator password? - -Docker Install: -``` -$ docker exec -ti reset-password -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password -New password for default administrator (user-xxxxx): - -``` - - - -### I deleted/deactivated the last admin, how can I fix it? -Docker Install: -``` -$ docker exec -ti ensure-default-admin -New default administrator (user-xxxxx) -New password for default administrator (user-xxxxx): - -``` - -Kubernetes install (Helm): -``` -$ KUBECONFIG=./kube_config_cluster.yml -$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin -New password for default administrator (user-xxxxx): - -``` -### How can I enable debug logging? - -See [Troubleshooting: Logging]({{}}/rancher/v2.5/en/troubleshooting/logging/) - -### My ClusterIP does not respond to ping - -ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. - -### Where can I manage Node Templates? - -Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. - -### Why is my Layer-4 Load Balancer in `Pending` state? - -The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) - -### Where is the state of Rancher stored? - -- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. -- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. - -### How are the supported Docker versions determined? - -We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. - -### How can I access nodes created by Rancher? - -SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. 
Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. - -![Download Keys]({{}}/img/rancher/downloadsshkeys.png) - -Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) - -``` -$ ssh -i id_rsa user@ip_of_node -``` - -### How can I automate task X in Rancher? - -The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: - -* Visit `https://your_rancher_ip/v3` and browse the API options. -* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) - -### The IP address of a node changed, how can I recover? - -A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. - -When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes]({{}}/rancher/v2.5/en/faq/cleaning-cluster-nodes/) to clean the node. - -When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. - -### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? - -You can add additional arguments/binds/environment variables via the [Config File]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls]({{}}/rke/latest/en/example-yamls/). - -### How do I check if my certificate chain is valid? - -Use the `openssl verify` command to validate your certificate chain: - ->**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. If you already have this certificate, you can use it in the verification of the certificate like shown below: - -``` -SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem -rancher.yourdomain.com.pem: OK -``` - -If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). 
The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). - -``` ------BEGIN CERTIFICATE----- -%YOUR_CERTIFICATE% ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -%YOUR_INTERMEDIATE_CERTIFICATE% ------END CERTIFICATE----- -``` - -If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: - -``` -openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem -subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com -issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA -``` - -### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? - -Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. - -Check `Common Name`: - -``` -openssl x509 -noout -subject -in cert.pem -subject= /CN=rancher.my.org -``` - -Check `Subject Alternative Names`: - -``` -openssl x509 -noout -in cert.pem -text | grep DNS - DNS:rancher.my.org -``` - -### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? - -This is due to a combination of the following default Kubernetes settings: - -* kubelet - * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) -* kube-controller-manager - * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) - * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) - * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) - -See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. - -In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. - -* kube-apiserver (Kubernetes v1.13 and up) - * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. - * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. - -### Can I use keyboard shortcuts in the UI? - -Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. - - -### What does `Unknown schema for type:` errors followed by something like `catalog.cattle.io.operation` mean when trying to modify an App? - -This error occurs when Kubernetes can not find the CRD mentioned. The vast majority of the time these are a result of missing RBAC permissions. 
Try with an admin user and if this works, add permissions for the resource mentioned by the error (ie. `Get`, `List`, `Patch` as needed). diff --git a/content/rancher/v2.5/en/helm-charts/_index.md b/content/rancher/v2.5/en/helm-charts/_index.md deleted file mode 100644 index b74682a0c6..0000000000 --- a/content/rancher/v2.5/en/helm-charts/_index.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Helm Charts in Rancher -weight: 11 -aliases: - - /rancher/v2.x/en/helm-charts/apps-marketplace - - /rancher/v2.5/en/catalog/ - - /rancher/v2.5/en/catalog/apps - - /rancher/v2.5/en/catalog/launching-apps - - /rancher/v2.x/en/helm-charts/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/ - - /rancher/v2.x/en/helm-charts/apps-marketplace/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/ - - /rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/ ---- - -In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. - -### Changes in Rancher v2.5 - -In Rancher v2.5, the Apps and Marketplace feature replaced the catalog system. - -In the cluster manager, Rancher uses a catalog system to import bundles of charts and then uses those charts to either deploy custom helm applications or Rancher's tools such as Monitoring or Istio. The catalog system is still available in the cluster manager in Rancher v2.5, but it is deprecated. - -Now in the Cluster Explorer, Rancher uses a similar but simplified version of the same system. Repositories can be added in the same way that catalogs were, but are specific to the current cluster. Rancher tools come as pre-loaded repositories which deploy as standalone helm charts. - -### Charts - -From the top-left menu select _"Apps & Marketplace"_ and you will be taken to the Charts page. - -The charts page contains all Rancher, Partner, and Custom Charts. - -* Rancher tools such as Logging or Monitoring are included under the Rancher label -* Partner charts reside under the Partners label -* Custom charts will show up under the name of the repository - -All three types are deployed and managed in the same way. - -> Apps managed by the Cluster Manager should continue to be managed only by the Cluster Manager, and apps managed with the Cluster Explorer must be managed only by the Cluster Explorer. - -### Repositories - -From the left sidebar select _"Repositories"_. - -These items represent helm repositories, and can be either traditional helm endpoints which have an index.yaml, or git repositories which will be cloned and can point to a specific branch. In order to use custom charts, simply add your repository here and they will become available in the Charts tab under the name of the repository. - -To add a private CA for Helm Chart repositories: - -- **HTTP-based chart repositories**: You must add a base64 encoded copy of the CA certificate in DER format to the spec.caBundle field of the chart repo, such as `openssl x509 -outform der -in ca.pem | base64 -w0`. Click **Edit YAML** for the chart repo and set, as in the following example:
- ``` - [...] - spec: - caBundle: - MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT - ... - nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4= - [...] - ``` - -- **Git-based chart repositories**: It is not currently possible to add a private CA. For git-based chart repositories with a certificate signed by a private CA, you must disable TLS verification. Click **Edit YAML** for the chart repo, and add the key/value pair as follows: - ``` - [...] - spec: - insecureSkipTLSVerify: true - [...] - ``` - -> **Note:** Helm chart repositories with authentication -> -> As of Rancher v2.5.12, a new value `disableSameOriginCheck` has been added to the Repo.Spec. This allows users to bypass the same origin checks, sending the repository Authentication information as a Basic Auth Header with all API calls. This is not recommended but can be used as a temporary solution in cases of non-standard Helm chart repositories such as those that have redirects to a different origin URL. -> -> To use this feature for an existing Helm chart repository, click ⋮ > Edit YAML. On the `spec` portion of the YAML file, add `disableSameOriginCheck` and set it to `true`. -> -> ```yaml -[...] -spec: - disableSameOriginCheck: true -[...] -``` - -### Helm Compatibility - -The Cluster Explorer only supports Helm 3 compatible charts. - - -### Deployment and Upgrades - -From the _"Charts"_ tab select a Chart to install. Rancher and Partner charts may have extra configurations available through custom pages or questions.yaml files, but all chart installations can modify the values.yaml and other basic settings. Once you click install, a Helm operation job is deployed, and the console for the job is displayed. - -To view all recent changes, go to the _"Recent Operations"_ tab. From there you can view the call that was made, conditions, events, and logs. - -After installing a chart, you can find it in the _"Installed Apps"_ tab. In this section you can upgrade or delete the installation, and see further details. When choosing to upgrade, the form and values presented will be the same as installation. - -Most Rancher tools have additional pages located in the toolbar below the _"Apps & Marketplace"_ section to help manage and use the features. These pages include links to dashboards, forms to easily add Custom Resources, and additional information. - -> If you are upgrading your chart using _"Customize Helm options before upgrade"_ , please be aware that using the _"--force"_ option may result in errors if your chart has immutable fields. This is because some objects in Kubernetes cannot be changed once they are created. 
To ensure you do not get this error you can: - * use the default upgrade option ( i.e do not use _"--force"_ option ) - * uninstall the existing chart and install the upgraded chart - * delete the resources with immutable fields from the cluster before performing the _"--force"_ upgrade diff --git a/content/rancher/v2.5/en/installation/_index.md b/content/rancher/v2.5/en/installation/_index.md deleted file mode 100644 index 9f49f7a1b7..0000000000 --- a/content/rancher/v2.5/en/installation/_index.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: Installing/Upgrading Rancher -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 3 -aliases: - - /rancher/v2.5/en/installation/how-ha-works/ - - /rancher/v2.x/en/installation/ ---- - -This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. - -# Terminology - -In this section, - -- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. -- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. -- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. -- **RKE2** is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. -- **RancherD** was an experimental tool for installing Rancher; a single binary that first launched an RKE2 Kubernetes cluster, then installed the Rancher server Helm chart on the cluster. It was available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -# Changes to Installation in Rancher v2.5 - -In Rancher v2.5, the Rancher management server can be installed on any Kubernetes cluster, including hosted clusters, such as Amazon EKS clusters. - -For Docker installations, a local Kubernetes cluster is installed in the single Docker container, and Rancher is installed on the local cluster. - -The `restrictedAdmin` Helm chart option was added. When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#restricted-admin) - -# Overview of Installation Options - -Rancher can be installed on these main architectures: - -### High-availability Kubernetes Install with the Helm CLI - -We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. - -### High-availability Kubernetes Install with RancherD - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. - -In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster. 
- -Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded. - -### Automated Quickstart to Deploy Rancher on Amazon EKS - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS Kubernetes cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) - -### Single-node Kubernetes Install - -Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. - -However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. - -### Docker Install - -For test and demonstration purposes, Rancher can be installed with Docker on a single node. - -The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -### Other Options - -There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: - -| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | -| ---------------------------------- | ------------------------------ | ---------- | -| With direct access to the Internet | [Docs]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) | [Docs]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) | -| Behind an HTTP proxy | [Docs]({{}}/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/) | These [docs,]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) plus this [configuration]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/) | -| In an air gap environment | [Docs]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) | [Docs]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) | - -We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. - -For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. - -For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. - -Our [instructions for installing Rancher on Kubernetes]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. 
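As a rough sketch of that two-step flow (assumptions: the K3s quick-install script, the `rancher-latest` chart repository, and `rancher.my.org` as a placeholder hostname; the linked installation pages remain the authoritative reference):

```
# 1. Create a Kubernetes cluster. K3s is shown here; RKE is the other common choice.
curl -sfL https://get.k3s.io | sh -

# 2. Install Rancher onto that cluster as a Helm chart.
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
kubectl create namespace cattle-system
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org    # DNS name that resolves to your load balancer
```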
- -When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,]({{}}/rancher/v2.5/en/installation/requirements) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. - -For a longer discussion of Rancher architecture, refer to the [architecture overview,]({{}}/rancher/v2.5/en/overview/architecture) [recommendations for production-grade architecture,]({{}}/rancher/v2.5/en/overview/architecture-recommendations) or our [best practices guide.]({{}}/rancher/v2.5/en/best-practices/deployment-types) - -# Prerequisites -Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.]({{}}/rancher/v2.5/en/installation/requirements/) - -# Architecture Tip - -For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.5/en/cluster-provisioning/) for running your workloads. - -For more architecture recommendations, refer to [this page.]({{}}/rancher/v2.5/en/overview/architecture-recommendations) - -### More Options for Installations on a Kubernetes Cluster - -Refer to the [Helm chart options]({{}}/rancher/v2.5/en/installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) -- With [TLS termination on a load balancer]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) -- With a [custom Ingress]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#customizing-your-ingress) - -In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. 
For the full list of their capabilities, refer to their documentation: - -- [RKE configuration options]({{}}/rke/latest/en/config-options/) -- [K3s configuration options]({{}}/k3s/latest/en/installation/install-options/) - -### More Options for Installations with Docker - -Refer to the [docs about options for Docker installs]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) for details about other configurations including: - -- With [API auditing to record all transactions]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) -- With an [external load balancer]({{}}/rancher/v2.5/en/installation/options/single-node-install-external-lb/) -- With a [persistent data store]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#persistent-data) diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md deleted file mode 100644 index b0e02303a7..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/_index.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: Install/Upgrade Rancher on a Kubernetes Cluster -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 2 -aliases: - - /rancher/v2.5/en/installation/k8s-install/ - - /rancher/v2.5/en/installation/k8s-install/helm-rancher - - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke - - /rancher/v2.5/en/installation/ha-server-install - - /rancher/v2.5/en/installation/install-rancher-on-k8s/install - - /rancher/v2.x/en/installation/install-rancher-on-k8s/ ---- - -In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. - -- [Prerequisites](#prerequisites) -- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) - -# Prerequisites - -- [Kubernetes Cluster](#kubernetes-cluster) -- [CLI Tools](#cli-tools) -- [Ingress Controller (Only for Hosted Kubernetes)](#ingress-controller-for-hosted-kubernetes) - -### Kubernetes Cluster - -Set up the Rancher server's local Kubernetes cluster. - -Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. 
- -For help setting up a Kubernetes cluster, we provide these tutorials: - -- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) -- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) -- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) -- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks) -- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/aks) -- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/gke) - -### CLI Tools - -The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.5/en/installation/options/helm-version) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. - -### Ingress Controller (For Hosted Kubernetes) - -To deploy Rancher v2.5 on a hosted Kubernetes cluster such as EKS, GKE, or AKS, you should deploy a compatible Ingress controller first to configure [SSL termination on Rancher.](#3-choose-your-ssl-configuration) - -For an example of how to deploy an ingress on EKS, refer to [this section.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/#5-install-an-ingress) - -# Install the Rancher Helm Chart - -Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. - -With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. 
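As a small illustration of that configurability (a sketch only, not part of the official steps; it assumes the `rancher-latest` repository alias added in step 1 below and a placeholder hostname), you can inspect the chart's configurable values and render its manifests locally before installing anything:

```
# List the values that can be overridden at install time (see the Chart Options reference)
helm show values rancher-latest/rancher

# Render the templated manifests locally with a custom hostname, without installing
helm template rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org
```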
- -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.5/en/installation/air-gap-installation/install-rancher/). - -To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.5/en/installation/options/server-tags) - -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.5/en/installation/options/helm-version) - -> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section]({{}}/rancher/v2.5/en/installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -To set up Rancher: - -1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) -2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) -3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) -4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) -5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) -6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) -7. [Save your options](#7-save-your-options) - -### 1. Add the Helm Chart Repository - -Use the `helm repo add` command to add the Helm chart repository that contains the charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### 2. Create a Namespace for Rancher - -We'll need to define a Kubernetes namespace where the resources created by the chart should be installed. This should always be `cattle-system`: - -``` -kubectl create namespace cattle-system -``` - -### 3. Choose your SSL Configuration - -The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. - -> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: - -- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. -- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert.
This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. -- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. - - -| Configuration | Helm Chart Option | Requires cert-manager | -| ------------------------------ | ----------------------- | ------------------------------------- | -| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#5-install-cert-manager) | -| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#5-install-cert-manager) | -| Certificates from Files | `ingress.tls.source=secret` | no | - -### 4. Install cert-manager - -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). - -{{% accordion id="cert-manager" label="Click to Expand" %}} - -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager/). - -These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). - -``` -# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart: -kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml - -# Add the Jetstack Helm repository -helm repo add jetstack https://charts.jetstack.io - -# Update your local Helm chart repository cache -helm repo update - -# Install the cert-manager Helm chart -helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --version v1.5.1 -``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -{{% /accordion %}} - -### 5. Install Rancher with Helm and Your Chosen Certificate Option - -The exact command to install Rancher differs depending on the certificate configuration. 
- -However, irrespective of the certificate configuration, the name of the Rancher installation in the `cattle-system` namespace should always be `rancher`. - -{{% tabs %}} -{{% tab "Rancher-generated Certificates" %}} - - -The default is for Rancher to generate a self-signed CA, and uses `cert-manager` to issue the certificate for access to the Rancher server interface. - -Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set `hostname` to the DNS record that resolves to your load balancer. -- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set replicas=3 -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Let's Encrypt" %}} - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. - ->**Note:** You need to have port 80 open as the HTTP-01 challenge can only be done on port 80. - -In the following command, - -- Set `hostname` to the public DNS record that resolves to your load balancer. -- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. -- Set `ingress.tls.source` to `letsEncrypt`. -- Set `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices). -- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set replicas=3 \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org \ - --set letsEncrypt.ingress.class=nginx -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Certificates from Files" %}} -In this option, Kubernetes secrets are created from your own certificates for Rancher to use. - -When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate, or the Ingress controller will fail to configure correctly. 
- -Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. - -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.5/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set `hostname` as appropriate for your certificate, as described above. -- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. -- Set `ingress.tls.source` to `secret`. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set replicas=3 \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. -{{% /tab %}} -{{% /tabs %}} - -The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. - -- [HTTP Proxy]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) -- [Private Docker Image Registry]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/) for the full list of options. - - -### 6. Verify that the Rancher Server is Successfully Deployed - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### 7. Save Your Options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it. You should have a functional Rancher server. - -In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. - -Doesn't work? 
Take a look at the [Troubleshooting]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) page. - - -### Optional Next Steps - -Enable the Enterprise Cluster Manager. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/aks/_index.md deleted file mode 100644 index eb273d4348..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/aks/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Installing Rancher on Azure Kubernetes Service -shortTitle: AKS -weight: 4 ---- - -This page covers how to install Rancher on Microsoft's Azure Kubernetes Service (AKS). - -The guide uses command line tools to provision an AKS cluster with an ingress. If you prefer to provision your cluster using the Azure portal, refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal). - -If you already have an AKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -# Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. -- [Microsoft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow the instructions to create a Microsoft Azure tenant. -- Your subscription must have sufficient quota for at least 2 vCPUs. For details on Rancher server resource requirements, refer to [this section.]({{}}/rancher/v2.5/en/installation/requirements/#rke-and-hosted-kubernetes) -- When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). - -# 1. Prepare your Workstation - -Install the following command line tools on your workstation: - -- The Azure CLI, **az:** For help, refer to these [installation steps.](https://docs.microsoft.com/en-us/cli/azure/) -- **kubectl:** For help, refer to these [installation steps.](https://kubernetes.io/docs/tasks/tools/#kubectl) -- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) - -# 2. Create a Resource Group - -After installing the CLI, you will need to log in with your Azure account. - -``` -az login -``` - -Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) to hold all relevant resources for your cluster.
Use a location that applies to your use case. - -``` -az group create --name rancher-rg --location eastus -``` - -# 3. Create the AKS Cluster - -To create an AKS cluster, run the following command. Use a VM size that applies to your use case. Refer to [this article](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for available sizes and options. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. - -``` -az aks create \ - --resource-group rancher-rg \ - --name rancher-server \ - --kubernetes-version 1.20.5 \ - --node-count 3 \ - --node-vm-size Standard_D2_v3 -``` - -The cluster will take some time to be deployed. - -# 4. Get Access Credentials - -After the cluster is deployed, get the access credentials. - -``` -az aks get-credentials --resource-group rancher-rg --name rancher-server -``` - -This command merges your cluster's credentials into the existing kubeconfig and allows `kubectl` to interact with the cluster. - -# 5. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. Installing an Ingress requires allocating a public IP address. Ensure you have sufficient quota, otherwise it will fail to assign the IP address. Limits for public IP addresses are applicable at a regional level per subscription. - -The following command installs an `nginx-ingress-controller` with a Kubernetes load balancer service. - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -# 6. Get Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) - AGE -ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP - 67s -``` - -Save the `EXTERNAL-IP`. - -# 7. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the `EXTERNAL-IP` that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the [Azure DNS documentation](https://docs.microsoft.com/en-us/azure/dns/) - -# 8. Install the Rancher Helm Chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. 
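Putting that last step together for this AKS walkthrough, here is a minimal sketch; it assumes the `latest` chart repository, that cert-manager has already been installed as described in the generic instructions, and that `rancher.my.org` stands in for the DNS name created in step 7:

```
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
kubectl create namespace cattle-system
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set replicas=3
```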
diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md deleted file mode 100644 index 7bbcf58695..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/_index.md +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: Installing Rancher on Amazon EKS -shortTitle: Amazon EKS -weight: 4 -aliases: - - /rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/ ---- - -This page covers two ways to install Rancher on EKS. - -The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. - -The second is a guide for installing an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) -- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) - -# Automated Quickstart using AWS Best Practices - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) - -The quick start guide provides three options for deploying Rancher on EKS: - -- **Deploy Rancher into a new VPC and new Amazon EKS cluster.** This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. -- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. -- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. - -Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: - -- A highly available architecture that spans three Availability Zones.* -- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* -- In the public subnets: - - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* - - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* -- In the private subnets: - - Kubernetes nodes in an Auto Scaling group.* - - A Network Load Balancer (not shown) for accessing the Rancher console. -- Rancher deployment using AWS Systems Manager automation. -- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* -- An Amazon Route 53 DNS record for accessing the Rancher deployment. 
- -\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. - -# Creating an EKS Cluster for the Rancher Server - -In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -> **Prerequisites:** -> -> - You should already have an AWS account. -> - It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. -> - The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) - -### 1. Prepare your Workstation - -Install the following command line tools on your workstation: - -- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) -- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) - -### 2. Configure the AWS CLI - -To configure the AWS CLI, run the following command: - -``` -aws configure -``` - -Then enter the following values: - -| Value | Description | -|-------|-------------| -| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | -| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | -| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | -| Default output format | Enter `json`. | - -### 3. Create the EKS Cluster - -To create an EKS cluster, run the following command. Use the AWS region that applies to your use case. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. - -``` -eksctl create cluster \ - --name rancher-server \ - --version 1.20 \ - --region us-west-2 \ - --nodegroup-name ranchernodes \ - --nodes 3 \ - --nodes-min 1 \ - --nodes-max 4 \ - --managed -``` - -The cluster will take some time to be deployed with CloudFormation. - -### 4. Test the Cluster - -To test the cluster, run: - -``` -eksctl get cluster -``` - -The result should look like the following: - -``` -eksctl get cluster -2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 -2021-03-18 15:09:35 [ℹ] using region us-west-2 -NAME REGION EKSCTL CREATED -rancher-server-cluster us-west-2 True -``` - -### 5. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. - -The following command installs an `nginx-ingress-controller` with a LoadBalancer service. 
This will result in an ELB (Elastic Load Balancer) in front of NGINX: - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -### 6. Get Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) - AGE -ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP - 27m -``` - -Save the `EXTERNAL-IP`. - -### 7. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) - -### 8. Install the Rancher Helm Chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md deleted file mode 100644 index e1572af146..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/_index.md +++ /dev/null @@ -1,263 +0,0 @@ ---- -title: Rancher Helm Chart Options -weight: 1 -aliases: - - /rancher/v2.5/en/installation/options/ - - /rancher/v2.5/en/installation/options/chart-options/ - - /rancher/v2.5/en/installation/options/helm2/helm-rancher/chart-options/ - - /rancher/v2.5/en/installation/resources/chart-options - - /rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/ ---- - -This page is a configuration reference for the Rancher Helm chart. 
- -For help choosing a Helm chart version, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/choosing-version/) - -For information on enabling experimental features, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/feature-flags/) - -- [Common Options](#common-options) -- [Advanced Options](#advanced-options) -- [API Audit Log](#api-audit-log) -- [Setting Extra Environment Variables](#setting-extra-environment-variables) -- [TLS Settings](#tls-settings) -- [Customizing your Ingress](#customizing-your-ingress) -- [HTTP Proxy](#http-proxy) -- [Additional Trusted CAs](#additional-trusted-cas) -- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) -- [External TLS Termination](#external-tls-termination) - -### Common Options - -| Option | Default Value | Description | -| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | -| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | -| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | -| `letsEncrypt.email` | " " | `string` - Your email address | -| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | -| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | - -
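As an illustration of how these options are passed, here is a sketch only (it assumes the `rancher-latest` repository alias and a placeholder hostname): each key in the tables on this page maps directly to a `--set` flag, or to an entry in a values file, on the Helm command line.

```
helm upgrade --install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set ingress.tls.source=letsEncrypt \
  --set letsEncrypt.email=me@example.org
```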
- -### Advanced Options - -| Option | Default Value | Description | -| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | -| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. Consider using the `restrictedAdmin` option to prevent users from modifying the local cluster._ | -| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | -| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | -| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.level` | 0 | `int` - set the [API Audit Log]({{}}/rancher/v2.5/en/installation/api-auditing) level. 0 is off. [0-3] | -| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | -| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | -| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs | -| `certmanager.version` | "" | `string` - set cert-manager compatibility | -| `debug` | false | `bool` - set debug flag on rancher server | -| `extraEnv` | [] | `list` - set additional environment variables for Rancher | -| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | -| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. | -| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | -| `ingress.enabled` | true | When set to false, Helm will not install a Rancher ingress. Set the option to false to deploy your own ingress. _Available as of v2.5.6_ | -| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. 
| | -| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma separated list of hostnames or ip address not to use the proxy | | -| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | -| `rancherImage` | "rancher/rancher" | `string` - rancher image source | -| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | -| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | -| `replicas` | 3 | `int` - Number of replicas of Rancher pods | -| `resources` | {} | `map` - rancher pod resource requests & limits | -| `restrictedAdmin` | `false` | _Available in Rancher v2.5_ `bool` - When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#restricted-admin) | -| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ | -| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | -| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | - - - -### API Audit Log - -Enabling the [API Audit Log]({{}}/rancher/v2.5/en/installation/api-auditing/). - -You can collect this log as you would any container log. Enable [logging]({{}}/rancher/v2.5/en/logging) for the `System` Project on the Rancher server cluster. - -```plain ---set auditLog.level=1 -``` - -By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable [logging]({{}}/rancher/v2.5/en/logging) for the Rancher server cluster or System Project. - -Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. - -### Setting Extra Environment Variables - -You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. - -```plain ---set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' ---set 'extraEnv[0].value=1.0' -``` - -### TLS Settings - -When you install Rancher inside of a Kubernetes cluster, TLS is offloaded at the cluster's ingress controller. The possible TLS settings depend on the used ingress controller. - -See [TLS settings]({{}}/rancher/v2.5/en/installation/resources/tls-settings) for more information and options. - -### Import `local` Cluster - -By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. 
- -> **Important:** If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. - -If this is a concern in your environment you can set this option to "false" on your initial install. - -This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. - -```plain ---set addLocal="false" -``` - -### Customizing your Ingress - -To customize or use a different ingress with Rancher server you can set your own Ingress annotations. - -Example on setting a custom certificate issuer: - -```plain ---set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name -``` - -Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. - -```plain ---set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' -``` - -### HTTP Proxy - -Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. - -Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. - -```plain ---set proxy="http://:@:/" ---set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" -``` - -### Additional Trusted CAs - -If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. - -```plain ---set additionalTrustedCAs=true -``` - -Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. - -```plain -kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem -``` - -### Private Registry and Air Gap Installs - -For details on installing Rancher with a private registry, see: - -- [Air Gap: Docker Install]({{}}/rancher/v2.5/en/installation/air-gap-single-node/) -- [Air Gap: Kubernetes Install]({{}}/rancher/v2.5/en/installation/air-gap-high-availability/) - -# External TLS Termination - -We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. - -You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. - -> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. 
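As a rough sketch of how these options fit together (not an authoritative command — the chart repository name and hostname below are placeholders you would replace with your own values), an external-TLS-termination install might pass the flags like this:

```plain
# Sketch only: offload TLS on an external L7 load balancer.
# <CHART_REPO> and rancher.example.com are placeholders.
helm upgrade --install rancher rancher-<CHART_REPO>/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set tls=external \
  --set privateCA=true   # only when the certificate is signed by a private CA
```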
- -Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. - -### Configuring Ingress for External TLS when Using NGINX v0.25 - -In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: - -```yaml -ingress: - provider: nginx - options: - use-forwarded-headers: 'true' -``` - -### Required Headers - -- `Host` -- `X-Forwarded-Proto` -- `X-Forwarded-Port` -- `X-Forwarded-For` - -### Recommended Timeouts - -- Read Timeout: `1800 seconds` -- Write Timeout: `1800 seconds` -- Connect Timeout: `30 seconds` - -### Health Checks - -Rancher will respond `200` to health checks on the `/healthz` endpoint. - -### Example NGINX config - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server IP_NODE_1:80; - server IP_NODE_2:80; - server IP_NODE_3:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/gke/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/gke/_index.md deleted file mode 100644 index 0bb0b01f39..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/gke/_index.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Installing Rancher on a Google Kubernetes Engine Cluster -shortTitle: GKE -weight: 5 ---- - -In this section, you'll learn how to install Rancher using Google Kubernetes Engine. 
- -If you already have a GKE Kubernetes cluster, skip to the step about [installing an ingress.](#7-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -# Prerequisites - -- You will need a Google account. -- You will need a Google Cloud billing account. You can manage your Cloud Billing accounts using the Google Cloud Console. For more information about the Cloud Console, visit [General guide to the console.](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599) -- You will need a cloud quota for at least one in-use IP address and at least 2 CPUs. For more details about hardware requirements for the Rancher server, refer to [this section.]({{}}/rancher/v2.5/en/installation/requirements/#rke-and-hosted-kubernetes) - -# 1. Enable the Kubernetes Engine API - -Take the following steps to enable the Kubernetes Engine API: - -1. Visit the [Kubernetes Engine page](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886) in the Google Cloud Console. -1. Create or select a project. -1. Open the project and enable the Kubernetes Engine API for the project. Wait for the API and related services to be enabled. This can take several minutes. -1. Make sure that billing is enabled for your Cloud project. For information on how to enable billing for your project, refer to the [Google Cloud documentation.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project) - -# 2. Open the Cloud Shell - -Cloud Shell is a shell environment for managing resources hosted on Google Cloud. Cloud Shell comes preinstalled with the `gcloud` command-line tool and kubectl command-line tool. The `gcloud` tool provides the primary command-line interface for Google Cloud, and `kubectl` provides the primary command-line interface for running commands against Kubernetes clusters. - -The following sections describe how to launch the cloud shell from the Google Cloud Console or from your local workstation. - -### Cloud Shell - -To launch the shell from the [Google Cloud Console,](https://console.cloud.google.com) go to the upper-right corner of the console and click the terminal button. When hovering over the button, it is labeled **Activate Cloud Shell.** - -### Local Shell - -To install `gcloud` and `kubectl`, perform the following steps: - -1. Install the Cloud SDK by following [these steps.](https://cloud.google.com/sdk/docs/install) The Cloud SDK includes the `gcloud` command-line tool. The steps vary based on your OS. -1. After installing Cloud SDK, install the `kubectl` command-line tool by running the following command: - - ``` - gcloud components install kubectl - ``` - In a later step, `kubectl` will be configured to use the new GKE cluster. -1. [Install Helm 3](https://helm.sh/docs/intro/install/) if it is not already installed. -1. Enable Helm experimental [support for OCI images](https://github.com/helm/community/blob/master/hips/hip-0006.md) with the `HELM_EXPERIMENTAL_OCI` variable. 
Add the following line to `~/.bashrc` (or `~/.bash_profile` in macOS, or wherever your shell stores environment variables): - - ``` - export HELM_EXPERIMENTAL_OCI=1 - ``` -1. Run the following command to load your updated `.bashrc` file: - - ``` - source ~/.bashrc - ``` - If you are running macOS, use this command: - ``` - source ~/.bash_profile - ``` - - - -# 3. Configure the gcloud CLI - - Set up default gcloud settings using one of the following methods: - -- Using gcloud init, if you want to be walked through setting defaults. -- Using gcloud config, to individually set your project ID, zone, and region. - -{{% tabs %}} -{{% tab "Using gloud init" %}} - -1. Run gcloud init and follow the directions: - - ``` - gcloud init - ``` - If you are using SSH on a remote server, use the --console-only flag to prevent the command from launching a browser: - - ``` - gcloud init --console-only - ``` -2. Follow the instructions to authorize gcloud to use your Google Cloud account and select the new project that you created. - -{{% /tab %}} -{{% tab "Using gcloud config" %}} -{{% /tab %}} -{{% /tabs %}} - -# 4. Confirm that gcloud is configured correctly - -Run: - -``` -gcloud config list -``` - -The output should resemble the following: - -``` -[compute] -region = us-west1 # Your chosen region -zone = us-west1-b # Your chosen zone -[core] -account = -disable_usage_reporting = True -project = - -Your active configuration is: [default] -``` - -# 5. Create a GKE Cluster - -The following command creates a three-node cluster. - -Replace `cluster-name` with the name of your new cluster. - -When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. - -``` -gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=1.20.10-gke.301 -``` - -# 6. Get Authentication Credentials - -After creating your cluster, you need to get authentication credentials to interact with the cluster: - -``` -gcloud container clusters get-credentials cluster-name -``` - -This command configures `kubectl` to use the cluster you created. - -# 7. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. - -The following command installs an `nginx-ingress-controller` with a LoadBalancer service: - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -# 8. Get the Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s -``` - -Save the `EXTERNAL-IP`. - -# 9. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. 
For help, refer to the Google Cloud documentation about [managing DNS records.](https://cloud.google.com/dns/docs/records) - -# 10. Install the Rancher Helm chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use the DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md deleted file mode 100644 index 185435db22..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/rollbacks/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Rollbacks -weight: 3 -aliases: - - /rancher/v2.x/en/upgrades/rollbacks - - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks - - /rancher/v2.x/en/upgrades/ha-server-rollbacks - - /rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks - - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks - - /rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/ ---- - -- [Rolling Back to Rancher v2.5.0+](#rolling-back-to-rancher-v2-5-0) -- [Rolling Back to Rancher v2.2-v2.4+](#rolling-back-to-rancher-v2-2-v2-4) -- [Rolling Back to Rancher v2.0-v2.1](#rolling-back-to-rancher-v2-0-v2-1) - -# Rolling Back to Rancher v2.5.0+ - -To roll back to Rancher v2.5.0+, use the **Rancher Backups** application and restore Rancher from backup. - -Rancher has to be started with the lower/previous version after a rollback. - -A restore is performed by creating a Restore custom resource. - -> **Important** -> -> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.]({{}}/rancher/v2.5/en/backups/migrating-rancher) -> * While restoring Rancher on the same setup, the Rancher deployment is manually scaled down before the restore starts, then the operator will scale it back up once the restore completes. As a result, Rancher and its UI will be unavailable until the restore is complete. While the UI is unavailable, use the original cluster kubeconfig with the restore YAML file: `kubectl create -f restore.yaml`. - -### Scale the Rancher Deployment to 0 - -1. From the **Global** view, hover over the **local** cluster. -1. Under **Projects in local**, click on **System**. -1. From the **cattle-system** namespace section, find the `rancher` deployment. -1. Select **⋮ > Edit**. -1. Change **Scalable deployment of _ pods** to `0`. -1. Scroll to the bottom and click **Save**. - -### Create the Restore Custom Resource - -1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** - * **Note:** If the Rancher Backups app is not visible in the dropdown, you will need to install it from the Charts page in **Apps & Marketplace**. Refer [here]({{}}/rancher/v2.5/en/helm-charts/#charts) for more information. -1. Click **Restore.** -1. 
Create the Restore with the form or with YAML. For help creating the Restore resource using the online form, refer to the [configuration reference]({{}}/rancher/v2.5/en/backups/configuration/restore-config) and to the [examples.]({{}}/rancher/v2.5/en/backups/examples) -1. To use the YAML editor, you can click **Create > Create from YAML.** Enter the Restore YAML. The following is an example Restore custom resource: - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Restore - metadata: - name: restore-migration - spec: - backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz - encryptionConfigSecretName: encryptionconfig - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - ``` - For help configuring the Restore, refer to the [configuration reference]({{}}/rancher/v2.5/en/backups/configuration/restore-config) and to the [examples.]({{}}/rancher/v2.5/en/backups/examples) - -1. Click **Create.** - -**Result:** The backup file is created and updated to the target storage location. The resources are restored in this order: - -1. Custom Resource Definitions (CRDs) -2. Cluster-scoped resources -3. Namespaced resources - -To check how the restore is progressing, you can check the logs of the operator. Follow these steps to get the logs: - -```yaml -kubectl get pods -n cattle-resources-system -kubectl logs -n cattle-resources-system -f -``` - -### Roll back to a previous Rancher version - -Rancher can be rolled back using the Helm CLI. To roll back to the previous version: - -```yaml -helm rollback rancher -n cattle-system -``` - -If the previous revision is not the intended target, you can specify a revision to roll back to. To see the deployment history: - -```yaml -helm history rancher -n cattle-system -``` - -When the target revision is determined, perform the rollback. This example will roll back to revision `3`: - -```yaml -helm rollback rancher 3 -n cattle-system -``` - -# Rolling Back to Rancher v2.2-v2.4+ - -To roll back to Rancher before v2.5, follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/) Restoring a snapshot of the Rancher server cluster will revert Rancher to the version and state at the time of the snapshot. - -For information on how to roll back Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks) - -> Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. - -# Rolling Back to Rancher v2.0-v2.1 - -Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved [here]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1) and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. 
diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md deleted file mode 100644 index 8e1acae6ce..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/_index.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: Upgrades -weight: 2 -aliases: - - /rancher/v2.5/en/upgrades/upgrades - - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades - - /rancher/v2.5/en/upgrades/upgrades/ha-server-upgrade-helm-airgap - - /rancher/v2.5/en/upgrades/air-gap-upgrade/ - - /rancher/v2.5/en/upgrades/upgrades/ha - - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/upgrades/ha - - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ - - /rancher/v2.5/en/upgrades/upgrades/ha-server-upgrade-helm/ - - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ha - - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades - - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha - - /rancher/v2.5/en/installation/upgrades-rollbacks/ - - /rancher/v2.5/en/upgrades/ - - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/ ---- -The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. - -For the instructions to upgrade Rancher installed on Kubernetes with RancherD, refer to [this page.]({{}}/rancher/v2.5/en/installation/install-rancher-on-linux/upgrades) - -For the instructions to upgrade Rancher installed with Docker, refer to [this page.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) - -To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services]({{}}/rke/latest/en/config-options/services/) or [add-ons]({{}}/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE]({{}}/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. - -- [Prerequisites](#prerequisites) -- [Upgrade Outline](#upgrade-outline) -- [Known Upgrade Issues](#known-upgrade-issues) -- [RKE Add-on Installs](#rke-add-on-installs) - -# Prerequisites - -### Access to kubeconfig - -Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. - -If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. - -The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) - -### Review Known Issues - -Review the list of known issues for each Rancher version, which can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren't supported. - -### Helm Version - -The upgrade instructions assume you are using Helm 3. 
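If you are not sure which Helm release is on your workstation, a quick check with the standard Helm CLI (nothing here is Rancher-specific) is:

```plain
helm version --short
# Helm 3 prints something like: v3.8.2+g6e3701e
```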
- -For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -### For air gap installs: Populate private registry - -For [air gap installs only,]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version. Follow the guide to [populate your private registry]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -### For upgrades from a Rancher server with a hidden local cluster - -If you are upgrading to Rancher v2.5 from a Rancher server that was started with the Helm chart option `--add-local=false`, you will need to drop that flag when upgrading. Otherwise, the Rancher server will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. For more information, see [this section.]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/#upgrading-from-rancher-with-a-hidden-local-cluster) - -### For upgrades with cert-manager older than 0.8.0 - -[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager) - -# Upgrade Outline - -Follow the steps to upgrade Rancher server: - -- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) -- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) -- [3. Upgrade Rancher](#3-upgrade-rancher) -- [4. Verify the Upgrade](#4-verify-the-upgrade) - -# 1. Back up Your Kubernetes Cluster that is Running Rancher Server - -Use the [backup application]({{}}/rancher/v2.5/en/backups/back-up-rancher) to back up Rancher. - -You'll use the backup as a restoration point if something goes wrong during upgrade. - -# 2. Update the Helm chart repository - -1. Update your local helm repo cache. - - ``` - helm repo update - ``` - -1. Get the repository name that you used to install Rancher. - - For information about the repos and their differences, see [Helm Chart Repositories]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - - {{< release-channel >}} - - ``` - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - - > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories]({{}}/rancher/v2.5/en/installation/resources/choosing-version/#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. - - -1. Fetch the latest chart to install Rancher from the Helm chart repository. 
- - This command will pull down the latest charts and save it in the current directory as a `.tgz` file. - - ```plain - helm fetch rancher-/rancher - ``` - You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: - - ```plain - helm fetch rancher-/rancher --version=v2.4.11 - ``` - -# 3. Upgrade Rancher - -This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. - -> **Air Gap Instructions:** If you are installing Rancher in an air gapped environment, skip the rest of this page and render the Helm template by following the instructions on [this page.](./air-gap-upgrade) - - -Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. - -``` -helm get values rancher -n cattle-system - -hostname: rancher.my.org -``` - -> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. - -If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow [Option B: Reinstalling Rancher and cert-manager.](#option-b-reinstalling-rancher-and-cert-manager) - -Otherwise, follow [Option A: Upgrading Rancher.](#option-a-upgrading-rancher) - -### Option A: Upgrading Rancher - -Upgrade Rancher to the latest version with all your settings. - -Take all the values from the previous step and append them to the command using `--set key=value`: - -``` -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org -``` - -> **Note:** The above is an example, there may be more values from the previous step that need to be appended. - -Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: - -``` -helm get values rancher -n cattle-system -o yaml > values.yaml - -helm upgrade rancher rancher-/rancher \ - --namespace cattle-system \ - -f values.yaml \ - --version=2.4.5 -``` - -### Option B: Reinstalling Rancher and cert-manager - -If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11. - -1. Uninstall Rancher - - ``` - helm delete rancher -n cattle-system - ``` - -2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager) page. - -3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. - - ``` - helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org - ``` - -# 4. Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). 
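Beyond logging in through the UI, you can sanity-check the rollout from the command line. This is a minimal sketch using standard `kubectl` and Helm commands, assuming Rancher was installed into the `cattle-system` namespace as in the steps above:

```plain
# Wait for the upgraded Rancher pods to become ready
kubectl -n cattle-system rollout status deploy/rancher

# Confirm the chart revision and app version Helm now reports
helm ls -n cattle-system
```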
- -# Known Upgrade Issues - -A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md b/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md deleted file mode 100644 index 2d591e83ab..0000000000 --- a/content/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Rendering the Helm Template in an Air Gapped Environment -shortTitle: Air Gap Upgrade -weight: 1 ---- - -> These instructions assume you have already followed the instructions for a Kubernetes upgrade on [this page,]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/) including the prerequisites, up until step 3. Upgrade Rancher. - -### Rancher Helm Template Options - -Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -Based on the choice you made during installation, complete one of the procedures below. - -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. - - -### Option A: Default Self-signed Certificate - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - - ```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -{{% /tab %}} -{{% /tabs %}} - - - -### Option B: Certificates from Files using Kubernetes Secrets - - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - - -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template rancher ./rancher-.tgz --output-dir . 
\ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - - -```plain -helm template rancher ./rancher-.tgz --output-dir . \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain -helm template rancher ./rancher-.tgz --output-dir . \ ---namespace cattle-system \ ---set hostname= \ ---set rancherImage=/rancher/rancher \ ---set ingress.tls.source=secret \ ---set privateCA=true \ ---set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher ---set useBundledSystemChart=true # Use the packaged Rancher system charts -``` -{{% /tab %}} -{{% /tabs %}} - - -### Apply the Rendered Templates - -Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. - -Use `kubectl` to apply the rendered manifests. - -```plain -kubectl -n cattle-system apply -R -f ./rancher -``` - -# Verify the Upgrade - -Log into Rancher to confirm that the upgrade succeeded. - ->**Having network issues following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). - -# Known Upgrade Issues - -A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/_index.md deleted file mode 100644 index 9fc0270d96..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/_index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Other Installation Methods -weight: 3 -aliases: - - /rancher/v2.x/en/installation/other-installation-methods/ ---- - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -The Docker installation is for development and testing environments only. 
- -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.5/en/backups/migrating-rancher) \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md deleted file mode 100644 index fad1967087..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Air Gapped Helm CLI Install -weight: 1 -aliases: - - /rancher/v2.5/en/installation/air-gap-installation/ - - /rancher/v2.5/en/installation/air-gap-high-availability/ - - /rancher/v2.5/en/installation/air-gap-single-node/ - - /rancher/v2.x/en/installation/other-installation-methods/air-gap/ ---- - -This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. - -For more information on each installation option, refer to [this page.]({{}}/rancher/v2.5/en/installation/) - -Throughout the installation instructions, there will be _tabs_ for each installation option. - -> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. - -# Installation Outline - -1. [Set up infrastructure and private registry]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/) -2. [Collect and publish images to your private registry]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/) -3. [Set up a Kubernetes cluster (Skip this step for Docker installations)]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/) -4. [Install Rancher]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/) - -# Upgrades - -To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/) - -### [Next: Prepare your Node(s)]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md deleted file mode 100644 index 78daa4f58a..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ /dev/null @@ -1,307 +0,0 @@ ---- -title: 4. 
Install Rancher -weight: 400 -aliases: - - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-system-charts/ - - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.5/en/installation/air-gap-single-node/install-rancher - - /rancher/v2.5/en/installation/air-gap/install-rancher - - /rancher/v2.5/en/installation/air-gap-installation/install-rancher/ - - /rancher/v2.5/en/installation/air-gap-high-availability/install-rancher/ - - /rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/ ---- - -This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Privileged Access for Rancher v2.5+ - -When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. - -# Docker Instructions - -If you want to continue the air gapped installation using Docker commands, skip the rest of this page and follow the instructions on [this page.](./docker-install-commands) - -# Kubernetes Instructions - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher: - -- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) -- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) -- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) -- [4. Install Rancher](#4-install-rancher) - -# 1. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.5/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. - ```plain - helm fetch rancher-/rancher - ``` - - If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: - ```plain - helm fetch rancher-stable/rancher --version=v2.4.8 - ``` - -# 2. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. 
- -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -# Helm Chart Options for Air Gap Installations - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -# 3. Render the Rancher Helm Template - -Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. - -# Option A: Default Self-Signed Certificate - - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager/). - -### 1. Add the cert-manager repo - -From a system connected to the internet, add the cert-manager repo to Helm: - -```plain -helm repo add jetstack https://charts.jetstack.io -helm repo update -``` - -### 2. Fetch the cert-manager chart - -Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - -```plain -helm fetch jetstack/cert-manager --version v1.5.1 -``` - -### 3. Render the cert-manager template - -Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - -```plain -helm template cert-manager ./cert-manager-v1.5.1.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ - --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl -``` - -### 4. Download the cert-manager CRD - -Download the required CRD file for cert-manager: - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml - ``` - -### 5. Render the Rancher template - -Render the Rancher template, declaring your chosen options. 
Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.6` -{{% /tab %}} -{{% /tabs %}} - - - -# Option B: Certificates From Files using Kubernetes Secrets - - -### 1. Create secrets - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -### 2. Render the Rancher template - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . 
\ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. -{{% /tab %}} -{{% /tabs %}} - - - -# 4. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -### For Self-Signed Certificate Installs, Install Cert-manager - -{{% accordion id="install-cert-manager" label="Click to expand" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). -```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -### Install Rancher with kubectl - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` -The installation is complete. 
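Before moving on to the initial login, it can help to confirm that the applied manifests produced a healthy deployment. A minimal check with standard `kubectl` commands, assuming the `cattle-system` namespace used above and the chart's default `app=rancher` pod label, looks like:

```plain
# Watch the Rancher deployment roll out
kubectl -n cattle-system rollout status deploy/rancher

# List the Rancher pods and confirm they reach the Running state
kubectl -n cattle-system get pods -l app=rancher
```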
- -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.5/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -# Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.5/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md deleted file mode 100644 index 81a30b69e8..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Docker Install Commands -weight: 1 ---- - -The Docker installation is for Rancher users who want to test out Rancher. - -Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -For Rancher v2.5+, the backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -> **Do you want to...** -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.5/en/installation/options/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). 
- -Choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to install. | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to install. 
| - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` - -{{% /accordion %}} - - - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.5/en/faq/telemetry/) during the initial login. - diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md deleted file mode 100644 index 58578d7d8f..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: '3. Install Kubernetes (Skip for Docker Installs)' -weight: 300 -aliases: - - /rancher/v2.5/en/installation/air-gap-high-availability/install-kube - - /rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/ ---- - -> Skip this section if you are installing Rancher on a single node with Docker. 
- -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. - -The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. - -{{% tabs %}} -{{% tab "K3s" %}} - -In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. - -### Installation Outline - -1. [Prepare Images Directory](#1-prepare-images-directory) -2. [Create Registry YAML](#2-create-registry-yaml) -3. [Install K3s](#3-install-k3s) -4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) - -### 1. Prepare Images Directory -Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. - -Place the tar file in the `images` directory before starting K3s on each node, for example: - -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` - -### 2. Create Registry YAML -Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. - -The registries.yaml file should look like this before plugging in the necessary information: - -``` ---- -mirrors: - customreg: - endpoint: - - "https://ip-to-server:5000" -configs: - customreg: - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: - key_file: - ca_file: -``` - -Note, at this time only secure registries are supported with K3s (SSL with custom CA). - -For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) - -### 3. Install K3s - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. -Also obtain the K3s install script at https://get.k3s.io - -Place the binary in `/usr/local/bin` on each node. -Place the install script anywhere on each node, and name it `install.sh`. - -Install K3s on each server: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh -``` - -Install K3s on each agent: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh -``` - -Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. 
-The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` - ->**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -### 4. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### Note on Upgrading - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. -2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. -3. Restart the K3s service (if not restarted automatically by installer). -{{% /tab %}} -{{% tab "RKE" %}} -We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. - -### 1. Install RKE - -Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) - -### 2. Create an RKE Config File - -From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. 
- -This file is an RKE configuration file: it describes the cluster you're deploying Rancher to. - -Replace the values in the code sample below with the help of the _RKE Options_ table. Use the IP addresses or DNS names of the three nodes you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -**RKE Options**
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | -| `user` | ✓ | A user that can run Docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### 3. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### 4. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ -{{% /tab %}} -{{% /tabs %}} - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md deleted file mode 100644 index 5c152feb54..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ /dev/null @@ -1,296 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 -aliases: - - /rancher/v2.5/en/installation/air-gap-high-availability/prepare-private-registry/ - - /rancher/v2.5/en/installation/air-gap-single-node/prepare-private-registry/ - - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-for-private-reg/ - - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ - - /rancher/v2.5/en/installation/air-gap-installation/prepare-private-reg/ - - /rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/ ---- - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/) or launch any tools in Rancher, e.g. monitoring and logging, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. - -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. - -> **Prerequisites:** -> -> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. -> -> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) -2. 
[Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) -3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) -4. [Populate the private registry](#4-populate-the-private-registry) - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### 1. Find the required assets for your Rancher version - -1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### 2. Collect the cert-manager image - -> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v1.5.1 - helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### 4. 
Populate the private registry - -Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -# Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -1. Find the required assets for your Rancher version -2. Save the images to your Windows Server workstation -3. Prepare the Docker daemon -4. Populate the private registry - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|----------------------------|------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - ```plain - ./rancher-save-images.ps1 - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - - - -### 3. 
Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... - "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - - - -### 4. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -# Linux Steps - -The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. - -1. Find the required assets for your Rancher version -2. Collect all the required images -3. Save the images to your Linux workstation -4. Populate the private registry - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -|----------------------------| -------------------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. 
Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - - - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - -**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - - - -### 4. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. - -The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` - -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - -```plain -./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry -``` - - -{{% /tab %}} -{{% /tabs %}} - -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md deleted file mode 100644 index 84bff62728..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: '1. 
Set up Infrastructure and Private Registry' -weight: 100 -aliases: - - /rancher/v2.5/en/installation/air-gap-single-node/provision-host - - /rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/ ---- - -In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). - -An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. - -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.]({{}}/rancher/v2.5/en/installation/) - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. - -{{% tabs %}} -{{% tab "K3s" %}} -We recommend setting up the following infrastructure for a high-availability installation: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set up one of the following external databases: - -* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) -* [MySQL](https://www.mysql.com/) (certified against version 5.7) -* [etcd](https://etcd.io/) (certified against version 3.3.15) - -When you install Kubernetes, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/rds) for setting up a MySQL database on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. 
That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 5. Set up a Private Docker Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) -{{% /tab %}} -{{% tab "RKE" %}} - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. 
The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 4. Set up a Private Docker Registry - -Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. 
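If you only need a small registry for lab or proof-of-concept purposes, the sketch below shows one way to run a TLS-enabled registry with the official `registry:2` image. The `/opt/registry` paths and certificate file names are placeholders, and a production registry will typically also need authentication and proper storage:

```plain
# Minimal example: run a TLS-enabled private registry on the bastion host.
# /opt/registry/certs is assumed to contain registry.crt and registry.key for the registry hostname.
docker run -d --restart=always --name registry \
  -p 5000:5000 \
  -v /opt/registry/data:/var/lib/registry \
  -v /opt/registry/certs:/certs \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/registry.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/registry.key \
  registry:2
```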
- -In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file]({{}}/rke/latest/en/config-options/private-registries/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) - -{{% /tab %}} -{{% tab "Docker" %}} -> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. -> -> As of Rancher v2.5, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -### 1. Set up a Linux Node - -This host will be disconnected from the Internet, but needs to be able to connect to your private registry. - -Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up a Private Docker Registry - -Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md deleted file mode 100644 index 1585ae7c0b..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Installing Rancher behind an HTTP Proxy -weight: 4 -aliases: - - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/ ---- - -In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. - -Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/). - -# Installation Outline - -1. [Set up infrastructure]({{}}/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/) -2. [Set up a Kubernetes cluster]({{}}/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) -3. 
[Install Rancher]({{}}/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md deleted file mode 100644 index dfc9854ee4..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 3. Install Rancher -weight: 300 -aliases: - - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/ ---- - -Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use-case you will likely use Let's Encrypt or provide your own certificate. - -> **Note:** These installation instructions assume you are using Helm 3. - -### Install cert-manager - -Add the cert-manager helm repository: - -``` -helm repo add jetstack https://charts.jetstack.io -``` - -Create a namespace for cert-manager: - -``` -kubectl create namespace cert-manager -``` - -Install the CustomResourceDefinitions of cert-manager: - -``` -kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml -``` - -And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers: - -``` -helm upgrade --install cert-manager jetstack/cert-manager \ - --namespace cert-manager --version v1.5.1 \ - --set http_proxy=http://${proxy_host} \ - --set https_proxy=http://${proxy_host} \ - --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local -``` - -Now you should wait until cert-manager is finished starting up: - -``` -kubectl rollout status deployment -n cert-manager cert-manager -kubectl rollout status deployment -n cert-manager cert-manager-webhook -``` - -### Install Rancher - -Next you can install Rancher itself. First add the helm repository: - -``` -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest -``` - -Create a namespace: - -``` -kubectl create namespace cattle-system -``` - -And install Rancher with Helm. Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata. - -Note that `rancher.cattle-system` must be added to the noProxy list (as shown below) so that Fleet can communicate directly to Rancher with Kubernetes service DNS using service discovery. 
- -``` -helm upgrade --install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.example.com \ - --set proxy=http://${proxy_host} - --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local,rancher.cattle-system -``` - -After waiting for the deployment to finish: - -``` -kubectl rollout status deployment -n cattle-system rancher -``` - -You can now navigate to `https://rancher.example.com` and start using Rancher. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.5/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -### Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.5/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.5/en/installation/resources/encryption/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md deleted file mode 100644 index e4326734a4..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: '2. Install Kubernetes' -weight: 200 -aliases: - - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/ ---- - -Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in. - -### Installing Docker - -First, you have to install Docker and setup the HTTP proxy on all three Linux nodes. For this perform the following steps on all three nodes. - -For convenience export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell: - -``` -export proxy_host="10.0.0.5:8888" -export HTTP_PROXY=http://${proxy_host} -export HTTPS_PROXY=http://${proxy_host} -export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16 -``` - -Next configure apt to use this proxy when installing packages. 
If you are not using Ubuntu, you have to adapt this step accordingly: - -``` -cat < /dev/null -Acquire::http::Proxy "http://${proxy_host}/"; -Acquire::https::Proxy "http://${proxy_host}/"; -EOF -``` - -Now you can install Docker: - -``` -curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh -``` - -Then ensure that your current user is able to access the Docker daemon without sudo: - -``` -sudo usermod -aG docker YOUR_USERNAME -``` - -And configure the Docker daemon to use the proxy to pull images: - -``` -sudo mkdir -p /etc/systemd/system/docker.service.d -cat < /dev/null -[Service] -Environment="HTTP_PROXY=http://${proxy_host}" -Environment="HTTPS_PROXY=http://${proxy_host}" -Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" -EOF -``` - -To apply the configuration, restart the Docker daemon: - -``` -sudo systemctl daemon-reload -sudo systemctl restart docker -``` - -### Creating the RKE Cluster - -You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: - -* [RKE CLI binary]({{}}/rke/latest/en/installation/#download-the-rke-binary) - -``` -sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 -sudo chmod +x /usr/local/bin/rke -``` - -* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) - -``` -curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" -chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -``` - -* [helm](https://helm.sh/docs/intro/install/) - -``` -curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 -chmod +x get_helm.sh -sudo ./get_helm.sh -``` - -Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -``` -nodes: - - address: 10.0.1.200 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.201 - user: ubuntu - role: [controlplane,worker,etcd] - - address: 10.0.1.202 - user: ubuntu - role: [controlplane,worker,etcd] - -services: - etcd: - backup_config: - interval_hours: 12 - retention: 6 -``` - -After that, you can create the Kubernetes cluster by running: - -``` -rke up --config rancher-cluster.yaml -``` - -RKE creates a state file called `rancher-cluster.rkestate`, this is needed if you want to perform updates, modify your cluster configuration or restore it from a backup. It also creates a `kube_config_cluster.yaml` file, that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system. - -To have a look at your cluster run: - -``` -export KUBECONFIG=kube_config_cluster.yaml -kubectl cluster-info -kubectl get pods --all-namespaces -``` - -You can also verify that your external load balancer works, and the DNS entry is set up correctly. 
You can also verify that your external load balancer works and that the DNS entry is set up correctly. If you send a request to either of them, you should receive an HTTP 404 response from the ingress controller:

```
$ curl 10.0.1.100
default backend - 404
$ curl rancher.example.com
default backend - 404
```

### Save Your Files

> **Important**
> The files mentioned below are needed to maintain, troubleshoot, and upgrade your cluster.

Save a copy of the following files in a secure location:

- `rancher-cluster.yaml`: The RKE cluster configuration file.
- `kube_config_cluster.yaml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster. This file contains credentials for full access to the cluster.
- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state). This file contains the current state of the cluster, including the RKE configuration and the certificates.

> **Note:** The "rancher-cluster" parts of the latter two file names depend on how you name the RKE cluster configuration file.

### Issues or errors?

See the [Troubleshooting]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) page.

### [Next: Install Rancher](../install-rancher)

diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md
deleted file mode 100644
index 3e3d9370e1..0000000000
--- a/content/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md
+++ /dev/null
@@ -1,63 +0,0 @@
---
title: '1. Set up Infrastructure'
weight: 100
aliases:
  - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/
---

In this section, you will provision the underlying infrastructure for your Rancher management server with internet access through an HTTP proxy.

To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure:

- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere.
- **A load balancer** to direct front-end traffic to the three nodes.
- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it.

These nodes must be in the same region/data center. You may place these servers in separate availability zones.

### Why three nodes?

In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes.

The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can still elect a leader because they hold a majority of the total number of etcd nodes.

### 1. Set up Linux Nodes

These hosts will connect to the internet through an HTTP proxy.
- -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. 
- -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - - -### [Next: Set up a Kubernetes cluster]({{}}/rancher/v2.5/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md deleted file mode 100644 index 19c6afd9c6..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/_index.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: Install/Upgrade Rancher with RancherD -weight: 3 -aliases: - - /rancher/v2.5/en/installation/install-rancher-on-linux - - /rancher/v2.x/en/installation/install-rancher-on-linux/ ---- - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -We are excited to introduce a new, simpler way to install Rancher called RancherD. - -RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. - -- [About RancherD Installs](#about-rancherd-installs) -- [Prerequisites](#prerequisites) -- [Part I: Installing Rancher](#part-i-installing-rancher) -- [Part II: High Availability](#part-ii-high-availability) -- [Upgrades](#upgrades) -- [Configuration](#configuration) -- [Uninstall](#uninstall) -- [RKE2 Documentation](#rke2-documentation) - -# About RancherD Installs - -When RancherD is launched on a host, it first installs an RKE2 Kubernetes cluster, then deploys Rancher on the cluster as a Kubernetes daemonset. - -In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster. - -Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded. - -In Part I of these instructions, you'll learn how to launch RancherD on a single node. The result of following the steps in Part I is a single-node [RKE2](https://docs.rke2.io/) Kubernetes cluster with the Rancher server installed. This cluster can easily become high availability later. If Rancher only needs to manage the local Kubernetes cluster, the installation is complete. - -Part II explains how to convert the single-node Rancher installation into a high-availability installation. If the Rancher server will manage downstream Kubernetes clusters, it is important to follow these steps. A discussion of recommended architecture for highly available Rancher deployments can be found in our [Best Practices Guide.]({{}}/rancher/v2.5/en/best-practices/v2.5/rancher-server) - -# Prerequisites - -### Node Requirements - -RancherD must be launched on a Linux OS. 
At this time, only OSes that leverage systemd are supported. - -The Linux node needs to fulfill the [installation requirements]({{}}/rancher/v2.5/en/installation/requirements) for hardware and networking. Docker is not required for RancherD installs. - -To install RancherD on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, some [additional steps]({{}}/rancher/v2.5/en/installation/requirements/#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required. -### Root Access - -Before running the installation commands, you will need to log in as root: - -``` -sudo -s -``` - -### Fixed Registration Address - -A fixed registration address is recommended for single-node installs and required for high-availability installs with RancherD. - -The fixed registration address is an endpoint that is used for two purposes: - -- To access the Kubernetes API. So you can, for example, modify your [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file to point to it instead of a specific node. -- To add new nodes to the Kubernetes cluster. To add nodes to the cluster later, you will run a command on the node that will specify the fixed registration address of the cluster. - -If you are installing Rancher on a single node, the fixed registration address makes it possible to add more nodes to the cluster so that you can convert the single-node install to a high-availability install without causing downtime to the cluster. If you don't set up this address when installing the single-node Kubernetes cluster, you would need to re-run the installation script with a fixed registration address in order to add new nodes to the cluster. - -The fixed registration can be the IP or hostname of any of the server nodes, but in many cases those may change over time as nodes are created and destroyed. Therefore, you should have a stable endpoint in front of the server nodes. - -This endpoint can be set up using any number of approaches, such as: - -* A layer 4 (TCP) load balancer -* Round-robin DNS -* Virtual or elastic IP addresses - -The following should be taken into consideration when configuring the load balancer or other endpoint: - -- The RancherD server process listens on port 9345 for new nodes to register. -- The Kubernetes API is served on port 6443, as normal. -- In RancherD installs, the Rancher UI is served on port 8443 by default. (This is different from Helm chart installs, where port 443 is used by default.) - -# Part I: Installing Rancher - -### 1. Set up Configurations - -To avoid certificate errors with the fixed registration address, you should launch the server with the `tls-san` parameter set. This parameter should refer to your fixed registration address. - -This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access the Kubernetes cluster via both the IP and the hostname. - -Create the RancherD config file at `/etc/rancher/rke2/config.yaml`: - -```yaml -token: my-shared-secret -tls-san: - - my-fixed-registration-address.com - - another-kubernetes-domain.com -``` - -The first server node establishes the secret token that other nodes would register with if they are added to the cluster. - -If you do not specify a pre-shared secret, RancherD will generate one and place it at `/var/lib/rancher/rke2/server/node-token`. 
- -To specify your own pre-shared secret as the token, set the `token` argument on startup. - -Installing Rancher this way will use Rancher-generated certificates. To use your own self-signed or trusted certificates, refer to the [configuration guide.]({{}}/rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration/#certificates-for-the-rancher-server) - -For information on customizing the RancherD Helm chart values.yaml, refer to [this section.]({{}}/rancher/v2.5/en/installation/install-rancher-on-linux/rancherd-configuration/#customizing-the-rancherd-helm-chart) - -### 2. Launch the first server node - -Run the RancherD installer: - -``` -curl -sfL https://get.rancher.io | sh - -``` - -The RancherD version can be specified using the `INSTALL_RANCHERD_VERSION` environment variable: - -``` -curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.4-rc6 sh - -``` - -Once installed, the `rancherd` binary will be on your PATH. You can check out its help text like this: - -``` -rancherd --help -NAME: - rancherd - Rancher Kubernetes Engine 2 -... -``` - -Next, launch RancherD: - -``` -systemctl enable rancherd-server.service -systemctl start rancherd-server.service -``` - -When RancherD launches, it installs an RKE2 Kubernetes cluster. Use the following command to see the logs of the Kubernetes cluster as it comes up: - -``` -journalctl -eu rancherd-server -f -``` - -### 3. Set up the kubeconfig file with kubectl - -Once the Kubernetes cluster is up, set up RancherD’s kubeconfig file and `kubectl`: - -``` -export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=$PATH:/var/lib/rancher/rke2/bin -``` - -### 4. Verify that Rancher is installed on the Kubernetes cluster - -Now, you can start issuing `kubectl` commands. Use the following commands to verify that Rancher is deployed as a daemonset on the cluster: - -``` -kubectl get daemonset rancher -n cattle-system -kubectl get pod -n cattle-system -``` - -If you watch the pods, you will see the following pods installed: - -- `helm-operation` pods in the `cattle-system` namespace -- a `rancher` pod and `rancher-webhook` pod in the `cattle-system` namespace -- a `fleet-agent`, `fleet-controller`, and `gitjob` pod in the `fleet-system` namespace -- a `rancher-operator` pod in the `rancher-operator-system` namespace - -### 5. Set the initial Rancher password - -Once the `rancher` pod is up and running, run the following: - -``` -rancherd reset-admin -``` - -This will give you the URL, username and password needed to log into Rancher. Follow that URL, plug in the credentials, and you’re up and running with Rancher! - -If Rancher will only manage the local Kubernetes cluster, the installation is complete. - -# Part II: High Availability - -If you plan to use the Rancher server to manage downstream Kubernetes clusters, Rancher needs to be highly available. In these steps, you will add more nodes to achieve a high-availability cluster. Since Rancher is running as a daemonset, it will automatically launch on the nodes you add. - -An odd number of nodes is required because the etcd cluster, which contains the cluster data, needs a majority of live nodes to avoid losing quorum. A loss of quorum could require the cluster to be restored from backup. Therefore, we recommend using three nodes. - -When following these steps, you should still be logged in as root. - -### 1. 
Configure the fixed registration address on a new node - -Additional server nodes are launched much like the first, except that you must specify the `server` and `token` parameters so that they can successfully connect to the initial server node. - -Here is an example of what the RancherD config file would look like for additional server nodes. By default, this config file is expected to be located at `/etc/rancher/rke2/config.yaml`. - -```yaml -server: https://my-fixed-registration-address.com:9345 -token: my-shared-secret -tls-san: - - my-fixed-registration-address.com - - another-kubernetes-domain.com -``` - -### 2. Launch an additional server node - -Run the installer on the new node: - -``` -curl -sfL https://get.rancher.io | sh - -``` - -This will download RancherD and install it as a systemd unit on your host. - - -Next, launch RancherD: - -``` -systemctl enable rancherd-server.service -systemctl start rancherd-server.service -``` - -### 3. Repeat - -Repeat steps one and two for another Linux node, bringing the number of nodes in the cluster to three. - -**Result:** Rancher is highly available and the installation is complete. - -# Upgrades - -For information on upgrades and rollbacks, refer to [this page.](./upgrades) - -# Configuration - -For information on how to configure certificates, node taints, Rancher Helm chart options, or RancherD CLI options, refer to the [configuration reference.](./rancherd-configuration) - -# Uninstall - -To uninstall RancherD from your system, run the command below. This will shut down the process, remove the RancherD binary, and clean up files used by RancherD. - -``` -rancherd-uninstall.sh -``` - -# RKE2 Documentation - -For more information on RKE2, the Kubernetes distribution used to provision the underlying cluster, refer to the documentation [here.](https://docs.rke2.io/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md deleted file mode 100644 index 3f63915f6f..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/upgrades/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Upgrades -weight: 2 -aliases: - - /rancher/v2.5/en/installation/install-rancher-on-linux/upgrades - - /rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/ ---- - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -When RancherD is upgraded, the Rancher Helm controller and the Fleet pods are upgraded. - -During a RancherD upgrade, there is very little downtime, but it is possible that RKE2 may be down for a minute, during which you could lose access to Rancher. - -When Rancher is installed with RancherD, the underlying Kubernetes cluster can't be upgraded from the Rancher UI. It needs to be upgraded using the RancherD CLI. - -### Upgrading the Rancher Helm Chart without Upgrading the Underlying Cluster - -To upgrade Rancher without upgrading the underlying Kubernetes cluster, follow these steps. 
- -> Before upgrading, we recommend that you should: -> -> - Create a backup of the Rancher server using the [backup application.]({{}}/rancher/v2.5/en/backups/v2.5/back-up-rancher/) -> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -1. Uninstall the chart with Helm: - - ``` - helm uninstall rancher - ``` - -2. Reinstall the Rancher chart with Helm. To install a specific Rancher version, use the `--version` flag. For example: - - ``` - helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --version 2.5.1 - ``` - -**Result:** Rancher is upgraded to the new version. - -If necessary, restore Rancher from backup by following [these steps.]({{}}/rancher/v2.5/en/backups/restoring-rancher/) - -### Upgrading Both Rancher and the Underlying Cluster - -Upgrade both RancherD and the underlying Kubernetes cluster by re-running the RancherD installation script. - -> Before upgrading, we recommend that you should: -> -> - Create a backup of the Rancher server using the [backup application.]({{}}/rancher/v2.5/en/backups/v2.5/back-up-rancher/) -> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) - -``` -sudo curl -sfL https://get.rancher.io | sudo sh - -``` - -To specify a specific version to upgrade to, use `INSTALL_RANCHERD_VERSION` environment variable: - -``` -curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.1 sh - -``` - -Then launch the server: - -``` -systemctl enable rancherd-server -systemctl start rancherd-server -``` - -The upgrade can also be performed by manually installing the binary of the desired version. - - diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md deleted file mode 100644 index d28ce130fb..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Installing Rancher on a Single Node Using Docker -description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. -weight: 2 -aliases: - - /rancher/v2.5/en/installation/single-node-install/ - - /rancher/v2.5/en/installation/single-node - - /rancher/v2.5/en/installation/other-installation-methods/single-node - - /rancher/v2.x/en/installation/requirements/installing-docker/ - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/ ---- - -Rancher can be installed by running a single Docker container. - -In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. 
- -> **Want to use an external load balancer?** -> See [Docker Install with an External Load Balancer]({{}}/rancher/v2.5/en/installation/options/single-node-install-external-lb) instead. - -A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -### Privileged Access for Rancher v2.5+ - -When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. - -# Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.5/en/installation/requirements/) - -# 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.5/en/installation/requirements) to launch your Rancher server. - -# 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Use a proxy? See [HTTP Proxy Configuration]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/) -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) -> - Complete an Air Gap Installation? See [Air Gap: Docker Install]({{}}/rancher/v2.5/en/installation/air-gap-single-node/) -> - Record all transactions with the Rancher API? See [API Auditing](./advanced/#api-audit-log) - -Choose from the following options: - -- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate) -- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed) -- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca) -- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate) - -### Option A: Default Rancher-generated Self-signed Certificate - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the minimum installation command below. 
- -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:latest -``` - -### Option B: Bring Your Own Certificate, Self-signed -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. - -> **Prerequisites:** -> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| ------------------- | --------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - --privileged \ - rancher/rancher:latest -``` - -### Option C: Bring Your Own Certificate, Signed by a Recognized CA - -In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After obtaining your certificate, run the Docker command below. - -- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. -- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -| Placeholder | Description | -| ------------------- | ----------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. 
| - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -```bash -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - --privileged \ - rancher/rancher:latest \ - --no-cacerts -``` - -### Option D: Let's Encrypt Certificate - -> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. - -> **Prerequisites:** -> -> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. -> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). -> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. - -| Placeholder | Description | -| ----------------- | ------------------- | -| `` | Your domain address | - -As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:latest \ - --acme-domain -``` - -## Advanced Options - -When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: - -- Custom CA Certificate -- API Audit Log -- TLS Settings -- Air Gap -- Persistent Data -- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -Refer to [this page](./advanced) for details. - -## Troubleshooting - -Refer to [this page](./troubleshooting) for frequently asked questions and troubleshooting tips. - -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.5/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.5/en/cluster-provisioning/). 
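Before moving on to those next steps, it may be worth a quick check that the Rancher container came up cleanly. This is a minimal sketch rather than part of the official instructions; it assumes the default ports used above and a container started from `rancher/rancher:latest`:

```
# Confirm the container is up and note its name.
docker ps --filter "ancestor=rancher/rancher:latest"

# Tail the server logs; startup errors will show up here.
docker logs --tail 20 $(docker ps -q --filter "ancestor=rancher/rancher:latest")

# The health check endpoint should answer on the published HTTPS port
# (use -k because the default certificate is self-signed).
curl -sk https://localhost/ping
```

On recent 2.x releases the last command is expected to return `pong`; if it does not, review the container logs before continuing.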
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md deleted file mode 100644 index ef4cfe2c62..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/_index.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Advanced Options for Docker Installs -weight: 5 -aliases: - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/ ---- - -When installing Rancher, there are several [advanced options]({{}}/rancher/v2.5/en/installation/options/) that can be enabled: - -- [Custom CA Certificate](#custom-ca-certificate) -- [API Audit Log](#api-audit-log) -- [TLS Settings](#tls-settings) -- [Air Gap](#air-gap) -- [Persistent Data](#persistent-data) -- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) - -### Custom CA Certificate - -If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. - -Use the command example to start a Rancher container with your private CA certificates mounted. - -- The volume flag (`-v`) should specify the host directory containing the CA root certificates. -- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. -- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. -- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. - -The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /host/certs:/container/certs \ - -e SSL_CERT_DIR="/container/certs" \ - --privileged \ - rancher/rancher:latest -``` - -### API Audit Log - -The API Audit Log records all the user and system transactions made through Rancher server. - -The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. - -See [API Audit Log]({{}}/rancher/v2.5/en/installation/api-auditing) for more information and options. - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /var/log/rancher/auditlog:/var/log/auditlog \ - -e AUDIT_LEVEL=1 \ - --privileged \ - rancher/rancher:latest -``` - -### TLS settings - -To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. 
For example, to configure TLS 1.0 as minimum accepted TLS version: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_TLS_MIN_VERSION="1.0" \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -See [TLS settings]({{}}/rancher/v2.5/en/admin-settings/tls-settings) for more information and options. - -### Air Gap - -If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node - -In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. - -If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. - -Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. - -To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: - -``` -docker run -d --restart=unless-stopped \ - -p 8080:80 -p 8443:443 \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md deleted file mode 100644 index 1bb416a902..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: HTTP Proxy Configuration -weight: 251 -aliases: - - /rancher/v2.5/en/installation/proxy-configuration/ - - /rancher/v2.5/en/installation/single-node/proxy - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/ ---- - -If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. 
- -Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. - -| Environment variable | Purpose | -| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | -| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | -| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | -| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | - -> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. - -## Docker Installation - -Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation]({{}}/rancher/v2.5/en/installation/single-node-install/) are: - -- `localhost` -- `127.0.0.1` -- `0.0.0.0` -- `10.0.0.0/8` -- `cattle-system.svc` -- `.svc` -- `.cluster.local` - -The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e HTTP_PROXY="http://192.168.10.1:3128" \ - -e HTTPS_PROXY="http://192.168.10.1:3128" \ - -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md deleted file mode 100644 index 11189b9dd6..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Rolling Back Rancher Installed with Docker -weight: 1015 -aliases: - - /rancher/v2.5/en/upgrades/single-node-rollbacks - - /rancher/v2.5/en/upgrades/rollbacks/single-node-rollbacks - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/ ---- - -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades). Rolling back restores: - -- Your previous version of Rancher. -- Your data backup created before upgrade. - -## Before You Start - -During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker pull rancher/rancher: -``` - -In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. 
- -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | ------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that the backup is for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Rolling Back Rancher - -If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. - ->**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. - - For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. - - ``` - docker pull rancher/rancher: - ``` - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - You can obtain the name for your Rancher container by entering `docker ps`. - -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades), it will have a name similar to (`rancher-data-backup--.tar.gz`). - -1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. - - ``` - docker run --volumes-from rancher-data \ - -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ - && tar zxvf /backup/rancher-data-backup--.tar.gz" - ``` - -1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. - ``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: - ``` - As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - - >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. - -**Result:** Rancher is rolled back to its version and data state before upgrade. 
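In addition to checking the UI, you can confirm the rollback from the command line. This is an optional sketch, not part of the original procedure, using the same placeholder convention as the steps above:

```
# Verify that the running container was started from the prior version's image.
docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}'

# Watch the logs while Rancher comes back up on the restored data;
# persistent errors here usually point at the wrong backup tarball.
docker logs -f --tail 50 <RANCHER_CONTAINER_NAME>
```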
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md b/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md deleted file mode 100644 index 6c55386d77..0000000000 --- a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md +++ /dev/null @@ -1,365 +0,0 @@ ---- -title: Upgrading Rancher Installed with Docker -weight: 1010 -aliases: - - /rancher/v2.5/en/upgrades/single-node-upgrade/ - - /rancher/v2.5/en/upgrades/upgrades/single-node-air-gap-upgrade - - /rancher/v2.5/en/upgrades/upgrades/single-node - - /rancher/v2.5/en/upgrades/upgrades/single-node-upgrade/ - - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/ - - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/ ---- - -The following instructions will guide you through upgrading a Rancher server that was installed with Docker. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. -- **For [air gap installs only,]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Placeholder Review - -During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). - -Here's an **example** of a command with a placeholder: - -``` -docker stop -``` - -In this command, `` is the name of your Rancher container. - -# Get Data for Upgrade Commands - -To obtain the data to replace the placeholders, run: - -``` -docker ps -``` - -Write down or copy this information before starting the upgrade. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | -| `` | `2018-12-19` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -# Upgrade Outline - -During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: - -- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) -- [2. Create a backup tarball](#2-create-a-backup-tarball) -- [3. Pull the new Docker image](#3-pull-the-new-docker-image) -- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) -- [5. Verify the Upgrade](#5-verify-the-upgrade) -- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) - -# 1. Create a copy of the data from your Rancher server container - -1. Using a remote Terminal connection, log into the node running your Rancher server. - -1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data rancher/rancher: - ``` - -# 2. Create a backup tarball - -1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). - - This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. - - - ``` - docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** When you enter this command, a series of commands should run. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - - ``` - [rancher@ip-10-0-0-50 ~]$ ls - rancher-data-backup-v2.1.3-20181219.tar.gz - ``` - -1. Move your backup tarball to a safe location external from your Rancher server. - -# 3. Pull the New Docker Image - -Pull the image of the Rancher version that you want to upgrade to. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker pull rancher/rancher: -``` - -# 4. Start the New Rancher Server Container - -Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. - ->**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
- -If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/proxy/) - -If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -To see the command to use when starting the new Rancher server container, choose from the following options: - -- Docker Upgrade -- Docker Upgrade for Air Gap Installs - -{{% tabs %}} -{{% tab "Docker Upgrade" %}} - -Select which option you had installed Rancher server - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -Placeholder | Description -------------|------------- - `` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. 
- -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - --privileged \ - rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - --privileged \ - rancher/rancher: \ - --no-cacerts -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option D: Let's Encrypt Certificate - -{{% accordion id="option-d" label="Click to expand" %}} - ->**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. - ->**Reminder of the Cert Prerequisites:** -> ->- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). ->- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. 
-`` | The domain address that you had originally started with - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: \ - --acme-domain -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -{{% /accordion %}} - -{{% /tab %}} -{{% tab "Docker Air Gap Upgrade" %}} - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -When starting the new Rancher server container, choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to to upgrade to. - -``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. 
- -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. - - >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.5/en/installation/resources/chart-options/) that you want to upgrade to. - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) -{{% /accordion %}} -{{% /tab %}} -{{% /tabs %}} - -**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. - -# 5. Verify the Upgrade - -Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. - ->**Having network issues in your user clusters following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). - - -# 6. Clean up Your Old Rancher Server Container - -Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. - -# Rolling Back - -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. 
For more information, see [Docker Rollback]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks). diff --git a/content/rancher/v2.5/en/installation/requirements/_index.md b/content/rancher/v2.5/en/installation/requirements/_index.md deleted file mode 100644 index 41eda67564..0000000000 --- a/content/rancher/v2.5/en/installation/requirements/_index.md +++ /dev/null @@ -1,214 +0,0 @@ ---- -title: Installation Requirements -description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup -weight: 1 -aliases: - - /rancher/v2.x/en/installation/requirements/ ---- - -This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. - -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/) which will run your apps and services. - -Make sure the node(s) for the Rancher server fulfill the following requirements: - -- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) - - [RKE Specific Requirements](#rke-specific-requirements) - - [K3s Specific Requirements](#k3s-specific-requirements) - - [RancherD Specific Requirements](#rancherd-specific-requirements) - - [RKE2 Specific Requirements](#rke2-specific-requirements) - - [Installing Docker](#installing-docker) -- [Hardware Requirements](#hardware-requirements) -- [CPU and Memory](#cpu-and-memory) - - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) - - [K3s Kubernetes](#k3s-kubernetes) - - [RancherD](#rancherd) - - [RKE2 Kubernetes](#rke2-kubernetes) - - [Docker](#docker) -- [Ingress](#ingress) - - [Ingress for RKE2](#ingress-for-rke2) - - [Ingress for EKS](#ingress-for-eks) -- [Disks](#disks) -- [Networking Requirements](#networking-requirements) - - [Node IP Addresses](#node-ip-addresses) - - [Port Requirements](#port-requirements) -- [RancherD on SELinux Enforcing CentOS 8 or RHEL 8 Nodes](#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) - -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.5/en/best-practices/deployment-types/) - -The Rancher UI works best in Firefox or Chrome. - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution. - -Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for RancherD or RKE2 Kubernetes installs. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -The `ntp` (Network Time Protocol) package should be installed. 
This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. - -Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19 and 1.20, firewalld must be turned off. - -> If you don't feel comfortable doing so you might check suggestions in the [respective issue](https://github.com/rancher/rancher/issues/28840). Some users were successful [creating a separate firewalld zone with a policy of ACCEPT for the Pod CIDR](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822). - -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.5/en/installation/options/arm64-platform/) - -### RKE Specific Requirements - -For the container runtime, RKE should work with any modern Docker version. - -Note that the following sysctl setting must be applied: - -``` -net.bridge.bridge-nf-call-iptables=1 -``` - -### K3s Specific Requirements - -For the container runtime, K3s should work with any modern version of Docker or containerd. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. - -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. - -### RancherD Specific Requirements - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -At this time, only Linux OSes that leverage systemd are supported. - -To install RancherD on SELinux Enforcing CentOS 8 or RHEL 8 nodes, some [additional steps](#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required. - -Docker is not required for RancherD installs. - -### RKE2 Specific Requirements - -_The RKE2 install is available as of v2.5.6._ - -For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -Docker is not required for RKE2 installs. - -The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/#5-configure-nginx-to-be-a-daemonset) - -### Installing Docker - -Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.5/en/installation/requirements/installing-docker) to install Docker with one command. 
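-
-For a node with internet access, one of those scripts can simply be piped to a shell. The snippet below is only an illustrative sketch; the exact script URL depends on the Docker release you want, so use the linked page to pick the script that matches your OS and Docker version:
-
-```
-# Install Docker 20.10 with Rancher's convenience script (example URL/version)
-curl https://releases.rancher.com/install-docker/20.10.sh | sh
-
-# Confirm the Docker daemon is up
-sudo docker version
-```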
- -Docker is not required for RancherD installs. - -# Hardware Requirements - -The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. - -# CPU and Memory - -Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. - -### RKE and Hosted Kubernetes - -These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. - -These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | ---------- | ------------ | -------| ------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | - -Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. - -### K3s Kubernetes - -These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | -| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | - -Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. - -### RancherD - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -These CPU and memory requirements apply to each instance with RancherD installed. Minimum recommendations are outlined here. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 2 | 5 GB | -| Medium | Up to 15 | Up to 200 | 3 | 9 GB | - -### RKE2 Kubernetes - -These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 2 | 5 GB | -| Medium | Up to 15 | Up to 200 | 3 | 9 GB | - -### Docker - -These CPU and memory requirements apply to a host with a [single-node]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) installation of Rancher. 
- -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 1 | 4 GB | -| Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -# Ingress - -Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. - -The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. - -For RKE, K3s and RancherD installations, you don't have to install the Ingress manually because it is installed by default. - -For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the ingress. - -### Ingress for RKE2 - -Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/#5-configure-nginx-to-be-a-daemonset) - -### Ingress for EKS -For an example of how to deploy an nginx-ingress-controller with a LoadBalancer service, refer to [this section.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/amazon-eks/#5-install-an-ingress) - -# Disks - -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. - -# Networking Requirements - -This section describes the networking requirements for the node(s) where the Rancher server is installed. - -### Node IP Addresses - -Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -### Port Requirements - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.5/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. - -# RancherD on SELinux Enforcing CentOS 8 or RHEL 8 Nodes - -Before installing Rancher on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, you must install `container-selinux` and `iptables`: - -``` -sudo yum install iptables -sudo yum install container-selinux -``` diff --git a/content/rancher/v2.5/en/installation/requirements/ports/_index.md b/content/rancher/v2.5/en/installation/requirements/ports/_index.md deleted file mode 100644 index af7f109a89..0000000000 --- a/content/rancher/v2.5/en/installation/requirements/ports/_index.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: Port Requirements -description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes -weight: 300 -aliases: - - /rancher/v2.x/en/installation/requirements/ports/ ---- - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. 
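-
-While working through the tables below, a quick reachability probe from another machine can confirm whether a given port is actually open. This is only an illustrative check (the hostname is a placeholder), not part of the formal requirements:
-
-```
-# Verify that the Rancher UI/API port is reachable from a downstream or client node
-nc -zv rancher.example.com 443
-```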
- -- [Rancher Nodes](#rancher-nodes) - - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) - - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) - - [Ports for Rancher Server Nodes on RancherD or RKE2](#ports-for-rancher-server-nodes-on-rancherd-or-rke2) - - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) -- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) - - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) - - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) - - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) - - [Ports for Registered Clusters](#ports-for-registered-clusters) -- [Other Port Considerations](#other-port-considerations) - - [Commonly Used Ports](#commonly-used-ports) - - [Local Node Traffic](#local-node-traffic) - - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) - - [Opening SUSE Linux Ports](#opening-suse-linux-ports) - -# Rancher Nodes - -The following table lists the ports that need to be open to and from nodes that are running the Rancher server. - -The port requirements differ based on the Rancher server architecture. - -As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. - -> **Notes:** -> -> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -> - Kubernetes recommends TCP 30000-32767 for node port services. -> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. - -### Ports for Rancher Server Nodes on K3s - -{{% accordion label="Click to expand" %}} - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -The following tables break down the port requirements for inbound and outbound traffic: - -
-**Inbound Rules for Rancher Server Nodes**
-
-| Protocol | Port | Source | Description |
-|-----|-----|----------------|---|
-| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
-| TCP | 443 | server nodes<br/>agent nodes<br/>hosted/registered Kubernetes<br/>any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl |
-| TCP | 6443 | K3s server nodes | Kubernetes API |
-| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. |
-| TCP | 10250 | K3s server and agent nodes | kubelet |
-
-**Outbound Rules for Rancher Nodes**
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RKE - -{{% accordion label="Click to expand" %}} - -Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. - -The following tables break down the port requirements for traffic between the Rancher nodes: - -
-**Rules for traffic between Rancher nodes**
- -| Protocol | Port | Description | -|-----|-----|----------------| -| TCP | 443 | Rancher agents | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| TCP | 6443 | Kubernetes apiserver | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 10250 | Metrics server communication with all nodes | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | - -The following tables break down the port requirements for inbound and outbound traffic: - -
-**Inbound Rules for Rancher Nodes**
-
-| Protocol | Port | Source | Description |
-|-----|-----|----------------|---|
-| TCP | 22 | RKE CLI | SSH provisioning of node by RKE |
-| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API |
-| TCP | 443 | Load Balancer/Reverse Proxy<br/>IPs of all cluster nodes and other API/UI clients | HTTPS traffic to Rancher UI/API |
-| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API |
-
-**Outbound Rules for Rancher Nodes**
- -| Protocol | Port | Destination | Description | -|-----|-----|----------------|---| -| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | -| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | -| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | -| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RancherD or RKE2 - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -{{% accordion label="Click to expand" %}} - -The RancherD (or RKE2) server needs port 6443 and 9345 to be accessible by other nodes in the cluster. - -All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -**Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -
-**Inbound Rules for RancherD or RKE2 Server Nodes**
-
-| Protocol | Port | Source | Description |
-|-----|-----|----------------|---|
-| TCP | 9345 | RancherD/RKE2 agent nodes | Kubernetes API |
-| TCP | 6443 | RancherD/RKE2 agent nodes | Kubernetes API |
-| UDP | 8472 | RancherD/RKE2 server and agent nodes | Required only for Flannel VXLAN |
-| TCP | 10250 | RancherD/RKE2 server and agent nodes | kubelet |
-| TCP | 2379 | RancherD/RKE2 server nodes | etcd client port |
-| TCP | 2380 | RancherD/RKE2 server nodes | etcd peer port |
-| TCP | 30000-32767 | RancherD/RKE2 server and agent nodes | NodePort port range |
-| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
-| HTTPS | 8443 | hosted/registered Kubernetes<br/>any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. |
-
-Typically all outbound traffic is allowed.
-{{% /accordion %}}
-
-### Ports for Rancher Server in Docker
-
-{{% accordion label="Click to expand" %}}
-
-The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic:
-
-**Inbound Rules for Rancher Node**
-
-| Protocol | Port | Source | Description |
-|-----|-----|----------------|---|
-| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used |
-| TCP | 443 | hosted/registered Kubernetes<br/>any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl |
-
-**Outbound Rules for Rancher Node**
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -# Downstream Kubernetes Cluster Nodes - -Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. - -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.5/en/cluster-provisioning/). - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.5/en/cluster-provisioning). - -
-**Port Requirements for the Rancher Management Plane**
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - ->**Tip:** -> ->If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. - -### Ports for Rancher Launched Kubernetes Clusters using Node Pools - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/). - ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -{{% /accordion %}} - -### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -{{% /accordion %}} - -### Ports for Hosted Kubernetes Clusters - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - -### Ports for Registered Clusters - -Note: Registered clusters were called imported clusters before Rancher v2.5. - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [registered clusters]({{}}/rancher/v2.5/en/cluster-provisioning/registered-clusters/). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - - -# Other Port Considerations - -### Commonly Used Ports - -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -{{% include file="/rancher/v2.5/en/installation/requirements/ports/common-ports-table" %}} - ----- - -### Local Node Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. -- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. - -### Rancher AWS EC2 Security Group - -When using the [AWS EC2 node driver]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | - -### Opening SUSE Linux Ports - -SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, - -{{% tabs %}} -{{% tab "SLES 15 / openSUSE Leap 15" %}} -1. SSH into the instance. -1. Start YaST in text mode: -``` -sudo yast2 -``` - -1. Navigate to **Security and Users** > **Firewall** > **Zones:public** > **Ports**. To navigate within the interface, follow the instructions [here](https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha-yast-text.html#sec-yast-cli-navigate). -1. To open the required ports, enter them into the **TCP Ports** and **UDP Ports** fields. In this example, ports 9796 and 10250 are also opened for monitoring. The resulting fields should look similar to the following: -```yaml -TCP Ports -22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767 -UDP Ports -8472, 30000-32767 -``` - -1. When all required ports are enter, select **Accept**. - -{{% /tab %}} -{{% tab "SLES 12 / openSUSE Leap 42" %}} -1. SSH into the instance. -1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: - ``` - FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" - FW_SERVICES_EXT_UDP="8472 30000:32767" - FW_ROUTE=yes - ``` -1. Restart the firewall with the new ports: - ``` - SuSEfirewall2 - ``` -{{% /tab %}} -{{% /tabs %}} - -**Result:** The node has the open ports required to be added to a custom cluster. 
diff --git a/content/rancher/v2.5/en/installation/requirements/ports/common-ports-table/index.md b/content/rancher/v2.5/en/installation/requirements/ports/common-ports-table/index.md deleted file mode 100644 index 4819129eb2..0000000000 --- a/content/rancher/v2.5/en/installation/requirements/ports/common-ports-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Protocol | Port | Description | -|:--------: |:----------------: |---------------------------------------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 179 | Calico BGP Port | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 8443 | Rancher webhook | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | -| TCP | 9443 | Rancher webhook | -| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | Metrics server communication with all nodes API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | diff --git a/content/rancher/v2.5/en/installation/resources/_index.md b/content/rancher/v2.5/en/installation/resources/_index.md deleted file mode 100644 index 5fe6f4dd31..0000000000 --- a/content/rancher/v2.5/en/installation/resources/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Resources -weight: 5 -aliases: - - /rancher/v2.5/en/installation/options - - /rancher/v2.x/en/installation/resources/ ---- - -### Docker Installations - -The [single-node Docker installation]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. - -Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -### Air Gapped Installations - -Follow [these steps]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap) to install the Rancher server in an air gapped environment. - -An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Advanced Options - -When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: - -- [Custom CA Certificate]({{}}/rancher/v2.5/en/installation/options/custom-ca-root-certificate/) -- [API Audit Log]({{}}/rancher/v2.5/en/installation/options/api-audit-log/) -- [TLS Settings]({{}}/rancher/v2.5/en/installation/options/tls-settings/) -- [etcd configuration]({{}}/rancher/v2.5/en/installation/options/etcd/) -- [Local System Charts for Air Gap Installations]({{}}/rancher/v2.5/en/installation/options/local-system-charts) | v2.3.0 | diff --git a/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md deleted file mode 100644 index 4250740a76..0000000000 --- a/content/rancher/v2.5/en/installation/resources/advanced/api-audit-log/_index.md +++ /dev/null @@ -1,570 +0,0 @@ ---- -title: Enabling the API Audit Log to Record System Events -weight: 4 -aliases: - - /rancher/v2.5/en/installation/options/api-audit-log/ - - /rancher/v2.5/en/installation/api-auditing - - /rancher/v2.x/en/installation/resources/advanced/api-audit-log/ ---- - -You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. - -You can enable API Auditing during Rancher installation or upgrade. - -## Enabling API Audit Log - -The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. 
- -- [Docker Install]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -- [Kubernetes Install]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#api-audit-log) - -## API Audit Log Options - -The usage below defines rules about what the audit log should record and what data it should include: - -| Parameter | Description | -| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
<br/>`1` - Log event metadata.<br/>`2` - Log event metadata and request body.<br/>`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.<br/><br/>See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. |
-| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.<br/><br/>Usage Example: `AUDIT_LOG_PATH=/my/custom/path/` |
-| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. |
-| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. |
-| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. |
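-
-As a rough sketch of how these options fit together, they might be passed to a single-node Docker install like this (the host log directory and image tag are placeholders; the Docker and Kubernetes install pages linked earlier in this section remain the authoritative instructions):
-
-```
-docker run -d --restart=unless-stopped \
-  -p 80:80 -p 443:443 \
-  -e AUDIT_LEVEL=2 \
-  -e AUDIT_LOG_MAXAGE=20 \
-  -e AUDIT_LOG_MAXBACKUP=20 \
-  -e AUDIT_LOG_MAXSIZE=100 \
-  -v /var/log/rancher/auditlog:/var/log/auditlog \
-  --privileged \
-  rancher/rancher:v2.5.8
-```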
- -### Audit Log Levels - -The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. - -| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | -| --------------------- | ---------------- | ------------ | ----------------- | ------------- | -| `0` | | | | | -| `1` | ✓ | | | | -| `2` | ✓ | ✓ | | | -| `3` | ✓ | ✓ | ✓ | ✓ | - -## Viewing API Audit Logs - -### Docker Install - -Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. - -### Kubernetes Install - -Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. - -The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. - -#### CLI - -```bash -kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log -``` - -#### Rancher Web GUI - -1. From the context menu, select **Cluster: local > System**. -1. From the main navigation bar, choose **Resources > Workloads.** Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. -1. Pick one of the `rancher` pods and select **⋮ > View Logs**. -1. From the **Logs** drop-down, select `rancher-audit-log`. - -#### Shipping the Audit Log - -You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Logging]({{}}/rancher/v2.5/en/logging) for details. - -## Audit Log Samples - -After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. - -### Metadata Level - -If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. - -```json -{ - "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", - "requestURI": "/v3/schemas", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "GET", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:22:43 +0800" -} -``` - -### Metadata and Request Body Level - -If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. - -The code sample below depicts an API request, with both its metadata header and body. 
- -```json -{ - "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:28:08 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - 
"field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my description", - "volumes": [] - } -} -``` - -### Metadata, Request Body, and Response Body Level - -If you set your `AUDIT_LEVEL` to `3`, Rancher logs: - -- The metadata header and body for every API request. -- The metadata header and body for every API response. - -#### Request - -The code sample below depicts an API request, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "sourceIPs": ["::1"], - "user": { - "name": "user-f4tt2", - "group": ["system:authenticated"] - }, - "verb": "PUT", - "stage": "RequestReceived", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "requestBody": { - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "paused": false, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements", - "requests": {}, - "limits": {} - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container", - "environmentFrom": [], - "capAdd": [], - "capDrop": [], - "livenessProbe": null, - "volumeMounts": [] - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": "ClusterFirst", - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "name": "nginx", - "namespaceId": "default", - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": 
"default:nginx-nodeport", - "type": "publicEndpoint" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "scheduling": { - "node": {} - }, - "description": "my decript", - "volumes": [] - } -} -``` - -#### Response - -The code sample below depicts an API response, with both its metadata header and body. - -```json -{ - "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", - "responseStatus": "200", - "stage": "ResponseComplete", - "stageTimestamp": "2018-07-20 10:33:06 +0800", - "responseBody": { - "actionLinks": { - "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", - "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", - "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" - }, - "annotations": {}, - "baseType": "workload", - "containers": [ - { - "allowPrivilegeEscalation": false, - "image": "nginx", - "imagePullPolicy": "Always", - "initContainer": false, - "name": "nginx", - "ports": [ - { - "containerPort": 80, - "dnsName": "nginx-nodeport", - "kind": "NodePort", - "name": "80tcp01", - "protocol": "TCP", - "sourcePort": 0, - "type": "/v3/project/schemas/containerPort" - } - ], - "privileged": false, - "readOnly": false, - "resources": { - "type": "/v3/project/schemas/resourceRequirements" - }, - "restartCount": 0, - "runAsNonRoot": false, - "stdin": true, - "stdinOnce": false, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "tty": true, - "type": "/v3/project/schemas/container" - } - ], - "created": "2018-07-18T07:34:16Z", - "createdTS": 1531899256000, - "creatorId": null, - "deploymentConfig": { - "maxSurge": 1, - "maxUnavailable": 0, - "minReadySeconds": 0, - "progressDeadlineSeconds": 600, - "revisionHistoryLimit": 10, - "strategy": "RollingUpdate" - }, - "deploymentStatus": { - "availableReplicas": 1, - "conditions": [ - { - "lastTransitionTime": "2018-07-18T07:34:38Z", - "lastTransitionTimeTS": 1531899278000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "Deployment has minimum availability.", - "reason": "MinimumReplicasAvailable", - "status": "True", - "type": "Available" - }, - { - "lastTransitionTime": "2018-07-18T07:34:16Z", - "lastTransitionTimeTS": 1531899256000, - "lastUpdateTime": "2018-07-18T07:34:38Z", - "lastUpdateTimeTS": 1531899278000, - "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", - "reason": "NewReplicaSetAvailable", - "status": "True", - "type": "Progressing" - } - ], - "observedGeneration": 2, - "readyReplicas": 1, - "replicas": 1, - "type": "/v3/project/schemas/deploymentStatus", - "unavailableReplicas": 0, - "updatedReplicas": 1 - }, - "dnsPolicy": 
"ClusterFirst", - "hostIPC": false, - "hostNetwork": false, - "hostPID": false, - "id": "deployment:default:nginx", - "labels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "links": { - "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", - "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", - "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" - }, - "name": "nginx", - "namespaceId": "default", - "paused": false, - "projectId": "c-bcz5t:p-fdr4s", - "publicEndpoints": [ - { - "addresses": ["10.64.3.58"], - "allNodes": true, - "ingressId": null, - "nodeId": null, - "podId": null, - "port": 30917, - "protocol": "TCP", - "serviceId": "default:nginx-nodeport" - } - ], - "restartPolicy": "Always", - "scale": 1, - "schedulerName": "default-scheduler", - "selector": { - "matchLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - }, - "type": "/v3/project/schemas/labelSelector" - }, - "state": "active", - "terminationGracePeriodSeconds": 30, - "transitioning": "no", - "transitioningMessage": "", - "type": "deployment", - "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", - "workloadAnnotations": { - "deployment.kubernetes.io/revision": "1", - "field.cattle.io/creatorId": "user-f4tt2" - }, - "workloadLabels": { - "workload.user.cattle.io/workloadselector": "deployment-default-nginx" - } - } -} -``` diff --git a/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md deleted file mode 100644 index c18445bd40..0000000000 --- a/content/rancher/v2.5/en/installation/resources/advanced/arm64-platform/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Running on ARM64 (Experimental)" -weight: 3 -aliases: - - /rancher/v2.5/en/installation/options/arm64-platform - - /rancher/v2.x/en/installation/resources/advanced/arm64-platform/ ---- - -> **Important:** -> -> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. - -The following options are available when using an ARM64 platform: - -- Running Rancher on ARM64 based node(s) - - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) link: - - ``` - # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. 
- docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher:vX.Y.Z - ``` -> **Note:** To check if your specific released version is compatible with the ARM64 architecture, you may navigate to your -> version's release notes in the following two ways: -> -> - Manually find your version using https://github.com/rancher/rancher/releases. -> - Go directly to your version using the tag and the specific version number. If you plan to use v2.5.8, for example, you may -> navigate to https://github.com/rancher/rancher/releases/tag/v2.5.8. - -- Create custom cluster and adding ARM64 based node(s) - - Kubernetes cluster version must be 1.12 or higher - - CNI Network Provider must be [Flannel]({{}}/rancher/v2.5/en/faq/networking/cni-providers/#flannel) -- Importing clusters that contain ARM64 based nodes - - Kubernetes cluster version must be 1.12 or higher - -Please see [Cluster Options]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/) how to configure the cluster options. - -The following features are not tested: - -- Monitoring, alerts, notifiers, pipelines and logging -- Launching apps from the catalog diff --git a/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md deleted file mode 100644 index 2ff27022f2..0000000000 --- a/content/rancher/v2.5/en/installation/resources/advanced/firewall/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Opening Ports with firewalld -weight: 1 -aliases: - - /rancher/v2.x/en/installation/resources/advanced/firewall/ ---- - -> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. - -Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. - -For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: - -``` -Chain INPUT (policy ACCEPT) -target prot opt source destination -ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED -ACCEPT icmp -- anywhere anywhere -ACCEPT all -- anywhere anywhere -ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain FORWARD (policy ACCEPT) -target prot opt source destination -REJECT all -- anywhere anywhere reject-with icmp-host-prohibited - -Chain OUTPUT (policy ACCEPT) -target prot opt source destination -``` - -You can check the default firewall rules with this command: - -``` -sudo iptables --list -``` - -This section describes how to use `firewalld` to apply the [firewall port rules]({{}}/rancher/v2.5/en/installation/requirements/ports) for nodes in a high-availability Rancher server cluster. - -# Prerequisite - -Install v7.x or later ofv`firewalld`: - -``` -yum install firewalld -systemctl start firewalld -systemctl enable firewalld -``` - -# Applying Firewall Port Rules - -In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. 
If your Rancher server nodes have all three roles, run the following commands on each node: - -``` -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` -If your Rancher server nodes have separate roles, use the following commands based on the role of the node: - -``` -# For etcd nodes, run the following commands: -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=2379/tcp -firewall-cmd --permanent --add-port=2380/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp - -# For control plane nodes, run the following commands: -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=6443/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp - -# For worker nodes, run the following commands: -firewall-cmd --permanent --add-port=22/tcp -firewall-cmd --permanent --add-port=80/tcp -firewall-cmd --permanent --add-port=443/tcp -firewall-cmd --permanent --add-port=2376/tcp -firewall-cmd --permanent --add-port=8472/udp -firewall-cmd --permanent --add-port=9099/tcp -firewall-cmd --permanent --add-port=10250/tcp -firewall-cmd --permanent --add-port=10254/tcp -firewall-cmd --permanent --add-port=30000-32767/tcp -firewall-cmd --permanent --add-port=30000-32767/udp -``` - -After the `firewall-cmd` commands have been run on a node, use the following command to enable the firewall rules: - -``` -firewall-cmd --reload -``` - -**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. 
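To confirm that the rules were applied on a node, you can list the open ports with `firewall-cmd` after the reload. This is a minimal verification sketch; the active zone depends on your distribution's defaults:

```
# List every port opened in the default zone
firewall-cmd --list-ports

# Query a single rule, for example the Kubernetes API server port
firewall-cmd --query-port=6443/tcp
```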
diff --git a/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md b/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md deleted file mode 100644 index aba571407b..0000000000 --- a/content/rancher/v2.5/en/installation/resources/advanced/single-node-install-external-lb/_index.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer -weight: 252 -aliases: - - /rancher/v2.5/en/installation/single-node/single-node-install-external-lb/ - - /rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb - - /rancher/v2.5/en/installation/options/single-node-install-external-lb - - /rancher/v2.5/en/installation/single-node-install-external-lb - - /rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/ ---- - -For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. - -A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. - -This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. - -> **Want to skip the external load balancer?** -> See [Docker Installation]({{}}/rancher/v2.5/en/installation/single-node) instead. - -## Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.5/en/installation/requirements/) - -## Installation Outline - - - -- [1. Provision Linux Host](#1-provision-linux-host) -- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) -- [3. Configure Load Balancer](#3-configure-load-balancer) - - - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.5/en/installation/requirements) to launch your Rancher Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to...** -> -> - Complete an Air Gap Installation? -> - Record all transactions with the Rancher API? -> -> See [Advanced Options](#advanced-options) below before continuing. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} -If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. - -> **Prerequisites:** -> Create a self-signed certificate. -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Self-Signed Cert:** - -1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. 
- - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} -If your cluster is public facing, it's best to use a certificate signed by a recognized CA. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Cert Signed by a Recognized CA:** - -If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. - -1. Enter the following command. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` - - {{% /accordion %}} - -## 3. Configure Load Balancer - -When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. - -The load balancer or proxy has to be configured to support the following: - -- **WebSocket** connections -- **SPDY** / **HTTP/2** protocols -- Passing / setting the following headers: - - | Header | Value | Description | - |--------|-------|-------------| - | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. - | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. - | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. - | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. -### Example NGINX configuration - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server rancher-server:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` - -
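Before relying on the load balancer, it is worth validating the rendered configuration and reloading NGINX. A minimal sketch, assuming NGINX runs as a systemd service on the load balancer host:

```
# Check the configuration files for syntax errors
sudo nginx -t

# Apply the new configuration without dropping existing connections
sudo systemctl reload nginx
```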
- -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.5/en/installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.5/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.5/en/installation/api-auditing) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.5/en/installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - -``` -upstream rancher { - server rancher-server:80; -} - -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 443 ssl http2; - server_name rancher.yourdomain.com; - ssl_certificate /etc/your_certificate_directory/fullchain.pem; - ssl_certificate_key /etc/your_certificate_directory/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } -} - -server { - listen 80; - server_name rancher.yourdomain.com; - return 301 https://$server_name$request_uri; -} -``` - -
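As a quick sanity check once the container and load balancer are up, a couple of `curl` requests can confirm the HTTP-to-HTTPS redirect and TLS termination. This is only an illustrative sketch; replace `rancher.yourdomain.com` with your FQDN, and note that the `/ping` endpoint is assumed to behave as in a standard Rancher install:

```
# Expect a 301 redirect from HTTP to HTTPS
curl -sI http://rancher.yourdomain.com | head -n 1

# Expect "pong" from Rancher through the load balancer
curl -sk https://rancher.yourdomain.com/ping
```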
- diff --git a/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md b/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md deleted file mode 100644 index 618e8e36c3..0000000000 --- a/content/rancher/v2.5/en/installation/resources/choosing-version/_index.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Choosing a Rancher Version -weight: 1 -aliases: - - /rancher/v2.5/en/installation/options/server-tags - - /rancher/v2.x/en/installation/resources/choosing-version/ ---- - -This section describes how to choose a Rancher version. - -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.5/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** - -The Helm chart version also applies to RancherD installs because RancherD installs the Rancher Helm chart on a Kubernetes cluster. - -> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. - -{{% tabs %}} -{{% tab "Helm Charts" %}} - -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -Refer to the [Helm version requirements]({{}}/rancher/v2.5/en/installation/options/helm-version) to choose a version of Helm to install Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -| Type | Command to Add the Repo | Description of the Repo | -| -------------- | ------------ | ----------------- | -| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | -| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | -| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | - -
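For example, a production environment would typically add the stable repository and refresh the local chart cache before searching for or installing a chart — a minimal sketch:

```
# Add the repository recommended for production environments
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable

# Refresh the locally cached index so newly published chart versions are visible
helm repo update
```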
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). - -> **Note:** All charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. - -### Helm Chart Versions - -Rancher Helm chart versions match the Rancher version (i.e., `appVersion`). Once you've added the repo, you can search it to show available versions with the following command:
-    `helm search repo --versions` - -If you have several repos, you can specify the repo name, e.g. `helm search repo rancher-stable/rancher --versions`
-For more information, see https://helm.sh/docs/helm/helm_search_repo/ - -To fetch a specific version of your chosen repo, define the `--version` parameter as in the following example:
-    `helm fetch rancher-stable/rancher --version=2.4.8` - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - -> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ```plain - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ```plain - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. - - ```plain - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. -{{% /tab %}} -{{% tab "Docker Images" %}} -When performing [Docker installs]({{}}/rancher/v2.5/en/installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -| Tag | Description | -| -------------------------- | ------ | -| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | -| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | - -> **Notes:** -> -> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. -> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. 
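To illustrate the note above about floating tags such as `latest` and `stable`, explicitly pulling the tag before redeploying ensures the host does not reuse a stale cached image (the tag below is only an example):

```
# Fetch the image currently published under the floating tag
docker pull rancher/rancher:stable

# Confirm which digest the tag now resolves to on this host
docker images --digests rancher/rancher
```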
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md b/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md deleted file mode 100644 index 2274e2a9a4..0000000000 --- a/content/rancher/v2.5/en/installation/resources/custom-ca-root-certificate/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: About Custom CA Root Certificates -weight: 1 -aliases: - - /rancher/v2.5/en/installation/options/custom-ca-root-certificate/ - - /rancher/v2.5/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate - - /rancher/v2.x/en/installation/resources/custom-ca-root-certificate/ ---- - -If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). - -Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. - -To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. - -Examples of services that Rancher can access: - -- Catalogs -- Authentication providers -- Accessing hosting/cloud API when using Node Drivers - -## Installing with the custom CA Certificate - -For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: - -- [Docker install Custom CA certificate options]({{}}/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -- [Kubernetes install options for Additional Trusted CAs]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/chart-options/#additional-trusted-cas) - diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md deleted file mode 100644 index b06257e74e..0000000000 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/_index.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: Enabling Experimental Features -weight: 17 -aliases: - - /rancher/v2.5/en/installation/options/feature-flags/ - - /rancher/v2.5/en/admin-settings/feature-flags/ - - /rancher/v2.x/en/installation/resources/feature-flags/ ---- -Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type]({{}}/rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. - -The features can be enabled in three ways: - -- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. 
-- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) by going to the **Settings** page. -- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. - -Each feature has two values: - -- A default value, which can be configured with a flag or environment variable from the command line -- A set value, which can be configured with the Rancher API or UI - -If no value has been set, Rancher uses the default value. - -Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. - -For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. - -> **Note:** There are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. - -The following is a list of the feature flags available in Rancher: - -- `fleet`: Rancher comes with [Fleet]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet) preinstalled in v2.5+. -- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules]({{}}/rancher/v2.5/en/installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. -- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.]({{}}/rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. - -The below table shows the availability and default value for feature flags in Rancher: - -| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | -| ----------------------------- | ------------- | ------------ | --------------- |---| -| `dashboard` | `true` | Experimental | v2.4.0 | x | -| `dashboard` | `true` | GA* and no longer a feature flag | v2.5.0 | x | -| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | -| `istio-virtual-service-ui` | `true` | GA* | v2.3.2 | | -| `proxy` | `false` | Experimental | v2.4.0 | | -| `proxy` | N/A | Discontinued | v2.5.0 | | -| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | -| `fleet` | `true` | GA* | v2.5.0 | | - -\* Generally Available. This feature is included in Rancher and it is not experimental. - -# Enabling Features when Starting Rancher - -When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. - -### Enabling Features for Kubernetes Installs - -> **Note:** Values set from the Rancher API will override the value passed in through the command line. - -When installing Rancher with a Helm chart, use the `--set` option. 
In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -helm install rancher-latest/rancher \ - --name rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set 'extraEnv[0].name=CATTLE_FEATURES' - --set 'extraEnv[0].value==true,=true' -``` - -Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -### Rendering the Helm Chart for Air Gap Installations - -For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.]({{}}/rancher/v2.5/en/installation/other-installation-methods/air-gap/install-rancher) - -Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. - -The Helm 3 command is as follows: - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' - --set 'extraEnv[0].value==true,=true' -``` -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' - --set 'extraEnv[0].value==true,=true' -``` -{{% /tab %}} -{{% /tabs %}} - -The Helm 2 command is as follows: - -``` -helm template rancher ./rancher-.tgz --output-dir . \ - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts - --set 'extraEnv[0].name=CATTLE_FEATURES' - --set 'extraEnv[0].value==true,=true' -``` - -### Enabling Features for Docker Installs - -When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: - -``` -docker run -d -p 80:80 -p 443:443 \ - --restart=unless-stopped \ - rancher/rancher:rancher-latest \ - --features==true,=true -``` - - -# Enabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher UI - -1. Go to the **Global** view and click **Settings.** -1. Click the **Feature Flags** tab. You will see a list of experimental features. -1. 
To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** - -**Result:** The feature is disabled. - -# Enabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **True.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is enabled. - -### Disabling Features with the Rancher API - -1. Go to `/v3/features`. -1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. -1. In the upper left corner of the screen, under **Operations,** click **Edit.** -1. In the **Value** drop-down menu, click **False.** -1. Click **Show Request.** -1. Click **Send Request.** -1. Click **Close.** - -**Result:** The feature is disabled. diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md deleted file mode 100644 index d6568c937b..0000000000 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Allow Unsupported Storage Drivers -weight: 1 -aliases: - - /rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers/ - - /rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/ ---- - -This feature allows you to use types for storage providers and provisioners that are not enabled by default. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.5/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Description ----|---|--- - `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. - -### Types for Persistent Volume Plugins that are Enabled by Default -Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|---------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` -Network File System | `nfs` -hostPath | `host-path` - -### Types for StorageClass that are Enabled by Default -Below is a list of storage types for a StorageClass that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: - -Name | Plugin ---------|-------- -Amazon EBS Disk | `aws-ebs` -AzureFile | `azure-file` -AzureDisk | `azure-disk` -Google Persistent Disk | `gce-pd` -Longhorn | `flex-volume-longhorn` -VMware vSphere Volume | `vsphere-volume` -Local | `local` \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md b/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md deleted file mode 100644 index bce3973f02..0000000000 --- a/content/rancher/v2.5/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: UI for Istio Virtual Services and Destination Rules -weight: 2 -aliases: - - /rancher/v2.5/en/installation/options/feature-flags/istio-virtual-service-ui - - /rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/ ---- - -This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. - -> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster]({{}}/rancher/v2.5/en/istio/setup) in order to use the feature. - -To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.]({{}}/rancher/v2.5/en/installation/options/feature-flags/) - -Environment Variable Key | Default Value | Status | Available as of ----|---|---|--- -`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 -`istio-virtual-service-ui` | `true` | GA | v2.3.2 - -# About this Feature - -A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. - -When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. - -The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** - -- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) -- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) - -To see these tabs, - -1. Go to the project view in Rancher and click **Resources > Istio.** -1. You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. 
Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/helm-version/_index.md b/content/rancher/v2.5/en/installation/resources/helm-version/_index.md deleted file mode 100644 index 2c890c7e10..0000000000 --- a/content/rancher/v2.5/en/installation/resources/helm-version/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Helm Version Requirements -weight: 3 -aliases: - - /rancher/v2.5/en/installation/options/helm-version - - /rancher/v2.5/en/installation/options/helm2 - - /rancher/v2.5/en/installation/options/helm2/helm-init - - /rancher/v2.5/en/installation/options/helm2/helm-rancher - - /rancher/v2.x/en/installation/resources/helm-version/ ---- - -This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. - -> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.5/en/installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. - -- Helm v3.2.x or higher is required to install or upgrade Rancher v2.5. -- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. -- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. -- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md deleted file mode 100644 index 9cd29bcab1..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-RKE/_index.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: Setting up a High-availability RKE Kubernetes Cluster -shortTitle: Set up RKE Kubernetes -weight: 3 -aliases: - - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke - - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke/ ---- - - -This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. - -> As of Rancher v2.5, Rancher can run on any Kubernetes cluster, included hosted Kubernetes solutions such as Amazon EKS. The below instructions represent only one possible way to install Kubernetes. - -For systems without direct internet access, refer to [Air Gap: Kubernetes install.]({{}}/rancher/v2.5/en/installation/air-gap-high-availability/) - -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. 
-> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Installing Kubernetes - -### Required CLI Tools - -Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. - -Also install [RKE,]({{}}/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. - -### 1. Create the cluster configuration file - -In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. - -Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. - -If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. - -RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. - -```yaml -nodes: - - address: 165.227.114.63 - internal_address: 172.16.22.12 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.116.167 - internal_address: 172.16.32.37 - user: ubuntu - role: [controlplane, worker, etcd] - - address: 165.227.127.226 - internal_address: 172.16.42.73 - user: ubuntu - role: [controlplane, worker, etcd] - -services: - etcd: - snapshot: true - creation: 6h - retention: 24h - -# Required for external TLS termination with -# ingress-nginx v0.22+ -ingress: - provider: nginx - options: - use-forwarded-headers: "true" -``` - -
Common RKE Nodes Options
- -| Option | Required | Description | -| ------------------ | -------- | -------------------------------------------------------------------------------------- | -| `address` | yes | The public DNS or IP address | -| `user` | yes | A user that can run docker commands | -| `role` | yes | List of Kubernetes roles assigned to the node | -| `internal_address` | no | The private DNS or IP address for internal cluster traffic | -| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | - -> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. -> -> Please see the [RKE Documentation]({{}}/rke/latest/en/config-options/) for the full list of options and capabilities. -> -> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide]({{}}/rancher/v2.5/en/installation/options/etcd/). - -### 2. Run RKE - -``` -rke up --config ./rancher-cluster.yml -``` - -When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. - -### 3. Test Your Cluster - -This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. - -Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. - -When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_cluster.yml`. This file has the credentials for `kubectl` and `helm`. - -> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_cluster.yml`: - -``` -export KUBECONFIG=$(pwd)/kube_config_cluster.yml -``` - -Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: - -``` -kubectl get nodes - -NAME STATUS ROLES AGE VERSION -165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 -165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 -``` - -### 4. Check the Health of Your Cluster Pods - -Check that all the required pods and containers are healthy are ready to continue. - -- Pods are in `Running` or `Completed` state. -- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` -- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
- -``` -kubectl get pods --all-namespaces - -NAMESPACE NAME READY STATUS RESTARTS AGE -ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s -ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s -kube-system canal-jp4hz 3/3 Running 0 30s -kube-system canal-z2hg8 3/3 Running 0 30s -kube-system canal-z6kpw 3/3 Running 0 30s -kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s -kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s -kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s -kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s -kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s -kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s -kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s -``` - -This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. - -### 5. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.5/en/installation/options/troubleshooting/) page. - - -### [Next: Install Rancher]({{}}/rancher/v2.5/en/installation/k8s-install/helm-rancher/) - diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md deleted file mode 100644 index 788a80c1e4..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-rke2/_index.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -title: Setting up a High-availability RKE2 Kubernetes Cluster for Rancher -shortTitle: Set up RKE2 for Rancher -weight: 2 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2 - - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/ ---- -_Tested on v2.5.6_ - -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) - -# Prerequisites - -These instructions assume you have set up three nodes, a load balancer, and a DNS record, as described in [this section.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) - -Note that in order for RKE2 to work correctly with the load balancer, you need to set up two listeners: one for the supervisor on port 9345, and one for the Kubernetes API on port 6443. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the RKE2 version, use the INSTALL_RKE2_VERSION environment variable when running the RKE2 installation script. -# Installing Kubernetes - -### 1. Install Kubernetes and Set up the RKE2 Server - -RKE2 server runs with embedded etcd so you will not need to set up an external datastore to run in HA mode. - -On the first node, you should set up the configuration file with your own pre-shared secret as the token. The token argument can be set on startup. - -If you do not specify a pre-shared secret, RKE2 will generate one and place it at /var/lib/rancher/rke2/server/node-token. - -To avoid certificate errors with the fixed registration address, you should launch the server with the tls-san parameter set. This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access via both the IP and the hostname. - -First, you must create the directory where the RKE2 config file is going to be placed: - -``` -mkdir -p /etc/rancher/rke2/ -``` - -Next, create the RKE2 config file at `/etc/rancher/rke2/config.yaml` using the following example: - -``` -token: my-shared-secret -tls-san: - - my-kubernetes-domain.com - - another-kubernetes-domain.com -``` -After that, you need to run the install command and enable and start rke2: - -``` -curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.20 sh - -systemctl enable rke2-server.service -systemctl start rke2-server.service -``` -1. 
To join the rest of the nodes, you need to configure each additional node with the same shared token or the one generated automatically. Here is an example of the configuration file: - - token: my-shared-secret - server: https://:9345 - tls-san: - - my-kubernetes-domain.com - - another-kubernetes-domain.com -After that, you need to run the installer and enable, then start, rke2: - - curl -sfL https://get.rke2.io | sh - - systemctl enable rke2-server.service - systemctl start rke2-server.service - - -1. Repeat the same command on your third RKE2 server node. - -### 2. Confirm that RKE2 is Running - -Once you've launched the rke2 server process on all server nodes, ensure that the cluster has come up properly with - -``` -/var/lib/rancher/rke2/bin/kubectl \ - --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes -You should see your server nodes in the Ready state. -``` - -Then test the health of the cluster pods: -``` -/var/lib/rancher/rke2/bin/kubectl \ - --kubeconfig /etc/rancher/rke2/rke2.yaml get pods --all-namespaces -``` - -**Result:** You have successfully set up a RKE2 Kubernetes cluster. - -### 3. Save and Start Using the kubeconfig File - -When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `rke2.yaml`: - -```yml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your RKE2 cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### 4. Check the Health of Your Cluster Pods - -Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. 
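For example, a quick check from your workstation — assuming the kubeconfig was copied to the default `~/.kube/config` location and its `server` field points at the load balancer — might look like this sketch:

```
# The API endpoint should resolve to the load balancer on port 6443
kubectl cluster-info

# All three RKE2 server nodes should be listed as Ready
kubectl get nodes -o wide
```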
- -Check that all the required pods and containers are healthy are ready to continue: - -``` - /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system etcd-ip-172-31-18-145 1/1 Running 0 4m37s -kube-system etcd-ip-172-31-25-73 1/1 Running 0 20m -kube-system etcd-ip-172-31-31-210 1/1 Running 0 9m12s -kube-system helm-install-rke2-canal-th9k9 0/1 Completed 0 21m -kube-system helm-install-rke2-coredns-6njr6 0/1 Completed 0 21m -kube-system helm-install-rke2-ingress-nginx-vztsd 0/1 Completed 0 21m -kube-system helm-install-rke2-kube-proxy-6std5 0/1 Completed 0 21m -kube-system helm-install-rke2-metrics-server-9sl7m 0/1 Completed 0 21m -kube-system kube-apiserver-ip-172-31-18-145 1/1 Running 0 4m22s -kube-system kube-apiserver-ip-172-31-25-73 1/1 Running 0 20m -kube-system kube-apiserver-ip-172-31-31-210 1/1 Running 0 9m8s -kube-system kube-controller-manager-ip-172-31-18-145 1/1 Running 0 4m8s -kube-system kube-controller-manager-ip-172-31-25-73 1/1 Running 0 21m -kube-system kube-controller-manager-ip-172-31-31-210 1/1 Running 0 8m55s -kube-system kube-proxy-57twm 1/1 Running 0 10m -kube-system kube-proxy-f7pc6 1/1 Running 0 5m24s -kube-system kube-proxy-rj4t5 1/1 Running 0 21m -kube-system kube-scheduler-ip-172-31-18-145 1/1 Running 0 4m15s -kube-system kube-scheduler-ip-172-31-25-73 1/1 Running 0 21m -kube-system kube-scheduler-ip-172-31-31-210 1/1 Running 0 8m48s -kube-system rke2-canal-4x972 2/2 Running 0 10m -kube-system rke2-canal-flh8m 2/2 Running 0 5m24s -kube-system rke2-canal-zfhkr 2/2 Running 0 21m -kube-system rke2-coredns-rke2-coredns-6cd96645d6-cmstq 1/1 Running 0 21m -kube-system rke2-ingress-nginx-controller-54946dd48f-6mp76 1/1 Running 0 20m -kube-system rke2-ingress-nginx-default-backend-5795954f8-p92xx 1/1 Running 0 20m -kube-system rke2-metrics-server-5f9b5757dc-k5sgh 1/1 Running 0 20m -``` - -**Result:** You have confirmed that you can access the cluster with `kubectl` and the RKE2 cluster is running successfully. Now the Rancher management server can be installed on the cluster. - -### 5. Configure nginx to be a daemonset - -Currently, RKE2 deploys nginx-ingress as a deployment, and that can impact the Rancher deployment so that you cannot use all servers to proxy requests to the Rancher pods. - -To rectify that, place the following file in /var/lib/rancher/rke2/server/manifests on any of the server nodes: - -```yaml -apiVersion: helm.cattle.io/v1 -kind: HelmChartConfig -metadata: - name: rke2-ingress-nginx - namespace: kube-system -spec: - valuesContent: |- - controller: - kind: DaemonSet - daemonset: - useHostPort: true -``` diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md deleted file mode 100644 index 9c07f5478c..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Setting up a High-availability K3s Kubernetes Cluster for Rancher -shortTitle: Set up K3s for Rancher -weight: 2 ---- - -This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.]({{}}/rancher/v2.5/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) - -For systems without direct internet access, refer to the air gap installation instructions. 
- -> **Single-node Installation Tip:** -> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. -> -> To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. -> -> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. - -# Prerequisites - -These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. -# Installing Kubernetes - -### 1. Install Kubernetes and Set up the K3s Server - -When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. - -1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. -1. On the Linux node, run this command to start the K3s server and connect it to the external datastore: - ``` - curl -sfL https://get.k3s.io | sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: - ```sh - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ - --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" - ``` - Note: The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. - -1. Repeat the same command on your second K3s server node. - -### 2. Confirm that K3s is Running - -To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: -``` -sudo k3s kubectl get nodes -``` - -Then you should see two nodes with the master role: -``` -ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes -NAME STATUS ROLES AGE VERSION -ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 -ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 -``` - -Then test the health of the cluster pods: -``` -sudo k3s kubectl get pods --all-namespaces -``` - -**Result:** You have successfully set up a K3s Kubernetes cluster. - -### 3. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. 
Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -```yml -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### 4. Check the Health of Your Cluster Pods - -Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. - -Check that all the required pods and containers are healthy are ready to continue: - -``` -ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d -kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d -kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d -``` - -**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md deleted file mode 100644 index 58e4c6ce1d..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/how-ha-works/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: About High-availability Installations -weight: 1 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/ ---- - -We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. - -In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. - -Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. - -The Rancher server data is stored on etcd. 
This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. - -For information on how Rancher works, regardless of the installation method, refer to the [architecture section.]({{}}/rancher/v2.5/en/overview/architecture) - -### Recommended Architecture - -- DNS for Rancher should resolve to a layer 4 load balancer -- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
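Once Rancher has been installed behind such a load balancer, the redirect and TLS termination described above can be spot-checked from any workstation. The commands below are only an illustration; `rancher.example.com` stands in for your own Rancher hostname:

```
# Port 80 should answer with a redirect to HTTPS (handled by the ingress controller)
curl -sI http://rancher.example.com | head -n 1

# Port 443 should complete the TLS handshake and serve Rancher
curl -skI https://rancher.example.com | head -n 1
```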
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
-![High-availability Kubernetes Installation of Rancher]({{}}/img/rancher/ha/rancher2ha.svg) -Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md deleted file mode 100644 index da6f027e45..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. -shortTitle: Infrastructure Tutorials -weight: 5 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ ---- - -To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/) - - -To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md deleted file mode 100644 index 34a1c015d5..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Setting up Nodes in Amazon EC2 -weight: 3 -aliases: - - /rancher/v2.5/en/installation/options/ec2-node - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ ---- - -In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. - -If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. - -If the Rancher server is installed in a single Docker container, you only need one instance. - -### 1. Optional Preparation - -- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers/) -- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.]({{}}/rancher/v2.5/en/installation/requirements/#port-requirements) - -### 2. Provision Instances - -1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. 
Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. -1. In the left panel, click **Instances.** -1. Click **Launch Instance.** -1. In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** -1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. -1. Click **Next: Configure Instance Details.** -1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. -1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. -1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** -1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements]({{}}/rancher/v2.5/en/installation/requirements/#port-requirements) for Rancher nodes. -1. Click **Review and Launch.** -1. Click **Launch.** -1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. -1. Click **Launch Instances.** - - -**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. - -**Note:** If the nodes are being used for an RKE Kubernetes cluster, install Docker on each node in the next step. For a K3s Kubernetes cluster, the nodes are now ready to install K3s. - -### 3. Install Docker and Create User for RKE Kubernetes Cluster Nodes - -1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. -1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** -1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: -``` -sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] -``` -1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: -``` -curl https://releases.rancher.com/install-docker/18.09.sh | sh -``` -1. When you are connected to the instance, run the following command on the instance to create a user: -``` -sudo usermod -aG docker ubuntu -``` -1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. - -> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. - -**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. - -### Next Steps for RKE Kubernetes Cluster Nodes - -If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. 
The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. - -RKE will also need access to the private key to connect to each node. Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md deleted file mode 100644 index 1b9101cbfb..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' -weight: 1 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/ - - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/ ---- - -This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. - -The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -For more information about each installation option, refer to [this page.]({{}}/rancher/v2.5/en/installation) - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. We recommend MySQL. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. - -When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. 
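Those connection details are supplied to the K3s installation script as a datastore endpoint, as shown in the K3s installation steps that follow this infrastructure setup. A rough sketch, where the username, password, hostname, and database name are placeholders for the values you capture while creating the database:

```
curl -sfL https://get.k3s.io | sh -s - server \
  --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name"
```

The same value can alternatively be passed through the `K3S_DATASTORE_ENDPOINT` environment variable.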
- -For an example of one way to set up the MySQL database, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/rds/) for setting up MySQL on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. 
- -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md deleted file mode 100644 index 685f1ba41d..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' -weight: 2 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/ ---- - -This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. 
- -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
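Before moving on, it is worth confirming that the record actually resolves to the load balancer. A quick check with `dig` (the hostname below is a placeholder for the Rancher hostname you chose):

```
dig +short rancher.example.com
```

For an A record you should see the load balancer IP; for a CNAME you should see the load balancer hostname, followed by the addresses it resolves to.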
- -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md deleted file mode 100644 index 0f4d80d55c..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: 'Set up Infrastructure for a High Availability RKE2 Kubernetes Cluster' -weight: 1 -aliases: - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/ ---- - -This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. - -The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a RKE2 Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. - -> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). - -To install the Rancher management server on a high-availability RKE2 cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. - -### 1. Set up Linux Nodes - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.5/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.5/en/installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on all nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE2 tool will deploy an Nginx Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Nginx Ingress controller to listen for traffic destined for the Rancher hostname. The Nginx Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. 
The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.5/en/installation/options/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.5/en/installation/options/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md deleted file mode 100644 index 3f586b3b4f..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ /dev/null @@ -1,184 +0,0 @@ ---- -title: Setting up Amazon ELB Network Load Balancer -weight: 5 -aliases: - - /rancher/v2.5/en/installation/ha/create-nodes-lb/nlb - - /rancher/v2.5/en/installation/k8s-install/create-nodes-lb/nlb - - /rancher/v2.5/en/installation/options/nlb - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/ ---- - -This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. 
- -These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. - -Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. - -# Setting up the Load Balancer - -Configuring an Amazon NLB is a multistage process: - -1. [Create Target Groups](#1-create-target-groups) -2. [Register Targets](#2-register-targets) -3. [Create Your NLB](#3-create-your-nlb) -4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) - -# Requirements - -These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. - -# 1. Create Target Groups - -Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. - -Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. - -1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -1. Click **Create target group** to create the first target group, regarding TCP port 443. - -> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. - -| Option | Setting | -|-------------------|-------------------| -| Target Group Name | `rancher-tcp-443` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `443` | -| VPC | Choose your VPC | - -Health check settings: - -| Option | Setting | -|---------------------|-----------------| -| Protocol | TCP | -| Port | `override`,`80` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. 
- -| Option | Setting | -|-------------------|------------------| -| Target Group Name | `rancher-tcp-80` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `80` | -| VPC | Choose your VPC | - - -Health check settings: - -| Option |Setting | -|---------------------|----------------| -| Protocol | TCP | -| Port | `traffic port` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -# 2. Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -# 3. Create Your NLB - -Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. Then complete each form. - -- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) -- [Step 2: Configure Routing](#step-2-configure-routing) -- [Step 3: Register Targets](#step-3-register-targets) -- [Step 4: Review](#step-4-review) - -### Step 1: Configure Load Balancer - -Set the following fields in the form: - -- **Name:** `rancher` -- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. -- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. -- **Availability Zones:** Select Your **VPC** and **Availability Zones**. - -### Step 2: Configure Routing - -1. From the **Target Group** drop-down, choose **Existing target group**. -1. From the **Name** drop-down, choose `rancher-tcp-443`. -1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -### Step 3: Register Targets - -Since you registered your targets earlier, all you have to do is click **Next: Review**. - -### Step 4: Review - -Look over the load balancer details and click **Create** when you're satisfied. - -After AWS creates the NLB, click **Close**. - -# 4. Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to...** - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. - -# Health Check Paths for NGINX Ingress and Traefik Ingresses - -K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. - -For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. - -- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. -- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. - -To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md deleted file mode 100644 index 01b3560b6d..0000000000 --- a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Setting up a MySQL Database in Amazon RDS -weight: 4 -aliases: - - /rancher/v2.5/en/installation/options/rds - - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/ ---- -This tutorial describes how to set up a MySQL database in Amazon's RDS. - -This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. - -1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. In the left panel, click **Databases.** -1. Click **Create database.** -1. In the **Engine type** section, click **MySQL.** -1. In the **Version** section, choose **MySQL 5.7.22.** -1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. -1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. -1. Click **Create database.** - -You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. - -To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. - -- **Username:** Use the admin username. -- **Password:** Use the admin password. -- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. -- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. -- **Database name:** Confirm the name by going to the **Configuration** tab. 
The name is listed under **DB name.** - -This information will be used to connect to the database in the following format: - -``` -mysql://username:password@tcp(hostname:3306)/database-name -``` - -For more information on configuring the datastore for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) diff --git a/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md b/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md deleted file mode 100644 index ebaad07f98..0000000000 --- a/content/rancher/v2.5/en/installation/resources/local-system-charts/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Setting up Local System Charts for Air Gapped Installations -weight: 120 -aliases: - - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md - - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md - - /rancher/v2.5/en/installation/options/local-system-charts - - /rancher/v2.x/en/installation/resources/local-system-charts/ - - /rancher/v2.x/en/installation/options/local-system-charts/ ---- - -The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. - -In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag. - -# Using Local System Charts - -A local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. - -Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation]({{}}/rancher/v2.5/en/installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation]({{}}/rancher/v2.5/en/installation/air-gap-high-availability/install-rancher/) instructions. - diff --git a/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md b/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md deleted file mode 100644 index ab2b18cec4..0000000000 --- a/content/rancher/v2.5/en/installation/resources/tls-secrets/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Adding TLS Secrets -weight: 2 -aliases: - - /rancher/v2.5/en/installation/resources/encryption/tls-secrets/ - - /rancher/v2.x/en/installation/resources/tls-secrets/ ---- - -Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. - -Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. - -For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. -This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. - -Use `kubectl` with the `tls` secret type to create the secrets. 
- -``` -kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. - -# Using a Private CA Signed Certificate - -If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. - -Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. - -``` -kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem=./cacerts.pem -``` - -> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. - -# Updating a Private CA Certificate - -Follow the steps on [this page]({{}}/rancher/v2.5/en/installation/resources/update-rancher-cert) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/update-rancher-cert/_index.md b/content/rancher/v2.5/en/installation/resources/update-rancher-cert/_index.md deleted file mode 100644 index 75ea85f862..0000000000 --- a/content/rancher/v2.5/en/installation/resources/update-rancher-cert/_index.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -title: Updating the Rancher Certificate -weight: 10 -aliases: - - /rancher/v2.x/en/installation/resources/update-ca-cert/ ---- - -# Updating a Private CA Certificate - -Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) or to switch from the default self-signed certificate to a custom certificate. - -A summary of the steps is as follows: - -1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. -2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). -3. Update the Rancher installation using the Helm CLI. -4. Reconfigure the Rancher agents to trust the new CA certificate. -5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher. - -The details of these instructions are below. - -## 1. Create/update the certificate secret resource - -First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. 
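For example, if the certificate authority delivered the server certificate, the intermediate chain, and the private key as separate files, the concatenation could look like the following (the file names are illustrative only):

```
cat server.crt intermediate.crt > tls.crt
cp server.key tls.key
```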
- -If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -Alternatively, to update an existing certificate secret: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 2. Create/update the CA certificate secret resource - -If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca secret` in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). - -To create the initial secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem -``` - -To update an existing `tls-ca` secret: - -``` -$ kubectl -n cattle-system create secret generic tls-ca \ - --from-file=cacerts.pem \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 3. Reconfigure the Rancher deployment - -> Before proceeding, generate an API token in the Rancher UI (User > API & Keys) and save the Bearer Token which you might need in step 4. - -This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). - -It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. - -To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: - -``` -$ helm get values rancher -n cattle-system -``` - -Also get the version string of the currently deployed Rancher chart: - -``` -$ helm ls -A -``` - -Upgrade the Helm application instance using the original configuration values and making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade. - -If the certificate was signed by a private CA, add the `set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates. - -``` -helm upgrade rancher rancher-stable/rancher \ - --namespace cattle-system \ - --version \ - --set hostname=rancher.my.org \ - --set ingress.tls.source=secret \ - --set ... -``` - -When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written in the `tls-ca` secret earlier. - -## 4. Reconfigure Rancher agents to trust the private CA - -This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true: - -- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`) -- The root CA certificate for the new custom certificate has changed - -### Why is this step required? - -When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. 
Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on the Rancher server side, the environment variable `CATTLE_CA_CHECKSUM` must be updated accordingly. - -### Which method should I choose? - -Method 1 is the easiest but requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (Step 3). - -If the clusters have lost their connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.5/en/cluster-admin/cluster-access/ace/) enabled, use method 2. - -Method 3 can be used as a fallback if methods 1 and 2 are not feasible. - -### Method 1: Kubectl command - -For each cluster under Rancher management (except the `local` Rancher management cluster), run the following command using the kubeconfig file of the Rancher management cluster (RKE or K3s). - -``` -kubectl patch clusters.management.cattle.io -p '{"status":{"agentImage":"dummy"}}' --type merge -``` - -This command causes all agent Kubernetes resources to be reconfigured with the checksum of the new certificate. - - -### Method 2: Manually update checksum - -Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: - -``` -$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp -$ sha256sum cacert.tmp | awk '{print $1}' -``` - -Using a kubeconfig for each downstream cluster, update the environment variable for the two agent deployments. - -``` -$ kubectl edit -n cattle-system ds/cattle-node-agent -$ kubectl edit -n cattle-system deployment/cattle-cluster-agent -``` - -### Method 3: Recreate Rancher agents - -With this method, you recreate the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. - -First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 - -Then, connect to a controlplane node of the downstream cluster via SSH, create a kubeconfig and apply the definitions: -https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b - - -## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher - -Select 'Force Update' for the clusters within the [Continuous Delivery]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet/#accessing-fleet-in-the-rancher-ui) view under Cluster Explorer in the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. - -### Why is this step required? - -Fleet agents in Rancher-managed clusters store a kubeconfig, used to connect to the Rancher-proxied kube-api, in the `fleet-agent` secret of the `fleet-system` namespace. The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When the Rancher CA changes, this block needs to be updated for the fleet-agent to connect to Rancher successfully. - -# Updating from a Private CA Certificate to a Common Certificate - ->It is possible to perform the opposite procedure as shown above: you may change from a private certificate to a common, or non-private, certificate. The steps involved are outlined below. 
- -## 1. Create/update the certificate secret resource - -First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. - -If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key -``` - -Alternatively, to update an existing certificate secret: - -``` -$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ - --cert=tls.crt \ - --key=tls.key \ - --dry-run --save-config -o yaml | kubectl apply -f - -``` - -## 2. Delete the CA certificate secret resource - -You will delete the `tls-ca secret` in the `cattle-system` namespace as it is no longer needed. You may also optionally save a copy of the `tls-ca secret` if desired. - -To save the existing secret: - -``` -kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml -``` - -To delete the existing `tls-ca` secret: - -``` -kubectl -n cattle-system delete secret tls-ca -``` - -## 3. Reconfigure the Rancher deployment - -> Before proceeding, [generate an API token in the Rancher UI]({{}}/rancher/v2.5/en/user-settings/api-keys/#creating-an-api-key) (User > API & Keys). - -This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). - -It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. - -To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: - -``` -$ helm get values rancher -n cattle-system -``` - -Also get the version string of the currently deployed Rancher chart: - -``` -$ helm ls -A -``` - -Upgrade the Helm application instance using the original configuration values and making sure to specify the current chart version to prevent an application upgrade. - -Also make sure to read the documentation describing the initial installation using custom certificates. - -``` -helm upgrade rancher rancher-stable/rancher \ - --namespace cattle-system \ - --version \ - --set hostname=rancher.my.org \ - --set ... -``` - -On upgrade, you can either - -- remove `--set ingress.tls.source=secret \` from the Helm upgrade command, as shown above, or - -- remove the `privateCA` parameter or set it to `false` because the CA is valid: - -``` -set privateCA=false -``` - -## 4. Reconfigure Rancher agents for the non-private/common certificate - -`CATTLE_CA_CHECKSUM` environment variable on the downstream cluster agents should be removed or set to "" (an empty string). - -## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher - -Select 'Force Update' for the clusters within the [Continuous Delivery]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet/#accessing-fleet-in-the-rancher-ui) view under Cluster Explorer in the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. - -### Why is this step required? - -Fleet agents in Rancher managed clusters store kubeconfig that is used to connect to the Rancher proxied kube-api in the fleet-agent secret of the fleet-system namespace. 
The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When changing the Rancher CA, this block needs to be updated for a successful connection of the fleet-agent to Rancher. \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md b/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md deleted file mode 100644 index 9b557f0ccc..0000000000 --- a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/_index.md +++ /dev/null @@ -1,245 +0,0 @@ ---- -title: Upgrading Cert-Manager -weight: 4 -aliases: - - /rancher/v2.5/en/installation/options/upgrading-cert-manager - - /rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.5/en/installation/resources/encryption/upgrading-cert-manager - - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/ ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. - -# Upgrade Cert-Manager - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. 
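For example, a quick way to confirm which namespace the cert-manager pods are currently running in:

```
# Lists the cert-manager pods along with their namespace in the first column
kubectl get pods --all-namespaces | grep cert-manager
```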
- -> These instructions have been updated for Helm 3. If you are still using Helm 2, refer to [these instructions.]({{}}/rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions) - -In order to upgrade cert-manager, follow these instructions: - -### Option A: Upgrade cert-manager with Internet Access - -{{% accordion id="normal" label="Click to expand" %}} -1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) - - ```plain - helm uninstall cert-manager - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed - - ```plain - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager if needed - - ```plain - kubectl create namespace cert-manager - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.12.0 - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Option B: Upgrade cert-manager in an Air Gap Environment - -{{% accordion id="airgap" label="Click to expand" %}} - -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. 
Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.5/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - The Helm 3 command is as follows: - - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - - The Helm 2 command is as follows: - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager (old and new) - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n cert-manager \ - delete deployment,sa,clusterrole,clusterrolebinding \ - -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f cert-manager/cert-manager-crd-old.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. 
Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager - - ```plain - kubectl create namespace cert-manager - ``` - -1. Install cert-manager - - ```plain - kubectl -n cert-manager apply -R -f ./cert-manager - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Verify the Deployment - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). - -More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). 
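As a rough sketch of the apiVersion update mentioned in the backup steps above (this assumes your backed-up resources are in `cert-manager-backup.yaml`; review the result before applying, and remember that cert-manager annotations on other resources need the same treatment):

```
# Rewrite the old API group to the new one in the backup file (keeps a .bak copy)
sed -i.bak 's|certmanager.k8s.io/v1alpha1|cert-manager.io/v1alpha2|g' cert-manager-backup.yaml
```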
- diff --git a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md b/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md deleted file mode 100644 index 070dff28f0..0000000000 --- a/content/rancher/v2.5/en/installation/resources/upgrading-cert-manager/helm-2-instructions/_index.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Upgrading Cert-Manager with Helm 2 -weight: 2040 -aliases: - - /rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions - - /rancher/v2.5/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's offficial documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart]({{}}/rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. - -## Upgrade Cert-Manager Only - -> **Note:** -> These instructions are applied if you have no plan to upgrade Rancher. - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. - -In order to upgrade cert-manager, follow these instructions: - -{{% accordion id="normal" label="Upgrading cert-manager with Internet access" %}} -1. 
Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. Delete the existing deployment - - ```plain - helm delete --purge cert-manager - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager - ``` -{{% /accordion %}} - -{{% accordion id="airgap" label="Upgrading cert-manager in an airgapped environment" %}} -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.5/en/installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. - -1. From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace kube-system \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml - ``` - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - -1. 
Install cert-manager - - ```plain - kubectl -n kube-system apply -R -f ./cert-manager - ``` -{{% /accordion %}} - - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace kube-system - -NAME READY STATUS RESTARTS AGE -cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m -cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m -cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m -``` - -If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. - -> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. Here are additional resources that explain why this is necessary: -> -> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) -> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) - -## Cert-Manager API change and data migration - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). - -For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). 
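If you are unsure which cert-manager version is currently running (relevant to the reinstall caveat above for versions older than v0.11), one way to check is to inspect the controller image tag. This is a sketch; the deployment name and namespace can differ depending on how cert-manager was installed:

```
# Prints the image (including the version tag) used by the cert-manager controller
kubectl -n kube-system get deployment cert-manager \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```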
diff --git a/content/rancher/v2.5/en/istio/_index.md b/content/rancher/v2.5/en/istio/_index.md deleted file mode 100644 index f947631d1e..0000000000 --- a/content/rancher/v2.5/en/istio/_index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Istio -weight: 14 -aliases: - - /rancher/v2.5/en/dashboard/istio - - /rancher/v2.x/en/istio/ - - /rancher/v2.x/en/istio/v2.5/ ---- - -[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. - -As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. - -Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to a team of developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -This core service mesh provides features that include but are not limited to the following: - -- **Traffic Management** such as ingress and egress routing, circuit breaking, mirroring. -- **Security** with resources to authenticate and authorize traffic and users, mTLS included. -- **Observability** of logs, metrics, and distributed traffic flows. - -After [setting up istio]({{}}/rancher/v2.5/en/istio/setup) you can leverage Istio's control plane functionality through the Cluster Explorer, `kubectl`, or `istioctl`. - -Istio needs to be set up by a `cluster-admin` before it can be used in a project. - -- [What's New in Rancher v2.5](#what-s-new-in-rancher-v2-5) -- [Tools Bundled with Istio](#tools-bundled-with-istio) -- [Prerequisites](#prerequisites) -- [Setup Guide](#setup-guide) -- [Remove Istio](#remove-istio) -- [Migrate from Previous Istio Version](#migrate-from-previous-istio-version) -- [Accessing Visualizations](#accessing-visualizations) -- [Architecture](#architecture) -- [Additional steps for installing Istio on an RKE2 cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) - -# What's New in Rancher v2.5 - -The overall architecture of Istio has been simplified. A single component, Istiod, has been created by combining Pilot, Citadel, Galley and the sidecar injector. Node Agent functionality has also been merged into istio-agent. - -Addons that were previously installed by Istio (cert-manager, Grafana, Jaeger, Kiali, Prometheus, Zipkin) will now need to be installed separately. Istio will support installation of integrations that are from the Istio Project and will maintain compatibility with those that are not. - -A Prometheus integration will still be available through an installation of [Rancher Monitoring]({{}}/rancher/v2.5/en/monitoring-alerting/), or by installing your own Prometheus operator. Rancher's Istio chart will also install Kiali by default to ensure you can get a full picture of your microservices out of the box. - -Istio has migrated away from Helm as a way to install Istio and now provides installation through the istioctl binary or Istio Operator. 
To ensure the easiest interaction with Istio, Rancher's Istio will maintain a Helm chart that utilizes the istioctl binary to manage your Istio installation. - -This Helm chart will be available via the Apps and Marketplace in the UI. A user that has access to the Rancher Chart's catalog will need to set up Istio before it can be used in the project. - -# Tools Bundled with Istio - -Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy Helm chart, including an overlay file option to allow complex customization. - -It also includes the following: - -### Kiali - -Kiali is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them. - -You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. - -### Jaeger - -_Bundled as of v2.5.4_ - -Our Istio installer includes a quick-start, all-in-one installation of [Jaeger,](https://www.jaegertracing.io/) a tool used for tracing distributed systems. - -Note that this is not a production-qualified deployment of Jaeger. This deployment uses an in-memory storage component, while a persistent storage component is recommended for production. For more information on which deployment strategy you may need, refer to the [Jaeger documentation.](https://www.jaegertracing.io/docs/latest/operator/#production-strategy) - -# Prerequisites - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory]({{}}/rancher/v2.5/en/istio/resources) to run all of the components of Istio. - -If you are installing Istio on RKE2 cluster, some additional steps are required. For details, see [this section.](#additional-steps-for-installing-istio-on-an-rke2-cluster) - -# Setup Guide - -Refer to the [setup guide]({{}}/rancher/v2.5/en/istio/setup) for instructions on how to set up Istio and use it in a project. - -# Remove Istio - -To remove Istio components from a cluster, namespace, or workload, refer to the section on [uninstalling Istio.]({{}}/rancher/v2.5/en/istio/disabling-istio/) - -# Migrate From Previous Istio Version - -There is no upgrade path for Istio versions less than 1.7.x. To successfully install Istio in the **Cluster Explorer**, you will need to disable your existing Istio in the **Cluster Manager**. - -If you have a significant amount of additional Istio CRDs you might consider manually migrating CRDs that are supported in both versions of Istio. You can do this by running `kubectl get -n istio-system -o yaml`, save the output yaml and re-apply in the new version. - -Another option is to manually uninstall istio resources one at a time, but leave the resources that are supported in both versions of Istio and that will not be installed by the newest version. This method is more likely to result in issues installing the new version, but could be a good option depending on your situation. - -# Accessing Visualizations - -> By default, only cluster-admins have access to Kiali. For instructions on how to allow admin, edit or views roles to access them, see [this section.]({{}}/rancher/v2.5/en/istio/rbac/) - -After Istio is set up in a cluster, Grafana, Prometheus,and Kiali are available in the Rancher UI. 
- -To access the Grafana and Prometheus visualizations, from the **Cluster Explorer** navigate to the **Monitoring** app overview page, and click on **Grafana** or **Prometheus**. - -To access the Kiali visualization, from the **Cluster Explorer** navigate to the **Istio** app overview page, and click on **Kiali**. From here you can access the **Traffic Graph** tab or the **Traffic Metrics** tab to see network visualizations and metrics. - -By default, all namespaces are picked up by Prometheus, which makes data available for the Kiali graphs. Refer to the [selector/scrape config setup](./configuration-reference/selectors-and-scrape) if you would like to use a different configuration for Prometheus data scraping. - -Your access to the visualizations depends on your role. Grafana and Prometheus are only available for `cluster-admin` roles. The Kiali UI is available only to `cluster-admin` by default, but a `cluster-admin` can allow other roles to access it by editing the Istio values.yaml. - -# Architecture - -Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. - -Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. - -When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. - -For more information on the Istio sidecar, refer to the [Istio sidecar injection docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/), and for more information on Istio's architecture, refer to the [Istio architecture docs](https://istio.io/latest/docs/ops/deployment/architecture/). - -### Multiple Ingresses - -By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. Istio also installs an ingress gateway by default into the `istio-system` namespace. The result is that your cluster will have two ingresses. - -![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - - Additional Istio ingress gateways can be enabled via the [overlay file]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/#overlay-file). - -### Egress Support - -By default, the egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/#overlay-file). 
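As a sketch only, enabling the egress gateway from the CLI might look like the command below. The release name, chart reference, and value key used here are assumptions and vary by Rancher Istio chart version, so check the chart's values.yaml in **Apps & Marketplace** before relying on it:

```
# Re-use the existing values and switch the egress gateway on
# (release/chart names and the egressGateways.enabled key are assumptions)
helm upgrade rancher-istio rancher-charts/rancher-istio \
  --namespace istio-system \
  --reuse-values \
  --set egressGateways.enabled=true
```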
- -# Additional Steps for Installing Istio on an RKE2 Cluster - -To install Istio on an RKE2 cluster, follow the steps in [this section.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/rke2/) diff --git a/content/rancher/v2.5/en/istio/configuration-reference/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/_index.md deleted file mode 100644 index 9e32b645d8..0000000000 --- a/content/rancher/v2.5/en/istio/configuration-reference/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Configuration Options -weight: 3 -aliases: - - /rancher/v2.5/en/istio/v2.5/configuration-reference - - /rancher/v2.x/en/istio/v2.5/configuration-reference/ ---- - -- [Egress Support](#egress-support) -- [Enabling Automatic Sidecar Injection](#enabling-automatic-sidecar-injection) -- [Overlay File](#overlay-file) -- [Selectors and Scrape Configs](#selectors-and-scrape-configs) -- [Enable Istio with Pod Security Policies](#enable-istio-with-pod-security-policies) -- [Additional Steps for Installing Istio on an RKE2 Cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) -- [Additional Steps for Project Network Isolation](#additional-steps-for-project-network-isolation) - -### Egress Support - -By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). - -### Enabling Automatic Sidecar Injection - -Automatic sidecar injection is disabled by default. To enable this, set the `sidecarInjectorWebhook.enableNamespacesByDefault=true` in the values.yaml on install or upgrade. This automatically enables Istio sidecar injection into all new namespaces that are deployed. - -### Overlay File - -An Overlay File is designed to support extensive configuration of your Istio installation. It allows you to make changes to any values available in the [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/). This will ensure you can customize the default installation to fit any scenario. - -The Overlay File will add configuration on top of the default installation that is provided from the Istio chart installation. This means you do not need to redefine the components that already defined for installation. - -For more information on Overlay Files, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings) - -### Selectors and Scrape Configs - -The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which enables monitoring across all namespaces by default. This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. - -If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. 
- -For details, refer to [this section.](./selectors-and-scrape) - -### Enable Istio with Pod Security Policies - -Refer to [this section.](./enable-istio-with-psp) - -### Additional Steps for Installing Istio on an RKE2 Cluster - -Refer to [this section.](./rke2) - -### Additional Steps for Project Network Isolation - -Refer to [this section.](./canal-and-project-network) \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md b/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md deleted file mode 100644 index 48d1d317f4..0000000000 --- a/content/rancher/v2.5/en/istio/configuration-reference/enable-istio-with-psp/_index.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Enable Istio with Pod Security Policies -weight: 1 -aliases: - - /rancher/v2.5/en/istio/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster/enable-istio-with-psp - - /rancher/v2.5/en/istio/v2.5/configuration-reference/enable-istio-with-psp - - /rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/ ---- - -If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. - -The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). - -The steps differ based on the Rancher version. - -{{% tabs %}} -{{% tab "v2.5.4+" %}} - -> **Prerequisites:** -> -> - The cluster must be an RKE Kubernetes cluster. -> - The cluster must have been created with a default PodSecurityPolicy. -> -> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. - -1. [Set the PodSecurityPolicy to unrestricted](#1-set-the-podsecuritypolicy-to-unrestricted) -2. [Enable the CNI](#2-enable-the-cni) -3. [Verify that the CNI is working.](#3-verify-that-the-cni-is-working) - -### 1. Set the PodSecurityPolicy to unrestricted - -An unrestricted PSP allows Istio to be installed. - -Set the PSP to `unrestricted` in the project where is Istio is installed, or the project where you plan to install Istio. - -1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** -1. Find the **Project: System** and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click **Save.** - -### 2. Enable the CNI - -When installing or upgrading Istio through **Apps & Marketplace,** - -1. Click **Components.** -2. Check the box next to **Enabled CNI.** -3. Finish installing or upgrading Istio. - -The CNI can also be enabled by editing the `values.yaml`: - -``` -istio_cni.enabled: true -``` - -Istio should install successfully with the CNI enabled in the cluster. - -### 3. 
Verify that the CNI is working - -Verify that the CNI is working by deploying a [sample application](https://istio.io/latest/docs/examples/bookinfo/) or deploying one of your own applications. - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.3" %}} - -> **Prerequisites:** -> -> - The cluster must be an RKE Kubernetes cluster. -> - The cluster must have been created with a default PodSecurityPolicy. -> -> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. - -1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) -2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) -3. [Install Istio.](#3-install-istio) - -### 1. Configure the System Project Policy to allow Istio install - -1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** -1. Find the **Project: System** and select the **⋮ > Edit**. -1. Change the Pod Security Policy option to be unrestricted, then click Save. - -### 2. Install the CNI Plugin in the System Project - -1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. -1. Select the **Project: System** project. -1. Choose **Tools > Catalogs** in the navigation bar. -1. Add a catalog with the following: - 1. Name: istio-cni - 1. Catalog URL: https://github.com/istio/cni - 1. Branch: The branch that matches your current release, for example: `release-1.4`. -1. From the main menu select **Apps** -1. Click Launch and select istio-cni -1. Update the namespace to be "kube-system" -1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: - -``` ---- - logLevel: "info" - excludeNamespaces: - - "istio-system" - - "kube-system" -``` - -### 3. Install Istio - -Follow the [primary instructions]({{}}/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/), adding a custom answer: `istio_cni.enabled: true`. - -After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. - -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/rbac/_index.md b/content/rancher/v2.5/en/istio/rbac/_index.md deleted file mode 100644 index bfba9cbcb7..0000000000 --- a/content/rancher/v2.5/en/istio/rbac/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Role-based Access Control -weight: 3 -aliases: - - /rancher/v2.5/en/istio/rbac - - /rancher/v2.5/en/istio/v2.5/rbac - - /rancher/v2.x/en/istio/v2.5/rbac/ ---- - -This section describes the permissions required to access Istio features. 
- -The Rancher Istio chart installs three `ClusterRoles`. - -## Cluster-Admin Access - -By default, only those with the `cluster-admin` `ClusterRole` can: - -- Install the Istio app in a cluster -- Configure resource allocations for Istio - - -## Admin and Edit Access - -By default, only Admin and Edit roles can: - -- Enable and disable Istio sidecar auto-injection for namespaces -- Add the Istio sidecar to workloads -- View the traffic metrics and traffic graph for the cluster -- Configure Istio's resources (such as the gateway, destination rules, or virtual services) - -## Summary of Default Permissions for Kubernetes Default Roles - -Istio creates three `ClusterRoles` and adds Istio CRD access to the following default K8s `ClusterRoles`: - -ClusterRole created by chart | Default K8s ClusterRole | Rancher Role | - ------------------------------:| ---------------------------:|---------:| - `istio-admin` | admin | Project Owner | - `istio-edit` | edit | Project Member | - `istio-view` | view | Read-only | - -Rancher will continue to use cluster-owner, cluster-member, project-owner, project-member, etc. as role names, but will use the default roles to determine access. For each default K8s `ClusterRole` there are different Istio CRD permissions and K8s actions (Create ( C ), Get ( G ), List ( L ), Watch ( W ), Update ( U ), Patch ( P ), Delete ( D ), All ( * )) that can be performed. - - -|CRDs | Admin | Edit | View -|----------------------------| ------| -----| ----- -|
  • `config.istio.io`
    • `adapters`
    • `attributemanifests`
    • `handlers`
    • `httpapispecbindings`
    • `httpapispecs`
    • `instances`
    • `quotaspecbindings`
    • `quotaspecs`
    • `rules`
    • `templates`
| GLW | GLW | GLW -|
  • `networking.istio.io`
    • `destinationrules`
    • `envoyfilters`
    • `gateways`
    • `serviceentries`
    • `sidecars`
    • `virtualservices`
    • `workloadentries`
| * | * | GLW -|
  • `security.istio.io`
    • `authorizationpolicies`
    • `peerauthentications`
    • `requestauthentications`
| * | * | GLW \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/resources/_index.md b/content/rancher/v2.5/en/istio/resources/_index.md deleted file mode 100644 index b1b4c7bafe..0000000000 --- a/content/rancher/v2.5/en/istio/resources/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: CPU and Memory Allocations -weight: 1 -aliases: - - /rancher/v2.5/en/project-admin/istio/configuring-resource-allocations/ - - /rancher/v2.5/en/project-admin/istio/config/ - - /rancher/v2.5/en/istio/resources - - /rancher/v2.5/en/istio/v2.5/resources - - /rancher/v2.x/en/istio/v2.5/resources/ ---- - -This section describes the minimum recommended computing resources for the Istio components in a cluster. - -The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) - -Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. - -> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. - -The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each core Istio component. - -In Kubernetes, the resource request indicates that the workload will not be deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) - -{{% tabs %}} -{{% tab "v2.5.6+" %}} - -| Workload | CPU - Request | Memory - Request | CPU - Limit | Memory - Limit | -|----------------------|---------------|------------|-----------------|-------------------| -| ingress gateway | 100m | 128mi | 2000m | 1024mi | -| egress gateway | 100m | 128mi | 2000m | 1024mi | -| istiod | 500m | 2048mi | No limit | No limit | -| proxy | 10m | 10mi | 2000m | 1024mi | -| **Totals:** | **710m** | **2314Mi** | **6000m** | **3072Mi** | - -{{% /tab %}} -{{% tab "v2.5.0-v2.5.5" %}} - -Workload | CPU - Request | Memory - Request | CPU - Limit | Mem - Limit | Configurable ----------:|---------------:|---------------:|-------------:|-------------:|-------------: -Istiod | 500m | 2048Mi | No limit | No limit | Y | -Istio-Mixer | 1000m | 1000Mi | 4800m | 4000Mi | Y | -Istio-ingressgateway | 100m | 128Mi | 2000m | 1024Mi | Y | -Others | 10m | - | - | - | Y | -Totals: | 1710m | 3304Mi | >8800m | >6048Mi | - - -{{% /tab %}} -{{% /tabs %}} - - - - -# Configuring Resource Allocations - -You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. - -To make it easier to schedule the workloads to a node, a cluster-admin can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. - -You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/). - -To configure the resources allocated to an Istio component, - -1. In the Rancher **Cluster Explorer**, navigate to your Istio installation in **Apps & Marketplace** -1. 
Click **Upgrade** to edit the base components via changes to the values.yaml or add an [overlay file]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/#overlay-file). For more information about editing the overlay file, see [this section.](./#editing-the-overlay-file) -1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. -1. Click **Upgrade.** to rollout changes - -**Result:** The resource allocations for the Istio components are updated. - -### Editing the Overlay File - -The overlay file can contain any of the values in the [Istio Operator spec.](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec) The overlay file included with the Istio application is just one example of a potential configuration of the overlay file. - -As long as the file contains `kind: IstioOperator` and the YAML options are valid, the file can be used as an overlay. - -In the example overlay file provided with the Istio application, the following section allows you to change Kubernetes resources: - -``` -# k8s: -# resources: -# requests: -# cpu: 200m -``` diff --git a/content/rancher/v2.5/en/istio/setup/_index.md b/content/rancher/v2.5/en/istio/setup/_index.md deleted file mode 100644 index 642222540a..0000000000 --- a/content/rancher/v2.5/en/istio/setup/_index.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Setup Guide -weight: 2 -aliases: - - /rancher/v2.5/en/istio/setup - - /rancher/v2.5/en/istio/v2.5/setup/ - - /rancher/v2.x/en/istio/v2.5/setup/ ---- - -This section describes how to enable Istio and start using it in your projects. - -If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. - -# Prerequisites - -This guide assumes you have already [installed Rancher,]({{}}/rancher/v2.5/en/installation) and you have already [provisioned a separate Kubernetes cluster]({{}}/rancher/v2.5/en/cluster-provisioning) on which you will install Istio. - -The nodes in your cluster must meet the [CPU and memory requirements.]({{}}/rancher/v2.5/en/istio/resources/) - -The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) - - -# Install - -> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway]({{}}/rancher/v2.5/en/istio/setup/gateway) and [setting up Istio's components for traffic management.]({{}}/rancher/v2.5/en/istio/setup/set-up-traffic-management) - -1. [Enable Istio in the cluster.]({{}}/rancher/v2.5/en/istio/setup/enable-istio-in-cluster) -1. [Enable Istio in all the namespaces where you want to use it.]({{}}/rancher/v2.5/en/istio/setup/enable-istio-in-namespace) -1. [Add deployments and services that have the Istio sidecar injected.]({{}}/rancher/v2.5/en/istio/setup/deploy-workloads) -1. [Set up the Istio gateway. ]({{}}/rancher/v2.5/en/istio/setup/gateway) -1. [Set up Istio's components for traffic management.]({{}}/rancher/v2.5/en/istio/setup/set-up-traffic-management) -1. 
[Generate traffic and see Istio in action.]({{}}/rancher/v2.5/en/istio/v2.5/setup/view-traffic/ ) diff --git a/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md b/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md deleted file mode 100644 index 2875c2cc9a..0000000000 --- a/content/rancher/v2.5/en/istio/setup/deploy-workloads/_index.md +++ /dev/null @@ -1,351 +0,0 @@ ---- -title: 3. Add Deployments and Services with the Istio Sidecar -weight: 4 -aliases: - - /rancher/v2.5/en/istio/setup/deploy-workloads - - /rancher/v2.5/en/istio/v2.5/setup/deploy-workloads - - /rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/ ---- - -> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have the Istio app installed. - -Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. - -To inject the Istio sidecar on an existing workload in the namespace, from the **Cluster Explorer** go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. - -Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see `istio-proxy` alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. - -### Add Deployments and Services - -There are a few ways to add new **Deployments** in your namespace - -1. From the **Cluster Explorer** click on **Workload > Overview.** -1. Click **Create.** -1. Select **Deployment** from the various workload options. -1. Fill out the form, or **Edit as Yaml.** -1. Click **Create.** - -Alternatively, you can select the specific workload you want to deploy from the **Workload** section of the left navigation bar and create it from there. - -To add a **Service** to your namespace - -1. From the **Cluster Explorer** click on **Service Discovery > Services** -1. Click **Create** -1. Select the type of service you want to create from the various options -1. Fill out the form, or **Edit as Yaml** -1. Click **Create** - -You can also create deployments and services using the kubectl **shell** - -1. Run `kubectl create -f .yaml` if your file is stored locally in the cluster -1. Or run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. - -### Example Deployments and Services - -Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. - -1. From the **Cluster Explorer**, open the kubectl **shell** -1. Run `cat<< EOF | kubectl apply -f -` -1. Copy the below resources into the the shell -1. 
Run `EOF` - -This will set up the following sample resources from Istio's example BookInfo app: - -Details service and deployment: - -- A `details` Service -- A ServiceAccount for `bookinfo-details` -- A `details-v1` Deployment - -Ratings service and deployment: - -- A `ratings` Service -- A ServiceAccount for `bookinfo-ratings` -- A `ratings-v1` Deployment - -Reviews service and deployments (three versions): - -- A `reviews` Service -- A ServiceAccount for `bookinfo-reviews` -- A `reviews-v1` Deployment -- A `reviews-v2` Deployment -- A `reviews-v3` Deployment - -Productpage service and deployment: - -This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. - -- A `productpage` service -- A ServiceAccount for `bookinfo-productpage` -- A `productpage-v1` Deployment - -### Resource YAML - -```yaml -# Copyright 2017 Istio Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -################################################################################################## -# Details service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: details - labels: - app: details - service: details -spec: - ports: - - port: 9080 - name: http - selector: - app: details ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-details ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: details-v1 - labels: - app: details - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: details - version: v1 - template: - metadata: - labels: - app: details - version: v1 - spec: - serviceAccountName: bookinfo-details - containers: - - name: details - image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Ratings service -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: ratings - labels: - app: ratings - service: ratings -spec: - ports: - - port: 9080 - name: http - selector: - app: ratings ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-ratings ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ratings-v1 - labels: - app: ratings - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: ratings - version: v1 - template: - metadata: - labels: - app: ratings - version: v1 - spec: - serviceAccountName: bookinfo-ratings - containers: - - name: ratings - image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Reviews service 
-################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: reviews - labels: - app: reviews - service: reviews -spec: - ports: - - port: 9080 - name: http - selector: - app: reviews ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-reviews ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v1 - labels: - app: reviews - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v1 - template: - metadata: - labels: - app: reviews - version: v1 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v2 - labels: - app: reviews - version: v2 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v2 - template: - metadata: - labels: - app: reviews - version: v2 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: reviews-v3 - labels: - app: reviews - version: v3 -spec: - replicas: 1 - selector: - matchLabels: - app: reviews - version: v3 - template: - metadata: - labels: - app: reviews - version: v3 - spec: - serviceAccountName: bookinfo-reviews - containers: - - name: reviews - image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -################################################################################################## -# Productpage services -################################################################################################## -apiVersion: v1 -kind: Service -metadata: - name: productpage - labels: - app: productpage - service: productpage -spec: - ports: - - port: 9080 - name: http - selector: - app: productpage ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bookinfo-productpage ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: productpage-v1 - labels: - app: productpage - version: v1 -spec: - replicas: 1 - selector: - matchLabels: - app: productpage - version: v1 - template: - metadata: - labels: - app: productpage - version: v1 - spec: - serviceAccountName: bookinfo-productpage - containers: - - name: productpage - image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 9080 ---- -``` - -### [Next: Set up the Istio Gateway]({{}}/rancher/v2.5/en/istio/setup/gateway) diff --git a/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md b/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md deleted file mode 100644 index 92b8625be2..0000000000 --- a/content/rancher/v2.5/en/istio/setup/enable-istio-in-cluster/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: 1. 
Enable Istio in the Cluster -weight: 1 -aliases: - - /rancher/v2.5/en/istio/setup/enable-istio-in-cluster - - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster - - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/ ---- - ->**Prerequisites:** -> ->- Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. ->- If you have pod security policies, you will need to install Istio with the CNI enabled. For details, see [this section.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/enable-istio-with-psp) ->- To install Istio on an RKE2 cluster, additional steps are required. For details, see [this section.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/rke2/) ->- To install Istio in a cluster where project network isolation is enabled, additional steps are required. For details, see [this section.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/canal-and-project-network) - -1. From the **Cluster Explorer**, navigate to available **Charts** in **Apps & Marketplace** -1. Select the Istio chart from the rancher provided charts -1. If you have not already installed your own monitoring app, you will be prompted to install the rancher-monitoring app. Optional: Set your Selector or Scrape config options on rancher-monitoring app install. -1. Optional: Configure member access and [resource limits]({{}}/rancher/v2.5/en/istio/resources/) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. -1. Optional: Make additional configuration changes to values.yaml if needed. -1. Optional: Add additional resources or configuration via the [overlay file.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/#overlay-file) -1. Click **Install**. - -**Result:** Istio is installed at the cluster level. - -# Additional Config Options - -For more information on configuring Istio, refer to the [configuration reference.]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference) diff --git a/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md b/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md deleted file mode 100644 index 6044a807cc..0000000000 --- a/content/rancher/v2.5/en/istio/setup/enable-istio-in-namespace/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: 2. Enable Istio in a Namespace -weight: 2 -aliases: - - /rancher/v2.5/en/istio/setup/enable-istio-in-namespace - - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-namespace - - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/ ---- - -You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. - -This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. - -> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio installed. - -1. In the Rancher **Cluster Explorer,** open the kubectl shell. -1. Then run `kubectl label namespace istio-injection=enabled` - -**Result:** The namespace now has the label `istio-injection=enabled`. 
All new workloads deployed in this namespace will have the Istio sidecar injected by default. - -### Verifying that Automatic Istio Sidecar Injection is Enabled - -To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. - -### Excluding Workloads from Being Injected with the Istio Sidecar - -If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: - -``` -sidecar.istio.io/inject: “false” -``` - -To add the annotation to a workload, - -1. From the **Cluster Explorer** view, use the side-nav to select the **Overview** page for workloads. -1. Go to the workload that should not have the sidecar and edit as yaml -1. Add the following key, value `sidecar.istio.io/inject: false` as an annotation on the workload -1. Click **Save.** - -**Result:** The Istio sidecar will not be injected into the workload. - -> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. - - -### [Next: Select the Nodes ]({{}}/rancher/v2.5/en/istio/setup/node-selectors) \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/setup/gateway/_index.md b/content/rancher/v2.5/en/istio/setup/gateway/_index.md deleted file mode 100644 index 243815152c..0000000000 --- a/content/rancher/v2.5/en/istio/setup/gateway/_index.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: 4. Set up the Istio Gateway -weight: 5 -aliases: - - /rancher/v2.5/en/istio/setup/gateway - - /rancher/v2.5/en/istio/v2.5/setup/gateway - - /rancher/v2.x/en/istio/v2.5/setup/gateway/ ---- - -The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. - -You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. - -To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two Ingresses. - -You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. - -You can route traffic into the service mesh with a load balancer or use Istio's NodePort gateway. This section describes how to set up the NodePort gateway. - -For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) - -![In an Istio-enabled cluster, you can have two Ingresses: the default Nginx Ingress, and the default Istio controller.]({{}}/img/rancher/istio-ingress.svg) - -# Enable an Istio Gateway - -The ingress gateway is a Kubernetes service that will be deployed in your cluster. The Istio Gateway allows for more extensive customization and flexibility. - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. 
Click **Gateways** in the side nav bar. -1. Click **Create from Yaml**. -1. Paste your Istio Gateway yaml, or **Read from File**. -1. Click **Create**. - -**Result:** The gateway is deployed, and will now route traffic with applied rules - -# Example Istio Gateway - -We add the BookInfo app deployments in services when going through the Workloads example. Next we add an Istio Gateway so that the app is accessible from outside your cluster. - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click **Gateways** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the Gateway yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: Gateway -metadata: - name: bookinfo-gateway -spec: - selector: - istio: ingressgateway # use istio default controller - servers: - - port: - number: 80 - name: http - protocol: HTTP - hosts: - - "*" ---- -``` - -Then to deploy the VirtualService that provides the traffic routing for the Gateway - -1. Click **VirtualService** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the VirtualService yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: bookinfo -spec: - hosts: - - "*" - gateways: - - bookinfo-gateway - http: - - match: - - uri: - exact: /productpage - - uri: - prefix: /static - - uri: - exact: /login - - uri: - exact: /logout - - uri: - prefix: /api/v1/products - route: - - destination: - host: productpage - port: - number: 9080 -``` - -**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. - -Confirm that the resource exists by running: -``` -kubectl get gateway -A -``` - -The result should be something like this: -``` -NAME AGE -bookinfo-gateway 64m -``` - -### Access the ProductPage Service from a Web Browser - -To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: - -`http://:/productpage` - -To get the ingress gateway URL and port, - -1. From the **Cluster Explorer**, Click on **Workloads > Overview**. -1. Scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. -1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. - -**Result:** You should see the BookInfo app in the web browser. - -For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) - -# Troubleshooting - -The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. - -### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller - -You can try the steps in this section to make sure the Kubernetes gateway is configured properly. 
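
For reference, the sketch below repeats the BookInfo gateway from the example above with the relevant line annotated. The label value shown is the default used by Rancher's Istio chart and may differ in a customized install.

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: bookinfo-gateway
spec:
  selector:
    # This selector must match labels on the ingress gateway pods in the
    # istio-system namespace. With the default install, the
    # istio-ingressgateway workload carries the label istio: ingressgateway.
    istio: ingressgateway
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
```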
- -In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: - -1. From the **Cluster Explorer**, Click on **Workloads > Overview**. -1. Scroll down to the `istio-system` namespace. -1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. - -### [Next: Set up Istio's Components for Traffic Management]({{}}/rancher/v2.5/en/istio/setup/set-up-traffic-management) diff --git a/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md b/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md deleted file mode 100644 index c2fc4826af..0000000000 --- a/content/rancher/v2.5/en/istio/setup/set-up-traffic-management/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: 5. Set up Istio's Components for Traffic Management -weight: 6 -aliases: - - /rancher/v2.5/en/istio/setup/set-up-traffic-management - - /rancher/v2.5/en/istio/v2.5/setup/set-up-traffic-management - - /rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/ ---- - -A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. - -- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. -- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. - -This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. - -In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. - -After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. - -To deploy the virtual service and destination rules for the `reviews` service, - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click **DestinationRule** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the DestinationRule yaml provided below. -1. Click **Create**. 
- -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: DestinationRule -metadata: - name: reviews -spec: - host: reviews - subsets: - - name: v1 - labels: - version: v1 - - name: v2 - labels: - version: v2 - - name: v3 - labels: - version: v3 -``` - -Then to deploy the VirtualService that provides the traffic routing that utilizes the DestinationRule - -1. Click **VirtualService** in the side nav bar. -1. Click **Create from Yaml**. -1. Copy and paste the VirtualService yaml provided below. -1. Click **Create**. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - name: reviews -spec: - hosts: - - reviews - http: - - route: - - destination: - host: reviews - subset: v1 - weight: 50 - - destination: - host: reviews - subset: v3 - weight: 50 ---- -``` - -**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. - -### [Next: Generate and View Traffic]({{}}/rancher/v2.5/en/istio/setup/view-traffic) diff --git a/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md b/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md deleted file mode 100644 index aea0643b8b..0000000000 --- a/content/rancher/v2.5/en/istio/setup/view-traffic/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 6. Generate and View Traffic -weight: 7 -aliases: - - /rancher/v2.5/en/istio/setup/view-traffic - - /rancher/v2.5/en/istio/setup/view-traffic - - /rancher/v2.5/en/istio/v2.5/setup/view-traffic - - /rancher/v2.x/en/istio/v2.5/setup/view-traffic/ ---- - -This section describes how to view the traffic that is being managed by Istio. - -# The Kiali Traffic Graph - -The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you are able to view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. - ->**Prerequisite:** To enable traffic to show up in the graph, ensure you have prometheus installed in the cluster. Rancher-istio installs Kiali configured by default to work with the rancher-monitoring chart. You can use rancher-monitoring or install your own monitoring solution. Optional: you can change configuration on how data scraping occurs by setting the [Selectors & Scrape Configs]({{}}/rancher/v2.5/en/istio/v2.5/configuration-reference/selectors-and-scrape) options. - -To see the traffic graph, - -1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. -1. Click the **Kiali** link on the Istio **Overview** page. -1. Click on **Graph** in the side nav. -1. Change the namespace in the **Namespace** dropdown to view the traffic for each namespace. - -If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. 
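
To keep traffic flowing without refreshing the page by hand, you can run a small in-cluster traffic generator. The sketch below is hypothetical: it assumes the public `busybox` image, and the `<ingress-gateway-host>:<port>` placeholder must be replaced with the ingress gateway address and port found in the gateway setup step.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bookinfo-traffic-generator   # hypothetical name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: bookinfo-traffic-generator
  template:
    metadata:
      labels:
        app: bookinfo-traffic-generator
      annotations:
        sidecar.istio.io/inject: "false"   # keep the generator itself out of the mesh
    spec:
      containers:
      - name: traffic
        image: busybox
        command: ["/bin/sh", "-c"]
        args:
        - >
          while true; do
            wget -q -O /dev/null http://<ingress-gateway-host>:<port>/productpage;
            sleep 1;
          done
```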
- -For additional tools and visualizations, you can go to Grafana, and Prometheus dashboards from the **Monitoring** **Overview** page diff --git a/content/rancher/v2.5/en/k8s-in-rancher/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/_index.md deleted file mode 100644 index c1c3a0afe7..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Kubernetes Resources -weight: 18 -aliases: - - /rancher/v2.5/en/concepts/ - - /rancher/v2.5/en/tasks/ - - /rancher/v2.5/en/concepts/resources/ - - /rancher/v2.x/en/k8s-in-rancher/ ---- - -> The Cluster Explorer is a new feature in Rancher v2.5 that allows you to view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. This section will be updated to reflect the way that Kubernetes resources are handled in Rancher v2.5. - -## Workloads - -Deploy applications to your cluster nodes using [workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire clusters or within a namespace. - -When deploying a workload, you can deploy from any image. There are a variety of [workload types]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/#workload-types) to choose from which determine how your application should run. - -Following a workload deployment, you can continue working with it. You can: - -- [Upgrade]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads) the workload to a newer version of the application it's running. -- [Roll back]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads) a workload to a previous version, if an issue occurs during upgrade. -- [Add a sidecar]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar), which is a workload that supports a primary workload. - -## Load Balancing and Ingress - -### Load Balancers - -After you launch an application, it's only available within the cluster. It can't be reached externally. - -If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). - -#### Ingress - -Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. You can get around this issue by using an ingress. - -Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. 
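
As an illustration of such rules, a minimal Ingress that routes by hostname and path might look like the sketch below. The hostname, service names, and ports are hypothetical, and the appropriate API version depends on your Kubernetes version.

```yaml
apiVersion: networking.k8s.io/v1beta1   # use networking.k8s.io/v1 on Kubernetes v1.19+
kind: Ingress
metadata:
  name: hello-world
spec:
  rules:
  - host: hello.example.com        # subdomain rule
    http:
      paths:
      - path: /api                 # path rule
        backend:
          serviceName: hello-api   # existing service in the same namespace
          servicePort: 8080
      - path: /
        backend:
          serviceName: hello-web
          servicePort: 80
```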
- -For more information, see [Ingress]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress). - -When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -## Service Discovery - -After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. - -For more information, see [Service Discovery]({{}}/rancher/v2.5/en/k8s-in-rancher/service-discovery). - -## Pipelines - -After your project has been [configured to a version control provider]({{}}/rancher/v2.5/en/project-admin/pipelines/#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. - -For more information, see [Pipelines]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/). - -## Applications - -Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. - -For more information, see [Applications in a Project]({{}}/rancher/v2.5/en/catalog/apps/). - -## Kubernetes Resources - -Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. - -Resources include: - -- [Certificates]({{}}/rancher/v2.5/en/k8s-in-rancher/certificates/): Files used to encrypt/decrypt data entering or leaving the cluster. -- [ConfigMaps]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps/): Files that store general configuration information, such as a group of config files. -- [Secrets]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/): Files that store sensitive data like passwords, tokens, or keys. -- [Registries]({{}}/rancher/v2.5/en/k8s-in-rancher/registries/): Files that carry credentials used to authenticate with private registries. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md deleted file mode 100644 index 929081f9f3..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/certificates/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Encrypting HTTP Communication -description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments -weight: 3060 -aliases: - - /rancher/v2.5/en/tasks/projects/add-ssl-certificates/ - - /rancher/v2.5/en/k8s-in-rancher/certificates - - /rancher/v2.x/en/k8s-in-rancher/certificates/ ---- - -When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. 
You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. - -Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. - ->**Prerequisites:** You must have a TLS private key and certificate available to upload. - -1. From the **Global** view, select the project where you want to deploy your ingress. - -1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. - -1. Enter a **Name** for the certificate. - - >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. - -1. Select the **Scope** of the certificate. - - - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. - - - **Available to a single namespace:** The certificate is only available for the deployments in one namespace. If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. - -1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Private key files end with an extension of `.key`. - -1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. - - Certificate files end with an extension of `.crt`. - -**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. - -- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. -- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. -- Your certificate is added to the **Resources > Secrets > Certificates** view. - -## What's Next? - -Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/). 
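
A certificate added this way corresponds to a standard Kubernetes `kubernetes.io/tls` secret, which is also what an ingress ultimately references. A minimal sketch, where the name and the base64 values are placeholders:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-tls-cert            # hypothetical name; must be unique in the namespace
  namespace: default
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded certificate>   # contents of the .crt file
  tls.key: <base64-encoded private key>   # contents of the .key file
```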
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md deleted file mode 100644 index dd80de5962..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/configmaps/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: ConfigMaps -weight: 3061 -aliases: - - /rancher/v2.5/en/tasks/projects/add-configmaps - - /rancher/v2.5/en/k8s-in-rancher/configmaps - - /rancher/v2.x/en/k8s-in-rancher/configmaps/ ---- - -While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). - -ConfigMaps accept key value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. - ->**Note:** ConfigMaps can only be applied to namespaces and not projects. - -1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. - -1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. - -1. Enter a **Name** for the Config Map. - - >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. - -1. Select the **Namespace** you want to add Config Map to. You can also add a new namespace on the fly by clicking **Add to a new namespace**. - -1. From **Config Map Values**, click **Add Config Map Value** to add a key value pair to your ConfigMap. Add as many values as you need. - -1. Click **Save**. - - >**Note:** Don't use ConfigMaps to store sensitive data [use a secret]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/). - > - >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. - -## What's Next? - -Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for you application to consume, such as: - -- Application environment variables. -- Specifying parameters for a Volume mounted to the workload. - -For more information on adding ConfigMaps to a workload, see [Deploying Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/). 
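
For reference, the resource created through the form above is an ordinary Kubernetes ConfigMap. A minimal sketch with hypothetical keys, which a workload can consume either as environment variables or as a volume mount:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config              # hypothetical name; must be unique in the namespace
  namespace: default
data:
  LOG_LEVEL: "info"             # simple key/value, useful as an environment variable
  app.properties: |             # file-like value, useful as a volume mount
    greeting=hello
    retries=3
```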
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md deleted file mode 100644 index 8849de51ad..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: The Horizontal Pod Autoscaler -description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment -weight: 3026 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/ ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. - -Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. - -You can create, manage, and delete HPAs using the Rancher UI. It only supports HPA in the `autoscaling/v2beta2` API. - -## Managing HPAs - -The way that you manage HPAs is different based on your version of the Kubernetes API: - -- **For Kubernetes API version autoscaling/V2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. -- **For Kubernetes API Version autoscaling/V2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. - -You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. -## Testing HPAs with a Service Deployment - -You can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/). - -You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] -({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). 
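
Since the Rancher UI works with the `autoscaling/v2beta2` API, a minimal manifest in that version looks like the sketch below. The target deployment name is hypothetical.

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world           # hypothetical deployment to scale
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50  # scale up when average CPU use exceeds 50% of requests
```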
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md deleted file mode 100644 index 2b4f86ba18..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Background Information on HPAs -weight: 3027 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/ ---- - -The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. - -## Why Use Horizontal Pod Autoscaler? - -Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: - -- A minimum and maximum number of pods allowed to run, as defined by the user. -- Observed CPU/memory use, as reported in resource metrics. -- Custom metrics provided by third-party metrics application like Prometheus, Datadog, etc. - -HPA improves your services by: - -- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. -- Increase/decrease performance as needed to accomplish service level agreements. - -## How HPA Works - -![HPA Schema]({{}}/img/rancher/horizontal-pod-autoscaler.jpg) - -HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: - -Flag | Default | Description | ----------|----------|----------| - `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. - `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operations. - `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. - - -For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). - -## Horizontal Pod Autoscaler API Objects - -HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. - -For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). 
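
The control-loop flags in the table above are passed to `kube-controller-manager`. In an RKE-provisioned cluster they can be set through the cluster configuration; a sketch, assuming an RKE `cluster.yml` (the values shown are the defaults from the table):

```yaml
services:
  kube-controller:
    extra_args:
      # How often HPA audits resource/custom metrics
      horizontal-pod-autoscaler-sync-period: "30s"
      # Cooldown after a downscale before another downscale may run
      horizontal-pod-autoscaler-downscale-delay: "5m0s"
      # Cooldown after an upscale before another upscale may run
      horizontal-pod-autoscaler-upscale-delay: "3m0s"
```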
diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md deleted file mode 100644 index d1e3900f3e..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Managing HPAs with kubectl -weight: 3029 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/ ---- - -This section describes HPA management with `kubectl`. This document has instructions for how to: - -- Create an HPA -- Get information on HPAs -- Delete an HPA -- Configure your HPAs to scale with CPU or memory utilization -- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics - - -You can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. - -##### Basic kubectl Command for Managing HPAs - -If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: - -- Creating HPA - - - With manifest: `kubectl create -f ` - - - Without manifest (Just support CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` - -- Getting HPA info - - - Basic: `kubectl get hpa hello-world` - - - Detailed description: `kubectl describe hpa hello-world` - -- Deleting HPA - - - `kubectl delete hpa hello-world` - -##### HPA Manifest Definition Example - -The HPA manifest is the config file used for managing an HPA with `kubectl`. - -The following snippet demonstrates use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. - -```yml -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi -``` - - -Directive | Description ----------|----------| - `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | - `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-word` deployment. | - `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | - `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. - `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. - `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more that 100Mi of memory. -
- -##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) - -Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. - -Run the following commands to check if metrics are available in your installation: - -``` -$ kubectl top nodes -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -node-controlplane 196m 9% 1623Mi 42% -node-etcd 80m 4% 1090Mi 28% -node-worker 64m 3% 1146Mi 29% -$ kubectl -n kube-system top pods -NAME CPU(cores) MEMORY(bytes) -canal-pgldr 18m 46Mi -canal-vhkgr 20m 45Mi -canal-x5q5v 17m 37Mi -canal-xknnz 20m 37Mi -kube-dns-7588d5b5f5-298j2 0m 22Mi -kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi -metrics-server-97bc649d5-jxrlt 0m 12Mi -$ kubectl -n kube-system logs -l k8s-app=metrics-server -I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true -I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 -I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version -I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 -I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink -I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) -I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ -I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 -``` - - -##### Configuring HPA to Scale Using Custom Metrics with Prometheus - -You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. - -For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: - -- Prometheus is deployed in the cluster. -- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. -- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` - -Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. - -For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). - -1. Initialize Helm in your cluster. 
- ``` - # kubectl -n kube-system create serviceaccount tiller - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` - -1. Clone the `banzai-charts` repo from GitHub: - ``` - # git clone https://github.com/banzaicloud/banzai-charts - ``` - -1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. - ``` - # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system - ``` - -1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check that the service pod is `Running`. Enter the following command. - ``` - # kubectl get pods -n kube-system - ``` - From the resulting output, look for a status of `Running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h - ... - ``` - 1. Check the service logs to make sure the service is running correctly by entering the command that follows. - ``` - # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system - ``` - Then review the log output to confirm the service is running. - {{% accordion id="prometheus-logs" label="Prometheus Adaptor Logs" %}} - ... - I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds - I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: - I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT - I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json - I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 - I0724 10:18:45.696766 1 request.go:836] Response Body: 
{"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} - I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.sslip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK - I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} - I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] - I0724 10:18:51.727845 1 request.go:836] Request Body: 
{"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} - ... - {{% /accordion %}} - - - -1. Check that the metrics API is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. - {{% accordion id="custom-metrics-api-response" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","
singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. 
- {{% accordion id="custom-metrics-api-response-rancher" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/
memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md deleted file mode 100644 index 5c15feb071..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Managing HPAs with the Rancher UI -weight: 3028 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/ ---- - -The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. - -If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -## Creating an HPA - -1. From the **Global** view, open the project that you want to deploy a HPA to. - -1. Click **Resources > HPA.** - -1. Click **Add HPA.** - -1. Enter a **Name** for the HPA. - -1. Select a **Namespace** for the HPA. - -1. Select a **Deployment** as scale target for the HPA. - -1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. - -1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). - -1. Click **Create** to create the HPA. - -> **Result:** The HPA is deployed to the chosen namespace. 
You can view the HPA's status from the project's Resources > HPA view. - -## Get HPA Metrics and Status - -1. From the **Global** view, open the project with the HPAs you want to look at. - -1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. - -1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. - - -## Deleting an HPA - -1. From the **Global** view, open the project that you want to delete an HPA from. - -1. Click **Resources > HPA.** - -1. Find the HPA which you would like to delete. - -1. Click **⋮ > Delete**. - -1. Click **Delete** to confirm. - -> **Result:** The HPA is deleted from the current cluster. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md deleted file mode 100644 index 0816641dd5..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ /dev/null @@ -1,494 +0,0 @@ ---- -title: Testing HPAs with kubectl -weight: 3031 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa - - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/ ---- - -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.5/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). - -For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. - -1. Configure `kubectl` to connect to your Kubernetes cluster. - -2. Copy the `hello-world` deployment manifest below. -{{% accordion id="hello-world" label="Hello World Manifest" %}} -``` -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: hello-world - namespace: default -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world -``` -{{% /accordion %}} - -1. Deploy it to your cluster. - - ``` - # kubectl create -f - ``` - -1. 
Copy one of the HPAs below based on the metric type you're using: -{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi -``` -{{% /accordion %}} -{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m -``` -{{% /accordion %}} - -1. View the HPA info and description. Confirm that metric data is shown. - {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following commands. - ``` - # kubectl get hpa - NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE - hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 1253376 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - {{% accordion id="hpa-info-custom-metrics" label="Custom Metrics" %}} -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive the output that follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 3514368 / 100Mi - "cpu_system" on pods: 0 / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - - -1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). - -1. 
Test that pod autoscaling works as intended.
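   For example, a minimal way to generate sustained traffic against the `hello-world` service with Hey might look like the sketch below. The URL is only a placeholder; substitute whatever NodePort or ingress address you used to expose the service, and tune the duration and concurrency for your cluster.
   ```
   # Illustrative only: drive load for 5 minutes with 50 concurrent workers.
   # Replace the placeholder URL with the address of your hello-world service.
   hey -z 5m -c 50 http://<hello-world-url>/
   ```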

- **To Test Autoscaling Using Resource Metrics:** - {{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to two pods based on CPU Usage. - -1. View your HPA. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10928128 / 100Mi - resource cpu on pods (as a percentage of request): 56% (280m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm you've scaled to two pods. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 9424896 / 100Mi - resource cpu on pods (as a percentage of request): 66% (333m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - ``` -2. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. 
- ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-f46kh 0/1 Running 0 1m - hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10070016 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` - {{% /accordion %}} -
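   While the load test is running, it can also help to watch the HPA and its pods continuously instead of re-running `kubectl describe hpa`. A minimal sketch, assuming the `hello-world` deployment and HPA created in the earlier steps:
   ```
   # Watch replica counts and current/target metrics update as load changes.
   kubectl get hpa hello-world -n default -w

   # In a second terminal, watch pods appear and disappear as the HPA scales.
   kubectl get pods -n default -l app=hello-world -w
   ```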
-**To Test Autoscaling Using Custom Metrics:** - {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale two pods based on CPU usage. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8159232 / 100Mi - "cpu_system" on pods: 7m / 20m - resource cpu on pods (as a percentage of request): 64% (321m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm two pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` - {{% /accordion %}} -{{% accordion id="observe-upscale-3-pods-cpu-cooldown-2" label="Upscale to 3 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows: - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - ``` -1. Enter the following command to confirm three pods are running. 
- ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - # kubectl get pods - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="observe-upscale-4-pods" label="Upscale to 4 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm four pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m - hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive similar output to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8101888 / 100Mi - "cpu_system" on pods: 8m / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` -1. Enter the following command to confirm a single pods is running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md deleted file mode 100644 index 2f5cd7ceee..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Set Up Load Balancer and Ingress Controller within Rancher -description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers -weight: 3040 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ ---- - -Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. - -## Load Balancers - -After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. - -If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. - -Rancher supports two types of load balancers: - -- [Layer-4 Load Balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-4-load-balancer) -- [Layer-7 Load Balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#layer-7-load-balancer) - -For more information, see [load balancers]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers). 
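At the Kubernetes level, a Layer-4 load balancer is typically requested by creating a Service of type `LoadBalancer`. The following is only an illustrative sketch (the `hello-world-lb` name, `app: hello-world` selector, and ports are placeholders); on a supported cloud provider, the cloud controller then provisions an external load balancer that forwards traffic to the service's pods.

```
# Illustrative only: expose pods labeled app=hello-world through a
# cloud provider's Layer-4 load balancer on port 80.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: hello-world-lb
  namespace: default
spec:
  type: LoadBalancer
  selector:
    app: hello-world
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
EOF

# The EXTERNAL-IP column is populated once provisioning completes.
kubectl get service hello-world-lb -n default
```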
- -### Load Balancer Limitations - -Load Balancers have a couple of limitations you should be aware of: - -- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. - -- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: - - - - [Support for Layer-4 Load Balancing]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-4-load-balancing) - - - [Support for Layer-7 Load Balancing]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/#support-for-layer-7-load-balancing) - -## Ingress - -As mentioned in the limitations above, the disadvantages of using a load balancer are: - -- Load Balancers can only handle one IP address per service. -- If you run multiple services in your cluster, you must have a load balancer for each service. -- It can be expensive to have a load balancer for every service. - -In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. - -Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. - -Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. - -Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. - -Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). - -Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. - ->**Using Rancher in a High Availability Configuration?** -> ->Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. - -- For more information on how to set up ingress in Rancher, see [Ingress]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress). 
-- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) -- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md deleted file mode 100644 index dc700e5fce..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Adding Ingresses to Your Project -description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project -weight: 3042 -aliases: - - /rancher/v2.5/en/tasks/workloads/add-ingress/ - - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/ ---- - -Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. - -1. From the **Global** view, open the project that you want to add ingress to. -1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. Then click **Add Ingress**. -1. Enter a **Name** for the ingress. -1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. -1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. -1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. - -**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. - - -# Ingress Rule Configuration - -- [Automatically generate a sslip.io hostname](#automatically-generate-a-sslip-io-hostname) -- [Specify a hostname to use](#specify-a-hostname-to-use) -- [Use as the default backend](#use-as-the-default-backend) -- [Certificates](#certificates) -- [Labels and Annotations](#labels-and-annotations) - -### Automatically generate a sslip.io hostname - -If you choose this option, ingress routes requests to hostname to a DNS name that's automatically generated. Rancher uses [sslip.io](http://sslip.io/) to automatically generates the DNS name. This option is best used for testing, _not_ production environments. - ->**Note:** To use this option, you must be able to resolve to `sslip.io` addresses. - -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. 
For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on. - -### Specify a hostname to use - -If you use this option, ingress routes requests for a hostname to the service or workload that you specify. - -1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. -1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. -1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. -1. Select a workload or service from the **Target** drop-down list for each target you've added. -1. Enter the **Port** number that each target operates on. - -### Use as the default backend - -Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. - ->**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. - -1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. -1. Select a service or workload from the **Target** drop-down list. - -### Certificates ->**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates]({{}}/rancher/v2.5/en/k8s-in-rancher/certificates/). - -1. Click **Add Certificate**. -1. Select a **Certificate** from the drop-down list. -1. Enter the **Host** using encrypted communication. -1. To add additional hosts that use the certificate, click **Add Hosts**. - -### Labels and Annotations - -Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. - -For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). \ No newline at end of file diff --git a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md deleted file mode 100644 index d30335a67f..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "Layer 4 and Layer 7 Load Balancing" -description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. 
Learn about the support for each way in different deployments" -weight: 3041 -aliases: - - /rancher/v2.5/en/concepts/load-balancing/ - - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers - - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/ ---- -Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. - -## Layer-4 Load Balancer - -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. - -Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. - -> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. - -### Support for Layer-4 Load Balancing - -Support for layer-4 load balancer varies based on the underlying cloud provider. - -Cluster Deployment | Layer-4 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GCE cloud provider -Azure AKS | Supported by Azure cloud provider -RKE on EC2 | Supported by AWS cloud provider -RKE on DigitalOcean | Limited NGINX or third-party Ingress* -RKE on vSphere | Limited NGINX or third party-Ingress* -RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* -Third-party MetalLB | Limited NGINX or third-party Ingress* - -\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) - -## Layer-7 Load Balancer - -Layer-7 load balancers (or ingress controllers) support host- and path-based load balancing and SSL termination. Layer-7 load balancers only forward HTTP and HTTPS traffic and therefore listen on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller. - -### Support for Layer-7 Load Balancing - -Support for layer-7 load balancing varies based on the underlying cloud provider. - -Cluster Deployment | Layer-7 Load Balancer Support -----------------------------------------------|-------------------------------- -Amazon EKS | Supported by AWS cloud provider -Google GKE | Supported by GKE cloud provider -Azure AKS | Not Supported -RKE on EC2 | Nginx Ingress Controller -RKE on DigitalOcean | Nginx Ingress Controller -RKE on vSphere | Nginx Ingress Controller -RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller - -### Host Names in Layer-7 Load Balancer - -Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. - -Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: - -1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposes by the Layer-7 load balancer. -2. Ask Rancher to generate an sslip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name ..a.b.c.d.sslip.io. - -The benefit of using sslip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. - -## Related Links - -- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md deleted file mode 100644 index 5d5767eec4..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/secrets/_index.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Secrets -weight: 3062 -aliases: - - /rancher/v2.5/en/tasks/projects/add-a-secret - - /rancher/v2.5/en/k8s-in-rancher/secrets - - /rancher/v2.x/en/k8s-in-rancher/secrets/ ---- - -[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. - -> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.]({{}}/rancher/v2.5/en/k8s-in-rancher/registries) - -When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# Creating Secrets - -When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. - -1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. - -2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. - -3. Enter a **Name** for the secret. - - >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. - -4. 
Select a **Scope** for the secret. You can either make the secret available for the entire project or a single namespace. - -5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. - - >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. - > - > {{< img "/img/rancher/bulk-key-values.gif" "Bulk Key Value Pair Copy/Paste">}} - -1. Click **Save**. - -**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. - -Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) - -# What's Next? - -Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. - -For more information on adding a secret to a workload, see [Deploying Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/). diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md deleted file mode 100644 index 83b5147b30..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Kubernetes Workloads and Pods" -description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" -weight: 3025 -aliases: - - /rancher/v2.5/en/concepts/workloads/ - - /rancher/v2.5/en/tasks/workloads/ - - /rancher/v2.5/en/k8s-in-rancher/workloads - - /rancher/v2.x/en/k8s-in-rancher/workloads/ ---- - -You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. - -### Pods - -[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. - -### Workloads - -_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. -Workloads let you define the rules for application scheduling, scaling, and upgrade. - -#### Workload Types - -Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: - -- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - - _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server.
- -- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - - _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An application would be something like Zookeeper—an application that requires a database for storage. - -- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - - _Daemonsets_ ensures that every node in the cluster runs a copy of pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best. - -- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) - - _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state. - -- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) - - _CronJobs_ are similar to jobs. CronJobs, however, runs to completion on a cron-based schedule. - -### Services - -In many use cases, a workload has to be either: - -- Accessed by other workloads in the cluster. -- Exposed to the outside world. - -You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. - -#### Service Types - -There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). - -- **ClusterIP** - - >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. - -- **NodePort** - - >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `:`. - -- **LoadBalancer** - - >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. - -## Workload Options - -This section of the documentation contains instructions for deploying workloads and using workload options. 
- -- [Deploy Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/) -- [Upgrade Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/) -- [Rollback Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/) - -## Related Links - -### External Links - -- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md deleted file mode 100644 index 807020f2cb..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Adding a Sidecar -weight: 3029 -aliases: - - /rancher/v2.5/en/tasks/workloads/add-a-sidecar/ - - /rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar - - /rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/ ---- -A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. - -1. From the **Global** view, open the project running the workload you want to add a sidecar to. - -1. Click **Resources > Workloads.** - -1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. - -1. Enter a **Name** for the sidecar. - -1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. - - - **Standard Container:** - - The sidecar container is deployed after the main container. - - - **Init Container:** - - The sidecar container is deployed before the main container. - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. - -1. Set the remaining options. You can read about them in [Deploying Workloads](../deploy-workloads). - -1. Click **Launch**. - -**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. - -## Related Links - -- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md deleted file mode 100644 index 4d90d3f5db..0000000000 --- a/content/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Deploying Workloads -description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. -weight: 3026 -aliases: - - /rancher/v2.5/en/tasks/workloads/deploy-workloads/ - - /rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads - - /rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/ ---- - -Deploy a workload to run an application in one or more containers. - -1. 
From the **Global** view, open the project that you want to deploy a workload to. - -1. 1. Click **Resources > Workloads.** From the **Workloads** view, click **Deploy**. - -1. Enter a **Name** for the workload. - -1. Select a [workload type]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** - -1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. - -1. Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. - -1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/#services). - -1. Configure the remaining options: - - - **Environment Variables** - - Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps/). - - - **Node Scheduling** - - **Health Check** - - **Volumes** - - Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps/). - - When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. - - - **Scaling/Upgrade Policy** - - >**Amazon Note for Volumes:** - > - > To mount an Amazon EBS volume: - > - >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. - > - >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/ec2/) or [Creating a Custom Cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/custom-nodes). - - -1. Click **Show Advanced Options** and configure: - - - **Command** - - **Networking** - - **Labels & Annotations** - - **Security and Host Config** - -1. Click **Launch**. - -**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. 
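For readers who want to see what these steps produce under the hood, the sketch below shows a roughly equivalent Kubernetes Deployment manifest. This is an illustrative approximation only, not the exact object Rancher generates (Rancher also creates the accompanying service for any port mapping), and the workload name, namespace, image, and port are hypothetical placeholders.

```yaml
# Illustrative sketch only: an approximation of the Deployment created by the
# steps above. The name, namespace, image, and port are placeholders.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-workload          # the Name entered for the workload
  namespace: my-namespace    # the namespace selected or created in the form
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-workload
  template:
    metadata:
      labels:
        app: my-workload
    spec:
      containers:
        - name: my-workload
          image: registry.gitlab.com/user/path/image:tag   # the Docker Image field
          ports:
            - containerPort: 80                             # the Add Port mapping
```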
diff --git a/content/rancher/v2.5/en/logging/_index.md b/content/rancher/v2.5/en/logging/_index.md deleted file mode 100644 index df4bdc8a9d..0000000000 --- a/content/rancher/v2.5/en/logging/_index.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: Rancher Integration with Logging Services -shortTitle: Logging -description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. -metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." -weight: 15 -aliases: - - /rancher/v2.5/en/dashboard/logging - - /rancher/v2.5/en/logging/v2.5 - - /rancher/v2.5/en/cluster-admin/tools/logging - - /rancher/v2.x/en/logging/ - - /rancher/v2.x/en/logging/v2.5/ ---- - -The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. - -For an overview of the changes in v2.5, see [this section.]({{}}/rancher/v2.5/en/logging/architecture/#changes-in-rancher-v2-5) For information about migrating from Logging V1, see [this page.](./migrating) - -- [Enabling Logging](#enabling-logging) -- [Uninstall Logging](#uninstall-logging) -- [Architecture](#architecture) -- [Role-based Access Control](#role-based-access-control) -- [Configuring the Logging Custom Resources](#configuring-the-logging-custom-resources) - - [Flows and ClusterFlows](#flows-and-clusterflows) - - [Outputs and ClusterOutputs](#outputs-and-clusteroutputs) -- [Configuring the Logging Helm Chart](#configuring-the-logging-helm-chart) - - [Windows Support](#windows-support) - - [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) - - [Working with Taints and Tolerations](#working-with-taints-and-tolerations) - - [Logging V2 with SELinux](#logging-v2-with-selinux) - - [Additional Logging Sources](#additional-logging-sources) -- [Troubleshooting](#troubleshooting) - -# Enabling Logging - -You can enable the logging for a Rancher managed cluster by going to the Apps page and installing the logging app. - -1. In the Rancher UI, go to the cluster where you want to install logging and click **Cluster Explorer**. -1. Click **Apps**. -1. Click the `rancher-logging` app. -1. Scroll to the bottom of the Helm chart README and click **Install**. - -**Result:** The logging app is deployed in the `cattle-logging-system` namespace. - -# Uninstall Logging - -1. From the **Cluster Explorer**, click **Apps & Marketplace**. -1. Click **Installed Apps**. -1. Go to the `cattle-logging-system` namespace and check the boxes for `rancher-logging` and `rancher-logging-crd`. -1. Click **Delete**. -1. Confirm **Delete**. - -**Result** `rancher-logging` is uninstalled. - -# Architecture - -For more information about how the logging application works, see [this section.](./architecture) - - - -# Role-based Access Control - -Rancher logging has two roles, `logging-admin` and `logging-view`. For more information on how and when to use these roles, see [this page.](./rbac) - -# Configuring Logging Custom Resources - -To manage `Flows,` `ClusterFlows`, `Outputs`, and `ClusterOutputs`, go to the **Cluster Explorer** in the Rancher UI. In the upper left corner, click **Cluster Explorer > Logging**. 
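Because these objects are ordinary custom resources from the Banzai Cloud Logging operator, they can also be inspected from the command line. The commands below are a minimal sketch, assuming `kubectl` access to the cluster and the CRDs installed by the `rancher-logging-crd` chart:

```bash
# ClusterFlows and ClusterOutputs only take effect in the operator's namespace
kubectl get clusterflows.logging.banzaicloud.io,clusteroutputs.logging.banzaicloud.io -n cattle-logging-system

# Flows and Outputs are namespaced; list them across all namespaces
kubectl get flows.logging.banzaicloud.io,outputs.logging.banzaicloud.io --all-namespaces
```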
- -### Flows and ClusterFlows - -For help with configuring `Flows` and `ClusterFlows`, see [this page.](./custom-resource-config/flows) - -### Outputs and ClusterOutputs - -For help with configuring `Outputs` and `ClusterOutputs`, see [this page.](./custom-resource-config/outputs) - -# Configuring the Logging Helm Chart - -For a list of options that can be configured when the logging application is installed or upgraded, see [this page.](./helm-chart-options) - -### Windows Support - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} -As of Rancher v2.5.8, logging support for Windows clusters has been added and logs can be collected from Windows nodes. - -For details on how to enable or disable Windows node logging, see [this section.](./helm-chart-options/#enable-disable-windows-node-logging) - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} -Clusters with Windows workers support exporting logs from Linux nodes, but Windows node logs are currently unable to be exported. -Only Linux node logs are able to be exported. - -To allow the logging pods to be scheduled on Linux nodes, tolerations must be added to the pods. Refer to the [Working with Taints and Tolerations]({{}}/rancher/v2.5/en/logging/taints-tolerations/) section for details and an example. -{{% /tab %}} -{{% /tabs %}} - - -### Working with a Custom Docker Root Directory - -For details on using a custom Docker root directory, see [this section.](./helm-chart-options/#working-with-a-custom-docker-root-directory) - - -### Working with Taints and Tolerations - -For information on how to use taints and tolerations with the logging application, see [this page.](./taints-tolerations) - - -### Logging V2 with SELinux - -_Available as of v2.5.8_ - -For information on enabling the logging application for SELinux-enabled nodes, see [this section.](./helm-chart-options/#enabling-the-logging-application-to-work-with-selinux) - -### Additional Logging Sources - -By default, Rancher collects logs for control plane components and node components for all cluster types. In some cases additional logs can be collected. For details, see [this section.](./helm-chart-options/#enabling-the-logging-application-to-work-with-selinux) - - -# Troubleshooting - -### The `cattle-logging` Namespace Being Recreated - -If your cluster previously deployed logging from the Cluster Manager UI, you may encounter an issue where its `cattle-logging` namespace is continually being recreated. - -The solution is to delete all `clusterloggings.management.cattle.io` and `projectloggings.management.cattle.io` custom resources from the cluster specific namespace in the management cluster. -The existence of these custom resources causes Rancher to create the `cattle-logging` namespace in the downstream cluster if it does not exist. - -The cluster namespace matches the cluster ID, so we need to find the cluster ID for each cluster. - -1. In your web browser, navigate to your cluster(s) in either the Cluster Manager UI or the Cluster Explorer UI. -2. Copy the `` portion from one of the URLs below. The `` portion is the cluster namespace name. - -```bash -# Cluster Management UI -https:///c// - -# Cluster Explorer UI (Dashboard) -https:///dashboard/c// -``` - -Now that we have the `` namespace, we can delete the CRs that cause `cattle-logging` to be continually recreated. -*Warning:* ensure that logging, the version installed from the Cluster Manager UI, is not currently in use. 
- -```bash -kubectl delete clusterloggings.management.cattle.io -n -kubectl delete projectloggings.management.cattle.io -n -``` diff --git a/content/rancher/v2.5/en/logging/architecture/_index.md b/content/rancher/v2.5/en/logging/architecture/_index.md deleted file mode 100644 index 7c397a4a82..0000000000 --- a/content/rancher/v2.5/en/logging/architecture/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -This section summarizes the architecture of the Rancher logging application. - -For more details about how the Banzai Cloud Logging operator works, see the [official documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) - -### Changes in Rancher v2.5 - -The following changes were introduced to logging in Rancher v2.5: - -- The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. -- [Fluent Bit](https://fluentbit.io/) is now used to aggregate the logs, and [Fluentd](https://www.fluentd.org/) is used for filtering the messages and routing them to the `Outputs`. Previously, only Fluentd was used. -- Logging can be configured with a Kubernetes manifest, because logging now uses a Kubernetes operator with Custom Resource Definitions. -- We now support filtering logs. -- We now support writing logs to multiple `Outputs`. -- We now always collect Control Plane and etcd logs. - -### How the Banzai Cloud Logging Operator Works - -The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. - -Fluent Bit queries the Kubernetes API and enriches the logs with metadata about the pods, and transfers both the logs and the metadata to Fluentd. Fluentd receives, filters, and transfers logs to multiple `Outputs`. - -The following custom resources are used to define how logs are filtered and sent to their `Outputs`: - -- A `Flow` is a namespaced custom resource that uses filters and selectors to route log messages to the appropriate `Outputs`. -- A `ClusterFlow` is used to route cluster-level log messages. -- An `Output` is a namespaced resource that defines where the log messages are sent. -- A `ClusterOutput` defines an `Output` that is available from all `Flows` and `ClusterFlows`. - -Each `Flow` must reference an `Output`, and each `ClusterFlow` must reference a `ClusterOutput`. - -The following figure from the [Banzai documentation](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) shows the new logging architecture: - -
How the Banzai Cloud Logging Operator Works with Fluentd and Fluent Bit
- -![How the Banzai Cloud Logging Operator Works with Fluentd]({{}}/img/rancher/banzai-cloud-logging-operator.png) \ No newline at end of file diff --git a/content/rancher/v2.5/en/logging/custom-resource-config/flows/_index.md b/content/rancher/v2.5/en/logging/custom-resource-config/flows/_index.md deleted file mode 100644 index 7f2a7dc321..0000000000 --- a/content/rancher/v2.5/en/logging/custom-resource-config/flows/_index.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: Flows and ClusterFlows -weight: 1 ---- - -For the full details on configuring `Flows` and `ClusterFlows`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/output/) - -- [Configuration](#configuration) -- [YAML Example](#yaml-example) - -# Configuration - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -- [Flows](#flows-2-5-8) - - [Matches](#matches-2-5-8) - - [Filters](#filters-2-5-8) - - [Outputs](#outputs-2-5-8) -- [ClusterFlows](#clusterflows-2-5-8) - -# Changes in v2.5.8 - -The `Flows` and `ClusterFlows` can now be configured by filling out forms in the Rancher UI. - - - - -# Flows - -A `Flow` defines which logs to collect and filter and which output to send the logs to. - -The `Flow` is a namespaced resource, which means logs will only be collected from the namespace that the `Flow` is deployed in. - -For more details about the `Flow` custom resource, see [FlowSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/flow_types/) - - - - -### Matches - -Match statements are used to select which containers to pull logs from. - -You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies. - -Matches can be configured by filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. - -For detailed examples on using the match statement, see the [official documentation on log routing.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/log-routing/) - - - -### Filters - -You can define one or more filters within a `Flow`. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the `Flow` are applied in the order in the definition. - -For a list of filters supported by the Banzai Cloud Logging operator, see [this page.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/filters/) - -Filters need to be configured in YAML. - - - -### Outputs - -This `Output` will receive logs from the `Flow`. Because the `Flow` is a namespaced resource, the `Output` must reside in same namespace as the `Flow`. - -`Outputs` can be referenced when filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. - - - -# ClusterFlows - -Matches, filters and `Outputs` are configured for `ClusterFlows` in the same way that they are configured for `Flows`. The key difference is that the `ClusterFlow` is scoped at the cluster level and can configure log collection across all namespaces. - -After `ClusterFlow` selects logs from all namespaces in the cluster, logs from the cluster will be collected and logged to the selected `ClusterOutput`. 
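To make the concepts above concrete, the following is a minimal, illustrative sketch of a `Flow` whose match rules first exclude and then select pods by label. The names, labels, and the referenced `Output` are hypothetical placeholders; match rules are evaluated in order, as described in the Matches section above.

```yaml
# Illustrative sketch only; the name, labels, and Output reference are placeholders.
apiVersion: logging.banzaicloud.io/v1beta1
kind: Flow
metadata:
  name: match-example
  namespace: default
spec:
  match:
    - exclude:               # drop logs from pods labelled app=debug first...
        labels:
          app: debug
    - select:                # ...then collect logs from pods labelled app=nginx
        labels:
          app: nginx
  localOutputRefs:
    - example-output         # an Output in the same namespace
```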
- -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - -- [Flows](#flows-2-5-0) - - [Matches](#matches-2-5-0) - - [Filters](#filters-2-5-0) - - [Outputs](#outputs-2-5-0) -- [ClusterFlows](#clusterflows-2-5-0) - - - - -# Flows - -A `Flow` defines which logs to collect and filter and which `Output` to send the logs to. The `Flow` is a namespaced resource, which means logs will only be collected from the namespace that the `Flow` is deployed in. - -`Flows` need to be defined in YAML. - -For more details about the `Flow` custom resource, see [FlowSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/flow_types/) - - - - -### Matches - -Match statements are used to select which containers to pull logs from. - -You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies. - -For detailed examples on using the match statement, see the [official documentation on log routing.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/log-routing/) - - - -### Filters - -You can define one or more filters within a `Flow`. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the `Flow` are applied in the order in the definition. - -For a list of filters supported by the Banzai Cloud Logging operator, see [this page.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/filters/) - - - -### Outputs - -This `Output` will receive logs from the `Flow`. - -Because the `Flow` is a namespaced resource, the `Output` must reside in same namespace as the `Flow`. - - - -# ClusterFlows - -Matches, filters and `Outputs` are also configured for `ClusterFlows`. The only difference is that the `ClusterFlow` is scoped at the cluster level and can configure log collection across all namespaces. - -`ClusterFlow` selects logs from all namespaces in the cluster. Logs from the cluster will be collected and logged to the selected `ClusterOutput`. - -`ClusterFlows` need to be defined in YAML. 
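As a point of reference, a minimal `ClusterFlow` written in YAML might look like the sketch below. It follows the same shape as the cluster-wide examples in the Outputs and ClusterOutputs documentation; the name and the referenced `ClusterOutput` are placeholders.

```yaml
# Illustrative sketch only; the name and ClusterOutput reference are placeholders.
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterFlow
metadata:
  name: example-clusterflow
  namespace: cattle-logging-system   # same namespace as the logging operator
spec:
  globalOutputRefs:
    - example-clusteroutput          # ClusterOutput that receives the collected logs
```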
- -{{% /tab %}} -{{% /tabs %}} - - -# YAML Example - -The following example `Flow` transforms the log messages from the default namespace and sends them to an S3 `Output`: - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Flow -metadata: - name: flow-sample - namespace: default -spec: - filters: - - parser: - remove_key_name_field: true - parse: - type: nginx - - tag_normaliser: - format: ${namespace_name}.${pod_name}.${container_name} - localOutputRefs: - - s3-output - match: - - select: - labels: - app: nginx -``` diff --git a/content/rancher/v2.5/en/logging/custom-resource-config/outputs/_index.md b/content/rancher/v2.5/en/logging/custom-resource-config/outputs/_index.md deleted file mode 100644 index 6e86e5d54c..0000000000 --- a/content/rancher/v2.5/en/logging/custom-resource-config/outputs/_index.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: Outputs and ClusterOutputs -weight: 2 ---- - -For the full details on configuring `Outputs` and `ClusterOutputs`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/output/) - -- [Configuration](#configuration) -- [YAML Examples](#yaml-examples) - - [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) - - [Output to Splunk](#output-to-splunk) - - [Output to Syslog](#output-to-syslog) - - [Unsupported Outputs](#unsupported-outputs) - -# Configuration - -{{% tabs %}} -{{% tab "v2.5.8+" %}} - -- [Outputs](#outputs-2-5-8) -- [ClusterOutputs](#clusteroutputs-2-5-8) - -# Changes in v2.5.8 - -The `Outputs` and `ClusterOutputs` can now be configured by filling out forms in the Rancher UI. - - - -# Outputs - -The `Output` resource defines where your `Flows` can send the log messages. `Outputs` are the final stage for a logging `Flow`. - -The `Output` is a namespaced resource, which means only a `Flow` within the same namespace can access it. - -You can use secrets in these definitions, but they must also be in the same namespace. - -For the details of `Output` custom resource, see [OutputSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/output_types/) - -The Rancher UI provides forms for configuring the following `Output` types: - -- Amazon ElasticSearch -- Azure Storage -- Cloudwatch -- Datadog -- Elasticsearch -- File -- Fluentd -- GCS -- Kafka -- Kinesis Stream -- LogDNA -- LogZ -- Loki -- New Relic -- Splunk -- SumoLogic -- Syslog - -The Rancher UI provides forms for configuring the `Output` type, target, and access credentials if applicable. - -For example configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) - - - -# ClusterOutputs - -`ClusterOutput` defines an `Output` without namespace restrictions. It is only effective when deployed in the same namespace as the logging operator. - -For the details of the `ClusterOutput` custom resource, see [ClusterOutput.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/clusteroutput_types/) - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} - - -- [Outputs](#outputs-2-5-0) -- [ClusterOutputs](#clusteroutputs-2-5-0) - - - -# Outputs - -The `Output` resource defines where your `Flows` can send the log messages. 
`Outputs` are the final stage for a logging `Flow`. - -The `Output` is a namespaced resource, which means only a `Flow` within the same namespace can access it. - -You can use secrets in these definitions, but they must also be in the same namespace. - -`Outputs` are configured in YAML. For the details of `Output` custom resource, see [OutputSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/output_types/) - -For examples of configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) - - - -# ClusterOutputs - -`ClusterOutput` defines an `Output` without namespace restrictions. It is only effective when deployed in the same namespace as the logging operator. - -The Rancher UI provides forms for configuring the `ClusterOutput` type, target, and access credentials if applicable. - -`ClusterOutputs` are configured in YAML. For the details of `ClusterOutput` custom resource, see [ClusterOutput.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/clusteroutput_types/) - -For example configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) - -{{% /tab %}} -{{% /tabs %}} - - -# YAML Examples - -Once logging is installed, you can use these examples to help craft your own logging pipeline. - -- [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) -- [Output to Splunk](#output-to-splunk) -- [Output to Syslog](#output-to-syslog) -- [Unsupported Outputs](#unsupported-outputs) - -### Cluster Output to ElasticSearch - -Let's say you wanted to send all logs in your cluster to an `elasticsearch` cluster. First, we create a cluster `Output`. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterOutput -metadata: - name: "example-es" - namespace: "cattle-logging-system" -spec: - elasticsearch: - host: elasticsearch.example.com - port: 9200 - scheme: http -``` - -We have created this `ClusterOutput`, without elasticsearch configuration, in the same namespace as our operator: `cattle-logging-system.`. Any time we create a `ClusterFlow` or `ClusterOutput`, we have to put it in the `cattle-logging-system` namespace. - -Now that we have configured where we want the logs to go, let's configure all logs to go to that `ClusterOutput`. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterFlow -metadata: - name: "all-logs" - namespace: "cattle-logging-system" -spec: - globalOutputRefs: - - "example-es" -``` - -We should now see our configured index with logs in it. - - -### Output to Splunk - -What if we have an application team who only wants logs from a specific namespaces sent to a `splunk` server? For this case, we can use namespaced `Outputs` and `Flows`. - -Before we start, let's set up that team's application: `coolapp`. 
- -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: devteam ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coolapp - namespace: devteam - labels: - app: coolapp -spec: - replicas: 2 - selector: - matchLabels: - app: coolapp - template: - metadata: - labels: - app: coolapp - spec: - containers: - - name: generator - image: paynejacob/loggenerator:latest -``` - -With `coolapp` running, we will follow a similar path as when we created a `ClusterOutput`. However, unlike `ClusterOutputs`, we create our `Output` in our application's namespace. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Output -metadata: - name: "devteam-splunk" - namespace: "devteam" -spec: - splunkHec: - hec_host: splunk.example.com - hec_port: 8088 - protocol: http -``` - -Once again, let's feed our `Output` some logs: - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: Flow -metadata: - name: "devteam-logs" - namespace: "devteam" -spec: - localOutputRefs: - - "devteam-splunk" -``` - - -### Output to Syslog - -Let's say you wanted to send all logs in your cluster to an `syslog` server. First, we create a `ClusterOutput`: - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterOutput -metadata: - name: "example-syslog" - namespace: "cattle-logging-system" -spec: - syslog: - buffer: - timekey: 30s - timekey_use_utc: true - timekey_wait: 10s - flush_interval: 5s - format: - type: json - app_name_field: test - host: syslog.example.com - insecure: true - port: 514 - transport: tcp -``` - -Now that we have configured where we want the logs to go, let's configure all logs to go to that `Output`. - -```yaml -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterFlow -metadata: - name: "all-logs" - namespace: cattle-logging-system -spec: - globalOutputRefs: - - "example-syslog" -``` - -### Unsupported Outputs - -For the final example, we create an `Output` to write logs to a destination that is not supported out of the box: - -> **Note on syslog** As of Rancher v2.5.4, `syslog` is a supported `Output`. However, this example still provides an overview on using unsupported plugins. 
- -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: syslog-config - namespace: cattle-logging-system -type: Opaque -stringData: - fluent-bit.conf: | - [INPUT] - Name forward - Port 24224 - - [OUTPUT] - Name syslog - InstanceName syslog-output - Match * - Addr syslog.example.com - Port 514 - Cluster ranchers - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fluentbit-syslog-forwarder - namespace: cattle-logging-system - labels: - output: syslog -spec: - selector: - matchLabels: - output: syslog - template: - metadata: - labels: - output: syslog - spec: - containers: - - name: fluentbit - image: paynejacob/fluent-bit-out-syslog:latest - ports: - - containerPort: 24224 - volumeMounts: - - mountPath: "/fluent-bit/etc/" - name: configuration - volumes: - - name: configuration - secret: - secretName: syslog-config ---- -apiVersion: v1 -kind: Service -metadata: - name: syslog-forwarder - namespace: cattle-logging-system -spec: - selector: - output: syslog - ports: - - protocol: TCP - port: 24224 - targetPort: 24224 ---- -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterFlow -metadata: - name: all-logs - namespace: cattle-logging-system -spec: - globalOutputRefs: - - syslog ---- -apiVersion: logging.banzaicloud.io/v1beta1 -kind: ClusterOutput -metadata: - name: syslog - namespace: cattle-logging-system -spec: - forward: - servers: - - host: "syslog-forwarder.cattle-logging-system" - require_ack_response: false - ignore_network_errors_at_startup: false -``` - -Let's break down what is happening here. First, we create a deployment of a container that has the additional `syslog` plugin and accepts logs forwarded from another `fluentd`. Next we create an `Output` configured as a forwarder to our deployment. The deployment `fluentd` will then forward all logs to the configured `syslog` destination. diff --git a/content/rancher/v2.5/en/logging/helm-chart-options/_index.md b/content/rancher/v2.5/en/logging/helm-chart-options/_index.md deleted file mode 100644 index a3786c264b..0000000000 --- a/content/rancher/v2.5/en/logging/helm-chart-options/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: rancher-logging Helm Chart Options -shortTitle: Helm Chart Options -weight: 4 ---- - -- [Enable/Disable Windows Node Logging](#enable-disable-windows-node-logging) -- [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) -- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) -- [Enabling the Logging Application to Work with SELinux](#enabling-the-logging-application-to-work-with-selinux) -- [Additional Logging Sources](#additional-logging-sources) - - -### Enable/Disable Windows Node Logging - -_Available as of v2.5.8_ - -You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. - -By default, Windows node logging will be enabled if the Cluster Explorer UI is used to install the logging application on a Windows cluster. - -In this scenario, setting `global.cattle.windows.enabled` to `false` will disable Windows node logging on the cluster. -When disabled, logs will still be collected from Linux nodes within the Windows cluster. - -> Note: Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists where Windows nodeAgents are not deleted when performing a `helm upgrade` after disabling Windows logging in a Windows cluster. 
In this scenario, users may need to manually remove the Windows nodeAgents if they are already installed. - -### Working with a Custom Docker Root Directory - -_Applies to v2.5.6+_ - -If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. - -This will ensure that the Logging CRs created will use your specified path rather than the default Docker `data-root` location. - -Note that this only affects Linux nodes. - -If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. - -### Adding NodeSelector Settings and Tolerations for Custom Taints - -You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](../taints-tolerations) - -### Enabling the Logging Application to Work with SELinux - -_Available as of v2.5.8_ - -> **Requirements:** Logging v2 was tested with SELinux on RHEL/CentOS 7 and 8. - -[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. - -To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.]({{}}/rancher/v2.5/en/security/selinux/#installing-the-rancher-selinux-rpm) - -Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. - -### Additional Logging Sources - -By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. - -In some cases, Rancher may be able to collect additional logs. - -The following table summarizes the sources where additional logs may be collected for each node types: - -| Logging Source | Linux Nodes (including in Windows cluster) | Windows Nodes | -| --- | --- | ---| -| RKE | ✓ | ✓ | -| RKE2 | ✓ | | -| K3s | ✓ | | -| AKS | ✓ | | -| EKS | ✓ | | -| GKE | ✓ | | - -To enable hosted Kubernetes providers as additional logging sources, go to **Cluster Explorer > Logging > Chart Options** and select the **Enable enhanced cloud provider logging** option. - -When enabled, Rancher collects all additional node and control plane logs the provider has made available, which may vary between providers - -If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. diff --git a/content/rancher/v2.5/en/logging/migrating/_index.md b/content/rancher/v2.5/en/logging/migrating/_index.md deleted file mode 100644 index b648cb27fd..0000000000 --- a/content/rancher/v2.5/en/logging/migrating/_index.md +++ /dev/null @@ -1,195 +0,0 @@ ---- -title: Migrating to Rancher v2.5 Logging -weight: 2 -aliases: - - /rancher/v2.5/en/logging/v2.5/migrating - - /rancher/v2.x/en/logging/v2.5/migrating/ ---- -Starting in v2.5, the logging feature available within Rancher has been completely overhauled. 
The [logging operator](https://github.com/banzaicloud/logging-operator) from Banzai Cloud has been adopted; Rancher configures this tooling for use when deploying logging. - -Among the many features and changes in the new logging functionality is the removal of project-specific logging configurations. Instead, one now configures logging at the namespace level. Cluster-level logging remains available, but configuration options differ. - -> Note: The pre-v2.5 user interface is now referred to as the _Cluster Manager_. The v2.5+ dashboard is referred to as the _Cluster Explorer_. - -- [Installation](#installation) - - [Terminology](#terminology) -- [Cluster Logging](#cluster-logging) -- [Project Logging](#project-logging) -- [Output Configuration](#output-configuration) - - [Elasticsearch](#elasticsearch) - - [Splunk](#splunk) - - [Kafka](#kafka) - - [Fluentd](#fluentd) - - [Syslog](#syslog) -- [Custom Log Fields](#custom-log-fields) -- [System Logging](#system-logging) - -# Installation - -To install logging in Rancher v2.5+, refer to the [installation instructions]({{}}/rancher/v2.5/en/logging/#enabling-logging). - -### Terminology - -In v2.5, logging configuration is centralized under a _Logging_ menu option available in the _Cluster Explorer_. It is from this menu option that logging for both cluster and namespace is configured. - -> Note: Logging is installed on a per-cluster basis. You will need to navigate between clusters to configure logging for each cluster. - -There are four key concepts to understand for v2.5+ logging: - -1. Outputs - - `Outputs` are a configuration resource that determine a destination for collected logs. This is where settings for aggregators such as ElasticSearch, Kafka, etc. are stored. `Outputs` are namespaced resources. - -2. Flows - - `Flows` are a configuration resource that determine collection, filtering, and destination rules for logs. It is within a flow that one will configure what logs to collect, how to mutate or filter them, and which `Outputs` to send the logs to. `Flows` are namespaced resources, and can connect either to an `Output` in the same namespace, or a `ClusterOutput`. - -3. ClusterOutputs - - `ClusterOutputs` serve the same functionality as `Outputs`, except they are a cluster-scoped resource. `ClusterOutputs` are necessary when collecting logs cluster-wide, or if you wish to provide an `Output` to all namespaces in your cluster. - -4. ClusterFlows - - `ClusterFlows` serve the same function as `Flows`, but at the cluster level. They are used to configure log collection for an entire cluster, instead of on a per-namespace level. `ClusterFlows` are also where mutations and filters are defined, same as `Flows` (in functionality). - -# Cluster Logging - -To configure cluster-wide logging for v2.5+ logging, one needs to set up a `ClusterFlow`. This object defines the source of logs, any transformations or filters to be applied, and finally the `Output` (or `Outputs`) for the logs. - -> Important: `ClusterFlows` must be defined within the `cattle-logging-system` namespace. `ClusterFlows` will not work if defined in any other namespace. - -In legacy logging, in order to collect logs from across the entire cluster, one only needed to enable cluster-level logging and define the desired `Output`. This basic approach remains in v2.5+ logging. To replicate legacy cluster-level logging, follow these steps: - -1. 
Define a `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) -2. Create a `ClusterFlow`, ensuring that it is set to be created in the `cattle-logging-system` namespace - 1. Remove all _Include_ and _Exclude_ rules from the `Flow` definition. This ensures that all logs are gathered. - 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation - 3. Define your cluster `Output` or `Outputs` - -This will result in logs from all sources in the cluster (all pods, and all system components) being collected and sent to the `Output` or `Outputs` you defined in the `ClusterFlow`. - -# Project Logging - -Logging in v2.5+ is not project-aware. This means that in order to collect logs from pods running in project namespaces, you will need to define `Flows` for those namespaces. - -To collect logs from a specific namespace, follow these steps: - -1. Define an `Output` or `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) -2. Create a `Flow`, ensuring that it is set to be created in the namespace in which you want to gather logs. - 1. If you wish to define _Include_ or _Exclude_ rules, you may do so. Otherwise, removal of all rules will result in all pods in the target namespace having their logs collected. - 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation - 3. Define your outputs - these can be either `ClusterOutput` or `Output` objects. - -This will result in logs from all sources in the namespace (pods) being collected and sent to the `Output` (or `Outputs`) you defined in your `Flow`. - -> To collect logs from a project, repeat the above steps for every namespace within the project. Alternatively, you can label your project workloads with a common label (e.g. `project=my-project`) and use a `ClusterFlow` to collect logs from all pods matching this label. - -# Output Configuration -In legacy logging, there are five logging destinations to choose from: Elasticsearch, Splunk, Kafka, Fluentd, and Syslog. With the exception of Syslog, all of these destinations are available in logging v2.5+. - - -### Elasticsearch - -| Legacy Logging | v2.5+ Logging | Notes | -|-----------------------------------------------|-----------------------------------|-----------------------------------------------------------| -| Endpoint | Target -> Host | Make sure to specify Scheme (https/http), as well as Port | -| X-Pack Security -> Username | Access -> User | | -| X-Pack Security -> Password | Access -> Password | Password must now be stored in a secret | -| SSL Configuration -> Client Private Key | SSL -> Client Key | Key must now be stored in a secret | -| SSL Configuration -> Client Certificate | SSL -> Client Cert | Certificate must now be stored in a secret | -| SSL Configuration -> Client Key Password | SSL -> Client Key Pass | Password must now be stored in a secret | -| SSL Configuration -> Enabled SSL Verification | SSL -> Certificate Authority File | Certificate must now be stored in a secret | - - -In legacy logging, indices were automatically created according to the format in the "Index Patterns" section. In v2.5 logging, default behavior has been changed to logging to a single index. You can still configure index pattern functionality on the `Output` object by editing as YAML and inputting the following values: - -``` -... -spec: - elasticsearch: - ... 
- logstash_format: true - logstash_prefix: - logstash_dateformat: "%Y-%m-%d" -``` - -Replace `` with the prefix for the indices that will be created. In legacy logging, this defaulted to the name of the cluster. - -### Splunk - -| Legacy Logging | v2.5+ Logging | Notes | -|------------------------------------------|----------------------------------------|----------------------------------------------------------------------------------------| -| HEC Configuration -> Endpoint | Target -> Host | Protocol (https/http) and port must be defined separately from the host | -| HEC Configuration -> Token | Access -> Token | Token must now be stored as a secret | -| HEC Configuration -> Index | Edit as YAML -> `index` | `index` field must be added as YAML key under `spec.splunkHec` | -| HEC Configuration -> Source | Edit as YAML -> `source` | `source` field must be added as YAML key under `spec.splunkHec` | -| SSL Configuration -> Client Private Key | Edit as YAML -> `client_key` | `client_key` field must be added as YAML key under `spec.splunkHec`. See (1) | -| SSL Configuration -> Client Certificate | Edit as YAML -> `client_cert` | `client_cert` field must be added as YAML key under `spec.splunkHec`. See (1) | -| SSL Configuration -> Client Key Password | _Not Supported_ | Specifying a password for the client private key is not currently supported. | -| SSL Configuration -> SSL Verify | Edit as YAML -> `ca_file` or `ca_path` | `ca_file` or `ca_path` field must be added as YAML key under `spec.splunkHec`. See (2) | - -_(1) `client_key` and `client_cert` values must be paths to the key and cert files, respectively. These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -_(2) Users can configure either `ca_file` (a path to a PEM-encoded CA certificate) or `ca_path` (a path to a directory containing CA certificates in PEM format). These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -### Kafka - -| Legacy Logging | v2.5+ Logging | Notes | -|-----------------------------------------|----------------------------|------------------------------------------------------| -| Kafka Configuration -> Endpoint Type | - | Zookeeper is no longer supported as an endpoint type | -| Kafka Configuration -> Endpoint | Target -> Brokers | Comma-separated list of brokers (host:port) | -| Kafka Configuration -> Topic | Target -> Default Topic | | -| SSL Configuration -> Client Private Key | SSL -> SSL Client Cert | Certificate must be stored as a secret | -| SSL Configuration -> Client Certificate | SSL -> SSL Client Cert Key | Key must be stored as a secret | -| SSL Configuration -> CA Certificate PEM | SSL -> SSL CA Cert | Certificate must be stored as a secret | -| SASL Configuration -> Username | Access -> Username | Username must be stored in a secret | -| SASL Configuration -> Password | Access -> Password | Password must be stored in a secret | -| SASL Configuration -> Scram Mechanism | Access -> Scram Mechanism | Input mechanism as string, e.g. "sha256" or "sha512" | - -### Fluentd - -As of v2.5.2, it is only possible to add a single Fluentd server using the "Edit as Form" option. To add multiple servers, edit the `Output` as YAML and input multiple servers. 
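A hedged sketch of what such an `Output`, edited as YAML with two forward servers, might look like follows; the hostnames, ports, and weights are placeholders, and the field names follow the `spec.forward.servers[n]` keys referenced in the table below.

```yaml
# Illustrative sketch only; hosts, ports, and weights are placeholders.
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: fluentd-forward
  namespace: my-namespace
spec:
  forward:
    servers:
      - host: fluentd-a.example.com
        port: 24224
        weight: 60
      - host: fluentd-b.example.com
        port: 24224
        weight: 40
```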
- -| Legacy Logging | v2.5+ Logging | Notes | -|------------------------------------------|-----------------------------------------------------|----------------------------------------------------------------------| -| Fluentd Configuration -> Endpoint | Target -> Host, Port | Input the host and port separately | -| Fluentd Configuration -> Shared Key | Access -> Shared Key | Shared key must be stored as a secret | -| Fluentd Configuration -> Username | Access -> Username | Username must be stored as a secret | -| Fluentd Configuration -> Password | Access -> Password | Password must be stored as a secret | -| Fluentd Configuration -> Hostname | Edit as YAML -> `host` | `host` field set as YAML key under `spec.forward.servers[n]` | -| Fluentd Configuration -> Weight | Edit as YAML -> `weight` | `weight` field set as YAML key under `spec.forward.servers[n]` | -| SSL Configuration -> Use TLS | - | Do not need to explicitly enable. Define client cert fields instead. | -| SSL Configuration -> Client Private Key | Edit as YAML -> `tls_private_key_path` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> Client Certificate | Edit as YAML -> `tls_client_cert_path` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> Client Key Password | Edit as YAML -> `tls_client_private_key_passphrase` | Field set as YAML key under `spec.forward`. See (1) | -| SSL Configuration -> SSL Verify | Edit as YAML -> `tls_insecure_mode` | Field set as YAML key under `spec.forward`. Default: `false` | -| SSL Configuration -> CA Certificate PEM | Edit as YAML -> `tls_cert_path` | Field set as YAML key under `spec.forward`. See (1) | -| Enable Gzip Compression | - | No longer supported in v2.5+ logging | - -_(1) These values are to be specified as paths to files. Those files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ - -### Syslog - -As of v2.5.2, syslog is not currently supported for `Outputs` using v2.5+ logging. - -# Custom Log Fields - -In order to add custom log fields, you will need to add the following YAML to your `Flow` configuration: - -``` -... -spec: - filters: - - record_modifier: - records: - - foo: "bar" -``` - -(replace `foo: "bar"` with custom log fields you wish to add) - -# System Logging - -In legacy logging, collecting logs from system components was accomplished by checking a box labeled "Include System Log" when setting up cluster logging. In v2.5+ logging, system logs are gathered in one of two ways: - -1. Gather all cluster logs, not specifying any match or exclusion rules. This results in all container logs from the cluster being collected, which includes system logs. -2. Specifically target system logs by adding match rules for system components. Specific match rules depend on the component being collected. \ No newline at end of file diff --git a/content/rancher/v2.5/en/logging/taints-tolerations/_index.md b/content/rancher/v2.5/en/logging/taints-tolerations/_index.md deleted file mode 100644 index 9e75640385..0000000000 --- a/content/rancher/v2.5/en/logging/taints-tolerations/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Working with Taints and Tolerations -weight: 6 ---- - -"Tainting" a Kubernetes node causes pods to repel running on that node. - -Unless the pods have a `toleration` for that node's taint, they will run on other nodes in the cluster. 
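As a purely illustrative aside, a taint can be applied to a node from the command line; the node name, key, and value below are placeholders.

```bash
# Taint a node so that only pods with a matching toleration can be scheduled on it
kubectl taint nodes my-node example-key=example-value:NoSchedule

# Remove the same taint again (note the trailing dash)
kubectl taint nodes my-node example-key=example-value:NoSchedule-
```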
- -[Taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) can work in conjunction with the `nodeSelector` [field](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) within the `PodSpec`, which enables the *opposite* effect of a taint. - -Using `nodeSelector` gives pods an affinity towards certain nodes. - -Both provide choice for the what node(s) the pod will run on. - -- [Default Implementation in Rancher's Logging Stack](#default-implementation-in-rancher-s-logging-stack) -- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) - - -### Default Implementation in Rancher's Logging Stack - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} -By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. -The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. -Moreover, most logging stack pods run on Linux only and have a `nodeSelector` added to ensure they run on Linux nodes. - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} -By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. -The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. -Moreover, we can populate the `nodeSelector` to ensure that our pods *only* run on Linux nodes. - -{{% /tab %}} -{{% /tabs %}} - -This example Pod YAML file shows a nodeSelector being used with a toleration: - -```yaml -apiVersion: v1 -kind: Pod -# metadata... -spec: - # containers... - tolerations: - - key: cattle.io/os - operator: "Equal" - value: "linux" - effect: NoSchedule - nodeSelector: - kubernetes.io/os: linux -``` - -In the above example, we ensure that our pod only runs on Linux nodes, and we add a `toleration` for the taint we have on all of our Linux nodes. - -You can do the same with Rancher's existing taints, or with your own custom ones. - -### Adding NodeSelector Settings and Tolerations for Custom Taints - -If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. - -```yaml -tolerations: - # insert tolerations... -nodeSelector: - # insert nodeSelector... -``` - -These values will add both settings to the `fluentd`, `fluentbit`, and `logging-operator` containers. -Essentially, these are global settings for all pods in the logging stack. - -However, if you would like to add tolerations for *only* the `fluentbit` container, you can add the following to the chart's values. - -```yaml -fluentbit_tolerations: - # insert tolerations list for fluentbit containers only... -``` diff --git a/content/rancher/v2.5/en/longhorn/_index.md b/content/rancher/v2.5/en/longhorn/_index.md deleted file mode 100644 index 2fc1fe7698..0000000000 --- a/content/rancher/v2.5/en/longhorn/_index.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: Longhorn - Cloud native distributed block storage for Kubernetes -shortTitle: Longhorn Storage -weight: 19 -aliases: - - /rancher/v2.x/en/longhorn/ ---- - -[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. - -Longhorn is free, open source software. 
Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) - -With Longhorn, you can: - -- Use Longhorn volumes as persistent storage for the distributed stateful applications in your Kubernetes cluster -- Partition your block storage into Longhorn volumes so that you can use Kubernetes volumes with or without a cloud provider -- Replicate block storage across multiple nodes and data centers to increase availability -- Store backup data in external storage such as NFS or AWS S3 -- Create cross-cluster disaster recovery volumes so that data from a primary Kubernetes cluster can be quickly recovered from backup in a second Kubernetes cluster -- Schedule recurring snapshots of a volume, and schedule recurring backups to NFS or S3-compatible secondary storage -- Restore volumes from backup -- Upgrade Longhorn without disrupting persistent volumes - -
Longhorn Dashboard
-![Longhorn Dashboard]({{}}/img/rancher/longhorn-screenshot.png) - -### New in Rancher v2.5 - -Before Rancher v2.5, Longhorn could be installed as a Rancher catalog app. In Rancher v2.5, the catalog system was replaced by the **Apps & Marketplace,** and it became possible to install Longhorn as an app from that page. - -The **Cluster Explorer** now allows you to manipulate Longhorn's Kubernetes resources from the Rancher UI. So now you can control the Longhorn functionality with the Longhorn UI, or with kubectl, or by manipulating Longhorn's Kubernetes custom resources in the Rancher UI. - -These instructions assume you are using Rancher v2.5, but Longhorn can be installed with earlier Rancher versions. For documentation about installing Longhorn as a catalog app using the legacy Rancher UI, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/deploy/install/install-with-rancher/) - -### Installing Longhorn with Rancher - -1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/1.1.0/deploy/install/#installation-requirements) -1. Go to the **Cluster Explorer** in the Rancher UI. -1. Click **Apps.** -1. Click `longhorn`. -1. Optional: To customize the initial settings, click **Longhorn Default Settings** and edit the configuration. For help customizing the settings, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/references/settings/) -1. Click **Install.** - -**Result:** Longhorn is deployed in the Kubernetes cluster. - -### Accessing Longhorn from the Rancher UI - -1. From the **Cluster Explorer," go to the top left dropdown menu and click **Cluster Explorer > Longhorn.** -1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. - -**Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. - -### Uninstalling Longhorn from the Rancher UI - -1. Click **Cluster Explorer > Apps & Marketplace.** -1. Click **Installed Apps.** -1. Go to the `longhorn-system` namespace and check the boxes next to the `longhorn` and `longhorn-crd` apps. -1. Click **Delete,** and confirm **Delete.** - -**Result:** Longhorn is uninstalled. - -### GitHub Repository - -The Longhorn project is available [here.](https://github.com/longhorn/longhorn) - -### Documentation - -The Longhorn documentation is [here.](https://longhorn.io/docs/) - -### Architecture - -Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. - -The storage controller and replicas are themselves orchestrated using Kubernetes. - -You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) - -
Longhorn Architecture
-![Longhorn Architecture]({{}}/img/rancher/longhorn-architecture.svg) diff --git a/content/rancher/v2.5/en/monitoring-alerting/_index.md b/content/rancher/v2.5/en/monitoring-alerting/_index.md deleted file mode 100644 index 268b3dc85b..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Monitoring and Alerting -shortTitle: Monitoring/Alerting -description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring -weight: 13 -aliases: - - /rancher/v2.x/en/monitoring-alerting/ - - /rancher/v2.x/en/monitoring-alerting/v2.5/ ---- - -Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. - -- [Features](#features) -- [How Monitoring Works](#how-monitoring-works) -- [Default Components and Deployments](#default-components-and-deployments) -- [Role-based Access Control](#role-based-access-control) -- [Guides](#guides) -- [Windows Cluster Support](#windows-cluster-support) -- [Known Issues](#known-issues) - -### Features - -Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. - -By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, or restore crashed servers. - -The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) - -The monitoring application allows you to: - -- Monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments -- Define alerts based on metrics collected via Prometheus -- Create custom Grafana dashboards -- Configure alert-based notifications via Email, Slack, PagerDuty, etc. using Prometheus Alertmanager -- Defines precomputed, frequently needed or computationally expensive expressions as new time series based on metrics collected via Prometheus -- Expose collected metrics from Prometheus to the Kubernetes Custom Metrics API via Prometheus Adapter for use in HPA - -# How Monitoring Works - -For an explanation of how the monitoring components work together, see [this page.](./how-monitoring-works) - -# Default Components and Deployments - -### Built-in Dashboards - -By default, the monitoring application deploys Grafana dashboards (curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project) onto a cluster. - -It also deploys an Alertmanager UI and a Prometheus UI. 
For more information about these tools, see [Built-in Dashboards.](./dashboards) -### Default Metrics Exporters - -By default, Rancher Monitoring deploys exporters (such as [node-exporter](https://github.com/prometheus/node_exporter) and [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)). - -These default exporters automatically scrape metrics for CPU and memory from all components of your Kubernetes cluster, including your workloads. - -### Default Alerts - -The monitoring application deploys some alerts by default. To see the default alerts, go to the [Alertmanager UI](./dashboards/#alertmanager-ui) and click **Expand all groups.** - -### Components Exposed in the Rancher UI - -For a list of monitoring components exposed in the Rancher UI, along with common use cases for editing them, see [this section.](./how-monitoring-works/#components-exposed-in-the-rancher-ui) - -# Role-based Access Control - -For information on configuring access to monitoring, see [this page.](./rbac) - -# Guides - -- [Enable monitoring](./guides/enable-monitoring) -- [Uninstall monitoring](./guides/uninstall) -- [Monitoring workloads](./guides/monitoring-workloads) -- [Customizing Grafana dashboards](./guides/customize-grafana) -- [Persistent Grafana dashboards](./guides/persist-grafana) -- [Debugging high memory usage](./guides/memory-usage) -- [Migrating from Monitoring V1 to V2](./guides/migrating) - -# Configuration - -### Configuring Monitoring Resources in Rancher - -> The configuration reference assumes familiarity with how monitoring components work together. For more information, see [How Monitoring Works.](./how-monitoring-works) - -- [ServiceMonitor and PodMonitor](./configuration/servicemonitor-podmonitor) -- [Receiver](./configuration/receiver) -- [Route](./configuration/route) -- [PrometheusRule](./configuration/advanced/prometheusrules) -- [Prometheus](./configuration/advanced/prometheus) -- [Alertmanager](./configuration/advanced/alertmanager) - -### Configuring Helm Chart Options - -For more information on `rancher-monitoring` chart options, including options to set resource limits and requests, see [this page.](./configuration/helm-chart-options) - -# Windows Cluster Support - -_Available as of v2.5.8_ - -When deployed onto an RKE1 Windows cluster, Monitoring V2 will now automatically deploy a [windows-exporter](https://github.com/prometheus-community/windows_exporter) DaemonSet and set up a ServiceMonitor to collect metrics from each of the deployed Pods. This will populate Prometheus with `windows_` metrics that are akin to the `node_` metrics exported by [node_exporter](https://github.com/prometheus/node_exporter) for Linux hosts. - -To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts must have a minimum [wins](https://github.com/rancher/wins) version of v0.1.0. - -For more details on how to upgrade wins on existing Windows hosts, refer to the section on [Windows cluster support for Monitoring V2.](./windows-clusters) - - - -# Known Issues - -There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more default memory. 
If you are enabling monitoring on a K3s cluster, we recommend setting `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. - -For tips on debugging high memory usage, see [this page.](./guides/memory-usage) diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md deleted file mode 100644 index 2d7867cbc4..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: Configuration -weight: 5 -aliases: - - /rancher/v2.5/en/monitoring-alerting/configuration - - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/ - - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/ ---- - -This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. - -For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration. - -# Setting Resource Limits and Requests - -The resource requests and limits for the monitoring application can be configured when installing `rancher-monitoring`. For more information about the default limits, see [this page.](./helm-chart-options/#configuring-resource-limits-and-requests) - ->**Note:** On an idle cluster, Monitoring V2 has significantly higher CPU usage (up to 70%) as compared to Monitoring V1. To improve performance and achieve similar results as in Monitoring V1, turn off the Prometheus adapter. - -# Prometheus Configuration - -It is usually not necessary to directly edit the Prometheus custom resource. - -Instead, to configure Prometheus to scrape custom metrics, you only need to create a new ServiceMonitor or PodMonitor. - - -### ServiceMonitor and PodMonitor Configuration - -For details, see [this page.](./servicemonitor-podmonitor) - -### Advanced Prometheus Configuration - -For more information about directly editing the Prometheus custom resource, which may be helpful in advanced use cases, see [this page.](./advanced/prometheus) - -# Alertmanager Configuration - -The Alertmanager custom resource usually doesn't need to be edited directly. For most common use cases, you can manage alerts by updating Routes and Receivers. - -Routes and receivers are part of the configuration of the Alertmanager custom resource. In the Rancher UI, Routes and Receivers are not true custom resources, but pseudo-custom resources that the Prometheus Operator uses to synchronize your configuration with the Alertmanager custom resource. When routes and receivers are updated, the monitoring application will automatically update Alertmanager to reflect those changes. - -For some advanced use cases, you may want to configure Alertmanager directly. For more information, refer to [this page.](./advanced/alertmanager) - -### Receivers - -Receivers are used to set up notifications.
For details on how to configure receivers, see [this page.](./receiver) -### Routes - -Routes filter notifications before they reach receivers. Each route needs to refer to a receiver that has already been configured. For details on how to configure routes, see [this page.](./route) - -### Advanced - -For more information about directly editing the Alertmanager custom resource, which may be helpful in advanced use cases, see [this page.](./advanced/alertmanager) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/alertmanager/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/alertmanager/_index.md deleted file mode 100644 index 3ae1ab9c02..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/alertmanager/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Alertmanager Configuration -weight: 1 ---- - -It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. - -When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. - -> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../../how-monitoring-works/#how-alertmanager-works) - -# About the Alertmanager Custom Resource - -By default, Rancher Monitoring deploys a single Alertmanager onto a cluster that uses a default Alertmanager Config Secret. - -You may want to edit the Alertmanager custom resource if you would like to take advantage of advanced options that are not exposed in the Rancher UI forms, such as the ability to create a routing tree structure that is more than two levels deep. - -It is also possible to create more than one Alertmanager in a cluster, which may be useful if you want to implement namespace-scoped monitoring. In this case, you should manage the Alertmanager custom resources using the same underlying Alertmanager Config Secret. - -### Deeply Nested Routes - -While the Rancher UI only supports a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager YAML. - -### Multiple Alertmanager Replicas - -As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret. - -This Secret should be updated or modified any time you want to: - -- Add in new notifiers or receivers -- Change the alerts that should be sent to specific notifiers or receivers -- Change the group of alerts that are sent out - -By default, you can either choose to supply an existing Alertmanager Config Secret (i.e. any Secret in the `cattle-monitoring-system` namespace) or allow Rancher Monitoring to deploy a default Alertmanager Config Secret onto your cluster. - -By default, the Alertmanager Config Secret created by Rancher will never be modified or deleted on an upgrade or uninstall of the `rancher-monitoring` chart. This restriction prevents users from losing or overwriting their alerting configuration when executing operations on the chart. 
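As a rough sketch of how such a Secret is laid out, the example below nests an Alertmanager configuration under the `alertmanager.yaml` key. The Secret name shown is an assumption about the default created by `rancher-monitoring`; verify the actual Secret name in the `cattle-monitoring-system` namespace before editing it, and treat the route and receiver names as illustrative.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: alertmanager-rancher-monitoring-alertmanager   # assumed default name; confirm in your cluster
  namespace: cattle-monitoring-system
stringData:
  alertmanager.yaml: |
    route:
      receiver: "default"
      routes:
        - receiver: "slack-critical"
          match:
            severity: critical
    receivers:
      - name: "default"
      - name: "slack-critical"
        slack_configs:
          - channel: "#alerts"
            api_url: https://hooks.slack.com/services/REPLACE/WITH/WEBHOOK
```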
- -For more information on what fields can be specified in the Alertmanager Config Secret, please look at the [Prometheus Alertmanager docs.](https://prometheus.io/docs/alerting/latest/alertmanager/) - -The full spec for the Alertmanager configuration file and what it takes in can be found [here.](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheus/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheus/_index.md deleted file mode 100644 index 358b0cc87a..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheus/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Prometheus Configuration -weight: 1 -aliases: - - /rancher/v2.5/en/monitoring-alerting/configuration/prometheusrules - - /rancher/v2.5/en/monitoring-alerting/configuration/prometheusrules - - /rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules ---- - -It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. -> This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../how-monitoring-works/) - -# About the Prometheus Custom Resource - -The Prometheus CR defines a desired Prometheus deployment. The Prometheus Operator observes the Prometheus CR. When the CR changes, the Prometheus Operator creates `prometheus-rancher-monitoring-prometheus`, a Prometheus deployment based on the CR configuration. - -The Prometheus CR specifies details such as rules and what Alertmanagers are connected to Prometheus. Rancher builds this CR for you. - -Monitoring V2 only supports one Prometheus per cluster. However, you might want to edit the Prometheus CR if you want to limit monitoring to certain namespaces. \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md deleted file mode 100644 index d727bc37fd..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Configuring PrometheusRules -weight: 3 -aliases: - - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/ ---- - -A PrometheusRule defines a group of Prometheus alerting and/or recording rules. - -> This section assumes familiarity with how monitoring components work together. For more information, see [this section.]({{}}/rancher/v2.5/en/monitoring-alerting/how-monitoring-works) - -### Creating PrometheusRules in the Rancher UI - -_Available as of v2.5.4_ - -> **Prerequisite:** The monitoring application needs to be installed. - -To create rule groups in the Rancher UI, - -1. Click **Cluster Explorer > Monitoring** and click **Prometheus Rules.** -1. Click **Create.** -1. Enter a **Group Name.** -1. Configure the rules. In Rancher's UI, we expect a rule group to contain either alert rules or recording rules, but not both. For help filling out the forms, refer to the configuration options below. -1. Click **Create.** - -**Result:** Alerts can be configured to send notifications to the receiver(s). 
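Behind the forms, Rancher creates a PrometheusRule resource. The sketch below shows roughly what a single-alert rule group looks like when written by hand; the resource name, namespace, expression, and threshold are illustrative, not defaults.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules              # illustrative name
  namespace: cattle-monitoring-system
spec:
  groups:
    - name: example-alert-group
      rules:
        - alert: PodHighMemory
          expr: sum(container_memory_working_set_bytes{container!=""}) by (pod) > 1e+09
          for: 5m
          labels:
            severity: warning      # Alertmanager routes can match on rule labels like this
          annotations:
            summary: "Pod {{ $labels.pod }} is using more than 1Gi of memory."
```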
- -### About the PrometheusRule Custom Resource - -When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive this Alert. For example, an Alert with the label `team: front-end` will be sent to all Routes that match on that label. - -Prometheus rule files are held in PrometheusRule custom resources. A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: - -- The name of the new alert or record -- A PromQL expression for the new alert or record -- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) -- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. - -For more information on what fields can be specified, please look at the [Prometheus Operator spec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec) - -Use the label selector field `ruleSelector` in the Prometheus object to define the rule files that you want to be mounted into Prometheus. - -For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) - -# Configuration - -{{% tabs %}} -{{% tab "Rancher v2.5.4" %}} -Rancher v2.5.4 introduced the capability to configure PrometheusRules by filling out forms in the Rancher UI. - - -### Rule Group - -| Field | Description | -|-------|----------------| -| Group Name | The name of the group. Must be unique within a rules file. | -| Override Group Interval | Duration in seconds for how often rules in the group are evaluated. | - - -### Alerting Rules - -[Alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) allow you to define alert conditions based on PromQL (Prometheus Query Language) expressions and to send notifications about firing alerts to an external service. - -| Field | Description | -|-------|----------------| -| Alert Name | The name of the alert. Must be a valid label value. | -| Wait To Fire For | Duration in seconds. Alerts are considered firing once they have been returned for this long. Alerts which have not yet fired for long enough are considered pending. | -| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and all resultant time series will become pending/firing alerts. For more information, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../../../expression) | -| Labels | Labels to add or overwrite for each alert. 
| -| Severity | When enabled, labels are attached to the alert or record that identify it by the severity level. | -| Severity Label Value | Critical, warning, or none | -| Annotations | Annotations are a set of informational labels that can be used to store longer additional information, such as alert descriptions or runbook links. A [runbook](https://en.wikipedia.org/wiki/Runbook) is a set of documentation about how to handle alerts. The annotation values can be [templated.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#templating) | - -### Recording Rules - -[Recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) allow you to precompute frequently needed or computationally expensive PromQL (Prometheus Query Language) expressions and save their result as a new set of time series. - -| Field | Description | -|-------|----------------| -| Time Series Name | The name of the time series to output to. Must be a valid metric name. | -| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and the result will be recorded as a new set of time series with the metric name as given by 'record'. For more information about expressions, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../expression) | -| Labels | Labels to add or overwrite before storing the result. | - -{{% /tab %}} -{{% tab "Rancher v2.5.0-v2.5.3" %}} -For Rancher v2.5.0-v2.5.3, PrometheusRules must be configured in YAML. 
For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/examples/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/examples/_index.md deleted file mode 100644 index 7488fbf127..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/examples/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Examples -weight: 400 ---- - -### ServiceMonitor - -An example ServiceMonitor custom resource can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) - -### PodMonitor - -An example PodMonitor can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml) An example Prometheus resource that refers to it can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml) - -### PrometheusRule - -For users who are familiar with Prometheus, a PrometheusRule contains the alerting and recording rules that you would normally place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). - -For a more fine-grained application of PrometheusRules within your cluster, the ruleSelector field on a Prometheus resource allows you to select which PrometheusRules should be loaded onto Prometheus based on the labels attached to the PrometheusRules resources. - -An example PrometheusRule is on [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) - -### Alertmanager Config - -For an example configuration, refer to [this section.](../advanced/alertmanager/#example-alertmanager-config) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/_index.md deleted file mode 100644 index f50178d38e..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/_index.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Helm Chart Options -weight: 8 ---- - -- [Configuring Resource Limits and Requests](#configuring-resource-limits-and-requests) -- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) -- [Additional Scrape Configurations](#additional-scrape-configurations) -- [Configuring Applications Packaged within Monitoring V2](#configuring-applications-packaged-within-monitoring-v2) -- [Increase the Replicas of Alertmanager](#increase-the-replicas-of-alertmanager) -- [Configuring the Namespace for a Persistent Grafana Dashboard](#configuring-the-namespace-for-a-persistent-grafana-dashboard) - - -# Configuring Resource Limits and Requests - -The resource requests and limits can be configured when installing `rancher-monitoring`. 
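For example, values like the following could be supplied when installing or upgrading the chart to set the Prometheus requests and limits explicitly (the numbers shown simply mirror the defaults in the table below). This is a sketch that assumes the chart passes these values through to the upstream `prometheusSpec` paths; verify the exact keys against the chart's values.yaml.

```yaml
prometheus:
  prometheusSpec:
    resources:
      requests:
        cpu: 750m
        memory: 1750Mi
      limits:
        cpu: 1000m
        memory: 2500Mi
```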
- -The default values are in the [values.yaml](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/values.yaml) in the `rancher-monitoring` Helm chart. - -The default values in the table below are the minimum required resource limits and requests. - -| Resource Name | Memory Limit | CPU Limit | Memory Request | CPU Request | -| ------------- | ------------ | ----------- | ---------------- | ------------------ | -| alertmanager | 500Mi | 1000m | 100Mi | 100m | -| grafana | 200Mi | 200m | 100Mi | 100m | -| kube-state-metrics subchart | 200Mi | 100m | 130Mi | 100m | -| prometheus-node-exporter subchart | 50Mi | 200m | 30Mi | 100m | -| prometheusOperator | 500Mi | 200m | 100Mi | 100m | -| prometheus | 2500Mi | 1000m | 1750Mi | 750m | -| **Total** | **3950Mi** | **2700m** | **2210Mi** | **1250m** | - -At least 50Gi storage is recommended. - - -# Trusted CA for Notifiers - -If you need to add a trusted CA to your notifier, follow these steps: - -1. Create the `cattle-monitoring-system` namespace. -1. Add your trusted CA secret to the `cattle-monitoring-system` namespace. -1. Deploy or upgrade the `rancher-monitoring` Helm chart. In the chart options, reference the secret in **Alerting > Additional Secrets.** - -**Result:** The default Alertmanager custom resource will have access to your trusted CA. - - -# Additional Scrape Configurations - -If the scrape configuration you want cannot be specified via a ServiceMonitor or PodMonitor at the moment, you can provide an `additionalScrapeConfigSecret` on deploying or upgrading `rancher-monitoring`. - -A [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) specifies a set of targets and parameters describing how to scrape them. In the general case, one scrape configuration specifies a single job. - -An example of where this might be used is with Istio. For more information, see [this section.](https://rancher.com/docs/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape) - - -# Configuring Applications Packaged within Monitoring v2 - -Monitoring V2 deploys kube-state-metrics and node-exporter alongside Prometheus, and node-exporter runs as a DaemonSet. In the Monitoring V2 Helm chart's values.yaml, each of these applications is deployed as a subchart. - -Grafana is also deployed, and it is not managed by Prometheus. - -Each subchart, such as kube-state-metrics, supports many more values than are exposed in the top-level chart. However, you can set values in the top-level chart that override the values in a subchart. - -### Increase the Replicas of Alertmanager - -As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret.
For more information on the Alertmanager Config Secret, refer to [this section]({{}}/monitoring-alerting/configuration/advanced/alertmanager/#multiple-alertmanager-replicas) - -### Configuring the Namespace for a Persistent Grafana Dashboard - -To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: - -``` -grafana.sidecar.dashboards.searchNamespace=ALL -``` - -Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/receiver/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/receiver/_index.md deleted file mode 100644 index 71dcad6a19..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/receiver/_index.md +++ /dev/null @@ -1,397 +0,0 @@ ---- -title: Receiver Configuration -shortTitle: Receivers -weight: 1 -aliases: - - /rancher/v2.5/en/monitoring-alerting/configuration/alertmanager - - rancher/v2.5/en/monitoring-alerting/legacy/notifiers/ - - /rancher/v2.5/en/cluster-admin/tools/notifiers - - /rancher/v2.5/en/cluster-admin/tools/alerts - - /rancher/v2.5/en/monitoring-alerting/configuration/alertmanager ---- - -The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus. - -> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../how-monitoring-works/#3-how-alertmanager-works) - -- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui) -- [Receiver Configuration](#receiver-configuration) - - [Slack](#slack) - - [Email](#email) - - [PagerDuty](#pagerduty) - - [Opsgenie](#opsgenie) - - [Webhook](#webhook) - - [Custom](#custom) - - [Teams](#teams) - - [SMS](#sms) -- [Route Configuration](#route-configuration) - - [Receiver](#receiver) - - [Grouping](#grouping) - - [Matching](#matching) -- [Configuring Multiple Receivers](#configuring-multiple-receivers) -- [Example Alertmanager Config](../examples/#example-alertmanager-config) -- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts) -- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) - -# Creating Receivers in the Rancher UI -_Available as of v2.5.4_ - -> **Prerequisites:** -> ->- The monitoring application needs to be installed. ->- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise you will only be able to make changes based on modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to what kinds of Alertmanager Configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement. - -To create notification receivers in the Rancher UI, - -1. Click **Cluster Explorer > Monitoring** and click **Receiver.** -2. Enter a name for the receiver. -3. Configure one or more providers for the receiver. 
For help filling out the forms, refer to the configuration options below. -4. Click **Create.** - -**Result:** Alerts can be configured to send notifications to the receiver(s). - -# Receiver Configuration - -The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) - -### Native vs. Non-native Receivers - -By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI. - -For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI. - -Currently the Rancher Alerting Drivers app provides access to the following integrations: -- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver -- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver - -### Changes in Rancher v2.5.8 - -Rancher v2.5.8 added Microsoft Teams and SMS as configurable receivers in the Rancher UI. - -### Changes in Rancher v2.5.4 - -Rancher v2.5.4 introduced the capability to configure receivers by filling out forms in the Rancher UI. - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -The following types of receivers can be configured in the Rancher UI: - -- Slack -- Email -- PagerDuty -- Opsgenie -- Webhook -- Custom -- Teams -- SMS - -The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. - -# Slack - -| Field | Type | Description | -|------|--------------|------| -| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | -| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | -| Proxy URL | String | Proxy for the webhook notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -# Email - -| Field | Type | Description | -|------|--------------|------| -| Default Recipient Address | String | The email address that will receive notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -SMTP options: - -| Field | Type | Description | -|------|--------------|------| -| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. 
| -| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | -| Use TLS | Bool | Use TLS for encryption. | -| Username | String | Enter a username to authenticate with the SMTP server. | -| Password | String | Enter a password to authenticate with the SMTP server. | - -# PagerDuty - -| Field | Type | Description | -|------|------|-------| -| Integration Type | String | `Events API v2` or `Prometheus`. | -| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | -| Proxy URL | String | Proxy for the PagerDuty notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -# Opsgenie - -| Field | Description | -|------|-------------| -| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | -| Proxy URL | Proxy for the Opsgenie notifications. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -Opsgenie Responders: - -| Field | Type | Description | -|-------|------|--------| -| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | -| Send To | String | Id, Name, or Username of the Opsgenie recipient. | - -# Webhook - -| Field | Description | -|-------|--------------| -| URL | Webhook URL for the app of your choice. | -| Proxy URL | Proxy for the webhook notification. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - - - -# Custom - -The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. - -# Teams - -### Enabling the Teams Receiver for Rancher Managed Clusters - -The Teams receiver is not a native receiver and must be enabled before it can be used. You can enable the Teams receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the Teams option selected. - -1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Cluster Explorer**. -1. Click **Apps**. -1. Click the **Alerting Drivers** app. -1. Click the **Helm Deploy Options** tab -1. Select the **Teams** option and click **Install**. -1. Take note of the namespace used as it will be required in a later step. - -### Configure the Teams Receiver - -The Teams receiver can be configured by updating its ConfigMap. For example, the following is a minimal Teams receiver configuration. - -```yaml -[Microsoft Teams] -teams-instance-1: https://your-teams-webhook-url -``` - -When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). 
- -Use the example below as the URL where: - -- `ns-1` is replaced with the namespace where the `rancher-alerting-drivers` app is installed - -```yaml -url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1 -``` - - - -# SMS - -### Enabling the SMS Receiver for Rancher Managed Clusters - -The SMS receiver is not a native receiver and must be enabled before it can be used. You can enable the SMS receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the SMS option selected. - -1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Cluster Explorer**. -1. Click **Apps**. -1. Click the **Alerting Drivers** app. -1. Click the **Helm Deploy Options** tab. -1. Select the **SMS** option and click **Install**. -1. Take note of the namespace used as it will be required in a later step. - -### Configure the SMS Receiver - -The SMS receiver can be configured by updating its ConfigMap. For example, the following is a minimal SMS receiver configuration. - -```yaml -providers: - telegram: - token: 'your-token-from-telegram' - -receivers: -- name: 'telegram-receiver-1' - provider: 'telegram' - to: - - '123456789' -``` - -When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). - -Use the example below as the name and URL, where: - -- the name assigned to the receiver, e.g. `telegram-receiver-1`, must match the name in the `receivers.name` field in the ConfigMap, e.g. `telegram-receiver-1` -- `ns-1` in the URL is replaced with the namespace where the `rancher-alerting-drivers` app is installed - -```yaml -name: telegram-receiver-1 -url: http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert -``` - - - -{{% /tab %}} -{{% tab "Rancher v2.5.4-2.5.7" %}} - -The following types of receivers can be configured in the Rancher UI: - -- Slack -- Email -- PagerDuty -- Opsgenie -- Webhook -- Custom - -The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. - -### Slack {#slack-254-257} - -| Field | Type | Description | -|------|--------------|------| -| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | -| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | -| Proxy URL | String | Proxy for the webhook notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Email {#email-254-257} - -| Field | Type | Description | -|------|--------------|------| -| Default Recipient Address | String | The email address that will receive notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -SMTP options: - -| Field | Type | Description | -|------|--------------|------| -| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | -| Host | String | Enter the IP address or hostname for your SMTP server.
Example: `smtp.email.com`. | -| Use TLS | Bool | Use TLS for encryption. | -| Username | String | Enter a username to authenticate with the SMTP server. | -| Password | String | Enter a password to authenticate with the SMTP server. | - -### PagerDuty {#pagerduty-254-257} - -| Field | Type | Description | -|------|------|-------| -| Integration Type | String | `Events API v2` or `Prometheus`. | -| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | -| Proxy URL | String | Proxy for the PagerDuty notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Opsgenie {#opsgenie-254-257} - -| Field | Description | -|------|-------------| -| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | -| Proxy URL | Proxy for the Opsgenie notifications. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -Opsgenie Responders: - -| Field | Type | Description | -|-------|------|--------| -| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | -| Send To | String | Id, Name, or Username of the Opsgenie recipient. | - -### Webhook {#webhook-1} - -| Field | Description | -|-------|--------------| -| URL | Webhook URL for the app of your choice. | -| Proxy URL | Proxy for the webhook notification. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -### Custom {#custom-254-257} - -The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. - -{{% /tab %}} -{{% tab "Rancher v2.5.0-2.5.3" %}} -The Alertmanager must be configured in YAML, as shown in these [examples.](#example-alertmanager-configs) -{{% /tab %}} -{{% /tabs %}} - -# Configuring Multiple Receivers - -By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. - -It is also possible to send alerts to multiple notification systems. One way is to configure the Receiver using custom YAML, in which case you can add the configuration for multiple notification systems, as long as you are sure that both systems should receive the same messages. - -You can also set up multiple receivers by using the `continue` option for a route, so that the alerts sent to a receiver continue being evaluated in the next level of the routing tree, which could contain another receiver. 
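The fragment below sketches how `continue` can fan the same alert out to two receivers. The receiver names are illustrative and would need to be defined in the `receivers` section of the same configuration.

```yaml
route:
  receiver: "default"
  routes:
    - match:
        severity: critical
      receiver: "pagerduty-oncall"
      continue: true          # keep evaluating the remaining sibling routes
    - match:
        severity: critical
      receiver: "slack-critical"
```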
- - -# Example Alertmanager Configs - -### Slack -To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: - -```yaml -route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 3h - receiver: 'slack-notifications' -receivers: -- name: 'slack-notifications' - slack_configs: - - send_resolved: true - text: '{{ template "slack.rancher.text" . }}' - api_url: -templates: -- /etc/alertmanager/config/*.tmpl -``` - -### PagerDuty -To set up notifications via PagerDuty, use the example below from the [PagerDuty documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) as a guideline. This example sets up a route that captures alerts for a database service and sends them to a receiver linked to a service that will directly notify the DBAs in PagerDuty, while all other alerts will be directed to a default receiver with a different PagerDuty integration key. - -The following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret. The `service_key` should be updated to use your PagerDuty integration key and can be found as per the "Integrating with Global Event Routing" section of the PagerDuty documentation. For the full list of configuration options, refer to the [Prometheus documentation](https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config). - -```yaml -route: - group_by: [cluster] - receiver: 'pagerduty-notifications' - group_interval: 5m - routes: - - match: - service: database - receiver: 'database-notifcations' - -receivers: -- name: 'pagerduty-notifications' - pagerduty_configs: - - service_key: 'primary-integration-key' - -- name: 'database-notifcations' - pagerduty_configs: - - service_key: 'database-integration-key' -``` - -# Example Route Config for CIS Scan Alerts - -While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. - -For example, the following example route configuration could be used with a Slack receiver named `test-cis`: - -```yaml -spec: - receiver: test-cis - group_by: -# - string - group_wait: 30s - group_interval: 30s - repeat_interval: 30s - match: - job: rancher-cis-scan -# key: string - match_re: - {} -# key: string -``` - -For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.]({{}}/rancher/v2.5/en/cis-scans/v2.5/#enabling-alerting-for-rancher-cis-benchmark) - - -# Trusted CA for Notifiers - -If you need to add a trusted CA to your notifier, follow the steps in [this section.](../helm-chart-options/#trusted-ca-for-notifiers) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/route/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/route/_index.md deleted file mode 100644 index 75c3294da7..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/route/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Route Configuration -shortTitle: Routes -weight: 5 ---- - -The route configuration is the section of the Alertmanager custom resource that controls how the alerts fired by Prometheus are grouped and filtered before they reach the receiver. 
- -When a Route is changed, the Prometheus Operator regenerates the Alertmanager custom resource to reflect the changes. - -For more information about configuring routes, refer to the [official Alertmanager documentation.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) - -> This section assumes familiarity with how monitoring components work together. For more information, see [this section.]({{}}/rancher/v2.5/en/monitoring-alerting/how-monitoring-works) - -- [Route Restrictions](#route-restrictions) -- [Route Configuration](#route-configuration) - - [Receiver](#receiver) - - [Grouping](#grouping) - - [Matching](#matching) - -# Route Restrictions - -Alertmanager proxies alerts for Prometheus based on its receivers and a routing tree that filters alerts to certain receivers based on labels. - -Alerting drivers proxy alerts for Alertmanager to non-native receivers, such as Microsoft Teams and SMS. - -In the Rancher UI for configuring routes and receivers, you can configure routing trees with one root and then a depth of one more level, for a tree with a depth of two. But if you use a `continue` route when configuring Alertmanager directly, you can make the tree deeper. - -Each receiver is for one or more notification providers. So if you know that every alert for Slack should also go to PagerDuty, you can configure both in the same receiver. - -# Route Configuration - -### Note on Labels and Annotations - -Labels should be used for identifying information that can affect the routing of notifications. Identifying information about the alert could consist of a container name, or the name of the team that should be notified. - -Annotations should be used for information that does not affect who receives the alert, such as a runbook url or error message. - -{{% tabs %}} -{{% tab "Rancher v2.5.4+" %}} - -### Receiver -The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. - -### Grouping - -| Field | Default | Description | -|-------|--------------|---------| -| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | -| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | -| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | -| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | - -### Matching - -The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: - -```yaml -match: - [ : , ... ] -``` - -The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. 
When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: - -```yaml -match_re: - [ : , ... ] -``` - -{{% /tab %}} -{{% tab "Rancher v2.5.0-2.5.3" %}} -The Alertmanager must be configured in YAML, as shown in this [example.](../examples/#alertmanager-config) -{{% /tab %}} -{{% /tabs %}} \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/servicemonitor-podmonitor/_index.md b/content/rancher/v2.5/en/monitoring-alerting/configuration/servicemonitor-podmonitor/_index.md deleted file mode 100644 index 39ddfd2b5a..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/configuration/servicemonitor-podmonitor/_index.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: ServiceMonitor and PodMonitor Configuration -shortTitle: ServiceMonitors and PodMonitors -weight: 7 ---- - -ServiceMonitors and PodMonitors are both pseudo-CRDs that map the scrape configuration of the Prometheus custom resource. - -These configuration objects declaratively specify the endpoints that Prometheus will scrape metrics from. - -ServiceMonitors are more commonly used than PodMonitors, and we recommend them for most use cases. - -> This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../how-monitoring-works/) - -### ServiceMonitors - -This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. - -When a ServiceMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the ServiceMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the ServiceMonitor. - -Any Services in your cluster that match the labels located within the ServiceMonitor `selector` field will be monitored based on the `endpoints` specified on the ServiceMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) provided by Prometheus Operator. - -For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) - -### PodMonitors - -This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. - -When a PodMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the PodMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the PodMonitor. - -Any Pods in your cluster that match the labels located within the PodMonitor `selector` field will be monitored based on the `podMetricsEndpoints` specified on the PodMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitorspec) provided by Prometheus Operator. 
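To tie the two sections above together, the sketch below is a minimal ServiceMonitor. The names, labels, namespace, and port are illustrative; any Service carrying the matching label in the selected namespace, with a named `metrics` port, would be scraped every 30 seconds.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app              # illustrative name
  namespace: cattle-monitoring-system
spec:
  selector:
    matchLabels:
      app: example-app           # Services with this label are selected
  namespaceSelector:
    matchNames:
      - default
  endpoints:
    - port: metrics              # must match a named port on the Service
      interval: 30s
```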
diff --git a/content/rancher/v2.5/en/monitoring-alerting/dashboards/_index.md b/content/rancher/v2.5/en/monitoring-alerting/dashboards/_index.md deleted file mode 100644 index 6cc8089f3c..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/dashboards/_index.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: Built-in Dashboards -weight: 3 ---- - -- [Grafana UI](#grafana-ui) -- [Alertmanager UI](#alertmanager-ui) -- [Prometheus UI](#prometheus-ui) - -# Grafana UI - -[Grafana](https://grafana.com/grafana/) allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. - -To see the default dashboards for time series data visualization, go to the Grafana UI. - -### Customizing Grafana - -To view and customize the PromQL queries powering the Grafana dashboard, see [this page.]({{}}/rancher/v2.5/en/monitoring-alerting/guides/customize-grafana) - -### Persistent Grafana Dashboards - -To create a persistent Grafana dashboard, see [this page.]({{}}/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana) - -### Access to Grafana - -For information about role-based access control for Grafana, see [this section.]({{}}/rancher/v2.5/en/monitoring-alerting/rbac/#role-based-access-control-for-grafana) - - -# Alertmanager UI - -When `rancher-monitoring` is installed, the Prometheus Alertmanager UI is deployed, allowing you to view your alerts and the current Alertmanager configuration. - -> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../how-monitoring-works/#how-alertmanager-works) - - -### Accessing the Alertmanager UI - -The Alertmanager UI lets you see the most recently fired alerts. - -> **Prerequisite:** The `rancher-monitoring` application must be installed. - -To see the Alertmanager UI, go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Alertmanager.** - -**Result:** The Alertmanager UI opens in a new tab. For help with configuration, refer to the [official Alertmanager documentation.](https://prometheus.io/docs/alerting/latest/alertmanager/) - -
The Alertmanager UI
-![Alertmanager UI]({{}}/img/rancher/alertmanager-ui.png) - - -### Viewing Default Alerts - -To see alerts that are fired by default, go to the Alertmanager UI and click **Expand all groups.** - - -# Prometheus UI - -By default, the [kube-state-metrics service](https://github.com/kubernetes/kube-state-metrics) provides a wealth of information about CPU and memory utilization to the monitoring application. These metrics cover Kubernetes resources across namespaces. This means that in order to see resource metrics for a service, you don't need to create a new ServiceMonitor for it. Because the data is already in the time series database, you can go to the Prometheus UI and run a PromQL query to get the information. The same query can be used to configure a Grafana dashboard to show a graph of those metrics over time. - -To see the Prometheus UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Graph.** - -
Prometheus Graph UI
-![Prometheus Graph UI]({{}}/img/rancher/prometheus-graph-ui.png) - -### Viewing the Prometheus Targets - -To see what services you are monitoring, you will need to see your targets. Targets are set up by ServiceMonitors and PodMonitors as sources to scrape metrics from. You won't need to directly edit targets, but the Prometheus UI can be useful for giving you an overview of all of the sources of metrics that are being scraped. - -To see the Prometheus Targets, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Targets.** - -
Targets in the Prometheus UI
-![Prometheus Targets UI]({{}}/img/rancher/prometheus-targets-ui.png) - -### Viewing the PrometheusRules - -When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive a certain Alert. - -To see the PrometheusRules, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Rules.** - -You can also see the rules in the Prometheus UI: - -
Rules in the Prometheus UI
-![PrometheusRules UI]({{}}/img/rancher/prometheus-rules-ui.png) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md b/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md deleted file mode 100644 index 5b170407ef..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/expression/_index.md +++ /dev/null @@ -1,435 +0,0 @@ ---- -title: PromQL Expression Reference -weight: 6 -aliases: - - /rancher/v2.5/en/project-admin/tools/monitoring/expression - - /rancher/v2.5/en/cluster-admin/tools/monitoring/expression - - /rancher/v2.5/en/monitoring-alerting/expression - - /rancher/v2.5/en/monitoring-alerting/configuration/expression - - /rancher/v2.5/en/monitoring/alerting/configuration/expression - - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/ ---- - -The PromQL expressions in this doc can be used to configure alerts. - -For more information about querying the Prometheus time series database, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) - - - -- [Cluster Metrics](#cluster-metrics) - - [Cluster CPU Utilization](#cluster-cpu-utilization) - - [Cluster Load Average](#cluster-load-average) - - [Cluster Memory Utilization](#cluster-memory-utilization) - - [Cluster Disk Utilization](#cluster-disk-utilization) - - [Cluster Disk I/O](#cluster-disk-i-o) - - [Cluster Network Packets](#cluster-network-packets) - - [Cluster Network I/O](#cluster-network-i-o) -- [Node Metrics](#node-metrics) - - [Node CPU Utilization](#node-cpu-utilization) - - [Node Load Average](#node-load-average) - - [Node Memory Utilization](#node-memory-utilization) - - [Node Disk Utilization](#node-disk-utilization) - - [Node Disk I/O](#node-disk-i-o) - - [Node Network Packets](#node-network-packets) - - [Node Network I/O](#node-network-i-o) -- [Etcd Metrics](#etcd-metrics) - - [Etcd Has a Leader](#etcd-has-a-leader) - - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) - - [Number of Failed Proposals](#number-of-failed-proposals) - - [GRPC Client Traffic](#grpc-client-traffic) - - [Peer Traffic](#peer-traffic) - - [DB Size](#db-size) - - [Active Streams](#active-streams) - - [Raft Proposals](#raft-proposals) - - [RPC Rate](#rpc-rate) - - [Disk Operations](#disk-operations) - - [Disk Sync Duration](#disk-sync-duration) -- [Kubernetes Components Metrics](#kubernetes-components-metrics) - - [API Server Request Latency](#api-server-request-latency) - - [API Server Request Rate](#api-server-request-rate) - - [Scheduling Failed Pods](#scheduling-failed-pods) - - [Controller Manager Queue Depth](#controller-manager-queue-depth) - - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) - - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) - - [Ingress Controller Connections](#ingress-controller-connections) - - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) -- [Rancher Logging Metrics](#rancher-logging-metrics) - - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) - - [Fluentd Input Rate](#fluentd-input-rate) - - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) - - [Fluentd Output Rate](#fluentd-output-rate) -- [Workload Metrics](#workload-metrics) - - [Workload CPU Utilization](#workload-cpu-utilization) - - [Workload Memory Utilization](#workload-memory-utilization) - - [Workload Network Packets](#workload-network-packets) - - [Workload 
Network I/O](#workload-network-i-o) - - [Workload Disk I/O](#workload-disk-i-o) -- [Pod Metrics](#pod-metrics) - - [Pod CPU Utilization](#pod-cpu-utilization) - - [Pod Memory Utilization](#pod-memory-utilization) - - [Pod Network Packets](#pod-network-packets) - - [Pod Network I/O](#pod-network-i-o) - - [Pod Disk I/O](#pod-disk-i-o) -- [Container Metrics](#container-metrics) - - [Container CPU Utilization](#container-cpu-utilization) - - [Container Memory Utilization](#container-memory-utilization) - - [Container Disk I/O](#container-disk-i-o) - - - -# Cluster Metrics - -### Cluster CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | - -### Cluster Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
| -| Summary |
load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`
load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
| - -### Cluster Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | - -### Cluster Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | - -### Cluster Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total[5m])) by (instance)`
written`sum(rate(node_disk_written_bytes_total[5m])) by (instance)`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total[5m]))`
written`sum(rate(node_disk_written_bytes_total[5m]))`
| - -### Cluster Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
| -| Summary |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
| - -### Cluster Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m])) by (instance)`
| -| Summary |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*"}[5m]))`
| - -# Node Metrics - -### Node CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | -| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | - -### Node Load Average - -| Catalog | Expression | -| --- | --- | -| Detail |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| -| Summary |
load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| - -### Node Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | -| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | - -### Node Disk Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | -| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | - -### Node Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| -| Summary |
read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| - -### Node Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
| - -### Node Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m])) by (device)`
| -| Summary |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*",instance=~"$instance"}[5m]))`
| - -# Etcd Metrics - -### Etcd Has a Leader - -`max(etcd_server_has_leader)` - -### Number of Times the Leader Changes - -`max(etcd_server_leader_changes_seen_total)` - -### Number of Failed Proposals - -`sum(etcd_server_proposals_failed_total)` - -### GRPC Client Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| - -### Peer Traffic - -| Catalog | Expression | -| --- | --- | -| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| -| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| - -### DB Size - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | -| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | - -### Active Streams - -| Catalog | Expression | -| --- | --- | -| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| -| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| - -### Raft Proposals - -| Catalog | Expression | -| --- | --- | -| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| -| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| - -### RPC Rate - -| Catalog | Expression | -| --- | --- | -| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| -| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| - -### Disk Operations - -| Catalog | Expression | -| --- | --- | -| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| -| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| - -### Disk Sync Duration - -| Catalog | Expression | -| --- | --- | -| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| -| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| - -# Kubernetes Components Metrics - -### API Server Request Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | -| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | - -### API Server Request Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | -| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | - -### Scheduling Failed Pods - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | -| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | - -### Controller Manager Queue Depth - -| Catalog | Expression | -| --- | --- | -| Detail |
volumes: `sum(volumes_depth) by (instance)`
deployment: `sum(deployment_depth) by (instance)`
replicaset: `sum(replicaset_depth) by (instance)`
service: `sum(service_depth) by (instance)`
serviceaccount: `sum(serviceaccount_depth) by (instance)`
endpoint: `sum(endpoint_depth) by (instance)`
daemonset: `sum(daemonset_depth) by (instance)`
statefulset: `sum(statefulset_depth) by (instance)`
replicationmanager: `sum(replicationmanager_depth) by (instance)`
| -| Summary |
volumes: `sum(volumes_depth)`
deployment: `sum(deployment_depth)`
replicaset: `sum(replicaset_depth)`
service: `sum(service_depth)`
serviceaccount: `sum(serviceaccount_depth)`
endpoint: `sum(endpoint_depth)`
daemonset: `sum(daemonset_depth)`
statefulset: `sum(statefulset_depth)`
replicationmanager: `sum(replicationmanager_depth)`
| - -### Scheduler E2E Scheduling Latency - -| Catalog | Expression | -| --- | --- | -| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | -| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | - -### Scheduler Preemption Attempts - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | -| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | - -### Ingress Controller Connections - -| Catalog | Expression | -| --- | --- | -| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| -| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| - -### Ingress Controller Request Process Time - -| Catalog | Expression | -| --- | --- | -| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | -| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | - -# Rancher Logging Metrics - - -### Fluentd Buffer Queue Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | - -### Fluentd Input Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | - -### Fluentd Output Errors Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | -| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | - -### Fluentd Output Rate - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | -| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | - -# Workload Metrics - -### Workload CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | -| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | - -### Workload Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -### Workload Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| - -# Pod Metrics - -### Pod CPU Utilization - -| Catalog | Expression | -| --- | --- | -| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| -| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| - -### Pod Memory Utilization - -| Catalog | Expression | -| --- | --- | -| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | -| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | - -### Pod Network Packets - -| Catalog | Expression | -| --- | --- | -| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Network I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| -| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -### Pod Disk I/O - -| Catalog | Expression | -| --- | --- | -| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| -| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| - -# Container Metrics - -### Container CPU Utilization - -| Catalog | Expression | -| --- | --- | -| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | - -### Container Memory Utilization - -`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` - -### Container Disk I/O - -| Catalog | Expression | -| --- | --- | -| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | -| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/customize-grafana/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/customize-grafana/_index.md deleted file mode 100644 index 225cc83b2b..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/customize-grafana/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Customizing Grafana Dashboards -weight: 5 ---- - -In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. - -### Prerequisites - -Before you can customize a Grafana dashboard, the `rancher-monitoring` application must be installed. - -To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.]({{}}/rancher/v2.5/en/monitoring-alerting/rbac/#users-with-rancher-cluster-manager-based-permissions) - -### Signing in to Grafana - -1. In the Rancher UI, go to the cluster that has the dashboard you want to customize. -1. In the left navigation menu, click **Monitoring.** -1. Click **Grafana.** The Grafana dashboard should open in a new tab. -1. Go to the log in icon in the lower left corner and click **Sign In.** -1. Log in to Grafana. The default Admin username and password for the Grafana instance is `admin/prom-operator`. (Regardless of who has the password, cluster administrator permission in Rancher is still required access the Grafana instance.) Alternative credentials can also be supplied on deploying or upgrading the chart. - - -### Getting the PromQL Query Powering a Grafana Panel - -For any panel, you can click the title and click **Explore** to get the PromQL queries powering the graphic. - -For this example, we would like to get the CPU usage for the Alertmanager container, so we click **CPU Utilization > Inspect.** - -The **Data** tab shows the underlying data as a time series, with the time in first column and the PromQL query result in the second column. Copy the PromQL query. - - ``` - (1 - (avg(irate({__name__=~"node_cpu_seconds_total|windows_cpu_time_total",mode="idle"}[5m])))) * 100 - - ``` - -You can then modify the query in the Grafana panel or create a new Grafana panel using the query. 
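For example, to narrow the copied query down to a single container, a query along the following lines could be used. This is only a sketch: the namespace and container label values are assumptions for this example, and on some Kubernetes versions the cAdvisor labels are `pod_name` and `container_name` rather than `pod` and `container`:

```
sum(rate(container_cpu_usage_seconds_total{namespace="cattle-monitoring-system", container="alertmanager"}[5m]))
```

The result is the CPU (in cores) consumed by the matching container, which can then be pasted into a new or existing Grafana panel.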
- -See also: - -- [Grafana docs on editing a panel](https://grafana.com/docs/grafana/latest/panels/panel-editor/) -- [Grafana docs on adding a panel to a dashboard](https://grafana.com/docs/grafana/latest/panels/add-a-panel/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/enable-monitoring/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/enable-monitoring/_index.md deleted file mode 100644 index 8301ccf6b6..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/enable-monitoring/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Enable Monitoring -weight: 1 ---- - -As an [administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/) or [cluster owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. - -This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. - -You can enable monitoring with or without SSL. - -# Requirements - -- Make sure that you are allowing traffic on port 9796 for each of your nodes because Prometheus will scrape metrics from here. -- Make sure your cluster fulfills the resource requirements. The cluster should have at least 1950Mi memory available, 2700m CPU, and 50Gi storage. A breakdown of the resource limits and requests is [here.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/#configuring-resource-limits-and-requests) -- When installing monitoring on an RKE cluster using RancherOS or Flatcar Linux nodes, change the etcd node certificate directory to `/opt/rke/etc/kubernetes/ssl`. -- For clusters provisioned with the RKE CLI and the address is set to a hostname instead of an IP address, set `rkeEtcd.clients.useLocalhost` to `true` during the Values configuration step of the installation. The YAML snippet will look like the following: - -```yaml -rkeEtcd: - clients: - useLocalhost: true -``` - -> **Note:** If you want to set up Alertmanager, Grafana or Ingress, it has to be done with the settings on the Helm chart deployment. It's problematic to create Ingress outside the deployment. - -# Setting Resource Limits and Requests - -The resource requests and limits can be configured when installing `rancher-monitoring`. To configure Prometheus resources from the Rancher UI, click **Apps & Marketplace > Monitoring** in the upper left corner. - -For more information about the default limits, see [this page.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/#configuring-resource-limits-and-requests) - -# Install the Monitoring Application - -{{% tabs %}} -{{% tab "Rancher v2.5.8" %}} - -### Enable Monitoring for use without SSL - -1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-monitoring` app. -1. Optional: Click **Chart Options** and configure alerting, Prometheus and Grafana. For help, refer to the [configuration reference.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/) -1. Scroll to the bottom of the Helm chart README and click **Install.** - -**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. - -### Enable Monitoring for use with SSL - -1. 
Follow the steps on [this page]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/) to create a secret in order for SSL to be used for alerts. - - The secret should be created in the `cattle-monitoring-system` namespace. If it doesn't exist, create it first. - - Add the `ca`, `cert`, and `key` files to the secret. -1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-monitoring` app. -1. Click **Alerting**. -1. Click **Additional Secrets** and add the secrets created earlier. - -**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. - -When [creating a receiver,]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/alertmanager/#creating-receivers-in-the-rancher-ui) SSL-enabled receivers such as email or webhook will have a **SSL** section with fields for **CA File Path**, **Cert File Path**, and **Key File Path**. Fill in these fields with the paths to each of `ca`, `cert`, and `key`. The path will be of the form `/etc/alertmanager/secrets/name-of-file-in-secret`. - -For example, if you created a secret with these key-value pairs: - -```yaml -ca.crt=`base64-content` -cert.pem=`base64-content` -key.pfx=`base64-content` -``` - -Then **Cert File Path** would be set to `/etc/alertmanager/secrets/cert.pem`. - -{{% /tab %}} -{{% tab "Rancher v2.5.0-2.5.7" %}} - -1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** -1. Click **Apps.** -1. Click the `rancher-monitoring` app. -1. Optional: Click **Chart Options** and configure alerting, Prometheus and Grafana. For help, refer to the [configuration reference.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration/helm-chart-options/) -1. Scroll to the bottom of the Helm chart README and click **Install.** - -**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. - -{{% /tab %}} - -{{% /tabs %}} diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md deleted file mode 100644 index 39ac78de43..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/migrating/_index.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Migrating to Rancher v2.5 Monitoring -weight: 9 -aliases: - - /rancher/v2.5/en/monitoring-alerting/migrating - - /rancher/v2.x/en/monitoring-alerting/v2.5/migrating/ ---- - -If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no automatic upgrade path for switching to the new monitoring/alerting solution. Before deploying the new monitoring solution via Cluster Explore, you will need to disable and remove all existing custom alerts, notifiers and monitoring installations for the whole cluster and in all projects. 
- -- [Monitoring Before Rancher v2.5](#monitoring-before-rancher-v2-5) -- [Monitoring and Alerting via Cluster Explorer in Rancher v2.5](#monitoring-and-alerting-via-cluster-explorer-in-rancher-v2-5) -- [Changes to Role-based Access Control](#changes-to-role-based-access-control) -- [Migrating from Monitoring V1 to Monitoring V2](#migrating-from-monitoring-v1-to-monitoring-v2) - - [Migrating Grafana Dashboards](#migrating-grafana-dashboards) - - [Migrating Alerts](#migrating-alerts) - - [Migrating Notifiers](#migrating-notifiers) - - [Migrating for RKE Template Users](#migrating-for-rke-template-users) - -# Monitoring Before Rancher v2.5 - -As of v2.2.0, Rancher's Cluster Manager allowed users to enable Monitoring & Alerting V1 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) independently within a cluster. - -When Monitoring is enabled, Monitoring V1 deploys [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/docs/grafana/latest/getting-started/what-is-grafana/) onto a cluster to monitor the state of processes of your cluster nodes, Kubernetes components, and software deployments and create custom dashboards to make it easy to visualize collected metrics. - -Monitoring V1 could be configured on both a cluster-level and on a project-level and would automatically scrape certain workloads deployed as Apps on the Rancher cluster. - -When Alerts or Notifiers are enabled, Alerting V1 deploys [Prometheus Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) and a set of Rancher controllers onto a cluster that allows users to define alerts and configure alert-based notifications via Email, Slack, PagerDuty, etc. Users can choose to create different types of alerts depending on what needs to be monitored (e.g. System Services, Resources, CIS Scans, etc.); however, PromQL Expression-based alerts can only be created if Monitoring V1 is enabled. - -# Monitoring and Alerting via Cluster Explorer in Rancher 2.5 - -As of v2.5.0, Rancher's Cluster Explorer now allows users to enable Monitoring & Alerting V2 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) together within a cluster. - -Unlike in Monitoring & Alerting V1, both features are packaged in a single Helm chart found [here](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring). The behavior of this chart and configurable fields closely matches [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), a Prometheus Community Helm chart, and any deviations from the upstream chart can be found in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md) maintained with the chart. - -Monitoring V2 can only be configured on the cluster level. Project-level monitoring and alerting is no longer supported. - -For more information on how to configure Monitoring & Alerting V2, see [this page.]({{}}/rancher/v2.5/en/monitoring-alerting/configuration) - -# Changes to Role-based Access Control - -Project owners and members no longer get access to Grafana or Prometheus by default. 
If view-only users had access to Grafana, they would be able to see data from any namespace. For Kiali, any user can edit things they don’t own in any namespace. - -For more information about role-based access control in `rancher-monitoring`, refer to [this page.]({{}}/rancher/v2.5/en/monitoring-alerting/rbac) - -# Migrating from Monitoring V1 to Monitoring V2 - -While there is no automatic migration available, it is possible to manually migrate custom Grafana dashboards and alerts that were created in Monitoring V1 to Monitoring V2. - -Before you can install Monitoring V2, Monitoring V1 needs to be uninstalled completely. In order to uninstall Monitoring V1: - -* Remove all cluster and project specific alerts and alerts groups. -* Remove all notifiers. -* Disable all project monitoring installations under Cluster -> Project -> Tools -> Monitoring. -* Ensure that all project-monitoring apps in all projects have been removed and are not recreated after a few minutes -* Disable the cluster monitoring installation under Cluster -> Tools -> Monitoring. -* Ensure that the cluster-monitoring app and the monitoring-operator app in the System project have been removed and are not recreated after a few minutes. - -#### RKE Template Clusters - -To prevent V1 monitoring from being re-enabled, disable monitoring and in future RKE template revisions via modification of the RKE template yaml: - -```yaml -enable_cluster_alerting: false -enable_cluster_monitoring: false -``` - -#### Migrating Grafana Dashboards - -You can migrate any dashboard added to Grafana in Monitoring V1 to Monitoring V2. In Monitoring V1 you can export an existing dashboard like this: - -* Sign into Grafana -* Navigate to the dashboard you want to export -* Go to the dashboard settings -* Copy the [JSON Model](https://grafana.com/docs/grafana/latest/dashboards/json-model/) - -In the JSON Model, change all `datasource` fields from `RANCHER_MONITORING` to `Prometheus`. You can easily do this by replacing all occurrences of `"datasource": "RANCHER_MONITORING"` with `"datasource": "Prometheus"`. - -If Grafana is backed by a persistent volume, you can now [import](https://grafana.com/docs/grafana/latest/dashboards/export-import/) this JSON Model into the Monitoring V2 Grafana UI. -It is recommended to provide the dashboard to Grafana with a ConfigMap in the `cattle-dashboards` namespace that has the label `grafana_dashboard: "1"`: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: custom-dashboard - namespace: cattle-dashboards - labels: - grafana_dashboard: "1" -data: - custom-dashboard.json: | - { - ... - } -``` - -Once this ConfigMap is created, the dashboard will automatically be added to Grafana. - -### Migrating Alerts - -It is only possible to directly migrate expression-based alerts to Monitoring V2. Fortunately, the event-based alerts that could be set up to alert on system component, node or workload events, are already covered out-of-the-box by the alerts that are part of Monitoring V2. So it is not necessary to migrate them. 
- -To migrate the following expression alert - -{{< img "/img/rancher/monitoring/migration/alert_2.4_to_2.5_source.png" "">}} - -you have to either create a PrometheusRule configuration like this in any namespace - -```yaml -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: custom-rules - namespace: default -spec: - groups: - - name: custom.rules - rules: - - alert: Custom Expression Alert - expr: prometheus_query > 5 - for: 5m - labels: - severity: critical - annotations: - summary: "The result of prometheus_query has been larger than 5 for 5m. Current value {{ $value }}" -``` - -or add the Prometheus Rule through the Cluster Explorer - -{{< img "/img/rancher/monitoring/migration/alert_2.4_to_2.5_target.png" "">}} - -For more details on how to configure PrometheusRules in Monitoring V2 see [Monitoring Configuration]({{}}/rancher/v2.5/en/monitoring-alerting/configuration#prometheusrules). - -### Migrating Notifiers - -There is no direct equivalent for how notifiers work in Monitoring V1. Instead you have to replicate the desired setup with [Routes and Receivers]({{}}/rancher/v2.5/en/monitoring-alerting/configuration#alertmanager-config) in Monitoring V2. - - -### Migrating for RKE Template Users - -If the cluster is managed using an RKE template, you will need to disable monitoring in future RKE template revisions to prevent legacy monitoring from being re-enabled. \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md b/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md deleted file mode 100644 index 40fa07ee3d..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/guides/persist-grafana/_index.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Persistent Grafana Dashboards -weight: 6 -aliases: - - /rancher/v2.5/en/monitoring-alerting/persist-grafana - - /rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/ ---- - -To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - -- [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) -- [Known Issues](#known-issues) - -# Creating a Persistent Grafana Dashboard - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -> **Prerequisites:** -> -> - The monitoring application needs to be installed. -> - To create the persistent dashboard, you must have at least the **Manage Config Maps** Rancher RBAC permissions assigned to you in the project or namespace that contains the Grafana Dashboards. This correlates to the `monitoring-dashboard-edit` or `monitoring-dashboard-admin` Kubernetes native RBAC Roles exposed by the Monitoring chart. -> - To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.]({{}}/rancher/v2.5/en/monitoring-alerting/rbac/#users-with-rancher-cluster-manager-based-permissions) - -### 1. Get the JSON model of the dashboard that you want to persist - -To create a persistent dashboard, you will need to get the JSON model of the dashboard you want to persist. You can use a premade dashboard or build your own. 
- -To use a premade dashboard, go to [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards), open up its detail page, and click on the **Download JSON** button to get the JSON model for the next step. - -To use your own dashboard: - -1. Click on the link to open Grafana. From the **Cluster Explorer,** click **Cluster Explorer > Monitoring.** -1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. - - > **Note:** Regardless of who has the password, in order to access the Grafana instance, you still need at least the Manage Services or View Monitoring permissions in the project that Rancher Monitoring is deployed into. Alternative credentials can also be supplied on deploying or upgrading the chart. -1. Create a dashboard using Grafana's UI. Once complete, go to the dashboard's settings by clicking on the gear icon in the top navigation menu. In the left navigation menu, click **JSON Model.** -1. Copy the JSON data structure that appears. - -### 2. Create a ConfigMap using the Grafana JSON model - -Create a ConfigMap in the namespace that contains your Grafana Dashboards (e.g. cattle-dashboards by default). - -The ConfigMap should look like this: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - grafana_dashboard: "1" - name: - namespace: cattle-dashboards # Change if using a non-default namespace -data: - .json: |- - -``` - -By default, Grafana is configured to watch all ConfigMaps with the `grafana_dashboard` label within the `cattle-dashboards` namespace. - -To specify that you would like Grafana to watch for ConfigMaps across all namespaces, refer to [this section.](#configuring-namespaces-for-the-grafana-dashboard-configmap) - -To create the ConfigMap in the Rancher UI, - -1. Go to the Cluster Explorer. -1. Click **Core > ConfigMaps**. -1. Click **Create**. -1. Set up the key-value pairs similar to the example above. When entering the value for `.json`, click **Read from File** to upload the JSON data model as the value. -1. Click **Create**. - -**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. - -Dashboards that are persisted using ConfigMaps cannot be deleted or edited from the Grafana UI. - -If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. - -### Configuring Namespaces for the Grafana Dashboard ConfigMap - -To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: - -``` -grafana.sidecar.dashboards.searchNamespace=ALL -``` - -Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} -> **Prerequisites:** -> -> - The monitoring application needs to be installed. -> - You must have the cluster-admin ClusterRole permission. - -1. Open the Grafana dashboard. From the **Cluster Explorer,** click **Cluster Explorer > Monitoring.** -1. Log in to Grafana. 
Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. - - > **Note:** Regardless of who has the password, cluster administrator permission in Rancher is still required to access the Grafana instance. -1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. -1. In the left navigation menu, click **JSON Model.** -1. Copy the JSON data structure that appears. -1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. Paste the JSON into the ConfigMap in the format shown in the example below: - - ```yaml - apiVersion: v1 - kind: ConfigMap - metadata: - labels: - grafana_dashboard: "1" - name: - namespace: cattle-dashboards - data: - .json: |- - - ``` - -**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. - -Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. - -To prevent the persistent dashboard from being deleted when Monitoring v2 is uninstalled, add the following annotation to the `cattle-dashboards` namespace: - -``` -helm.sh/resource-policy: "keep" -``` - -{{% /tab %}} -{{% /tabs %}} - -# Known Issues - -For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the `cattle-dashboards` namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. - -This annotation will be added by default in the new monitoring chart released by Rancher v2.5.8, but it still needs to be manually applied for users of earlier Rancher versions. diff --git a/content/rancher/v2.5/en/monitoring-alerting/how-monitoring-works/_index.md b/content/rancher/v2.5/en/monitoring-alerting/how-monitoring-works/_index.md deleted file mode 100644 index 30b19cdbc6..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/how-monitoring-works/_index.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -title: How Monitoring Works -weight: 1 ---- - -1. [Architecture Overview](#1-architecture-overview) -2. [How Prometheus Works](#2-how-prometheus-works) -3. [How Alertmanager Works](#3-how-alertmanager-works) -4. [Monitoring V2 Specific Components](#4-monitoring-v2-specific-components) -5. [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics) - -# 1. Architecture Overview - -_**The following sections describe how data flows through the Monitoring V2 application:**_ - -### Prometheus Operator - -Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created. When the Prometheus configuration resources are created, Prometheus Operator calls the Prometheus API to sync the new configuration. As the diagram at the end of this section shows, the Prometheus Operator acts as the intermediary between Prometheus and Kubernetes, calling the Prometheus API to synchronize Prometheus with the monitoring-related resources in Kubernetes. - -### ServiceMonitors and PodMonitors - -ServiceMonitors and PodMonitors declaratively specify targets, such as Services and Pods, that need to be monitored. 
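The bullets below describe how these resources drive scraping. For illustration only (this exact resource is not shipped with the chart, and the name, namespace, and labels are hypothetical), a minimal ServiceMonitor might look like this:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app        # hypothetical name
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app     # scrape Services that carry this label
  endpoints:
    - port: metrics        # name of the Service port exposing metrics
      path: /metrics
      interval: 30s
```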
- -- Targets are scraped on a recurring schedule based on the configured Prometheus scrape interval, and the metrics that are scraped are stored into the Prometheus Time Series Database (TSDB). - -- In order to perform the scrape, ServiceMonitors and PodMonitors are defined with label selectors that determine which Services or Pods should be scraped and endpoints that determine how the scrape should happen on the given target, e.g., scrape/metrics in TCP 10252, proxying through IP addr x.x.x.x. - -- Out of the box, Monitoring V2 comes with certain pre-configured exporters that are deployed based on the type of Kubernetes cluster that it is deployed on. For more information, see [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics). - -### How PushProx Works - -- Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called **PushProx**. The Kubernetes components that expose metrics to Prometheus through PushProx are the following: -`kube-controller-manager`, `kube-scheduler`, `etcd`, and `kube-proxy`. - -- For each PushProx exporter, we deploy one PushProx client onto all target nodes. For example, a PushProx client is deployed onto all controlplane nodes for kube-controller-manager, all etcd nodes for kube-etcd, and all nodes for kubelet. - -- We deploy exactly one PushProx proxy per exporter. The process for exporting metrics is as follows: - -1. The PushProx Client establishes an outbound connection with the PushProx Proxy. -1. The client then polls the proxy for scrape requests that have come into the proxy. -1. When the proxy receives a scrape request from Prometheus, the client sees it as a result of the poll. -1. The client scrapes the internal component. -1. The internal component responds by pushing metrics back to the proxy. - - -

Process for Exporting Metrics with PushProx:
- -![Process for Exporting Metrics with PushProx]({{}}/img/rancher/pushprox-process.svg) - -### PrometheusRules - -PrometheusRules allow users to define rules for what metrics or time series database queries should result in alerts being fired. Rules are evaluated on an interval. - -- **Recording rules** create a new time series based on existing series that have been collected. They are frequently used to precompute complex queries. -- **Alerting rules** run a particular query and fire an alert from Prometheus if the query evaluates to a non-zero value. - -### Alert Routing - -Once Prometheus determines that an alert needs to be fired, alerts are forwarded to **Alertmanager**. - -- Alerts contain labels that come from the PromQL query itself and additional labels and annotations that can be provided as part of specifying the initial PrometheusRule. - -- Before receiving any alerts, Alertmanager will use the **routes** and **receivers** specified in its configuration to form a routing tree on which all incoming alerts are evaluated. Each node of the routing tree can specify additional grouping, labeling, and filtering that needs to happen based on the labels attached to the Prometheus alert. A node on the routing tree (usually a leaf node) can also specify that an alert that reaches it needs to be sent out to a configured Receiver, e.g., Slack, PagerDuty, SMS, etc. Note that Alertmanager will send an alert first to **alertingDriver**, then alertingDriver will send or forward alert to the proper destination. - -- Routes and receivers are also stored in the Kubernetes API via the Alertmanager Secret. When the Secret is updated, Alertmanager is also updated automatically. Note that routing occurs via labels only (not via annotations, etc.). - -
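For illustration only, the routing tree and receivers stored in the Alertmanager Secret follow the upstream Alertmanager configuration format; the receiver names, channel, and webhook URL below are placeholders, not values shipped with the chart:

```yaml
route:
  group_by: ["alertname", "namespace"]  # group alerts that share these labels
  receiver: "default"                   # top-level route catches all alerts
  routes:
    - match:
        severity: critical              # routing matches on labels only
      receiver: "slack-critical"
receivers:
  - name: "default"
  - name: "slack-critical"
    slack_configs:
      - api_url: "https://hooks.slack.com/services/REPLACE/ME"  # placeholder webhook
        channel: "#alerts"
```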
How data flows through the monitoring application:
- - -# 2. How Prometheus Works - -### Storing Time Series Data - -After collecting metrics from exporters, Prometheus stores the time series in a local on-disk time series database. Prometheus optionally integrates with remote systems, but `rancher-monitoring` uses local storage for the time series database. - -Once stored, users can query this TSDB using PromQL, the query language for Prometheus. - -PromQL queries can be visualized in one of two ways: - -1. By supplying the query in Prometheus's Graph UI, which will show a simple graphical view of the data. -1. By creating a Grafana Dashboard that contains the PromQL query and additional formatting directives that label axes, add units, change colors, use alternative visualizations, etc. - -### Defining Rules for Prometheus - -Rules define queries that Prometheus needs to execute on a regular `evaluationInterval` to perform certain actions, such as firing an alert (alerting rules) or precomputing a query based on others existing in its TSDB (recording rules). These rules are encoded in PrometheusRules custom resources. When PrometheusRule custom resources are created or updated, the Prometheus Operator observes the change and calls the Prometheus API to synchronize the set of rules that Prometheus is currently evaluating on a regular interval. - -A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: - -- The name of the new alert or record -- A PromQL expression for the new alert or record -- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) -- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. - -On evaluating a [rule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#rule), Prometheus will execute the provided PromQL query, add additional provided labels (or annotations - only for alerting rules), and execute the appropriate action for the rule. For example, an Alerting Rule that adds `team: front-end` as a label to the provided PromQL query will append that label to the fired alert, which will allow Alertmanager to forward the alert to the correct Receiver. - -### Alerting and Recording Rules - -Prometheus doesn't maintain the state of whether alerts are active. It fires alerts repetitively at every evaluation interval, relying on Alertmanager to group and filter the alerts into meaningful notifications. - -The `evaluation_interval` constant defines how often Prometheus evaluates its alerting rules against the time series database. Similar to the `scrape_interval`, the `evaluation_interval` also defaults to one minute. - -The rules are contained in a set of rule files. Rule files include both alerting rules and recording rules, but only alerting rules result in alerts being fired after their evaluation. - -For recording rules, Prometheus runs a query, then stores it as a time series. This synthetic time series is useful for storing the results of an expensive or time-consuming query so that it can be queried more quickly in the future. - -Alerting rules are more commonly used. Whenever an alerting rule evaluates to a positive number, Prometheus fires an alert. 
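As a sketch only (the metric names, thresholds, and rule names are placeholders), a PrometheusRule that combines one recording rule and one alerting rule could look like this:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules                  # hypothetical name
  namespace: default
spec:
  groups:
    - name: example.rules
      rules:
        - record: job:http_requests:rate5m   # recording rule: precompute a query
          expr: sum(rate(http_requests_total[5m])) by (job)
        - alert: HighErrorRate               # alerting rule: fires while expr returns results
          expr: sum(rate(http_requests_total{code=~"5.."}[5m])) by (job) > 5
          for: 10m
          labels:
            severity: critical
            team: front-end                  # Alertmanager routes on labels like this
          annotations:
            summary: "High 5xx rate on {{ $labels.job }}"
```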
- -The Rule file adds labels and annotations to alerts before firing them, depending on the use case: - -- Labels indicate information that identifies the alert and could affect the routing of the alert. For example, if when sending an alert about a certain container, the container ID could be used as a label. - -- Annotations denote information that doesn't affect where an alert is routed, for example, a runbook or an error message. - -# 3. How Alertmanager Works - -The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of the following tasks: - -- Deduplicating, grouping, and routing alerts to the correct receiver integration such as email, PagerDuty, or OpsGenie - -- Silencing and inhibition of alerts - -- Tracking alerts that fire over time - -- Sending out the status of whether an alert is currently firing, or if it is resolved - -### Alerts Forwarded by alertingDrivers - -When alertingDrivers are installed, this creates a `Service` that can be used as the receiver's URL for Teams or SMS, based on the alertingDriver's configuration. The URL in the Receiver points to the alertingDrivers; so the Alertmanager sends alert first to alertingDriver, then alertingDriver forwards or sends alert to the proper destination. - -### Routing Alerts to Receivers - -Alertmanager coordinates where alerts are sent. It allows you to group alerts based on labels and fire them based on whether certain labels are matched. One top-level route accepts all alerts. From there, Alertmanager continues routing alerts to receivers based on whether they match the conditions of the next route. - -While the Rancher UI forms only allow editing a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager Secret. - -### Configuring Multiple Receivers - -By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. - -By editing custom YAML in the Alertmanager or Receiver configuration, you can also send alerts to multiple notification systems. For more information, see the section on configuring [Receivers.](../configuration/receiver/#configuring-multiple-receivers) - -# 4. Monitoring V2 Specific Components - -Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. - -Prometheus Operator will automatically update your Prometheus configuration based on the live state of the resources and configuration options that are edited in the Rancher UI. - -### Resources Deployed by Default - -By default, a set of resources curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring/Alerting stack. 
- -The resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). - -### Default Exporters - -Monitoring V2 deploys three default exporters that provide additional metrics for Prometheus to store: - -1. `node-exporter`: exposes hardware and OS metrics for Linux hosts. For more information on `node-exporter`, refer to the [upstream documentation](https://prometheus.io/docs/guides/node-exporter/). - -1. `windows-exporter`: exposes hardware and OS metrics for Windows hosts (only deployed on Windows clusters). For more information on `windows-exporter`, refer to the [upstream documentation](https://github.com/prometheus-community/windows_exporter). - -1. `kube-state-metrics`: expose additional metrics that track the state of resources contained in the Kubernetes API (e.g., pods, workloads, etc.). For more information on `kube-state-metrics`, refer to the [upstream documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs). - -ServiceMonitors and PodMonitors will scrape these exporters, as defined [here](#defining-what-metrics-are-scraped). Prometheus stores these metrics, and you can query the results via either Prometheus's UI or Grafana. - -See the [architecture](#1-architecture-overview) section for more information on recording rules, alerting rules, and Alertmanager. - -### Components Exposed in the Rancher UI - -When the monitoring application is installed, you will be able to edit the following components in the Rancher UI: - -| Component | Type of Component | Purpose and Common Use Cases for Editing | -|--------------|------------------------|---------------------------| -| ServiceMonitor | Custom resource | Sets up Kubernetes Services to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | -| PodMonitor | Custom resource | Sets up Kubernetes Pods to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | -| Receiver | Configuration block (part of Alertmanager) | Modifies information on where to send an alert (e.g., Slack, PagerDuty, etc.) and any necessary information to send the alert (e.g., TLS certs, proxy URLs, etc.). Automatically updates the Alertmanager custom resource. | -| Route | Configuration block (part of Alertmanager) | Modifies the routing tree that is used to filter, label, and group alerts based on labels and send them to the appropriate Receiver. Automatically updates the Alertmanager custom resource. | -| PrometheusRule | Custom resource | Defines additional queries that need to trigger alerts or define materialized views of existing series that are within Prometheus's TSDB. Automatically updates the Prometheus custom resource. 
| - -### PushProx - -PushProx allows Prometheus to scrape metrics across a network boundary, which prevents users from having to expose metrics ports for internal Kubernetes components on each node in a Kubernetes cluster. - -Since the metrics for Kubernetes components are generally exposed on the host network of nodes in the cluster, PushProx deploys a DaemonSet of clients that sit on the hostNetwork of each node and make an outbound connection to a single proxy that is sitting on the Kubernetes API. Prometheus can then be configured to proxy scrape requests through the proxy to each client, which allows it to scrape metrics from the internal Kubernetes components without requiring any inbound node ports to be open. - -Refer to [Scraping Metrics with PushProx](#scraping-metrics-with-pushprox) for more. - -# 5. Scraping and Exposing Metrics - -### Defining what Metrics are Scraped - -ServiceMonitors and PodMonitors define targets that are intended for Prometheus to scrape. The [Prometheus custom resource](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md#prometheus) tells Prometheus which ServiceMonitors or PodMonitors it should use to find out where to scrape metrics from. - -The Prometheus Operator observes the ServiceMonitors and PodMonitors. When it observes that they are created or updated, it calls the Prometheus API to update the scrape configuration in the Prometheus custom resource and keep it in sync with the scrape configuration in the ServiceMonitors or PodMonitors. This scrape configuration tells Prometheus which endpoints to scrape metrics from and how it will label the metrics from those endpoints. - -Prometheus scrapes all of the metrics defined in its scrape configuration at every `scrape_interval`, which is one minute by default. - -The scrape configuration can be viewed as part of the Prometheus custom resource that is exposed in the Rancher UI. - -### How the Prometheus Operator Sets up Metrics Scraping - -The Prometheus Deployment or StatefulSet scrapes metrics, and the configuration of Prometheus is controlled by the Prometheus custom resources. The Prometheus Operator watches for Prometheus and Alertmanager resources, and when they are created, the Prometheus Operator creates a Deployment or StatefulSet for Prometheus or Alertmanager with the user-defined configuration. - -When the Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created, it knows that the scrape configuration needs to be updated in Prometheus. It updates Prometheus by first updating the configuration and rules files in the volumes of Prometheus's Deployment or StatefulSet. Then it calls the Prometheus API to sync the new configuration, resulting in the Prometheus Deployment or StatefulSet to be modified in place. - -### How Kubernetes Component Metrics are Exposed - -Prometheus scrapes metrics from deployments known as [exporters,](https://prometheus.io/docs/instrumenting/exporters/) which export the time series data in a format that Prometheus can ingest. In Prometheus, time series consist of streams of timestamped values belonging to the same metric and the same set of labeled dimensions. - -### Scraping Metrics with PushProx - -Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called PushProx. 
For detailed information on PushProx, refer [here](#how-pushprox-works) and to the above [architecture](#1-architecture-overview) section. - -### Scraping Metrics - -The following Kubernetes components are directly scraped by Prometheus: - -- kubelet* -- ingress-nginx** -- coreDns/kubeDns -- kube-api-server - -\* You can optionally use `hardenedKubelet.enabled` to use a PushProx, but that is not the default. - -** For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. - - -### Scraping Metrics Based on Kubernetes Distribution - -Metrics are scraped differently based on the Kubernetes distribution. For help with terminology, refer [here](#terminology). For details, see the table below: - -
How Metrics are Exposed to Prometheus
- -| Kubernetes Component | RKE | RKE2 | KubeADM | K3s | -|-----|-----|-----|-----|-----| -| kube-controller-manager | rkeControllerManager.enabled |rke2ControllerManager.enabled | kubeAdmControllerManager.enabled | k3sServer.enabled | -| kube-scheduler | rkeScheduler.enabled | rke2Scheduler.enabled |kubeAdmScheduler.enabled | k3sServer.enabled | -| etcd | rkeEtcd.enabled | rke2Etcd.enabled | kubeAdmEtcd.enabled | Not available | -| kube-proxy | rkeProxy.enabled | rke2Proxy.enabled | kubeAdmProxy.enabled | k3sServer.enabled | -| kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | -| ingress-nginx* | Collects metrics directly exposed by kubelet, exposed by rkeIngressNginx.enabled | Collects metrics directly exposed by kubelet, Exposed by rke2IngressNginx.enabled | Not available | Not available | -| coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | -| kube-api-server | Collects metrics directly exposed by kube-api-server |Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-appi-server | Collects metrics directly exposed by kube-api-server | - -\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. - -### Terminology - -- **kube-scheduler:** The internal Kubernetes component that uses information in the pod spec to decide on which node to run a pod. -- **kube-controller-manager:** The internal Kubernetes component that is responsible for node management (detecting if a node fails), pod replication and endpoint creation. -- **etcd:** The internal Kubernetes component that is the distributed key/value store which Kubernetes uses for persistent storage of all cluster information. -- **kube-proxy:** The internal Kubernetes component that watches the API server for pods/services changes in order to maintain the network up to date. -- **kubelet:** The internal Kubernetes component that watches the API server for pods on a node and makes sure they are running. -- **ingress-nginx:** An Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer. -- **coreDns/kubeDns:** The internal Kubernetes component responsible for DNS. -- **kube-api-server:** The main internal Kubernetes component that is responsible for exposing APIs for the other master components. diff --git a/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md b/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md deleted file mode 100644 index 3260cf8cf9..0000000000 --- a/content/rancher/v2.5/en/monitoring-alerting/rbac/_index.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: Role-based Access Control -shortTitle: RBAC -weight: 2 -aliases: - - /rancher/v2.5/en/cluster-admin/tools/monitoring/rbac - - /rancher/v2.5/en/monitoring-alerting/rbac - - /rancher/v2.5/en/monitoring-alerting/grafana - - /rancher/v2.x/en/monitoring-alerting/v2.5/rbac/ ---- -This section describes the expectations for RBAC for Rancher Monitoring. 
- -- [Cluster Admins](#cluster-admins) -- [Users with Kubernetes ClusterRole-based Permissions](#users-with-kubernetes-clusterrole-based-permissions) - - [Users with Kubernetes Admin/Edit Permissions](#users-with-kubernetes-admin-edit-permissions) - - [Users with Kubernetes View Permissions](#users-with-kubernetes-view-permissions) - - [Additional Monitoring Roles](#additional-monitoring-roles) - - [Additional Monitoring ClusterRoles](#additional-monitoring-clusterroles) -- [Users with Rancher Cluster Manager Based Permissions](#users-with-rancher-cluster-manager-based-permissions) - - [Differences in 2.5.x](#differences-in-2-5-x) - - [Assigning Additional Access](#assigning-additional-access) -- [Role-based Access Control for Grafana](#role-based-access-control-for-grafana) - -# Cluster Admins - -By default, only those with the cluster-admin `ClusterRole` should be able to: - -- Install the `rancher-monitoring` App onto a cluster and all other relevant configuration performed on the chart deploy - - e.g. whether default dashboards are created, what exporters are deployed onto the cluster to collect metrics, etc. -- Create / modify / delete Prometheus deployments in the cluster via Prometheus CRs -- Create / modify / delete Alertmanager deployments in the cluster via Alertmanager CRs -- Persist new Grafana dashboards or datasources via creating ConfigMaps in the appropriate namespace -- Expose certain Prometheus metrics to the k8s Custom Metrics API for HPA via a Secret in the `cattle-monitoring-system` namespace - -# Users with Kubernetes ClusterRole-based Permissions - -The `rancher-monitoring` chart installs the following three `ClusterRoles`. By default, they aggregate into the corresponding k8s `ClusterRoles`: - -| ClusterRole | Aggregates To Default K8s ClusterRole | -| ------------------------------| ---------------------------| -| `monitoring-admin` | `admin`| -| `monitoring-edit` | `edit` | -| `monitoring-view` | `view ` | - -These `ClusterRoles` provide different levels of access to the Monitoring CRDs based on the actions that can be performed: - -| CRDs (monitoring.coreos.com) | Admin | Edit | View | -| ------------------------------| ---------------------------| ---------------------------| ---------------------------| -|
`prometheuses`, `alertmanagers` | Get, List, Watch | Get, List, Watch | Get, List, Watch | -| `servicemonitors`, `podmonitors`, `prometheusrules`
| * | * | Get, List, Watch | - -On a high level, the following permissions are assigned by default as a result. - -### Users with Kubernetes Admin/Edit Permissions - -Only those with the the cluster-admin, admin or edit `ClusterRole` should be able to: - -- Modify the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs -- Modify the alerting / recording rules of a Prometheus deployment via PrometheusRules CRs - -### Users with Kubernetes View Permissions - -Only those with who have some Kubernetes `ClusterRole` should be able to: - -- View the configuration of Prometheuses that are deployed within the cluster -- View the configuration of Alertmanagers that are deployed within the cluster -- View the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs -- View the alerting/recording rules of a Prometheus deployment via PrometheusRules CRs - -### Additional Monitoring Roles - -Monitoring also creates additional `Roles` that are not assigned to users by default but are created within the cluster. They can be bound to a namespace by deploying a `RoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). - -Admins should use these roles to provide more fine-grained access to users: - -| Role | Purpose | -| ------------------------------| ---------------------------| -| monitoring-config-admin | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-config-edit | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-config-view | Allow admins to assign roles to users to be able to view Secrets and ConfigMaps within the cattle-monitoring-system namespace. Viewing Secrets / ConfigMaps in this namespace could allow users to observe the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | -| monitoring-dashboard-admin | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | -| monitoring-dashboard-edit | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | -| monitoring-dashboard-view | Allow admins to assign roles to users to be able to view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | - -### Additional Monitoring ClusterRoles - -Monitoring also creates additional `ClusterRoles` that are not assigned to users by default but are created within the cluster. 
They are not aggregated by default but can be bound to a namespace by deploying a `RoleBinding` or `ClusterRoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). - -| Role | Purpose | -| ------------------------------| ---------------------------| -| monitoring-ui-view | _Available as of Monitoring v2 14.5.100+_ Provides read-only access to external Monitoring UIs by giving a user permission to list the Prometheus, Alertmanager, and Grafana endpoints and make GET requests to Prometheus, Grafana, and Alertmanager UIs through the Rancher proxy. | - -### Assigning Roles and ClusterRoles with kubectl - -An alternative method to using Rancher to attach a `Role` or `ClusterRole` to a user or group is by defining bindings in YAML files that you create. You must first configure the `RoleBinding` with the YAML file, then you apply the config changes by running the `kubectl apply` command. - - -* **Roles**: Below is an example of a YAML file to help you configure `RoleBindings` in Kubernetes to attach to a user. You will need to fill in the name below, and name is case-sensitive. - -``` -# monitoring-config-view-role-binding.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: monitoring-config-view - namespace: cattle-monitoring-system -roleRef: - kind: Role - name: monitoring-config-view - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: User - name: u-b4qkhsnliz # this can be found via `kubectl get users -A` - apiGroup: rbac.authorization.k8s.io -``` - -* **kubectl**: Below is an example of a `kubectl` command used to apply the binding you've created in the YAML file. As noted, you will need to fill in your YAML filename accordingly. - - * **`kubectl apply -f monitoring-config-view-role-binding.yaml` - -# Users with Rancher Cluster Manager Based Permissions - -The relationship between the default roles deployed by Rancher Cluster Manager (i.e. cluster-owner, cluster-member, project-owner, project-member), the default k8s roles, and the roles deployed by the rancher-monitoring chart are detailed in the table below: - -
Default Rancher Permissions and Corresponding Kubernetes ClusterRoles
- -| Cluster Manager Role | k8s Role | Monitoring ClusterRole / Role | ClusterRoleBinding or RoleBinding? | -| --------- | --------- | --------- | --------- | -| cluster-owner | cluster-admin | N/A | ClusterRoleBinding | -| cluster-member | admin | monitoring-admin | ClusterRoleBinding | -| project-owner | admin | monitoring-admin | RoleBinding within Project namespace | -| project-member | edit | monitoring-edit | RoleBinding within Project namespace | - -In addition to these default Roles, the following additional Rancher project roles can be applied to members of your Cluster to provide additional access to Monitoring. These Rancher Roles will be tied to ClusterRoles deployed by the Monitoring chart: - -
Non-default Rancher Permissions and Corresponding Kubernetes ClusterRoles
- -| Cluster Manager Role | Kubernetes ClusterRole | Available In Rancher From | Available in Monitoring v2 From | -|--------------------------|-------------------------------|-------|------| -| View Monitoring* | [monitoring-ui-view](#monitoring-ui-view) | 2.4.8+ | 9.4.204+ | - -\* A User bound to the **View Monitoring** Rancher Role only has permissions to access external Monitoring UIs if provided links to those UIs. In order to access the Monitoring Pane on Cluster Explorer to get those links, the User must be a Project Member of at least one Project. - -### Differences in 2.5.x - -Users with the project-member or project-owners roles assigned will not be given access to either Prometheus or Grafana in Rancher 2.5.x since we only create Grafana or Prometheus on a cluster-level. - -In addition, while project owners will still be only able to add ServiceMonitors / PodMonitors that scrape resources within their project's namespace by default, PrometheusRules are not scoped to a single namespace / project. Therefore, any alert rules or recording rules created by project-owners within their project namespace will be applied across the entire cluster, although they will be unable to view / edit / delete any rules that were created outside the project's namespace. - -### Assigning Additional Access - -If cluster-admins would like to provide additional admin/edit access to users outside of the roles offered by the rancher-monitoring chart, the following table identifies the potential impact: - -|CRDs (monitoring.coreos.com) | Can it cause impact outside of a namespace / project? | Impact | -|----------------------------| ------| ----------------------------| -| `prometheuses`| Yes, this resource can scrape metrics from any targets across the entire cluster (unless the Operator itself is otherwise configured). | User will be able to define the configuration of new cluster-level Prometheus deployments that should be created in the cluster. | -| `alertmanagers`| No | User will be able to define the configuration of new cluster-level Alertmanager deployments that should be created in the cluster. Note: if you just want to allow users to configure settings like Routes and Receivers, you should just provide access to the Alertmanager Config Secret instead. | -|
`servicemonitors`, `podmonitors`
| No, not by default; this is configurable via `ignoreNamespaceSelectors` on the Prometheus CR. | User will be able to set up scrapes by Prometheus on endpoints exposed by Services / Pods within the namespace they are given this permission in. | -| `prometheusrules`| Yes, PrometheusRules are cluster-scoped. | User will be able to define alert or recording rules on Prometheus based on any series collected across the entire cluster. | - -| k8s Resources | Namespace | Can it cause impact outside of a namespace / project? | Impact | -|----------------------------| ------| ------| ----------------------------| -|
`secrets`, `configmaps`
| `cattle-monitoring-system` | Yes, Configs and Secrets in this namespace can impact the entire monitoring / alerting pipeline. | User will be able to create or edit Secrets / ConfigMaps such as the Alertmanager Config, Prometheus Adapter Config, TLS secrets, additional Grafana datasources, etc. This can have broad impact on all cluster monitoring / alerting. | -|
`secrets`, `configmaps`
| `cattle-dashboards` | Yes, Configs and Secrets in this namespace can create dashboards that make queries on all metrics collected at a cluster-level. | User will be able to create Secrets / ConfigMaps that persist new Grafana Dashboards only. | - - - -# Role-based Access Control for Grafana - -Rancher allows any users who are authenticated by Kubernetes and have access to the Grafana service deployed by the Rancher Monitoring chart to access Grafana via the Rancher Dashboard UI. By default, all users who are able to access Grafana are given the [Viewer](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#viewer-role) role, which allows them to view any of the default dashboards deployed by Rancher. - -However, users can choose to log in to Grafana as an [Admin](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#admin-role) if necessary. The default Admin username and password for the Grafana instance will be `admin`/`prom-operator`, but alternative credentials can also be supplied on deploying or upgrading the chart. - -To see the Grafana UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Grafana.** - -
Cluster Compute Resources Dashboard in Grafana
-![Cluster Compute Resources Dashboard in Grafana]({{}}/img/rancher/cluster-compute-resources-dashboard.png) - -
Default Dashboards in Grafana
-![Default Dashboards in Grafana]({{}}/img/rancher/grafana-default-dashboard.png) \ No newline at end of file diff --git a/content/rancher/v2.5/en/overview/_index.md b/content/rancher/v2.5/en/overview/_index.md deleted file mode 100644 index 0bfde58d33..0000000000 --- a/content/rancher/v2.5/en/overview/_index.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Overview -weight: 1 -aliases: - - /rancher/v2.x/en/overview/ ---- -Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. - -# Run Kubernetes Everywhere - -Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. - -# Meet IT requirements - -Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: - -- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. -- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. -- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. - -# Empower DevOps Teams - -Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. - -The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. - -![Platform]({{}}/img/rancher/platform.png) - -# Features of the Rancher API Server - -The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: - -### Authorization and Role-Based Access Control - -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.5/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.5/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/) policies. - -### Working with Kubernetes - -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.5/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.5/en/catalog/) that make it easy to repeatedly deploy applications. 
-- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.5/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.5/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.5/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.5/en/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -### Working with Cloud Infrastructure - -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.5/en/cluster-admin/nodes/) in all clusters. -- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) in the cloud. - -### Cluster Visibility - -- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. -- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. -- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. - -# Editing Downstream Clusters with Rancher - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/) - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.5/en/cluster-provisioning/cluster-capabilities-table" %}} diff --git a/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md b/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md deleted file mode 100644 index 25029e8366..0000000000 --- a/content/rancher/v2.5/en/overview/architecture-recommendations/_index.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Architecture Recommendations -weight: 3 -aliases: - - /rancher/v2.x/en/overview/architecture-recommendations/ ---- - -Kubernetes cluster. 
If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) - -This section covers the following topics: - -- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) -- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) -- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) -- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) -- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) -- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) - -# Separation of Rancher and User Clusters - -A user cluster is a downstream Kubernetes cluster that runs your apps and services. - -If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. - -If Rancher is intended to manage downstream Kubernetes clusters, the Kubernetes cluster that the Rancher server runs on should also be separate from the downstream user clusters. - -![Separation of Rancher Server from User Clusters]({{}}/img/rancher/rancher-architecture-separation-of-rancher-server.svg) - -# Why HA is Better for Rancher in Production - -We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. - -We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. - -### K3s Kubernetes Cluster Installations - -One option for the underlying Kubernetes cluster is to use K3s Kubernetes. K3s is Rancher's CNCF certified Kubernetes distribution. It is easy to install and uses half the memory of Kubernetes, all in a binary of less than 100 MB. Another advantage of K3s is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. - -
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
-![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server]({{}}/img/rancher/k3s-server-storage.svg) - -### RKE Kubernetes Cluster Installations - -In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. - -
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
-![Architecture of an RKE Kubernetes cluster running the Rancher management server]({{}}/img/rancher/rke-server-storage.svg) - -# Recommended Load Balancer Configuration for Kubernetes Installations - -We recommend the following configurations for the load balancer and Ingress controllers: - -* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) -* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. -* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. -* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. - -
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
-![Rancher HA]({{}}/img/rancher/ha/rancher2ha.svg) - -# Environment for Kubernetes Installations - -It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. - -For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.5/en/cluster-provisioning/) for running your workloads. - -# Recommended Node Roles for Kubernetes Installations - -The below recommendations apply when Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. - -### K3s Cluster Roles - -In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. - -For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. - -### RKE Cluster Roles - -If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. - -### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters - -Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. - -Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. - -For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. - -![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters]({{}}/img/rancher/rancher-architecture-node-roles.svg) - -RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. - -We recommend that downstream user clusters should have at least: - -- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available -- **Two nodes with only the controlplane role** to make the master component highly available -- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services - -With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: - -* It allows one `etcd` node failure. -* It maintains multiple instances of the master components by having multiple `controlplane` nodes. -* No other workloads than Rancher itself should be created on this cluster. - -Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. 
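As an illustrative sketch (the addresses and SSH user are placeholders), the `nodes` section of an RKE `cluster.yml` for the Rancher server cluster could assign all three roles to each of three nodes:

```yaml
nodes:
  - address: 10.0.0.1                      # placeholder node IPs
    user: ubuntu                           # placeholder SSH user
    role: [controlplane, etcd, worker]     # all three roles on every node
  - address: 10.0.0.2
    user: ubuntu
    role: [controlplane, etcd, worker]
  - address: 10.0.0.3
    user: ubuntu
    role: [controlplane, etcd, worker]
```

Downstream user clusters, by contrast, would typically dedicate each node to a single role as described above.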
- -For more best practices for downstream clusters, refer to the [production checklist]({{}}/rancher/v2.5/en/cluster-provisioning/production) or our [best practices guide.]({{}}/rancher/v2.5/en/best-practices/v2.5/) - -# Architecture for an Authorized Cluster Endpoint - -If you are using an [authorized cluster endpoint,]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. - -If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files]({{}}/rancher/v2.5/en/k8s-in-rancher/kubeconfig/) and [API keys]({{}}/rancher/v2.5/en/user-settings/api-keys/#creating-an-api-key) for more information. diff --git a/content/rancher/v2.5/en/overview/architecture/_index.md b/content/rancher/v2.5/en/overview/architecture/_index.md deleted file mode 100644 index b23c9d4f73..0000000000 --- a/content/rancher/v2.5/en/overview/architecture/_index.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Architecture -weight: 1 -aliases: - - /rancher/v2.x/en/overview/architecture/ ---- - -This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. - -For information on the different ways that Rancher can be installed, refer to the [overview of installation options.]({{}}/rancher/v2.5/en/installation/#overview-of-installation-options) - -For a list of main features of the Rancher API server, refer to the [overview section.]({{}}/rancher/v2.5/en/overview/#features-of-the-rancher-api-server) - -For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.]({{}}/rancher/v2.5/en/overview/architecture-recommendations) - -> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.5/en/overview/concepts) page. - -This section covers the following topics: - -- [Rancher server architecture](#rancher-server-architecture) -- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) - - [The authentication proxy](#1-the-authentication-proxy) - - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) - - [Node agents](#3-node-agents) - - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) -- [Important files](#important-files) -- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) -- [Rancher server components and source code](#rancher-server-components-and-source-code) - -# Rancher Server Architecture - -The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. - -The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). - -For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. 
After deploying Rancher, you can [create or import clusters]({{}}/rancher/v2.5/en/cluster-provisioning/) for running your workloads. - -The diagram below shows how users can manipulate both [Rancher-launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters and [hosted Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/) clusters through Rancher's authentication proxy: - -
Managing Kubernetes Clusters through Rancher's Authentication Proxy
- -![Architecture]({{}}/img/rancher/rancher-architecture-rancher-api-server.svg) - -You can install Rancher on a single node, or on a high-availability Kubernetes cluster. - -A high-availability Kubernetes installation is recommended for production. - -A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: - -The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.5/en/backups/migrating-rancher) - -The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. - -# Communicating with Downstream User Clusters - -This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. - -The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. - -
Communicating with Downstream Clusters
- -![Rancher Components]({{}}/img/rancher/rancher-architecture-cluster-controller.svg) - -The following descriptions correspond to the numbers in the diagram above: - -1. [The Authentication Proxy](#1-the-authentication-proxy) -2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) -3. [Node Agents](#3-node-agents) -4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) - -### 1. The Authentication Proxy - -In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see -the pods. Bob is authenticated through Rancher's authentication proxy. - -The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. - -Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. - -By default, Rancher generates a [kubeconfig file]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_cluster.yml`) contains full access to the cluster. - -### 2. Cluster Controllers and Cluster Agents - -Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. - -There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: - -- Watches for resource changes in the downstream cluster -- Brings the current state of the downstream cluster to the desired state -- Configures access control policies to clusters and projects -- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE - -By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. - -The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: - -- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters -- Manages workloads, pod creation and deployment within each cluster -- Applies the roles and bindings defined in each cluster's global policies -- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health - -### 3. Node Agents - -If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. - -The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. It is used to interact with the nodes when performing cluster operations. 
Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. - -### 4. Authorized Cluster Endpoint - -An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. - -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters) to provision the cluster. It is not available for registered clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -There are two main reasons why a user might need the authorized cluster endpoint: - -- To access a downstream user cluster while Rancher is down -- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance - -The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. - -Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. - -> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. - -With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. - -You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl) - -# Important Files - -The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. -- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
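To make the kubeconfig-related pieces above more concrete, the sketch below shows roughly how a generated kubeconfig can contain both a context that proxies through the Rancher server and a second context that talks to the authorized cluster endpoint directly. It is an illustration only: the cluster name `my-cluster`, the endpoint FQDN `ace.example.com`, the context naming, and the placeholder credentials are assumptions, not the exact output Rancher produces.

```yaml
apiVersion: v1
kind: Config
clusters:
- name: my-cluster                 # reached through the Rancher authentication proxy
  cluster:
    server: https://rancher.example.com/k8s/clusters/c-xxxxx
- name: my-cluster-direct          # authorized cluster endpoint, e.g. a load balancer over controlplane nodes
  cluster:
    server: https://ace.example.com:6443
    certificate-authority-data: <base64-encoded private CA certificate, if applicable>
users:
- name: my-cluster
  user:
    token: <API bearer token issued by Rancher>
contexts:
- name: my-cluster                 # default context: requests go through Rancher
  context:
    cluster: my-cluster
    user: my-cluster
- name: my-cluster-direct          # use this context if the Rancher server is unavailable
  context:
    cluster: my-cluster-direct
    user: my-cluster
current-context: my-cluster
```

Switching `kubectl` to the direct context (for example with `kubectl config use-context my-cluster-direct`) is what keeps cluster access working while the Rancher server itself is down.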
- -For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file]({{}}/rancher/v2.5/en/cluster-admin/cluster-access/kubectl/) documentation. - -# Tools for Provisioning Kubernetes Clusters - -The tools that Rancher uses to provision downstream user clusters depends on the type of cluster that is being provisioned. - -### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider - -Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) - -### Rancher Launched Kubernetes for Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. - -Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) - -### Hosted Kubernetes Providers - -When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) - -### Registered Kubernetes Clusters - -In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. - -# Rancher Server Components and Source Code - -This diagram shows each component that the Rancher server is composed of: - -![Rancher Components]({{}}/img/rancher/rancher-architecture-rancher-components.svg) - -The GitHub repositories for Rancher can be found at the following links: - -- [Main Rancher server repository](https://github.com/rancher/rancher) -- [Rancher UI](https://github.com/rancher/ui) -- [Rancher API UI](https://github.com/rancher/api-ui) -- [Norman,](https://github.com/rancher/norman) Rancher's API framework -- [Types](https://github.com/rancher/types) -- [Rancher CLI](https://github.com/rancher/cli) -- [Catalog applications](https://github.com/rancher/helm) - -This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.]({{}}/rancher/v2.5/en/contributing/#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
diff --git a/content/rancher/v2.5/en/overview/concepts/_index.md b/content/rancher/v2.5/en/overview/concepts/_index.md deleted file mode 100644 index 30d5374875..0000000000 --- a/content/rancher/v2.5/en/overview/concepts/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Kubernetes Concepts -weight: 4 -aliases: - - /rancher/v2.x/en/overview/concepts/ ---- - -This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified interview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) - -This section covers the following topics: - -- [About Docker](#about-docker) -- [About Kubernetes](#about-kubernetes) -- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) -- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) - - [etcd Nodes](#etcd-nodes) - - [Controlplane Nodes](#controlplane-nodes) - - [Worker Nodes](#worker-nodes) -- [About Helm](#about-helm) - -# About Docker - -Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. - ->**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. - -# About Kubernetes - -Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. - -# What is a Kubernetes Cluster? - -A cluster is a group of computers that work together as a single system. - -A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. - -# Roles for Nodes in Kubernetes Clusters - -Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. - -A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. - -### etcd Nodes - -Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. - -The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. - -The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. - -Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. 
The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. - -Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. - -### Controlplane Nodes - -Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although three or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. - -### Worker Nodes - -Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: - -- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. -- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. - -Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/). - -# About Helm - -For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. - -Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). - -For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) diff --git a/content/rancher/v2.5/en/pipelines/_index.md b/content/rancher/v2.5/en/pipelines/_index.md deleted file mode 100644 index 8c7ac545a8..0000000000 --- a/content/rancher/v2.5/en/pipelines/_index.md +++ /dev/null @@ -1,269 +0,0 @@ ---- -title: Pipelines -weight: 10 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/pipelines - - /rancher/v2.x/en/pipelines/ ---- - -> As of Rancher v2.5, Git-based deployment pipelines are now deprecated. We recommend handling pipelines with Rancher Continuous Delivery powered by [Fleet]({{}}/rancher/v2.5/en/deploy-across-clusters/fleet), available in Cluster Explorer. -> ->**Notice:** Fleet does not replace Rancher pipelines; the distinction is that Rancher pipelines are now powered by Fleet. - -Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. - -Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. 
-- Deploy your build images to your cluster.
-- Run unit tests.
-- Run regression tests.
-
->**Note:** Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of, and is not a replacement for, enterprise-grade Jenkins or other CI tools your team uses.
-
-This section covers the following topics:
-
-- [Concepts](#concepts)
-- [How Pipelines Work](#how-pipelines-work)
-- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines)
-- [Setting up Pipelines](#setting-up-pipelines)
-  - [Configure version control providers](#1-configure-version-control-providers)
-  - [Configure repositories](#2-configure-repositories)
-  - [Configure the pipeline](#3-configure-the-pipeline)
-- [Pipeline Configuration Reference](#pipeline-configuration-reference)
-- [Running your Pipelines](#running-your-pipelines)
-- [Triggering a Pipeline](#triggering-a-pipeline)
-  - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository)
-
-# Concepts
-
-For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/concepts)
-
-# How Pipelines Work
-
-After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently.
-
-A pipeline is configured from a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` file to the repository.
-
-Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments.
-
-When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it:
-
- - **Jenkins:**
-
-   The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked.
-
-   >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine.
-
- - **Docker Registry:**
-
-   Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can configure it to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry.
-
- - **Minio:**
-
-   Minio storage is used to store the logs for pipeline executions.
-
-   >**Note:** The managed Jenkins instance works statelessly, so you don't need to worry about its data persistence. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/storage).
-
-# Roles-based Access Control for Pipelines
-
-If you can access a project, you can enable repositories to start building pipelines.
- -Only [administrators]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. - -Project members can only configure repositories and pipelines. - -# Setting up Pipelines - -To set up pipelines, you will need to do the following: - -1. [Configure version control providers](#1-configure-version-control-providers) -2. [Configure repositories](#2-configure-repositories) -3. [Configure the pipeline](#3-configure-the-pipeline) - -### 1. Configure Version Control Providers - -Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider: - -- GitHub -- GitLab -- Bitbucket - -Select your provider's tab below and follow the directions. - -{{% tabs %}} -{{% tab "GitHub" %}} -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. - -1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. - -1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "GitLab" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. - -1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. - -1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. - -1. Click **Authenticate**. - ->**Note:** -> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. -> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. -{{% /tab %}} -{{% tab "Bitbucket Cloud" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use public Bitbucket Cloud** option. - -1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. - -1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. - -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "Bitbucket Server" %}} - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Select **Tools > Pipelines** in the navigation bar. - -1. Choose the **Use private Bitbucket Server setup** option. - -1. Follow the directions displayed to **Setup a Bitbucket Server application**. - -1. Enter the host address of your Bitbucket server installation. - -1. Click **Authenticate**. 
-
->**Note:**
-> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options:
->
-> 1. Set up Rancher server with a certificate from a trusted CA.
-> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html).
->
-{{% /tab %}}
-{{% /tabs %}}
-
-**Result:** After the version control provider is authenticated, you will be automatically redirected to start configuring which repositories you want to start using with a pipeline.
-
-### 2. Configure Repositories
-
-After the version control provider is authorized, you are automatically redirected to start configuring which repositories you want to start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline.
-
-1. From the **Global** view, navigate to the project that you want to configure pipelines.
-
-1. Click **Resources > Pipelines.**
-
-1. Click on **Configure Repositories**.
-
-1. A list of repositories is displayed. If you are configuring repositories for the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list.
-
-1. For each repository that you want to set up a pipeline, click on **Enable**.
-
-1. When you're done enabling all your repositories, click on **Done**.
-
-**Results:** You have a list of repositories that you can start configuring pipelines for.
-
-### 3. Configure the Pipeline
-
-Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks.
-
-1. From the **Global** view, navigate to the project that you want to configure pipelines.
-
-1. Click **Resources > Pipelines.**
-
-1. Find the repository that you want to set up a pipeline for.
-
-1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving on to the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/config)
-
-    * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository.
-    * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration.
-
-1. 
Select which `branch` to use from the list of branches. - -1. Optional: Set up notifications. - -1. Set up the trigger rules for the pipeline. - -1. Enter a **Timeout** for the pipeline. - -1. When all the stages and steps are configured, click **Done**. - -**Results:** Your pipeline is now configured and ready to be run. - - -# Pipeline Configuration Reference - -Refer to [this page]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: - -- Run a script -- Build and publish images -- Publish catalog templates -- Deploy YAML -- Deploy a catalog app - -The configuration reference also covers how to configure: - -- Notifications -- Timeouts -- The rules that trigger a pipeline -- Environment variables -- Secrets - - -# Running your Pipelines - -Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** Find your pipeline and select the vertical **⋮ > Run**. - -During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: - -- `docker-registry` -- `jenkins` -- `minio` - -This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. - -# Triggering a Pipeline - -When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. - -Available Events: - -* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. -* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. -* **Tag**: When a tag is created in the repository, the pipeline is triggered. - -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos/). - -### Modifying the Event Triggers for the Repository - -1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. - -1. 1. Click **Resources > Pipelines.** - -1. Find the repository that you want to modify the event triggers. Select the vertical **⋮ > Setting**. - -1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. - -1. Click **Save**. diff --git a/content/rancher/v2.5/en/pipelines/config/_index.md b/content/rancher/v2.5/en/pipelines/config/_index.md deleted file mode 100644 index 7add0b7d93..0000000000 --- a/content/rancher/v2.5/en/pipelines/config/_index.md +++ /dev/null @@ -1,648 +0,0 @@ ---- -title: Pipeline Configuration Reference -weight: 1 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/pipelines/config - - /rancher/v2.x/en/pipelines/config/ ---- - -In this section, you'll learn how to configure pipelines. 
- -- [Step Types](#step-types) -- [Step Type: Run Script](#step-type-run-script) -- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) -- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) -- [Step Type: Deploy YAML](#step-type-deploy-yaml) -- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) -- [Notifications](#notifications) -- [Timeouts](#timeouts) -- [Triggers and Trigger Rules](#triggers-and-trigger-rules) -- [Environment Variables](#environment-variables) -- [Secrets](#secrets) -- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) -- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) - - [Executor Quota](#executor-quota) - - [Resource Quota for Executors](#resource-quota-for-executors) - - [Custom CA](#custom-ca) -- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) -- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) - -# Step Types - -Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. - -Step types include: - -- [Run Script](#step-type-run-script) -- [Build and Publish Images](#step-type-build-and-publish-images) -- [Publish Catalog Template](#step-type-publish-catalog-template) -- [Deploy YAML](#step-type-deploy-yaml) -- [Deploy Catalog App](#step-type-deploy-catalog-app) - - - -### Configuring Steps By UI - -If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. - -1. Add stages to your pipeline execution by clicking **Add Stage**. - - 1. Enter a **Name** for each stage of your pipeline. - 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. - -1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. - -### Configuring Steps by YAML - -For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com -``` -# Step Type: Run Script - -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configuring Script by UI - -1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. - -1. Click **Add**. 
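As noted above, run-script steps can reference pipeline metadata through variable substitution. The following is a minimal, hypothetical sketch of such a step (the image and the echoed variables are illustrative; the variables themselves are listed in the substitution reference later on this page), and the next section shows the general YAML form.

```yaml
stages:
- name: Show build metadata
  steps:
  - runScriptConfig:
      image: busybox
      # ${CICD_GIT_BRANCH} and ${CICD_GIT_COMMIT} are substituted at execution time
      shellScript: echo "Building ${CICD_GIT_BRANCH} at ${CICD_GIT_COMMIT}"
```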
- -### Configuring Script by YAML -```yaml -# example -stages: -- name: Build something - steps: - - runScriptConfig: - image: golang - shellScript: go build -``` -# Step Type: Build and Publish Images - -The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. - -The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. - -### Configuring Building and Publishing Images by UI -1. From the **Step Type** drop-down, choose **Build and Publish**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | - Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | - Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | - Build Context

(**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). - -### Configuring Building and Publishing Images by YAML - -You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: - -Variable Name | Description -------------------------|------------------------------------------------------------ -PLUGIN_DRY_RUN | Disable docker push -PLUGIN_DEBUG | Docker daemon executes in debug mode -PLUGIN_MIRROR | Docker daemon registry mirror -PLUGIN_INSECURE | Docker daemon allows insecure registries -PLUGIN_BUILD_ARGS | Docker build args, a comma separated list - -
- -```yaml -# This example shows an environment variable being used -# in the Publish Image step. This variable allows you to -# publish an image to an insecure registry: - -stages: -- name: Publish Image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - pushRemote: true - registry: example.com - env: - PLUGIN_INSECURE: "true" -``` - -# Step Type: Publish Catalog Template - -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a git hosted chart repository. It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. - -### Configuring Publishing a Catalog Template by UI - -1. From the **Step Type** drop-down, choose **Publish Catalog Template**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | - Catalog Template Name | The name of the template. For example, wordpress. | - Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | - Protocol | You can choose to publish via HTTP(S) or SSH protocol. | - Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | - Git URL | The Git URL of the chart repository that the template will be published to. | - Git Branch | The Git branch of the chart repository that the template will be published to. | - Author Name | The author name used in the commit message. | - Author Email | The author email used in the commit message. | - - -### Configuring Publishing a Catalog Template by YAML - -You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: - -* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. -* CatalogTemplate: The name of the template. -* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. -* GitUrl: The git URL of the chart repository that the template will be published to. -* GitBranch: The git branch of the chart repository that the template will be published to. -* GitAuthor: The author name used in the commit message. -* GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. - -```yaml -# example -stages: -- name: Publish Wordpress Template - steps: - - publishCatalogConfig: - path: ./charts/wordpress/latest - catalogTemplate: wordpress - version: ${CICD_GIT_TAG} - gitUrl: git@github.com:myrepo/charts.git - gitBranch: master - gitAuthor: example-user - gitEmail: user@example.com - envFrom: - - sourceName: publish-keys - sourceKey: DEPLOY_KEY -``` - -# Step Type: Deploy YAML - -This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configure Deploying YAML by UI - -1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. - -1. Enter the **YAML Path**, which is the path to the manifest file in the source code. - -1. Click **Add**. - -### Configure Deploying YAML by YAML - -```yaml -# example -stages: -- name: Deploy - steps: - - applyYamlConfig: - path: ./deployment.yaml -``` - -# Step Type :Deploy Catalog App - -The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. - -### Configure Deploying Catalog App by UI - -1. From the **Step Type** drop-down, choose **Deploy Catalog App**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Catalog | The catalog from which the app template will be used. | - Template Name | The name of the app template. For example, wordpress. | - Template Version | The version of the app template you want to deploy. | - Namespace | The target namespace where you want to deploy the app. | - App Name | The name of the app you want to deploy. | - Answers | Key-value pairs of answers used to deploy the app. | - - -### Configure Deploying Catalog App by YAML - -You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: - -* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. -* Version: The version of the template you want to deploy. -* Answers: Key-value pairs of answers used to deploy the app. -* Name: The name of the app you want to deploy. -* TargetNamespace: The target namespace where you want to deploy the app. - -```yaml -# example -stages: -- name: Deploy App - steps: - - applyAppConfig: - catalogTemplate: cattle-global-data:library-mysql - version: 0.3.8 - answers: - persistence.enabled: "false" - name: testmysql - targetNamespace: test -``` - -# Timeouts - -By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. - -### Configuring Timeouts by UI - -Enter a new value in the **Timeout** field. 
- -### Configuring Timeouts by YAML - -In the `timeout` section, enter the timeout value in minutes. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -# timeout in minutes -timeout: 30 -``` - -# Notifications - -You can enable notifications to any notifiers based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers]({{}}/rancher/v2.5/en/monitoring-alerting/legacy/notifiers/) so it will be easy to add recipients immediately. - -### Configuring Notifications by UI - -1. Within the **Notification** section, turn on notifications by clicking **Enable**. - -1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. - -1. If you don't have any existing notifiers, Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.5/en/monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. - - > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. - -1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. - -### Configuring Notifications by YAML - -In the `notification` section, you will provide the following information: - -* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. - * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. - * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. -* **Condition:** Select which conditions of when you want the notification to be sent. -* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. - -```yaml -# Example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` - -# Triggers and Trigger Rules - -After you configure a pipeline, you can trigger it using different methods: - -- **Manually:** - - After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. 
When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. - -- **Automatically:** - - When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. - - To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. - -Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: - -- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. - -- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. - -If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. - -Wildcard character (`*`) expansion is supported in `branch` conditions. - -This section covers the following topics: - -- [Configuring pipeline triggers](#configuring-pipeline-triggers) -- [Configuring stage triggers](#configuring-stage-triggers) -- [Configuring step triggers](#configuring-step-triggers) -- [Configuring triggers by YAML](#configuring-triggers-by-yaml) - -### Configuring Pipeline Triggers - -1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. - -1. Click **Resources > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Click on **Show Advanced Options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. - - 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - - 1. **Optional:** Add more branches that trigger a build. - -1. Click **Done.** - -### Configuring Stage Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the stage. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the stage and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the stage. | - | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - -### Configuring Step Triggers - -1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. - -1. Click **Resources > Pipelines.** - -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. - -1. 
Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. - -1. Click **Show advanced options**. - -1. In the **Trigger Rules** section, configure rules to run or skip the step. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the step and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the step. | - | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - - -### Configuring Triggers by YAML - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -``` - -# Environment Variables - -When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. - -### Configuring Environment Variables by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. - -1. Add your environment variable(s) into either the script or file. - -1. Click **Save**. - -### Configuring Environment Variables by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 -``` - -# Secrets - -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/). - -### Prerequisite -Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. -
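For reference, a secret like the `my-secret`/`secret-key` pair used in the YAML example further down could be created from a manifest along these lines. This is a minimal sketch: the namespace name is a placeholder for wherever your pipeline build pods run, and you can equally create the secret through the project UI.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-secret
  namespace: my-pipeline-namespace   # placeholder: use the namespace where pipeline build pods run
type: Opaque
stringData:
  secret-key: my-sensitive-value     # exposed to a step via envFrom, as shown below
```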
- ->**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). - -### Configuring Secrets by UI - -1. From the **Global** view, navigate to the project that you want to configure pipelines. - -1. Click **Resources > Pipelines.** - -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. - -1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. - -1. Click **Show advanced options**. - -1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. - -1. Click **Save**. - -### Configuring Secrets by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${ALIAS_ENV} - # environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV -``` - -# Pipeline Variable Substitution Reference - -For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. - -Variable Name | Description -------------------------|------------------------------------------------------------ -`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). -`CICD_GIT_URL` | URL of the Git repository. -`CICD_GIT_COMMIT` | Git commit ID being executed. -`CICD_GIT_BRANCH` | Git branch of this event. -`CICD_GIT_REF` | Git reference specification of this event. -`CICD_GIT_TAG` | Git tag name, set on tag event. -`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). -`CICD_PIPELINE_ID` | Rancher ID for the pipeline. -`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. -`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. -`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. -`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.

[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml)
-
-# Global Pipeline Execution Settings
-
-After configuring a version control provider, there are several options that can be configured globally to control how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar.
-
-- [Executor Quota](#executor-quota)
-- [Resource Quota for Executors](#resource-quota-for-executors)
-- [Custom CA](#custom-ca)
-
-### Executor Quota
-
-Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit.
-
-### Resource Quota for Executors
-
-Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, a build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every container in the pod.
-
-Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**.
-
-You can also configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file.
-
-In a step, you will provide the following information:
-
-* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step.
-* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step.
-* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step.
-* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step.
-
-```yaml
-# example
-stages:
-  - name: Build something
-    steps:
-    - runScriptConfig:
-        image: busybox
-        shellScript: ls
-      cpuRequest: 100m
-      cpuLimit: 1
-      memoryRequest: 100Mi
-      memoryLimit: 1Gi
-    - publishImageConfig:
-        dockerfilePath: ./Dockerfile
-        buildContext: .
-        tag: repo/app:v1
-      cpuRequest: 100m
-      cpuLimit: 1
-      memoryRequest: 100Mi
-      memoryLimit: 1Gi
-```
-
->**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way.
-
-### Custom CA
-
-If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed.
-
-1. Click **Edit cacerts**.
-
-1. Paste in the CA root certificates and click **Save cacerts**.
-
-**Result:** Pipelines can be used and new pods will be able to work with the self-signed certificate.
-
-# Persistent Data for Pipeline Components
-
-The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine.
If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/storage) - -# Example rancher-pipeline.yml - -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/example) diff --git a/content/rancher/v2.5/en/pipelines/example-repos/_index.md b/content/rancher/v2.5/en/pipelines/example-repos/_index.md deleted file mode 100644 index ccc66147bb..0000000000 --- a/content/rancher/v2.5/en/pipelines/example-repos/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Example Repositories -weight: 500 -aliases: - - /rancher/v2.5/en/tools/pipelines/quick-start-guide/ - - /rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos - - /rancher/v2.x/en/pipelines/example-repos/ ---- - -Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: - -- Go -- Maven -- php - -> **Note:** The example repositories are only available if you have not [configured a version control provider]({{}}/rancher/v2.5/en/project-admin/pipelines). - -To start using these example repositories, - -1. [Enable the example repositories](#1-enable-the-example-repositories) -2. [View the example pipeline](#2-view-the-example-pipeline) -3. [Run the example pipeline](#3-run-the-example-pipeline) - -### 1. Enable the Example Repositories - -By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** - -1. Click **Configure Repositories**. - - **Step Result:** A list of example repositories displays. - - >**Note:** Example repositories only display if you haven't fetched your own repos. - -1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. - -**Results:** - -- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. - -- The following workloads are deployed to a new namespace: - - - `docker-registry` - - `jenkins` - - `minio` - -### 2. View the Example Pipeline - -After enabling an example repository, review the pipeline to see how it is set up. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** - -1. Find the example repository, select the vertical **⋮**. There are two ways to view the pipeline: - * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. - * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. - -### 3. Run the Example Pipeline - -After enabling an example repository, run the pipeline to see how it works. - -1. From the **Global** view, navigate to the project that you want to test out pipelines. - -1. Click **Resources > Pipelines.** - -1. Find the example repository, select the vertical **⋮ > Run**. 
- - >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. - -**Result:** The pipeline runs. You can see the results in the logs. - -### What's Next? - -For detailed information about setting up your own pipeline for your repository, [configure a version control provider]({{}}/rancher/v2.5/en/project-admin/pipelines), enable a repository and finally configure your pipeline. diff --git a/content/rancher/v2.5/en/pipelines/example/_index.md b/content/rancher/v2.5/en/pipelines/example/_index.md deleted file mode 100644 index 796e290419..0000000000 --- a/content/rancher/v2.5/en/pipelines/example/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Example YAML File -weight: 501 -aliases: - - /rancher/v2.5/en/tools/pipelines/reference/ - - /rancher/v2.5/en/k8s-in-rancher/pipelines/example - - /rancher/v2.x/en/pipelines/example/ ---- - -Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. - -In the [pipeline configuration reference]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. - -Below is a full example `rancher-pipeline.yml` for those who want to jump right in. - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} - # Set environment variables in container for the step - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 - # Set environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com - - name: Deploy some workloads - steps: - - applyYamlConfig: - path: ./deployment.yaml -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -# timeout in minutes -timeout: 30 -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` diff --git a/content/rancher/v2.5/en/pipelines/storage/_index.md b/content/rancher/v2.5/en/pipelines/storage/_index.md deleted file mode 100644 index f5eb987d4b..0000000000 --- a/content/rancher/v2.5/en/pipelines/storage/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: Configuring Persistent Data for Pipeline Components -weight: 600 -aliases: - - /rancher/v2.5/en/k8s-in-rancher/pipelines/storage - - /rancher/v2.x/en/pipelines/storage/ ---- - -The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. 
This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/how-storage-works/) - ->**Prerequisites (for both parts A and B):** -> ->[Persistent volumes]({{}}/rancher/v2.5/en/cluster-admin/volumes-and-storage/) must be available for the cluster. - -### A. Configuring Persistent Data for Docker Registry - -1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** - -1. Find the `docker-registry` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. - -1. Click **Upgrade**. - -### B. Configuring Persistent Data for Minio - -1. From the project view, click **Resources > Workloads.** Find the `minio` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} - -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. - -1. Click **Upgrade**. - -**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.5/en/project-admin/_index.md b/content/rancher/v2.5/en/project-admin/_index.md deleted file mode 100644 index f8ecaa0b69..0000000000 --- a/content/rancher/v2.5/en/project-admin/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Project Administration -weight: 9 -aliases: - - /rancher/v2.5/en/project-admin/editing-projects/ - - /rancher/v2.x/en/project-admin/ ---- - -_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. - -In terms of hierarchy: - -- Clusters contain projects -- Projects contain namespaces - -Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! - -Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. - -You can use projects to perform actions like: - -- [Assign users access to a group of namespaces]({{}}/rancher/v2.5/en/project-admin/project-members) -- Assign users [specific roles in a project]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles). A role can be owner, member, read-only, or [custom]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles/) -- [Set resource quotas]({{}}/rancher/v2.5/en/project-admin/resource-quotas/) -- [Manage namespaces]({{}}/rancher/v2.5/en/project-admin/namespaces/) -- [Configure tools]({{}}/rancher/v2.5/en/project-admin/tools/) -- [Set up pipelines for continuous integration and deployment]({{}}/rancher/v2.5/en/project-admin/pipelines) -- [Configure pod security policies]({{}}/rancher/v2.5/en/project-admin/pod-security-policies) - -### Authorization - -Non-administrative users are only authorized for project access after an [administrator]({{}}/rancher/v2.5/en/admin-settings/rbac/global-permissions/), [cluster owner or member]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles) adds them to the project's **Members** tab. - -Whoever creates the project automatically becomes a [project owner]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/#project-roles). 
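Because access and resources are granted at the project level and inherited by the project's namespaces, it can help to see how that link is recorded on the namespace itself. The following is a rough sketch only: the `field.cattle.io/projectId` annotation key reflects Rancher's convention for associating a namespace with a project, but the cluster and project IDs shown are placeholders, not values Rancher would generate for your installation.

```yaml
# Rough sketch of a namespace that belongs to a Rancher project.
# The annotation key follows Rancher's convention; the IDs are placeholders.
apiVersion: v1
kind: Namespace
metadata:
  name: my-app
  annotations:
    field.cattle.io/projectId: c-abc12:p-xyz34  # <cluster ID>:<project ID>
```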
-
-## Switching between Projects
-
-To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly from the **Projects/Namespaces** page:
-
-1. From the **Global** view, navigate to the project that you want to configure.
-
-1. Select **Projects/Namespaces** from the navigation bar.
-
-1. Select the link for the project that you want to open.
diff --git a/content/rancher/v2.5/en/project-admin/namespaces/_index.md b/content/rancher/v2.5/en/project-admin/namespaces/_index.md
deleted file mode 100644
index f07a9c69bf..0000000000
--- a/content/rancher/v2.5/en/project-admin/namespaces/_index.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-title: Namespaces
-weight: 2520
-aliases:
-  - /rancher/v2.x/en/project-admin/namespaces/
----
-
-Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources.
-
-Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace.
-
-Resources that you can assign directly to namespaces include:
-
-- [Workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/)
-- [Load Balancers/Ingress]({{}}/rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/)
-- [Service Discovery Records]({{}}/rancher/v2.5/en/k8s-in-rancher/service-discovery/)
-- [Certificates]({{}}/rancher/v2.5/en/k8s-in-rancher/certificates/)
-- [ConfigMaps]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps/)
-- [Registries]({{}}/rancher/v2.5/en/k8s-in-rancher/registries/)
-- [Secrets]({{}}/rancher/v2.5/en/k8s-in-rancher/secrets/)
-
-To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned at the project level instead, and permissions are automatically inherited by any namespace owned by the particular project.
-
-> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher]({{}}/rancher/v2.5/en/project-admin/namespaces) to ensure that you will have permission to access the namespace.
-
-
-### Creating Namespaces
-
-Create a new namespace to isolate apps and resources in a project.
-
->**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads]({{}}/rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads/), [certificates]({{}}/rancher/v2.5/en/k8s-in-rancher/certificates/), [ConfigMaps]({{}}/rancher/v2.5/en/k8s-in-rancher/configmaps), etc.), you can create a namespace on the fly.
-
-1. From the **Global** view, open the project where you want to create a namespace.
-
-   >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well.
-
-1. From the main menu, select **Namespace**. Then click **Add Namespace**.
-
-1.
**Optional:** If your project has [Resource Quotas]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). - -1. Enter a **Name** and then click **Create**. - -**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. - -### Moving Namespaces to Another Project - -Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. - -1. From the **Global** view, open the cluster that contains the namespace you want to move. - -1. From the main menu, select **Projects/Namespaces**. - -1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at one. - - >**Notes:** - > - >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. - >- You cannot move a namespace into a project that already has a [resource quota]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas/) configured. - >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. - -1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. - -**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. - -### Editing Namespace Resource Quotas - -You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -For more information, see how to [edit namespace resource quotas]({{}}/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/). \ No newline at end of file diff --git a/content/rancher/v2.5/en/project-admin/pipelines/_index.md b/content/rancher/v2.5/en/project-admin/pipelines/_index.md deleted file mode 100644 index b7d5e26df4..0000000000 --- a/content/rancher/v2.5/en/project-admin/pipelines/_index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Rancher's CI/CD Pipelines -description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users -weight: 4000 -aliases: - - /rancher/v2.5/en/concepts/ci-cd-pipelines/ - - /rancher/v2.5/en/tasks/pipelines/ - - /rancher/v2.5/en/tools/pipelines/configurations/ - - /rancher/v2.x/en/project-admin/pipelines/ ---- -Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. - -For details, refer to the [pipelines]({{}}/rancher/v2.5/en/k8s-in-rancher/pipelines) section. 
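As a compact illustration of the capabilities listed above, a `.rancher-pipeline.yml` along the following lines runs tests, builds and publishes an image, and deploys it. This is a trimmed-down sketch based on the step types documented in this section (`runScriptConfig`, `publishImageConfig`, `applyYamlConfig`); the image name, test command, and file paths are placeholders.

```yaml
# Trimmed-down sketch: test, build and publish, then deploy.
# Image name, test command, and paths are placeholders.
stages:
  - name: Run unit tests
    steps:
      - runScriptConfig:
          image: golang:1.17
          shellScript: go test ./...
  - name: Build and publish image
    steps:
      - publishImageConfig:
          dockerfilePath: ./Dockerfile
          buildContext: .
          tag: example/my-app:latest
  - name: Deploy workload
    steps:
      - applyYamlConfig:
          path: ./deployment.yaml
```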
\ No newline at end of file diff --git a/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md b/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md deleted file mode 100644 index ddc2483a78..0000000000 --- a/content/rancher/v2.5/en/project-admin/pod-security-policies/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Pod Security Policies -weight: 5600 -aliases: - - /rancher/v2.x/en/project-admin/pod-security-policies/ ---- - -> These cluster options are only available for [clusters in which Rancher has launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/). - -You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. - -### Prerequisites - -- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies]({{}}/rancher/v2.5/en/admin-settings/pod-security-policies/). -- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster]({{}}/rancher/v2.5/en/cluster-admin/pod-security-policy). - -### Applying a Pod Security Policy - -1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. -1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. - Assigning a PSP to a project will: - - - Override the cluster's default PSP. - - Apply the PSP to the project. - - Apply the PSP to any namespaces you add to the project later. - -1. Click **Save**. - -**Result:** The PSP is applied to the project and any namespaces added to the project. - ->**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/content/rancher/v2.5/en/project-admin/project-members/_index.md b/content/rancher/v2.5/en/project-admin/project-members/_index.md deleted file mode 100644 index 45a149e90e..0000000000 --- a/content/rancher/v2.5/en/project-admin/project-members/_index.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Adding Users to Projects -weight: 2505 -aliases: - - /rancher/v2.5/en/tasks/projects/add-project-members/ - - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/project-members - - /rancher/v2.x/en/project-admin/project-members/ ---- - -If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. - -You can add members to a project as it is created, or add them to an existing project. - ->**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members]({{}}/rancher/v2.5/en/cluster-provisioning/cluster-members/) instead. - -### Adding Members to a New Project - -You can add members to a project as you create it (recommended if possible). 
For details on creating a new project, refer to the [cluster administration section.]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) - -### Adding Members to an Existing Project - -Following project creation, you can add users as project members so that they can access its resources. - -1. From the **Global** view, open the project that you want to add members to. - -2. From the main menu, select **Members**. Then click **Add Member**. - -3. Search for the user or group that you want to add to the project. - - If external authentication is configured: - - - Rancher returns users from your external authentication source as you type. - - - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. - - >**Note:** If you are logged in as a local user, external users do not display in your search results. - -1. Assign the user or group **Project** roles. - - [What are Project Roles?]({{}}/rancher/v2.5/en/admin-settings/rbac/cluster-project-roles/) - - >**Notes:** - > - >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - > - >- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. - > - >- For `Custom` roles, you can modify the list of individual roles available for assignment. - > - > - To add roles to the list, [Add a Custom Role]({{}}/rancher/v2.5/en/admin-settings/rbac/default-custom-roles). - > - To remove roles from the list, [Lock/Unlock Roles]({{}}/rancher/v2.5/en/admin-settings/rbac/locked-roles/). - -**Result:** The chosen users are added to the project. - -- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. -- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md deleted file mode 100644 index 5bd7f0ae07..0000000000 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Project Resource Quotas -weight: 2515 -aliases: - - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas - - /rancher/v2.x/en/project-admin/resource-quotas/ ---- - -In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. - -This page is a how-to guide for creating resource quotas in existing projects. 
- -Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/#creating-projects) - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](./quotas-for-projects) - -### Applying Resource Quotas to Existing Projects - -Edit [resource quotas]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas) when: - -- You want to limit the resources that a project and its namespaces can use. -- You want to scale the resources available to a project up or down when a research quota is already in effect. - -1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. - -1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. - -1. Select a Resource Type. For more information on types, see the [quota type reference.](./quota-type-reference) - -1. Enter values for the **Project Limit** and the **Namespace Default Limit**. - - | Field | Description | - | ----------------------- | -------------------------------------------------------------------------------------------------------- | - | Project Limit | The overall resource limit for the project. | - | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | - -1. **Optional:** Add more quotas. - -1. Click **Create**. - -**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, you may still create namespaces, but they will be given a resource quota of 0. Subsequently, Rancher will not allow you to create any resources restricted by this quota. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md deleted file mode 100644 index b92fa7e37e..0000000000 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/override-container-default/_index.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Setting Container Default Resource Limits -weight: 3 -aliases: - - /rancher/v2.x/en/project-admin/resource-quotas/override-container-default/ ---- - -When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. 
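For reference, the following generic Kubernetes snippet shows the kind of per-container requests and limits that workloads must declare once a CPU or memory quota is active in their namespace. It is not Rancher-specific, and the values are arbitrary examples.

```yaml
# Generic Kubernetes example: a container that satisfies a CPU/memory quota
# by declaring both requests (reservations) and limits. Values are arbitrary.
apiVersion: v1
kind: Pod
metadata:
  name: quota-compliant-pod
spec:
  containers:
    - name: app
      image: nginx
      resources:
        requests:
          cpu: 100m      # counted against the quota's CPU reservation
          memory: 128Mi
        limits:
          cpu: 500m      # counted against the quota's CPU limit
          memory: 256Mi
```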
- -To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. - -### Editing the Container Default Resource Limit - -Edit [container default resource limit]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas/) when: - -- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. -- You want to edit the default container resource limit. - -1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. -1. From the main menu, select **Projects/Namespaces**. -1. Find the project that you want to edit the container default resource limit. From that project, select **⋮ > Edit**. -1. Expand **Container Default Resource Limit** and edit the values. - -### Resource Limit Propagation - -When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. - -You can set a default container resource limit on a project and launch any catalog applications. - -Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. - -### Container Resource Quota Types - -The following resource limits can be configured: - -| Resource Type | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| -| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | -| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | -| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md deleted file mode 100644 index a3ce9a6af3..0000000000 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/override-namespace-default/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Overriding the Default Limit for a Namespace -weight: 2 -aliases: - - /rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/ ---- - -Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. - -In the diagram below, the Rancher administrator has a resource quota in effect for their project. 
However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) for `Namespace 3` so that the namespace can access more resources. - -Namespace Default Limit Override -![Namespace Default Limit Override]({{}}/img/rancher/rancher-resource-quota-override.svg) - -How to: [Editing Namespace Resource Quotas]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/) - -### Editing Namespace Resource Quotas - -If there is a [resource quota]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. - -1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. - -1. From the main menu, select **Projects/Namespaces**. - -1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. - -1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. - - For more information about each **Resource Type**, see [Resource Quotas]({{}}/rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas/). - - >**Note:** - > - >- If a resource quota is not configured for the project, these options will not be available. - >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. - -**Result:** Your override is applied to the namespace's resource quota. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md deleted file mode 100644 index 45ee11e398..0000000000 --- a/content/rancher/v2.5/en/project-admin/resource-quotas/quotas-for-projects/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: How Resource Quotas Work in Rancher Projects -weight: 1 -aliases: - - /rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/ ---- - -Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. - -In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. - -In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. - -Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace -![Native Kubernetes Resource Quota Implementation]({{}}/img/rancher/kubernetes-resource-quota.svg) - -Resource quotas are a little different in Rancher. 
In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. - -The resource quota includes two limits, which you set while creating or editing a project: - - -- **Project Limits:** - - This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. - -- **Namespace Default Limits:** - - This value is the default resource limit available for each namespace. When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you override it. - -In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. - -Rancher: Resource Quotas Propagating to Each Namespace -![Rancher Resource Quota Implementation]({{}}/img/rancher/rancher-resource-quota.png) - -Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. - -The following table explains the key differences between the two quota types. - -| Rancher Resource Quotas | Kubernetes Resource Quotas | -| ---------------------------------------------------------- | -------------------------------------------------------- | -| Applies to projects and namespace. | Applies to namespaces only. | -| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | -| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. 
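To tie this back to the native objects, the propagated namespace quota ultimately takes the shape of a standard Kubernetes `ResourceQuota` in each namespace. The snippet below is a generic example of such an object with illustrative names and values; it is not the exact manifest Rancher generates.

```yaml
# Generic Kubernetes ResourceQuota of the kind enforced in each namespace.
# Names and values are illustrative, not what Rancher generates verbatim.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: namespace-default-quota
  namespace: my-namespace
spec:
  hard:
    requests.cpu: 500m
    requests.memory: 512Mi
    limits.cpu: "1"
    limits.memory: 1Gi
```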
diff --git a/content/rancher/v2.5/en/project-admin/tools/_index.md b/content/rancher/v2.5/en/project-admin/tools/_index.md
deleted file mode 100644
index 568b142cb5..0000000000
--- a/content/rancher/v2.5/en/project-admin/tools/_index.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Tools for Logging, Monitoring, and Visibility
-weight: 2525
-aliases:
-  - /rancher/v2.x/en/project-admin/tools/
----
-
-Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories:
-
-
-- [Notifiers and Alerts](#notifiers-and-alerts)
-- [Logging](#logging)
-- [Monitoring](#monitoring)
-
-
-
-## Notifiers and Alerts
-
-Notifiers and alerts are two features that work together to inform you of events in the Rancher system. Before they can be enabled, the monitoring application must be installed.
-
-Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks.
-
-Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level.
-
-## Logging
-
-Logging is helpful because it allows you to:
-
-- Capture and analyze the state of your cluster
-- Look for trends in your environment
-- Save your logs to a safe location outside of your cluster
-- Stay informed of events like a container crashing, a pod eviction, or a node dying
-- More easily debug and troubleshoot problems
-
-Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd.
-
-For details, refer to the [logging section.]({{}}/rancher/v2.5/en/logging)
-
-## Monitoring
-
-Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.]({{}}/rancher/v2.5/en/monitoring-alerting)
diff --git a/content/rancher/v2.5/en/quick-start-guide/_index.md b/content/rancher/v2.5/en/quick-start-guide/_index.md
deleted file mode 100644
index 6e8bb1f38e..0000000000
--- a/content/rancher/v2.5/en/quick-start-guide/_index.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Rancher Deployment Quick Start Guides
-metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases.
-short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases.
-weight: 2
----
->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/).
-
-Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future.
- -We have Quick Start Guides for: - -- [Deploying Rancher Server]({{}}/rancher/v2.5/en/quick-start-guide/deployment/): Get started running Rancher using the method most convenient for you. - -- [Deploying Workloads]({{}}/rancher/v2.5/en/quick-start-guide/workload/): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md deleted file mode 100644 index b11ac98a15..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Deploying Rancher Server -weight: 100 -aliases: - - /rancher/v2.x/en/quick-start-guide/deployment/ ---- - -Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. - -- [DigitalOcean](./digital-ocean-qs) (uses Terraform) -- [AWS](./amazon-aws-qs) (uses Terraform) -- [Azure](./microsoft-azure-qs) (uses Terraform) -- [GCP](./google-gcp-qs) (uses Terraform) -- [Vagrant](./quickstart-vagrant) - -If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. - -- [Manual Install](./quickstart-manual-setup) diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md deleted file mode 100644 index 17c2a48507..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Rancher AWS Quick Start Guide -description: Read this step by step Rancher AWS guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher server on AWS in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Amazon AWS will incur charges. - -- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. -- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. -- [IAM Policy created](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start): Defines the permissions an account attached with this policy has. -- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. - -### Example IAM Policy - -The AWS module just creates an EC2 KeyPair, an EC2 SecurityGroup and an EC2 instance. 
A simple policy would be: - -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "ec2:*", - "Resource": "*" - } - ] -} -``` - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the AWS folder containing the terraform files by executing `cd quickstart/rancher/aws`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `aws_access_key` - Amazon AWS Access Key - - `aws_secret_key` - Amazon AWS Secret Key - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. -Suggestions include: - - `aws_region` - Amazon AWS region, choose the closest instead of the default (`us-east-1`) - - `prefix` - Prefix for all created resources - - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget - - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher server using the `id_rsa` key generated in `quickstart/rancher/aws`. - -#### Result - -Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.5/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/aws` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md deleted file mode 100644 index 90e5300358..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Rancher DigitalOcean Quick Start Guide -description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. 
-weight: 100 ---- -The following steps will quickly deploy a Rancher server on DigitalOcean in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to DigitalOcean will incur charges. - -- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. -- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/rancher/do`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `do_token` - DigitalOcean access key - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. -Suggestions include: - - `do_region` - DigitalOcean region, choose the closest instead of the default (`nyc1`) - - `prefix` - Prefix for all created resources - - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 15 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/do`. - -#### Result - -Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. 
For more information, see [Creating Deployments]({{}}/rancher/v2.5/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/do` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md deleted file mode 100644 index aaf1d716a3..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/google-gcp-qs/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Rancher GCP Quick Start Guide -description: Read this step by step Rancher GCP guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher server on GCP in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Google GCP will incur charges. - -- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. -- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. -- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the GCP folder containing the terraform files by executing `cd quickstart/rancher/gcp`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `gcp_account_json` - GCP service account file path and file name - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. -Suggestions include: - - `gcp_region` - Google GCP region, choose the closest instead of the default (`us-east4`) - - `gcp_zone` - Google GCP zone, choose the closest instead of the default (`us-east4-a`) - - `prefix` - Prefix for all created resources - - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget - -6. Run `terraform init`. - -7. 
To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/gcp`. - -#### Result - -Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.5/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/gcp` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md deleted file mode 100644 index 591fb2d0a1..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Rancher Azure Quick Start Guide -description: Read this step by step Rancher Azure guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 100 ---- - -The following steps will quickly deploy a Rancher server on Azure in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. -- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. -- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. 
-- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the Azure folder containing the terraform files by executing `cd quickstart/rancher/azure`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `azure_subscription_id` - Microsoft Azure Subscription ID - - `azure_client_id` - Microsoft Azure Client ID - - `azure_client_secret` - Microsoft Azure Client Secret - - `azure_tenant_id` - Microsoft Azure Tenant ID - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. -Suggestions include: - - `azure_location` - Microsoft Azure region, choose the closest instead of the default (`East US`) - - `prefix` - Prefix for all created resources - - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget - - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster - - `windows_admin_password` - The admin password of the windows worker node - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/azure`. - -#### Result - -Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.5/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/azure` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. 
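For reference, the Azure steps above might be condensed into the following shell sketch. All values are placeholders rather than working credentials, the variable names are the ones listed in step 4, and optional variables are omitted; treat it as an illustration of the flow, not a replacement for the guide.

```bash
cd quickstart/rancher/azure
mv terraform.tfvars.example terraform.tfvars

# Placeholder values only. For brevity this overwrites the example file;
# in practice you would edit it and keep any optional variables you need.
cat > terraform.tfvars <<'EOF'
azure_subscription_id         = "00000000-0000-0000-0000-000000000000"
azure_client_id               = "00000000-0000-0000-0000-000000000000"
azure_client_secret           = "<service-principal-secret>"
azure_tenant_id               = "00000000-0000-0000-0000-000000000000"
rancher_server_admin_password = "<admin-password>"
EOF

terraform init
terraform apply --auto-approve    # creates the environment
# ... evaluate Rancher ...
terraform destroy --auto-approve  # tears everything down when you are finished
```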
diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md deleted file mode 100644 index e9051c7308..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: Manual Quick Start -weight: 300 -aliases: - - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/ ---- -Howdy Partner! This tutorial walks you through: - -- Installation of Rancher 2.x -- Creation of your first cluster -- Deployment of an application, Nginx - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Quick Start Outline - -This Quick Start Guide is divided into different tasks for easier consumption. - - - - -1. [Provision a Linux Host](#1-provision-a-linux-host) - -1. [Install Rancher](#2-install-rancher) - -1. [Log In](#3-log-in) - -1. [Create the Cluster](#4-create-the-cluster) - - -
-### 1. Provision a Linux Host - - Begin creation of a custom cluster by provisioning a Linux host. Your host can be: - -- A cloud-host virtual machine (VM) -- An on-prem VM -- A bare-metal server - - >**Note:** - > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. - > - > For a full list of port requirements, refer to [Docker Installation]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/). - - Provision the host according to our [Requirements]({{}}/rancher/v2.5/en/installation/requirements/). - -### 2. Install Rancher - -To install Rancher on your host, connect to it and then use a shell to install. - -1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. - -1. From your shell, enter the following command: - - ``` - sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher - ``` - -**Result:** Rancher is installed. - -### 3. Log In - -Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations. - -1. Open a web browser and enter the IP address of your host: `https://`. - - Replace `` with your host IP address. - -1. When prompted, create a password for the default `admin` account there cowpoke! - -1. Set the **Default View**. - - If `I want to create or manage multiple clusters` is selected, the Cluster Manager UI is used as the default view. - - If `I'm only going to use the cluster Rancher was installed on` is selected, the Cluster Explorer UI is used as the default view. - -1. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.

If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster. - -
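Before creating the first cluster, it can be worth confirming that the Rancher container from the install step is healthy. A minimal, optional check (not part of the official steps) could look like this, run on the Linux host itself:

```bash
# Confirm the rancher/rancher container started earlier is still running.
docker ps --filter "ancestor=rancher/rancher" --format "table {{.ID}}\t{{.Status}}\t{{.Ports}}"

# Rancher answers a simple health check; -k skips verification of the
# self-signed certificate the container generates by default.
curl -k https://localhost/ping    # expected output: pong
```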
- -### 4. Create the Cluster - -Welcome to Rancher! You are now able to create your first Kubernetes cluster. - -In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. - -1. If you chose `I'm only going to use the cluster Rancher was installed on` when setting the default view, click the **Cluster Manager** button in the upper-right of the UI to access the **Clusters** page. - -1. From the **Clusters** page, click **Add Cluster**. - -1. Choose **Existing Nodes**. - -1. Enter a **Cluster Name**. - -1. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. - -1. Click **Next**. - -1. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. - -1. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. - -1. Skip the **Labels** stuff. It's not important for now. - -1. Copy the command displayed on screen to your clipboard. - -1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. - -1. When you finish running the command on your Linux host, click **Done**. - -**Result:** - -Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active.** - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -#### Finished - -Congratulations! You have created your first cluster. - -#### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.5/en/quick-start-guide/workload). diff --git a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md deleted file mode 100644 index b53f581fe7..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Vagrant Quick Start -weight: 200 -aliases: - - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/ ---- -The following steps quickly deploy a Rancher Server with a single node cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.5/en/installation/). - -## Prerequisites - -- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. -- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. -- At least 4GB of free RAM. - -### Note -- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: - - `vagrant plugin install vagrant-vboxmanage` - - `vagrant plugin install vagrant-vbguest` - -## Getting Started - -1. 
Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the folder containing the Vagrantfile by executing `cd quickstart/rancher/vagrant`. - -3. **Optional:** Edit `config.yaml` to: - - - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) - - Change the password of the `admin` user for logging into Rancher. (`default_password`) - -4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. - -5. Once provisioning finishes, go to `https://192.168.56.101` in the browser. The default user/password is `admin/admin`. - -**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.5/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/vagrant` folder execute `vagrant destroy -f`. - -2. Wait for the confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/_index.md deleted file mode 100644 index 62df76b39f..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/workload/_index.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Deploying Workloads -weight: 200 -aliases: - - /rancher/v2.x/en/quick-start-guide/workload/ ---- - -These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. - -- [Workload with Ingress](./quickstart-deploy-workload-ingress) -- [Workload with NodePort](./quickstart-deploy-workload-nodeport) diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md deleted file mode 100644 index cf197acad0..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Workload with Ingress Quick Start -weight: 100 -aliases: - - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/ ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. Leave the remaining options on their default setting. We'll tell you about them later. - -9. Click **Launch**. - -**Result:** - -* Your workload is deployed. 
This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
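If you prefer a command-line point of comparison, the UI steps above correspond roughly to the following kubectl sketch. It assumes `kubectl` is configured against the downstream cluster, and `hello-world` is just an illustrative name; it is not part of the official guide.

```bash
# Create the same workload from the CLI.
kubectl create deployment hello-world --image=rancher/hello-world --namespace default

# Wait until the pods are ready, mirroring the "Active" state shown in the UI.
kubectl rollout status deployment/hello-world --namespace default
```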
-### 2. Expose The Application Via An Ingress - -Now that the application is up and running it needs to be exposed so that other services can connect. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects**. - -3. Open the **Default** project. - -4. Click **Resources > Workloads > Load Balancing.** Click on the **Load Balancing** tab. - -5. Click **Add Ingress**. - -6. Enter a name i.e. **hello**. - -7. In the **Target** field, drop down the list and choose the name that you set for your service. - -8. Enter `80` in the **Port** field. - -9. Leave everything else as default and click **Save**. - -**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. - -### View Your Application - -From the **Load Balancing** page, click the target link, which will look something like `hello.default.xxx.xxx.xxx.xxx.sslip.io > hello-world`. - -Your application will open in a separate window. - -#### Finished - -Congratulations! You have successfully deployed a workload exposed via an ingress. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md deleted file mode 100644 index 9984dc2beb..0000000000 --- a/content/rancher/v2.5/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: Workload with NodePort Quick Start -weight: 200 -aliases: - - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/ ---- - -### Prerequisite - -You have a running cluster with at least 1 node. - -### 1. Deploying a Workload - -You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. - -For this workload, you'll be deploying the application Rancher Hello-World. - -1. From the **Clusters** page, open the cluster that you just created. - -2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. - -3. Open the **Project: Default** project. - -4. Click **Resources > Workloads.** - -5. Click **Deploy**. - - **Step Result:** The **Deploy Workload** page opens. - -6. Enter a **Name** for your workload. - -7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. - -8. From **Port Mapping**, click **Add Port**. - -9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. - - ![As a dropdown, NodePort (On every node selected)]({{}}/img/rancher/nodeport-dropdown.png) - -10. From the **On Listening Port** field, leave the **Random** value in place. - - ![On Listening Port, Random selected]({{}}/img/rancher/listening-port-field.png) - -11. 
From the **Publish the container port** field, enter port `80`. - - ![Publish the container port, 80 entered]({{}}/img/rancher/container-port-field.png) - -12. Leave the remaining options on their default setting. We'll tell you about them later. - -13. Click **Launch**. - -**Result:** - -* Your workload is deployed. This process might take a few minutes to complete. -* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. - -
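The port mapping configured above corresponds roughly to a Kubernetes NodePort Service. As an illustrative point of reference only (assuming `kubectl` access to the cluster and a deployment named `hello-world`), the equivalent CLI flow might be:

```bash
# Expose container port 80 of the workload on a randomly assigned NodePort.
kubectl expose deployment hello-world --type=NodePort --port=80

# The assigned node port (for example 31568) appears in the PORT(S) column.
kubectl get service hello-world
```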
- -### 2. Viewing Your Application - -From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens. - -### Attention: Cloud-Hosted Sandboxes - -When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test Nginx in an ssh session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. - -```sh -gettingstarted@rancher:~$ curl http://localhost:31568 - - - - Rancher - - - - - -

Hello world! - -My hostname is hello-world-66b4b9d88b-78bhx - -k8s services found 2 - - INGRESS_D1E1A394F61C108633C4BD37AEDDE757 tcp://10.43.203.31:80 - - KUBERNETES tcp://10.43.0.1:443
- - - -gettingstarted@rancher:~$ - -``` - -### Finished - -Congratulations! You have successfully deployed a workload exposed via a NodePort. - -#### What's Next? - -When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: - -- [Amazon AWS: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/amazon-aws-qs/#destroying-the-environment) -- [DigitalOcean: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/digital-ocean-qs/#destroying-the-environment) -- [Vagrant: Destroying the Environment]({{}}/rancher/v2.5/en/quick-start-guide/deployment/quickstart-vagrant/#destroying-the-environment) diff --git a/content/rancher/v2.5/en/security/_index.md b/content/rancher/v2.5/en/security/_index.md deleted file mode 100644 index 877e29b6ac..0000000000 --- a/content/rancher/v2.5/en/security/_index.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Security -weight: 20 -aliases: - - /rancher/v2.x/en/security/rancher-2.5/ ---- - - - - - - - -
-| Security policy | Reporting process | Announcements | -| --- | --- | --- | -| Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame. | Please submit possible security issues by emailing security-rancher@suse.com. | Subscribe to the Rancher announcements forum for release updates. |
- -Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,]({{}}/rancher/v2.5/en/admin-settings/rbac) Rancher makes your Kubernetes clusters even more secure. - -On this page, we provide security related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: - -- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) -- [SELinux RPM](#selinux-rpm) -- [Guide to hardening Rancher installations](#rancher-hardening-guide) -- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) -- [Third-party penetration test reports](#third-party-penetration-test-reports) -- [Rancher Security Advisories and CVEs](#rancher-security-advisories-and-cves) -- [Kubernetes Security Best Practices](#kubernetes-security-best-practices) - -### Running a CIS Security Scan on a Kubernetes Cluster - -Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the [CIS](https://www.cisecurity.org/cis-benchmarks/) (Center for Internet Security) Kubernetes Benchmark. - -The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. - -The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". - -CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. - -The Benchmark provides recommendations of two types: Automated and Manual. We run tests related to only Automated recommendations. - -When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. - -For details, refer to the section on [security scans]({{}}/rancher/v2.5/en/cis-scans). - -### SELinux RPM - -[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. - -We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: `rancher-selinux` and `rke2-selinux`. For details, see [this page]({{}}/rancher/v2.5/en/security/selinux). - -### Rancher Hardening Guide - -The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. - -The hardening guides provide prescriptive guidance for hardening a production installation of Rancher. 
See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-sssessment) for the full list of security controls. - -> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. - -Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher. - -### The CIS Benchmark and Self-Assessment - -The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). - -Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark. - -### Third-party Penetration Test Reports - -Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. - -Results: - -- [Cure53 Pen Test - July 2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) -- [Untamed Theory Pen Test - March 2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) - -### Rancher Security Advisories and CVEs - -Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](./cve) - -### Kubernetes Security Best Practices - -For recommendations on securing your Kubernetes cluster, refer to the [Kubernetes Security Best Practices](./best-practices) guide. diff --git a/content/rancher/v2.5/en/security/cve/_index.md b/content/rancher/v2.5/en/security/cve/_index.md deleted file mode 100644 index 2a827a7d84..0000000000 --- a/content/rancher/v2.5/en/security/cve/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Security Advisories and CVEs -weight: 300 ---- - -Rancher is committed to informing the community of security issues in our products. Rancher will publish security advisories and CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. New security advisories are also published in Rancher's GitHub [security page](https://github.com/rancher/rancher/security/advisories). 
- -| ID | Description | Date | Resolution | -|----|-------------|------|------------| -| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) Container Network Interface (CNI) when configured through [RKE templates](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. | 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | -| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. 
This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | -| [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.11 and from 2.6.0 up to and including 2.6.2, where an insufficient check of the same-origin policy when downloading Helm charts from a configured private repository can lead to exposure of the repository credentials to a third-party provider. This issue only happens when the user configures access credentials to a private repository in Rancher inside `Apps & Marketplace > Repositories`. The issue was found and reported by Martin Andreas Ullrich. | 14 Apr 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) and [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | -| [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | A vulnerability was discovered in versions of Rancher starting 2.0 up to and including 2.6.3. The `restricted` pod security policy (PSP) provided in Rancher deviated from the upstream `restricted` policy provided in Kubernetes on account of which Rancher's PSP had `runAsUser` set to `runAsAny`, while upstream had `runAsUser` set to `MustRunAsNonRoot`. This allowed containers to run as any user, including a privileged user (`root`), even when Rancher's `restricted` policy was enforced on a project or at the cluster level. 
| 31 Mar 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | -| [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | A vulnerability was discovered in Rancher versions up to and including 2.4.17, 2.5.11 and 2.6.2. After removing a `Project Role` associated with a group from the project, the bindings that granted access to cluster-scoped resources for those subjects were not deleted. This was due to an incomplete authorization logic check. A user who was a member of the affected group with authenticated access to Rancher could exploit this vulnerability to access resources they shouldn't have had access to. The exposure level would depend on the original permission level granted to the affected project role. This vulnerability only affected customers using group based authentication in Rancher. | 31 Mar 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3), [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) and [Rancher v2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | -| [CVE-2021-36776](https://github.com/rancher/rancher/security/advisories/GHSA-gvh9-xgrq-r8hw) | A vulnerability was discovered in Rancher versions starting 2.5.0 up to and including 2.5.9, that allowed an authenticated user to impersonate any user on a cluster through an API proxy, without requiring knowledge of the impersonated user's credentials. This was due to the API proxy not dropping the impersonation header before sending the request to the Kubernetes API. A malicious user with authenticated access to Rancher could use this to impersonate another user with administrator access in Rancher, thereby gaining administrator level access to the cluster. | 31 Mar 2022 | [Rancher v2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) and [Rancher v2.5.10](https://github.com/rancher/rancher/releases/tag/v2.5.10) | -| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher versions 2.0 through the aforementioned fixed versions, where users were granted access to resources regardless of the resource's API group. For example, Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. Resources affected in the **Downstream clusters** and **Rancher management cluster** can be found [here](https://github.com/rancher/rancher/security/advisories/GHSA-f9xf-jq4j-vqw4). There is not a direct mitigation besides upgrading to the patched Rancher versions. 
| 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered in Rancher 2.0.0 through the aforementioned patched versions, where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then correctly removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server and incorrectly return the information. The vulnerability is limited to valid Rancher users with some level of permissions on the cluster. There is not a direct mitigation besides upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher 2.2.0 through the aforementioned patched versions, where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud-credential ID that was valid for a given cloud provider, could call that cloud provider's API through the proxy API, and the cloud-credential would be attached. The exploit is limited to valid Rancher users. There is not a direct mitigation outside of upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | -| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. | 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | -| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. 
This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | -| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | -| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. 
| 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | -| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | -| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | -| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/rollbacks). | diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md deleted file mode 100644 index 4fed8f4055..0000000000 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/_index.md +++ /dev/null @@ -1,2267 +0,0 @@ ---- -title: CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5 -weight: 201 -aliases: - - /rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/ ---- - -### CIS v1.5 Kubernetes Benchmark - Rancher v2.5 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.5_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
- -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS 1.5 Benchmark | Rancher v2.5 | CIS v1.5| Kubernetes v1.15 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- -## 1 Master Node Security Configuration -### 1.1 Master Node Configuration Files - -#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. - -#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. 
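Because RKE passes these settings as container arguments rather than manifest files, one practical way to review them during an audit is to inspect the running containers directly. The following is a sketch in the same spirit as the audit scripts later in this guide, assuming Docker CLI access on the control plane node and RKE's default container names:

```bash
# Illustrative only: print the arguments the kube-apiserver container was started with.
docker inspect kube-apiserver | jq -r '.[].Args[]'
```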
- -#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. - -#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). For example, - -``` bash -chmod 700 /var/lib/etcd -``` - -**Audit Script:** 1.1.11.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a -``` - -**Audit Execution:** - -``` -./1.1.11.sh etcd -``` - -**Expected result**: - -``` -'700' is equal to '700' -``` - -#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) - -**Result:** PASS - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, -from the below command: - -``` bash -ps -ef | grep etcd -``` - -Run the below command (based on the etcd data directory found above). -For example, -``` bash -chown etcd:etcd /var/lib/etcd -``` - -**Audit Script:** 1.1.12.sh - -``` -#!/bin/bash -e - -etcd_bin=${1} - -test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') - -docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G -``` - -**Audit Execution:** - -``` -./1.1.12.sh etcd -``` - -**Expected result**: - -``` -'etcd:etcd' is present -``` - -#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. - -#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. -We recommend that this `kube_config_cluster.yml` file be kept in secure store. 
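As a practical follow-up to controls 1.1.13 and 1.1.14, the kubeconfig that RKE writes on the workstation can be protected with ordinary file permissions. A minimal sketch, assuming the default `kube_config_cluster.yml` name and that the file sits in the current directory:

```bash
# Restrict the RKE-generated kubeconfig on the machine where `rke up` was run.
chmod 600 kube_config_cluster.yml
chown "$(id -un)":"$(id -gn)" kube_config_cluster.yml
```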
- -#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. - -#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. - -#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chown -R root:root /etc/kubernetes/ssl -``` - -**Audit:** - -``` -stat -c %U:%G /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, - -``` bash -chmod -R 644 /etc/kubernetes/ssl -``` - -**Audit Script:** check_files_permissions.sh - -``` -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit -``` - -**Audit Execution:** - -``` -./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' -``` - -**Expected result**: - -``` -'true' is present -``` - -#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, - -``` bash -chmod -R 600 /etc/kubernetes/ssl/certs/serverca -``` - -**Audit Script:** 1.1.21.sh - -``` -#!/bin/bash -e -check_dir=${1:-/etc/kubernetes/ssl} - -for file in $(find ${check_dir} -name "*key.pem"); do - file_permission=$(stat -c %a ${file}) - if [[ "${file_permission}" == "600" ]]; then - continue - else - echo "FAIL: ${file} ${file_permission}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./1.1.21.sh /etc/kubernetes/ssl -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 1.2 API Server - -#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--basic-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--basic-auth-file' is not present -``` - -#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--token-auth-file=` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--token-auth-file' is not present -``` - -#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the `--kubelet-https` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-https' is present OR '--kubelet-https' is not present -``` - -#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -kubelet client certificate and key parameters as below. - -``` bash ---kubelet-client-certificate= ---kubelet-client-key= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the -`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. 
-`--kubelet-certificate-authority=` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--kubelet-certificate-authority' is present -``` - -#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. -One such example could be as below. - -``` bash ---authorization-mode=RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' not have 'AlwaysAllow' -``` - -#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'Node' -``` - -#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, -for example: - -``` bash ---authorization-mode=Node,RBAC -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'Node,RBAC' has 'RBAC' -``` - -#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a -value that does not include `AlwaysAdmit`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and ensure that the `--disable-admission-plugins` parameter is set to a -value that does not include `ServiceAccount`. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present -``` - -#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--disable-admission-plugins` parameter to -ensure it does not include `NamespaceLifecycle`. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present -``` - -#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `PodSecurityPolicy`: - -``` bash ---enable-admission-plugins=...,PodSecurityPolicy,... -``` - -Then restart the API Server. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--enable-admission-plugins` parameter to a -value that includes `NodeRestriction`. - -``` bash ---enable-admission-plugins=...,NodeRestriction,... -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and remove the `--insecure-bind-address` parameter. 
- -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--insecure-bind-address' is not present -``` - -#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---insecure-port=0 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and either remove the `--secure-port` parameter or -set it to a different **(non-zero)** desired port. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -6443 is greater than 0 OR '--secure-port' is not present -``` - -#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-path` parameter to a suitable path and -file where you would like audit logs to be written, for example: - -``` bash ---audit-log-path=/var/log/apiserver/audit.log -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--audit-log-path' is present -``` - -#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: - -``` bash ---audit-log-maxage=30 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -30 is greater or equal to 30 -``` - -#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate -value. - -``` bash ---audit-log-maxbackup=10 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -10 is greater or equal to 10 -``` - -#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. 
-For example, to set it as `100` **MB**: - -``` bash ---audit-log-maxsize=100 -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -100 is greater or equal to 100 -``` - -#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -and set the below parameter as appropriate and if needed. -For example, - -``` bash ---request-timeout=300s -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--request-timeout' is not present OR '--request-timeout' is present -``` - -#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the below parameter. - -``` bash ---service-account-lookup=true -``` - -Alternatively, you can delete the `--service-account-lookup` parameter from this file so -that the default takes effect. - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the `--service-account-key-file` parameter -to the public key file for service accounts: - -``` bash ---service-account-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-key-file' is present -``` - -#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the **etcd** certificate and **key** file parameters. - -``` bash ---etcd-certfile= ---etcd-keyfile= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` -on the master node and set the TLS certificate and private key file parameters. - -``` bash ---tls-cert-file= ---tls-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected result**: - -``` -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml`
-on the master node and set the client certificate authority file.
-
-``` bash
---client-ca-file=
-```
-
-**Audit:**
-
-```
-/bin/ps -ef | grep kube-apiserver | grep -v grep
-```
-
-**Expected result**:
-
-```
-'--client-ca-file' is present
-```
-
-#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml`
-on the master node and set the etcd certificate authority file parameter.
-
-``` bash
---etcd-cafile=
-```
-
-**Audit:**
-
-```
-/bin/ps -ef | grep kube-apiserver | grep -v grep
-```
-
-**Expected result**:
-
-```
-'--etcd-cafile' is present
-```
-
-#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Follow the Kubernetes documentation and configure an `EncryptionConfig` file.
-Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml`
-on the master node and set the `--encryption-provider-config` parameter to the path of that file:
-
-``` bash
---encryption-provider-config=
-```
-
-**Audit:**
-
-```
-/bin/ps -ef | grep kube-apiserver | grep -v grep
-```
-
-**Expected result**:
-
-```
-'--encryption-provider-config' is present
-```
-
-#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Follow the Kubernetes documentation and configure an `EncryptionConfig` file.
-In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider.
-
-**Audit Script:** 1.2.34.sh
-
-```
-#!/bin/bash -e
-
-check_file=${1}
-
-# Pass if the encryption configuration uses an acceptable provider.
-if grep -q -E 'aescbc|kms|secretbox' ${check_file}; then
-  echo "--pass"
-  exit 0
-else
-  echo "fail: no acceptable encryption provider found in ${check_file}"
-  exit 1
-fi
-```
-
-**Audit Execution:**
-
-```
-./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml
-```
-
-**Expected result**:
-
-```
-'--pass' is present
-```
-
-### 1.3 Controller Manager
-
-#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
-on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold,
-for example:
-
-``` bash
---terminated-pod-gc-threshold=10
-```
-
-**Audit:**
-
-```
-/bin/ps -ef | grep kube-controller-manager | grep -v grep
-```
-
-**Expected result**:
-
-```
-'--terminated-pod-gc-threshold' is present
-```
-
-#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
-on the master node and set the below parameter.
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node to set the below parameter. - -``` bash ---use-service-account-credentials=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'true' is not equal to 'false' -``` - -#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--service-account-private-key-file` parameter -to the private key file for service accounts. - -``` bash ---service-account-private-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--service-account-private-key-file' is present -``` - -#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. - -``` bash ---root-ca-file= -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--root-ca-file' is present -``` - -#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' -``` - -#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' argument is set to 127.0.0.1 -``` - -### 1.4 Scheduler - -#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file -on the master node and set the below parameter. 
- -``` bash ---profiling=false -``` - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'false' is equal to 'false' -``` - -#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` -on the master node and ensure the correct value for the `--bind-address` parameter. - -**Audit:** - -``` -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected result**: - -``` -'--bind-address' argument is set to 127.0.0.1 -``` - -## 2 Etcd Node Configuration -### 2 Etcd Node Configuration Files - -#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` -on the master node and set the below parameters. - -``` bash ---cert-file= ---key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--cert-file' is present AND '--key-file' is present -``` - -#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---client-cert-auth="true" -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--auto-tls` parameter or set it to `false`. - -``` bash - --auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the -master node and set the below parameters. - -``` bash ---peer-client-file= ---peer-key-file= -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and set the below parameter. - -``` bash ---peer-client-cert-auth=true -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master -node and either remove the `--peer-auto-tls` parameter or set it to `false`. 
- -``` bash ---peer-auto-tls=false -``` - -**Audit:** - -``` -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected result**: - -``` -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -## 3 Control Plane Configuration -### 3.2 Logging - -#### 3.2.1 Ensure that a minimal audit policy is created (Scored) - -**Result:** PASS - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit Script:** 3.2.1.sh - -``` -#!/bin/bash -e - -api_server_bin=${1} - -/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep -``` - -**Audit Execution:** - -``` -./3.2.1.sh kube-apiserver -``` - -**Expected result**: - -``` -'--audit-policy-file' is present -``` - -## 4 Worker Node Security Configuration -### 4.1 Worker Node Configuration Files - -#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected result**: - -``` -'root:root' is present -``` - -#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, - -``` bash -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml -``` - -**Audit:** - -``` -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected result**: - -``` -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) - -**Result:** PASS - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. 
-For example,
-
-``` bash
-chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml
-```
-
-**Audit:**
-
-```
-/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
-```
-
-**Expected result**:
-
-```
-'root:root' is equal to 'root:root'
-```
-
-#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Run the following command to modify the permissions of the file referenced by `--client-ca-file`.
-For example,
-
-``` bash
-chmod 644 /etc/kubernetes/ssl/kube-ca.pem
-```
-
-**Audit:**
-
-```
-stat -c %a /etc/kubernetes/ssl/kube-ca.pem
-```
-
-**Expected result**:
-
-```
-'644' is equal to '644' OR '640' is present OR '600' is present
-```
-
-#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-Run the following command to modify the ownership of the file referenced by `--client-ca-file`.
-For example,
-
-``` bash
-chown root:root /etc/kubernetes/ssl/kube-ca.pem
-```
-
-**Audit:**
-
-```
-/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi'
-```
-
-**Expected result**:
-
-```
-'root:root' is equal to 'root:root'
-```
-
-#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time.
-
-#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored)
-
-**Result:** Not Applicable
-
-**Remediation:**
-RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time.
-
-### 4.2 Kubelet
-
-#### 4.2.1 Ensure that the `--anonymous-auth` argument is set to `false` (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
-`false`.
-If using executable arguments, edit the kubelet service file
-`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and
-set the below parameter in the `KUBELET_SYSTEM_PODS_ARGS` variable.
-
-``` bash
---anonymous-auth=false
-```
-
-Based on your system, restart the kubelet service. For example:
-
-``` bash
-systemctl daemon-reload
-systemctl restart kubelet.service
-```
-
-**Audit:**
-
-```
-/bin/ps -fC kubelet
-```
-
-**Audit Config:**
-
-```
-/bin/cat /var/lib/kubelet/config.yaml
-```
-
-**Expected result**:
-
-```
-'false' is equal to 'false'
-```
-
-#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored)
-
-**Result:** PASS
-
-**Remediation:**
-If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`. If
-using executable arguments, edit the kubelet service file
-`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and
-set the below parameter in the `KUBELET_AUTHZ_ARGS` variable.
-
-``` bash
---authorization-mode=Webhook
-```
-
-Based on your system, restart the kubelet service.
For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'Webhook' not have 'AlwaysAllow' -``` - -#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_AUTHZ_ARGS` variable. - -``` bash ---client-ca-file= -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--client-ca-file' is present -``` - -#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---read-only-port=0 -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` - -#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a -value other than `0`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---streaming-connection-idle-timeout=5m -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. - -``` bash ---protect-kernel-defaults=true -``` - -Based on your system, restart the kubelet service. 
For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove the `--make-iptables-util-chains` argument from the -`KUBELET_SYSTEM_PODS_ARGS` variable. -Based on your system, restart the kubelet service. For example: - -```bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' OR '--make-iptables-util-chains' is not present -``` - -#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) - -**Result:** Not Applicable - -**Remediation:** -RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. - -#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) - -**Result:** PASS - -**Remediation:** -If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and -remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` -variable. -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'--rotate-certificates' is present OR '--rotate-certificates' is not present -``` - -#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) - -**Result:** PASS - -**Remediation:** -Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` -on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. - -``` bash ---feature-gates=RotateKubeletServerCertificate=true -``` - -Based on your system, restart the kubelet service. For example: - -``` bash -systemctl daemon-reload -systemctl restart kubelet.service -``` - -**Audit:** - -``` -/bin/ps -fC kubelet -``` - -**Audit Config:** - -``` -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected result**: - -``` -'true' is equal to 'true' -``` - -## 5 Kubernetes Policies -### 5.1 RBAC and Service Accounts - -#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) - -**Result:** PASS - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. 
-Modify the configuration of each default service account to include this value - -``` bash -automountServiceAccountToken: false -``` - -**Audit Script:** 5.1.5.sh - -``` -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" - -if [[ "${accounts}" != "" ]]; then - echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" - exit 1 -fi - -default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" - -if [[ "${default_binding}" -gt 0 ]]; then - echo "fail: default service accounts have non default bindings" - exit 1 -fi - -echo "--pass" -exit 0 -``` - -**Audit Execution:** - -``` -./5.1.5.sh -``` - -**Expected result**: - -``` -'--pass' is present -``` - -### 5.2 Pod Security Policies - -#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostPID` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostIPC` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.hostNetwork` field is omitted or set to `false`. - -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) - -**Result:** PASS - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. 
- -**Audit:** - -``` -kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected result**: - -``` -1 is greater than 0 -``` - -### 5.3 Network Policies and CNI - -#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) - -**Result:** PASS - -**Remediation:** -Follow the documentation and create `NetworkPolicy` objects as you need them. - -**Audit Script:** 5.3.2.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} - -kubectl version > /dev/null -if [ $? -ne 0 ]; then - echo "fail: kubectl failed" - exit 1 -fi - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "fail: ${namespace}" - exit 1 - fi -done - -echo "pass" -``` - -**Audit Execution:** - -``` -./5.3.2.sh -``` - -**Expected result**: - -``` -'pass' is present -``` - -### 5.6 General Policies - -#### 5.6.4 The default namespace should not be used (Scored) - -**Result:** PASS - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** 5.6.4.sh - -``` -#!/bin/bash -e - -export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} - -kubectl version > /dev/null -if [[ $? -gt 0 ]]; then - echo "fail: kubectl failed" - exit 1 -fi - -default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) - -echo "--count=${default_resources}" -``` - -**Audit Execution:** - -``` -./5.6.4.sh -``` - -**Expected result**: - -``` -'0' is equal to '0' -``` diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md deleted file mode 100644 index 491aec9c08..0000000000 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.5-hardening-2.5/_index.md +++ /dev/null @@ -1,724 +0,0 @@ ---- -title: Hardening Guide with CIS 1.5 Benchmark -weight: 200 -aliases: - - /rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/ ---- - -This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. 
-
-This hardening guide is intended to be used with RKE clusters and is associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
-
-Rancher Version | CIS Benchmark Version | Kubernetes Version
-----------------|-----------------------|-------------------
-Rancher v2.5 | Benchmark v1.5 | Kubernetes 1.15
-
-[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.5.pdf)
-
-### Overview
-
-This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.5 with Kubernetes v1.15 or provisioning a RKE cluster with Kubernetes 1.15 to be used within Rancher v2.5. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
-
-For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5]({{< baseurl >}}/rancher/v2.5/en/security/rancher-2.5/1.5-benchmark-2.5/).
-
-#### Known Issues
-
-- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
-- When setting `default_pod_security_policy_template_id:` to `restricted`, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 check 5.1.5 requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments.
-
-### Configure Kernel Runtime Parameters
-
-The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
-
-```
-vm.overcommit_memory=1
-vm.panic_on_oom=0
-kernel.panic=10
-kernel.panic_on_oops=1
-kernel.keys.root_maxbytes=25000000
-```
-
-Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
-
-### Configure `etcd` user and group
-A user account and group for the **etcd** service must be set up before installing RKE. The **uid** and **gid** of the **etcd** user are used in the RKE **config.yml** to set the proper permissions for files and directories during installation.
-
-#### Create `etcd` user and group
-To create the **etcd** user and group, run the following console commands.
-
-The commands below use `52034` for the **uid** and **gid** as an example. Any valid unused **uid** or **gid** could also be used in lieu of `52034`.
-
-```
-groupadd --gid 52034 etcd
-useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
-```
-
-Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
-
-``` yaml
-services:
-  etcd:
-    gid: 52034
-    uid: 52034
-```
-
-#### Set `automountServiceAccountToken` to `false` for `default` service accounts
-Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account.
The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -For each namespace including **default** and **kube-system** on a standard RKE install the **default** service account must include this value: - -``` -automountServiceAccountToken: false -``` - -Save the following yaml to a file called `account_update.yaml` - -``` yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default -automountServiceAccountToken: false -``` - -Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" -done -``` - -### Ensure that all Namespaces have Network Policies defined - -Running different applications on the same Kubernetes cluster creates a risk of one -compromised application attacking a neighboring application. Network segmentation is -important to ensure that containers can communicate only with those they are supposed -to. A network policy is a specification of how selections of pods are allowed to -communicate with each other and other network endpoints. - -Network Policies are namespace scoped. When a network policy is introduced to a given -namespace, all traffic not allowed by the policy is denied. However, if there are no network -policies in a namespace all traffic will be allowed into and out of the pods in that -namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. -This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. -Additional information about CNI providers can be found -[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) - -Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a -**permissive** example is provide below. If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -``` yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -``` -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` -Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. 
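-
-After the script completes, a quick spot check can confirm that every namespace now carries at least one policy. This is only an informal sanity check, assuming the script above was used unchanged; the scored verification is covered by check 5.3.2 of the accompanying self-assessment guide.
-
-``` bash
-# Every namespace should now list at least one NetworkPolicy (e.g. default-allow-all).
-kubectl get networkpolicy --all-namespaces
-```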
- -### Reference Hardened RKE `cluster.yml` configuration - -The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes - - -``` yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -kubernetes_version: "v1.15.9-rancher1-1" -enable_network_policy: true -default_pod_security_policy_template_id: "restricted" -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: -services: - etcd: - uid: 52034 - gid: 52034 - kube-api: - pod_security_policy: true - secrets_encryption_config: - enabled: true - audit_log: - enabled: true - admission_configuration: - event_rate_limit: - enabled: true - kube-controller: - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - kubelet: - generate_serving_certificate: true - extra_args: - feature-gates: "RotateKubeletServerCertificate=true" - protect-kernel-defaults: "true" - tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" - extra_binds: [] - extra_env: [] - cluster_domain: "" - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - 
name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] -cluster_name: "" -prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} -restore: - restore: false - snapshot_name: "" -dns: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.5/en/installation) for additional installation and RKE Template details. 
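-
-After a downstream cluster has been provisioned from the template below, a few of the hardened settings can be spot-checked with `kubectl`. The commands are only an illustrative sketch and are not part of the reference template; they assume a kubeconfig pointing at the new cluster and simply confirm that the `restricted` PodSecurityPolicy and the default PSP role bindings defined in the `addons` section exist.
-
-```bash
-# Spot-check objects created by the addons section of the hardened template.
-kubectl get podsecuritypolicy restricted
-kubectl -n ingress-nginx get rolebinding default-psp-rolebinding
-kubectl -n cattle-system get rolebinding default-psp-rolebinding
-```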
- -``` yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 30 - addons: |- - --- - apiVersion: v1 - kind: Namespace - metadata: - name: ingress-nginx - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: ingress-nginx - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: ingress-nginx - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: Namespace - metadata: - name: cattle-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: default-psp-role - namespace: cattle-system - rules: - - apiGroups: - - extensions - resourceNames: - - default-psp - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: default-psp-rolebinding - namespace: cattle-system - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: default-psp-role - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:authenticated - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: tiller - namespace: kube-system - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: tiller - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin - subjects: - - kind: ServiceAccount - name: tiller - namespace: kube-system - ignore_docker_version: true - kubernetes_version: v1.15.9-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To 
specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - bind-address: 127.0.0.1 - address: 127.0.0.1 - feature-gates: RotateKubeletServerCertificate=true - profiling: 'false' - terminated-pod-gc-threshold: '1000' - kubelet: - extra_args: - anonymous-auth: 'false' - event-qps: '0' - feature-gates: RotateKubeletServerCertificate=true - make-iptables-util-chains: 'true' - protect-kernel-defaults: 'true' - streaming-connection-idle-timeout: 1800s - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - bind-address: 127.0.0.1 - address: 127.0.0.1 - profiling: 'false' - ssh_agent_auth: false -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
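-
-Once an instance has booted with the cloud-config below, the settings it applies can be read back from a shell on the node. This is a minimal verification sketch for illustration only; it checks the sysctl values, the `etcd` service account, and the Docker installation configured by the `runcmd` and `write_files` sections.
-
-```bash
-# Verify the kernel settings, etcd account, and Docker install applied by cloud-init.
-sysctl vm.overcommit_memory kernel.panic kernel.panic_on_oops
-id etcd            # expect uid=52034(etcd) gid=52034(etcd)
-docker version     # confirms the Docker install script completed
-```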
- -``` yaml -#cloud-config -packages: - - curl - - jq -runcmd: - - sysctl -w vm.overcommit_memory=1 - - sysctl -w kernel.panic=10 - - sysctl -w kernel.panic_on_oops=1 - - curl https://releases.rancher.com/install-docker/18.09.sh | sh - - usermod -aG docker ubuntu - - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done - - addgroup --gid 52034 etcd - - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -write_files: - - path: /etc/sysctl.d/kubelet.conf - owner: root:root - permissions: "0644" - content: | - vm.overcommit_memory=1 - kernel.panic=10 - kernel.panic_on_oops=1 -``` diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md deleted file mode 100644 index e0dc1e45c5..0000000000 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md +++ /dev/null @@ -1,3319 +0,0 @@ ---- -title: CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4 -weight: 101 -aliases: - - /rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/ ---- - -### CIS 1.6 Kubernetes Benchmark - Rancher v2.5.4 with Kubernetes v1.18 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.6_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.5.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS 1.6 Benchmark | Rancher v2.5.4 | CIS 1.6| Kubernetes v1.18 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark 1.6. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. 
The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -### Controls - -## 1.1 Etcd Node Configuration Files -### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, -chmod 700 /var/lib/etcd - - -**Audit:** - -```bash -stat -c %a /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'700' is equal to '700' -``` - -**Returned Value**: - -```console -700 - -``` -### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). -For example, chown etcd:etcd /var/lib/etcd - -A system service account is required for etcd data directory ownership. -Refer to Rancher's hardening guide for more details on how to configure this ownership. - - -**Audit:** - -```bash -stat -c %U:%G /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'etcd:etcd' is present -``` - -**Returned Value**: - -```console -etcd:etcd - -``` -### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown -R root:root /etc/kubernetes/pki/ - - -**Audit:** - -```bash -check_files_owner_in_dir.sh /node/etc/kubernetes/ssl -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the owner is set to root:root for -# the given directory and all the files in it -# -# inputs: -# $1 = /full/path/to/directory -# -# outputs: -# true/false - -INPUT_DIR=$1 - -if [[ "${INPUT_DIR}" == "" ]]; then - echo "false" - exit -fi - -if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then - echo "false" - exit -fi - -statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) -while read -r statInfoLine; do - f=$(echo ${statInfoLine} | cut -d' ' -f1) - p=$(echo ${statInfoLine} | cut -d' ' -f2) - - if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then - if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "root:root" ]]; then - echo "false" - exit - fi - fi -done <<< "${statInfoLines}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod -R 644 /etc/kubernetes/pki/*.crt - - -**Audit:** - -```bash -check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod -R 600 /etc/kubernetes/ssl/*key.pem - - -**Audit:** - -```bash -check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem 600 -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` -**Returned Value**: - -```console -true - -``` -### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. 
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi' -``` - - -### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml; fi' -``` - - -### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' -``` - - -### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' -``` - - -### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-scheduler.yaml; fi' -``` - - -### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml; fi' -``` - - -### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/etcd.yaml; fi' -``` - - -### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. 
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c %U:%G /etc/kubernetes/manifests/etcd.yaml; fi' -``` - - -### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 - - -**Audit:** - -```bash -stat -c permissions=%a -``` - - -### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root - - -**Audit:** - -```bash -stat -c %U:%G -``` - - -### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi' -``` - - -### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi' -``` - - -### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c permissions=%a scheduler; fi' -``` - - -### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e scheduler; then stat -c %U:%G scheduler; fi' -``` - - -### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e controllermanager; then stat -c permissions=%a controllermanager; fi' -``` - - -### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. 
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e controllermanager; then stat -c %U:%G controllermanager; fi' -``` - - -## 1.2 API Server -### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---anonymous-auth=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --basic-auth-file= parameter. 
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--basic-auth-file' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --token-auth-file= parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--token-auth-file' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --kubelet-https parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-https' is not present OR '--kubelet-https' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the -kubelet client certificate and key parameters as below. ---kubelet-client-certificate= ---kubelet-client-key= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the ---kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. ---kubelet-certificate-authority= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-certificate-authority' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. -One such example could be as below. ---authorization-mode=RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console - 'Node,RBAC' not have 'AlwaysAllow' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes Node. ---authorization-mode=Node,RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'Node' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes RBAC, -for example: ---authorization-mode=Node,RBAC - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'RBAC' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set the desired limits in a configuration file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameters. ---enable-admission-plugins=...,EventRateLimit,... ---admission-control-config-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --enable-admission-plugins parameter, or set it to a -value that does not include AlwaysAdmit. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console - 'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -AlwaysPullImages. ---enable-admission-plugins=...,AlwaysPullImages,... - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -SecurityContextDeny, unless PodSecurityPolicy is already in place. ---enable-admission-plugins=...,SecurityContextDeny,... 
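-
-On RKE-provisioned clusters the kube-apiserver flags shown in the audit output above are generated from cluster.yml, so these two plugins are usually handled there rather than by editing a static pod manifest. The sketch below is illustrative only; the field names follow RKE's cluster.yml conventions and are not taken from this benchmark output.
-
-```yaml
-# Illustrative sketch only. Assumes an RKE-provisioned cluster whose
-# kube-apiserver arguments are rendered from cluster.yml.
-services:
-  kube-api:
-    # Enables the AlwaysPullImages admission plugin (check 1.2.12).
-    always_pull_images: true
-    # With PodSecurityPolicy enforced, SecurityContextDeny (check 1.2.13)
-    # is generally redundant and can be left unset.
-    pod_security_policy: true
-```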
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and ensure that the --disable-admission-plugins parameter is set to a -value that does not include ServiceAccount. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the 
master node and set the --disable-admission-plugins parameter to -ensure it does not include NamespaceLifecycle. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes PodSecurityPolicy: ---enable-admission-plugins=...,PodSecurityPolicy,... -Then restart the API Server. 
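-
-The PodSecurityPolicy objects referenced by this check are ordinary cluster resources. The manifest below is an intentionally restrictive, illustrative example and is not taken from the audited cluster; note that the policy/v1beta1 PodSecurityPolicy API was removed in Kubernetes v1.25.
-
-```yaml
-# Illustrative example of a restrictive PodSecurityPolicy (policy/v1beta1,
-# removed in Kubernetes v1.25). Adjust the rules to match your workloads.
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: restricted-example
-spec:
-  privileged: false
-  allowPrivilegeEscalation: false
-  requiredDropCapabilities:
-    - ALL
-  runAsUser:
-    rule: MustRunAsNonRoot
-  seLinux:
-    rule: RunAsAny
-  supplementalGroups:
-    rule: MustRunAs
-    ranges:
-      - min: 1
-        max: 65535
-  fsGroup:
-    rule: MustRunAs
-    ranges:
-      - min: 1
-        max: 65535
-  readOnlyRootFilesystem: false
-  volumes:
-    - configMap
-    - emptyDir
-    - projected
-    - secret
-    - downwardAPI
-    - persistentVolumeClaim
-```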
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes NodeRestriction. ---enable-admission-plugins=...,NodeRestriction,... 
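-
-NodeRestriction only takes effect when the Node authorizer is also enabled and kubelets authenticate with their own node credentials; both settings are visible in the audited command line (--authorization-mode=Node,RBAC). The sketch below is illustrative only and assumes an RKE-provisioned cluster where these arguments are supplied through cluster.yml.
-
-```yaml
-# Illustrative sketch only. Pins the Node authorizer together with the
-# NodeRestriction admission plugin via RKE's cluster.yml extra_args.
-services:
-  kube-api:
-    extra_args:
-      authorization-mode: "Node,RBAC"
-      enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit"
-```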
- - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --insecure-bind-address parameter. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--insecure-bind-address' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---insecure-port=0 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'0' is equal to '0' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --secure-port parameter or -set it to a different (non-zero) desired port. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -6443 is greater than 0 OR '--secure-port' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.21 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-path parameter to a suitable path and -file where you would like audit logs to be written, for example: ---audit-log-path=/var/log/apiserver/audit.log - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-log-path' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: ---audit-log-maxage=30 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -30 is greater or equal to 30 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate -value. ---audit-log-maxbackup=10 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -10 is greater or equal to 10 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. -For example, to set it as 100 MB: ---audit-log-maxsize=100 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -100 is greater or equal to 100 -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameter as appropriate and if needed. -For example, ---request-timeout=300s - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--request-timeout' is not present OR '--request-timeout' is not present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---service-account-lookup=true -Alternatively, you can delete the --service-account-lookup parameter from this file so -that the default takes effect. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --service-account-key-file parameter -to the public key file for service accounts: ---service-account-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-key-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate and key file parameters. ---etcd-certfile= ---etcd-keyfile= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the TLS certificate and private key file parameters. ---tls-cert-file= ---tls-private-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the client certificate authority file. ---client-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--client-ca-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate authority file parameter. ---etcd-cafile= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-cafile' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--encryption-provider-config' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -In this file, choose aescbc, kms or secretbox as the encryption provider. - - -**Audit:** - -```bash -check_encryption_provider_config.sh aescbc kms secretbox -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -# This script is used to check the encrption provider config is set to aesbc -# -# outputs: -# true/false - -# TODO: Figure out the file location from the kube-apiserver commandline args -ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" - -if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then - echo "false" - exit -fi - -for provider in "$@" -do - if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then - echo "true" - exit - fi -done - -echo "false" -exit - -``` -**Returned Value**: - -```console - - aescbc: -true - -``` -### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM -_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM -_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM -_SHA384 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - - -## 1.3 Controller Manager -### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, -for example: ---terminated-pod-gc-threshold=10 - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--terminated-pod-gc-threshold' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.2 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node to set the below parameter. ---use-service-account-credentials=true - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'true' is not equal to 'false' -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --service-account-private-key-file parameter -to the private key file for service accounts. ---service-account-private-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-private-key-file' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --root-ca-file parameter to the certificate bundle file`. ---root-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--root-ca-file' is present -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) - -**Result:** notApplicable - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. ---feature-gates=RotateKubeletServerCertificate=true - -Cluster provisioned by RKE handles certificate rotation directly through RKE. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - - -### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and ensure the correct value for the --bind-address parameter - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--bind-address' argument is set to 127.0.0.1 -``` - -**Returned Value**: - -```console -root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --bind-address=127.0.0.1 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=127.0.0.1 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true - -``` -## 1.4 Scheduler -### 1.4.1 Ensure that the --profiling argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file -on the master node and set the below parameter. ---profiling=false - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=0.0.0.0 - -``` -### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml -on the master node and ensure the correct value for the --bind-address parameter - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'--bind-address' argument is set to 127.0.0.1 -``` - -**Returned Value**: - -```console -root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=127.0.0.1 --bind-address=127.0.0.1 - -``` -## 2 Etcd Node Configuration Files -### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml -on the master node and set the below parameters. ---cert-file= ---key-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--cert-file' is present AND '--key-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---client-cert-auth="true" - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameters. ---peer-client-file= ---peer-key-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---peer-client-cert-auth=true - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --peer-auto-tls parameter or set it to false. ---peer-auto-tls=false - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) - -**Result:** pass - -**Remediation:** -[Manual test] -Follow the etcd documentation and create a dedicated certificate authority setup for the -etcd service. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameter. ---trusted-ca-file= - - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--trusted-ca-file' is present -``` - -**Returned Value**: - -```console -etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem -root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h -root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User -root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json - -``` -## 3.1 Authentication and Authorization -### 3.1.1 Client certificate authentication should not be used for users (Manual) - -**Result:** warn - -**Remediation:** -Alternative mechanisms provided by Kubernetes such as the use of OIDC should be -implemented in place of client certificates. - - -**Audit:** - -```bash - -``` - - -## 3.2 Logging -### 3.2.1 Ensure that a minimal audit policy is created (Automated) - -**Result:** pass - -**Remediation:** -Create an audit policy file for your cluster. - - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-policy-file' is present -``` - -**Returned Value**: - -```console -root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User - -``` -### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) - -**Result:** warn - -**Remediation:** -Consider modification of the audit policy in use on the cluster to include these items, at a -minimum. - - -**Audit:** - -```bash - -``` - - -## 4.1 Worker Node Configuration Files -### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' -``` - - -### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' -``` - - -### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 $proykubeconfig - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -**Returned Value**: - -```console -600 - -``` -### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present -``` - -### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. 
-For example, -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root - -``` -### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) - -**Result:** pass - -**Remediation:** -Run the following command to modify the file permissions of the ---client-ca-file chmod 644 - - -**Audit:** - -```bash -check_cafile_permissions.sh -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi - -``` -### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) - -**Result:** pass - -**Remediation:** -Run the following command to modify the ownership of the --client-ca-file. -chown root:root - - -**Audit:** - -```bash -check_cafile_ownership.sh -``` - -**Expected Result**: - -```console -'root:root' is not present -``` - -**Audit Script:** -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi - -``` -### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) - -**Result:** notApplicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chmod 644 /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. - - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c permissions=%a /var/lib/kubelet/config.yaml; fi' -``` - - -### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) - -**Result:** notApplicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chown root:root /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. 
- - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi' -``` - - -## 4.2 Kubelet -### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to -false. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---anonymous-auth=false -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If -using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---authorization-mode=Webhook -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---client-ca-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set readOnlyPort to 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---read-only-port=0 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a -value other than 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---streaming-connection-idle-timeout=5m -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD -root 5103 5086 7 16:16 ? 00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf - -``` -### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set protectKernelDefaults: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---protect-kernel-defaults=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove the --make-iptables-util-chains argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) - -**Result:** notApplicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and remove the --hostname-override argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - - -### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set tlsCertFile to the location -of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile -to the location of the corresponding private key file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameters in KUBELET_CERTIFICATE_ARGS variable. ---tls-cert-file= ---tls-private-key-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present AND '' is not present -``` - -### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to add the line rotateCertificates: true or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS -variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'--rotate-certificates' is not present OR '--rotate-certificates' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD -root 5103 5086 6 16:16 ? 
00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf - -``` -### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) - -**Result:** notApplicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. ---feature-gates=RotateKubeletServerCertificate=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE handles certificate rotation directly through RKE. - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - - -### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set TLSCipherSuites: to -TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -or to a subset of these values. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the --tls-cipher-suites parameter as follows, or to a subset of these values. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Expected Result**: - -```console -'' is not present -``` - -## 5.1 RBAC and Service Accounts -### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) - -**Result:** warn - -**Remediation:** -Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and -if they need this role or if they could use a role with fewer privileges. -Where possible, first bind users to a lower privileged role and then remove the -clusterrolebinding to the cluster-admin role : -kubectl delete clusterrolebinding [name] - - -**Audit:** - -```bash - -``` - - -### 5.1.2 Minimize access to secrets (Manual) - -**Result:** warn - -**Remediation:** -Where possible, remove get, list and watch access to secret objects in the cluster. - - -**Audit:** - -```bash - -``` - - -### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) - -**Result:** warn - -**Remediation:** -Where possible replace any use of wildcards in clusterroles and roles with specific -objects or actions. - - -**Audit:** - -```bash - -``` - - -### 5.1.4 Minimize access to create pods (Manual) - -**Result:** warn - -**Remediation:** -Where possible, remove create access to pod objects in the cluster. - - -**Audit:** - -```bash - -``` - - -### 5.1.5 Ensure that default service accounts are not actively used. (Automated) - -**Result:** pass - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value -automountServiceAccountToken: false - - -**Audit:** - -```bash -check_for_default_sa.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) -if [[ ${count_sa} -gt 0 ]]; then - echo "false" - exit -fi - -for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") -do - for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') - do - read kind name <<<$(IFS=","; echo $result) - resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) - if [[ ${resource_count} -gt 0 ]]; then - echo "false" - exit - fi - done -done - - -echo "true" -``` -**Returned Value**: - -```console -true - -``` -### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) - -**Result:** warn - -**Remediation:** -Modify the definition of pods and service accounts which do not need to mount service -account tokens to disable it. - - -**Audit:** - -```bash - -``` - - -## 5.2 Pod Security Policies -### 5.2.1 Minimize the admission of privileged containers (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that -the .spec.privileged field is omitted or set to false. - - -**Audit:** - -```bash - -``` - - -### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostPID field is omitted or set to false. 
- - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostIPC field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostNetwork field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.allowPrivilegeEscalation field is omitted or set to false. - - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 - -``` -### 5.2.6 Minimize the admission of root containers (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of -UIDs not including 0. - - -**Audit:** - -```bash - -``` - - -### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - - -**Audit:** - -```bash - -``` - - -### 5.2.8 Minimize the admission of containers with added capabilities (Manual) - -**Result:** warn - -**Remediation:** -Ensure that allowedCapabilities is not present in PSPs for the cluster unless -it is set to an empty array. - - -**Audit:** - -```bash - -``` - - -### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) - -**Result:** warn - -**Remediation:** -Review the use of capabilites in applications runnning on your cluster. Where a namespace -contains applicaions which do not require any Linux capabities to operate consider adding -a PSP which forbids the admission of containers which do not drop all capabilities. 
- - -**Audit:** - -```bash - -``` - - -## 5.3 Network Policies and CNI -### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) - -**Result:** warn - -**Remediation:** -If the CNI plugin in use does not support network policies, consideration should be given to -making use of a different plugin, or finding an alternate mechanism for restricting traffic -in the Kubernetes cluster. - - -**Audit:** - -```bash - -``` - - -### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) - -**Result:** pass - -**Remediation:** -Follow the documentation and create NetworkPolicy objects as you need them. - - -**Audit:** - -```bash -check_for_network_policies.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [[ ${policy_count} -eq 0 ]]; then - echo "false" - exit - fi -done - -echo "true" - -``` -**Returned Value**: - -```console -true - -``` -## 5.4 Secrets Management -### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) - -**Result:** warn - -**Remediation:** -if possible, rewrite application code to read secrets from mounted secret files, rather than -from environment variables. - - -**Audit:** - -```bash - -``` - - -### 5.4.2 Consider external secret storage (Manual) - -**Result:** warn - -**Remediation:** -Refer to the secrets management options offered by your cloud provider or a third-party -secrets management solution. - - -**Audit:** - -```bash - -``` - - -## 5.5 Extensible Admission Control -### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and setup image provenance. - - -**Audit:** - -```bash - -``` - - -## 5.7 General Policies -### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) - -**Result:** warn - -**Remediation:** -Follow the documentation and create namespaces for objects in your deployment as you need -them. - - -**Audit:** - -```bash - -``` - - -### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) - -**Result:** warn - -**Remediation:** -Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you -would need to enable alpha features in the apiserver by passing "--feature- -gates=AllAlpha=true" argument. -Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS -parameter to "--feature-gates=AllAlpha=true" -KUBE_API_ARGS="--feature-gates=AllAlpha=true" -Based on your system, restart the kube-apiserver service. For example: -systemctl restart kube-apiserver.service -Use annotations to enable the docker/default seccomp profile in your pod definitions. An -example is as below: -apiVersion: v1 -kind: Pod -metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default -spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - - -**Audit:** - -```bash - -``` - - -### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and apply security contexts to your pods. 
For a -suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker -Containers. - - -**Audit:** - -```bash - -``` - - -### 5.7.4 The default namespace should not be used (Automated) - -**Result:** pass - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - - -**Audit:** - -```bash -check_for_default_ns.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Audit Script:** -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) -if [[ ${count} -gt 0 ]]; then - echo "false" - exit -fi - -echo "true" - - -``` -**Returned Value**: - -```console -true - -``` diff --git a/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md deleted file mode 100644 index d628bfd8c5..0000000000 --- a/content/rancher/v2.5/en/security/rancher-2.5/1.6-hardening-2.5/_index.md +++ /dev/null @@ -1,578 +0,0 @@ ---- -title: Hardening Guide with CIS 1.6 Benchmark -weight: 100 -aliases: - - /rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/ ---- - -This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. - -This hardening guide is intended to be used for RKE clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: - - Rancher Version | CIS Benchmark Version | Kubernetes Version -----------------|-----------------------|------------------ - Rancher v2.5.4 | Benchmark 1.6 | Kubernetes v1.18 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.6.pdf) - -### Overview - -This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.5.4 with Kubernetes v1.18 or provisioning a RKE cluster with Kubernetes v1.18 to be used within Rancher v2.5.4. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). - -For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4]({{< baseurl >}}/rancher/v2.5/en/security/rancher-2.5/1.6-benchmark-2.5/). - -#### Known Issues - -- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.6 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. -- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.6 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. 
In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments. - -When migrating Rancher from v2.4 to v2.5, note that addons were removed in the v2.5 hardening guide, so namespaces may not be created on the downstream clusters during migration. Pods may fail to run because of a missing namespace such as ingress-nginx or cattle-system. - -### Configure Kernel Runtime Parameters - -The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: - -```ini -vm.overcommit_memory=1 -vm.panic_on_oom=0 -kernel.panic=10 -kernel.panic_on_oops=1 -kernel.keys.root_maxbytes=25000000 -``` - -Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. - -### Configure `etcd` user and group -A user account and group for the **etcd** service must be set up before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. - -#### Create `etcd` user and group -To create the **etcd** user and group, run the following console commands. - -The commands below use `52034` for the **uid** and **gid** for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`. - -```bash -groupadd --gid 52034 etcd -useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` - -Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: - -```yaml -services: - etcd: - gid: 52034 - uid: 52034 -``` - -#### Set `automountServiceAccountToken` to `false` for `default` service accounts -Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. - -For each namespace, including **default** and **kube-system**, on a standard RKE install the **default** service account must include this value: - -```yaml -automountServiceAccountToken: false -``` - -Save the following yaml to a file called `account_update.yaml`. - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: default -automountServiceAccountToken: false -``` - -Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. - -```bash -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" -done -``` - -### Ensure that all Namespaces have Network Policies defined - -Running different applications on the same Kubernetes cluster creates a risk of one -compromised application attacking a neighboring application. Network segmentation is -important to ensure that containers can communicate only with those they are supposed -to. A network policy is a specification of how selections of pods are allowed to -communicate with each other and other network endpoints. - -Network Policies are namespace scoped. When a network policy is introduced to a given -namespace, all traffic not allowed by the policy is denied.
However, if there are no network -policies in a namespace, all traffic will be allowed into and out of the pods in that -namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. -This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. -Additional information about CNI providers can be found -[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) - -Once a CNI provider is enabled on a cluster, a default network policy can be applied. For reference purposes, a -**permissive** example is provided below. If you want to allow all traffic to all pods in a namespace -(even if policies are added that cause some pods to be treated as “isolated”), -you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as -`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -about network policies can be found on the Kubernetes site. - -> This `NetworkPolicy` is not recommended for production use - -```yaml ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: default-allow-all -spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress -``` - -Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to -`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. - -```bash -#!/bin/bash -e - -for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do - kubectl apply -f default-allow-all.yaml -n ${namespace} -done -``` - -Execute this script to apply the **permissive** `default-allow-all.yaml` `NetworkPolicy` to all namespaces. - -### Reference Hardened RKE `cluster.yml` configuration - -The reference `cluster.yml` is used by the RKE CLI and provides the configuration needed to achieve a hardened install -of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is -provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive, which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes - - -```yaml -# If you intend to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images.
-# https://rancher.com/docs/rke/latest/en/installation/ - -# the nodes directive is required and will vary depending on your environment -# documentation for node configuration can be found here: -# https://rancher.com/docs/rke/latest/en/config-options/nodes -nodes: [] -services: - etcd: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - external_urls: [] - ca_cert: "" - cert: "" - key: "" - path: "" - uid: 52034 - gid: 52034 - snapshot: false - retention: "" - creation: "" - backup_config: null - kube-api: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - service_cluster_ip_range: "" - service_node_port_range: "" - pod_security_policy: true - always_pull_images: false - secrets_encryption_config: - enabled: true - custom_config: null - audit_log: - enabled: true - configuration: null - admission_configuration: null - event_rate_limit: - enabled: true - configuration: null - kube-controller: - image: "" - extra_args: - feature-gates: RotateKubeletServerCertificate=true - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - cluster_cidr: "" - service_cluster_ip_range: "" - scheduler: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - kubelet: - image: "" - extra_args: - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: "true" - tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] - cluster_domain: cluster.local - infra_container_image: "" - cluster_dns_server: "" - fail_swap_on: false - generate_serving_certificate: true - kubeproxy: - image: "" - extra_args: {} - extra_binds: [] - extra_env: [] - win_extra_args: {} - win_extra_binds: [] - win_extra_env: [] -network: - plugin: "" - options: {} - mtu: 0 - node_selector: {} - update_strategy: null -authentication: - strategy: "" - sans: [] - webhook: null -addons: | - apiVersion: policy/v1beta1 - kind: PodSecurityPolicy - metadata: - name: restricted - spec: - requiredDropCapabilities: - - NET_RAW - privileged: false - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - emptyDir - - secret - - persistentVolumeClaim - - downwardAPI - - configMap - - projected - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: psp:restricted - rules: - - apiGroups: - - extensions - resourceNames: - - restricted - resources: - - podsecuritypolicies - verbs: - - use - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: psp:restricted - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: psp:restricted - subjects: - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:serviceaccounts - - apiGroup: rbac.authorization.k8s.io - kind: Group - name: 
system:authenticated - --- - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: default-allow-all - spec: - podSelector: {} - ingress: - - {} - egress: - - {} - policyTypes: - - Ingress - - Egress - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: default - automountServiceAccountToken: false -addons_include: [] -system_images: - etcd: "" - alpine: "" - nginx_proxy: "" - cert_downloader: "" - kubernetes_services_sidecar: "" - kubedns: "" - dnsmasq: "" - kubedns_sidecar: "" - kubedns_autoscaler: "" - coredns: "" - coredns_autoscaler: "" - nodelocal: "" - kubernetes: "" - flannel: "" - flannel_cni: "" - calico_node: "" - calico_cni: "" - calico_controllers: "" - calico_ctl: "" - calico_flexvol: "" - canal_node: "" - canal_cni: "" - canal_controllers: "" - canal_flannel: "" - canal_flexvol: "" - weave_node: "" - weave_cni: "" - pod_infra_container: "" - ingress: "" - ingress_backend: "" - metrics_server: "" - windows_pod_infra_container: "" -ssh_key_path: "" -ssh_cert_path: "" -ssh_agent_auth: false -authorization: - mode: "" - options: {} -ignore_docker_version: false -kubernetes_version: v1.18.12-rancher1-1 -private_registries: [] -ingress: - provider: "" - options: {} - node_selector: {} - extra_args: {} - dns_policy: "" - extra_envs: [] - extra_volumes: [] - extra_volume_mounts: [] - update_strategy: null - http_port: 0 - https_port: 0 - network_mode: "" -cluster_name: -cloud_provider: - name: "" -prefix_path: "" -win_prefix_path: "" -addon_job_timeout: 0 -bastion_host: - address: "" - port: "" - user: "" - ssh_key: "" - ssh_key_path: "" - ssh_cert: "" - ssh_cert_path: "" -monitoring: - provider: "" - options: {} - node_selector: {} - update_strategy: null - replicas: null -restore: - restore: false - snapshot_name: "" -dns: null -upgrade_strategy: - max_unavailable_worker: "" - max_unavailable_controlplane: "" - drain: null - node_drain_input: null -``` - -### Reference Hardened RKE Template configuration - -The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. -RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher -[documentaion](https://rancher.com/docs/rancher/v2.5/en/installation) for additional installation and RKE Template details. 
- -```yaml -# -# Cluster Config -# -default_pod_security_policy_template_id: restricted -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: - addon_job_timeout: 45 - ignore_docker_version: true - kubernetes_version: v1.18.12-rancher1-1 -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - mtu: 0 - plugin: canal - rotate_encryption_key: false -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: false - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: '5000' - heartbeat-interval: '500' - gid: 52034 - retention: 72h - snapshot: false - uid: 52034 - kube_api: - always_pull_images: false - audit_log: - enabled: true - event_rate_limit: - enabled: true - pod_security_policy: true - secrets_encryption_config: - enabled: true - service_node_port_range: 30000-32767 - kube_controller: - extra_args: - feature-gates: RotateKubeletServerCertificate=true - bind-address: 127.0.0.1 - address: 127.0.0.1 - kubelet: - extra_args: - feature-gates: RotateKubeletServerCertificate=true - protect-kernel-defaults: 'true' - tls-cipher-suites: >- - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - fail_swap_on: false - generate_serving_certificate: true - scheduler: - extra_args: - bind-address: 127.0.0.1 - address: 127.0.0.1 - ssh_agent_auth: false - upgrade_strategy: - max_unavailable_controlplane: '1' - max_unavailable_worker: 10% -windows_prefered_cluster: false -``` - -### Hardened Reference Ubuntu 20.04 LTS **cloud-config**: - -The reference **cloud-config** is generally used in cloud infrastructure environments to allow for -configuration management of compute instances. The reference config configures Ubuntu operating system level settings -needed before installing kubernetes. 
- -```yaml -#cloud-config -apt: - sources: - docker.list: - source: deb [arch=amd64] http://download.docker.com/linux/ubuntu $RELEASE stable - keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 -system_info: - default_user: - groups: - - docker -write_files: -- path: "/etc/apt/preferences.d/docker" - owner: root:root - permissions: '0600' - content: | - Package: docker-ce - Pin: version 5:19* - Pin-Priority: 800 -- path: "/etc/sysctl.d/90-kubelet.conf" - owner: root:root - permissions: '0644' - content: | - vm.overcommit_memory=1 - vm.panic_on_oom=0 - kernel.panic=10 - kernel.panic_on_oops=1 - kernel.keys.root_maxbytes=25000000 -package_update: true -packages: -- docker-ce -- docker-ce-cli -- containerd.io -runcmd: -- sysctl -p /etc/sysctl.d/90-kubelet.conf -- groupadd --gid 52034 etcd -- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd -``` diff --git a/content/rancher/v2.5/en/security/rancher-2.5/_index.md b/content/rancher/v2.5/en/security/rancher-2.5/_index.md deleted file mode 100644 index 299b1ba66c..0000000000 --- a/content/rancher/v2.5/en/security/rancher-2.5/_index.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Self-Assessment and Hardening Guides for Rancher v2.5 -shortTitle: Rancher v2.5 Guides -weight: 1 ---- - -Rancher v2.5 introduced the capability to deploy Rancher on any Kubernetes cluster. For that reason, we now provide separate security hardening guides for Rancher deployments on each of Rancher's Kubernetes distributions. - -- [Rancher Kubernetes Distributions](#rancher-kubernetes-distributions) -- [Hardening Guides and Benchmark Versions](#hardening-guides-and-benchmark-versions) - - [RKE Guides](#rke-guides) - - [RKE2 Guides](#rke2-guides) - - [K3s Guides](#k3s) -- [Rancher with SELinux](#rancher-with-selinux) - -# Rancher Kubernetes Distributions - -Rancher has the following Kubernetes distributions: - -- [**RKE,**]({{}}/rke/latest/en/) Rancher Kubernetes Engine, is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. -- [**K3s,**]({{}}/k3s/latest/en/) is a fully conformant, lightweight Kubernetes distribution. It is easy to install, with half the memory of upstream Kubernetes, all in a binary of less than 100 MB. -- [**RKE2**](https://docs.rke2.io/) is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. - -To harden a Kubernetes cluster outside of Rancher's distributions, refer to your Kubernetes provider docs. - -# Hardening Guides and Benchmark Versions - -These guides have been tested along with the Rancher v2.5 release. Each self-assessment guide is accompanied with a hardening guide and tested on a specific Kubernetes version and CIS benchmark version. If a CIS benchmark has not been validated for your Kubernetes version, you can choose to use the existing guides until a newer version is added. 
- -### RKE Guides - -Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides ----|---|---|--- -Kubernetes v1.15+ | CIS v1.5 | [Link](./1.5-benchmark-2.5) | [Link](./1.5-hardening-2.5) -Kubernetes v1.18+ | CIS v1.6 | [Link](./1.6-benchmark-2.5) | [Link](./1.6-hardening-2.5) - -### RKE2 Guides - -Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides ----|---|---|--- -Kubernetes v1.18 | CIS v1.5 | [Link](https://docs.rke2.io/security/cis_self_assessment15/) | [Link](https://docs.rke2.io/security/hardening_guide/) -Kubernetes v1.20 | CIS v1.6 | [Link](https://docs.rke2.io/security/cis_self_assessment16/) | [Link](https://docs.rke2.io/security/hardening_guide/) - -### K3s Guides - -Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guide ----|---|---|--- -Kubernetes v1.17, v1.18, & v1.19 | CIS v1.5 | [Link]({{}}/k3s/latest/en/security/self_assessment/) | [Link]({{}}/k3s/latest/en/security/hardening_guide/) - - -# Rancher with SELinux - -_Available as of v2.5.8_ - -[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. - -To use Rancher with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.]({{}}/rancher/v2.5/en/security/selinux/#installing-the-rancher-selinux-rpm) diff --git a/content/rancher/v2.5/en/security/security-scan/_index.md b/content/rancher/v2.5/en/security/security-scan/_index.md deleted file mode 100644 index 0538df7a3f..0000000000 --- a/content/rancher/v2.5/en/security/security-scan/_index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Security Scans -weight: 299 ---- - -The documentation about CIS security scans has moved [here.]({{}}/rancher/v2.5/en/cis-scans) diff --git a/content/rancher/v2.5/en/system-tools/_index.md b/content/rancher/v2.5/en/system-tools/_index.md deleted file mode 100644 index 2114aadf25..0000000000 --- a/content/rancher/v2.5/en/system-tools/_index.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: System Tools -weight: 22 -aliases: - - /rancher/v2.x/en/system-tools/ ---- - -System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters or [installations of Rancher on an RKE cluster.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) The tasks include: - -* Collect logging and system metrics from nodes. -* Remove Kubernetes resources created by Rancher. - -The following commands are available: - -| Command | Description -|---|--- -| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. -| [stats](#stats) | Stream system metrics from nodes. -| [remove](#remove) | Remove Kubernetes resources created by Rancher. - -# Download System Tools - -You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. 
- -Operating System | Filename ------------------|----- -MacOS | `system-tools_darwin-amd64` -Linux | `system-tools_linux-amd64` -Windows | `system-tools_windows-amd64.exe` - -After you download the tools, complete the following actions: - -1. Rename the file to `system-tools`. - -1. Give the file executable permissions by running the following command: - - > **Using Windows?** - The file is already an executable, you can skip this step. - - ``` - chmod +x system-tools - ``` - -# Logs - -The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/). See [Troubleshooting]({{}}//rancher/v2.5/en/troubleshooting/) for a list of core Kubernetes cluster components. - -System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. - -### Usage - -``` -./system-tools_darwin-amd64 logs --kubeconfig -``` - -The following are the options for the logs command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. -| `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. - -# Stats - -The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/). - -System Tools will deploy a DaemonSet, and run a predefined command based on `sar` (System Activity Report) to show system metrics. - -### Usage - -``` -./system-tools_darwin-amd64 stats --kubeconfig -``` - -The following are the options for the stats command: - -| Option | Description -| ------------------------------------------------------ | ------------------------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file. -| `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, logs from all nodes in the cluster will be displayed. -| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the options defaults to `/usr/bin/sar -u -r -F 1 1`. - -# Remove - ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.5/en/backups/back-up-rancher) before executing the command. - -When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. 
When you use the `remove` subcommand, the following resources will be removed: - -- The Rancher deployment namespace (`cattle-system` by default). -- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates. -- Labels, annotations, and finalizers. -- Rancher Deployment. -- Machines, clusters, projects, and user custom resource deployments (CRDs). -- All resources create under the `management.cattle.io` API Group. -- All CRDs created by Rancher v2.x. - ->**Using 2.0.8 or Earlier?** -> ->These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. - -### Usage - -When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. - ->**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd]({{}}/rancher/v2.5/en/backups/back-up-rancher) before executing the command. - -``` -./system-tools remove --kubeconfig --namespace -``` - -The following are the options for the `remove` command: - -| Option | Description -| ---------------------------------------------- | ------------ -| `--kubeconfig , -c ` | The cluster's kubeconfig file -| `--namespace , -n cattle-system` | Rancher 2.x deployment namespace (``). If no namespace is defined, the options defaults to `cattle-system`. -| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompt. diff --git a/content/rancher/v2.5/en/troubleshooting/_index.md b/content/rancher/v2.5/en/troubleshooting/_index.md deleted file mode 100644 index 7fbcdd4019..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/_index.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: Troubleshooting -weight: 26 -aliases: - - /rancher/v2.x/en/troubleshooting/ ---- - -This section contains information to help you troubleshoot issues when using Rancher. - -- [Kubernetes components]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-components/) - - If you need help troubleshooting core Kubernetes cluster components like: - * `etcd` - * `kube-apiserver` - * `kube-controller-manager` - * `kube-scheduler` - * `kubelet` - * `kube-proxy` - * `nginx-proxy` - -- [Kubernetes resources]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-resources/) - - Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. - -- [Networking]({{}}/rancher/v2.5/en/troubleshooting/networking/) - - Steps to troubleshoot networking issues can be found here. - -- [DNS]({{}}/rancher/v2.5/en/troubleshooting/dns/) - - When you experience name resolution issues in your cluster. - -- [Troubleshooting Rancher installed on Kubernetes]({{}}/rancher/v2.5/en/troubleshooting/rancherha/) - - If you experience issues with your [Rancher server installed on Kubernetes]({{}}/rancher/v2.5/en/installation/install-rancher-on-k8s/) - -- [Logging]({{}}/rancher/v2.5/en/troubleshooting/logging/) - - Read more about what log levels can be configured and how to configure a log level. 
- diff --git a/content/rancher/v2.5/en/troubleshooting/dns/_index.md b/content/rancher/v2.5/en/troubleshooting/dns/_index.md deleted file mode 100644 index 3822399c32..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/dns/_index.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: DNS -weight: 103 -aliases: - - /rancher/v2.x/en/troubleshooting/dns/ ---- - -The commands/steps listed on this page can be used to check name resolution issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -Before running the DNS checks, check the [default DNS provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/options/#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly]({{}}/rancher/v2.5/en/troubleshooting/networking/#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. - -### Check if DNS pods are running - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns -``` - -Example output when using CoreDNS: -``` -NAME READY STATUS RESTARTS AGE -coredns-799dffd9c4-6jhlz 1/1 Running 0 76m -``` - -Example output when using kube-dns: -``` -NAME READY STATUS RESTARTS AGE -kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s -``` - -### Check if the DNS service is present with the correct cluster-ip - -``` -kubectl -n kube-system get svc -l k8s-app=kube-dns -``` - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s -``` - -### Check if domain names are resolving - -Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: kubernetes.default -Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local -pod "busybox" deleted -``` - -Check if external names are resolving (in this example, `www.google.com`) - -``` -kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com -``` - -Example output: -``` -Server: 10.43.0.10 -Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local - -Name: www.google.com -Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net -Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net -pod "busybox" deleted -``` - -If you want to check resolving of domain names on all of the hosts, execute the following steps: - -1. Save the following file as `ds-dnstest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: dnstest - spec: - selector: - matchLabels: - name: dnstest - template: - metadata: - labels: - name: dnstest - spec: - tolerations: - - operator: Exists - containers: - - image: busybox:1.28 - imagePullPolicy: Always - name: alpine - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - ``` - -2. Launch it using `kubectl create -f ds-dnstest.yml` -3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. -4. 
Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). - - ``` - export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" - ``` - -5. When this command has finished running, the output indicating everything is correct is: - - ``` - => Start DNS resolve test - => End DNS resolve test - ``` - -If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. - -Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. - -``` -=> Start DNS resolve test -command terminated with exit code 1 -209.97.182.150 cannot resolve www.google.com -=> End DNS resolve test -``` - -Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. - -### CoreDNS specific - -#### Check CoreDNS logging - -``` -kubectl -n kube-system logs -l k8s-app=kube-dns -``` - -#### Check configuration - -CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. - -``` -kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} -``` - -#### Check upstream nameservers in resolv.conf - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. - -``` -kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' -``` - -#### Enable query logging - -Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: - -``` -kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - -``` - -All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). - -### kube-dns specific - -#### Check upstream nameservers in kubedns container - -By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). 
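If you suspect this situation on a particular node, a quick host-level check is to compare the stub resolver configuration with the file maintained by `systemd-resolved`. This is a rough sketch using the standard systemd paths mentioned above; run it directly on the node:

```
# Is systemd-resolved acting as the local stub resolver?
systemctl is-active systemd-resolved

# Compare the stub resolv.conf with the one listing the real upstream nameservers
cat /etc/resolv.conf
cat /run/systemd/resolve/resolv.conf
```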
- -Use the following command to check the upstream nameservers used by the kubedns container: - -``` -kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done -``` - -Example output: -``` -Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x -nameserver 1.1.1.1 -nameserver 8.8.4.4 -``` - -If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: - -* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. -* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): - -``` -services: - kubelet: - extra_args: - resolv-conf: "/run/resolvconf/resolv.conf" -``` - -> **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. - -See [Editing Cluster as YAML]({{}}/rancher/v2.5/en/cluster-admin/editing-clusters/#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: - -``` -kubectl delete pods -n kube-system -l k8s-app=kube-dns -pod "kube-dns-5fd74c7488-6pwsf" deleted -``` - -Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). - -If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: - -``` -kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' -``` - -Example output: -``` -upstreamNameservers:["1.1.1.1"] -``` diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md deleted file mode 100644 index 7e935b42d1..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/_index.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Kubernetes Components -weight: 100 -aliases: - - /rancher/v2.x/en/troubleshooting/kubernetes-components/ ---- - -The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters. - -This section includes troubleshooting tips in the following categories: - -- [Troubleshooting etcd Nodes]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd) -- [Troubleshooting Controlplane Nodes]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane) -- [Troubleshooting nginx-proxy Nodes]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy) -- [Troubleshooting Worker Nodes and Generic Components]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic) - -# Kubernetes Component Diagram - -![Cluster diagram]({{}}/img/rancher/clusterdiagram.svg)
-Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md deleted file mode 100644 index 0508f71fb4..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/controlplane/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Troubleshooting Controlplane Nodes -weight: 2 -aliases: - - /rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/ ---- - -This section applies to nodes with the `controlplane` role. - -# Check if the Controlplane Containers are Running - -There are three specific containers launched on nodes with the `controlplane` role: - -* `kube-apiserver` -* `kube-controller-manager` -* `kube-scheduler` - -The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. - -``` -docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' -``` - -Example output: -``` -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver -f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler -bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager -``` - -# Controlplane Container Logging - -> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election]({{}}/rancher/v2.5/en/troubleshooting/kubernetes-resources/#kubernetes-leader-election) how to retrieve the current leader. - -The logging of the containers can contain information on what the problem could be. - -``` -docker logs kube-apiserver -docker logs kube-controller-manager -docker logs kube-scheduler -``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md b/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md deleted file mode 100644 index 0aac78e17b..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/kubernetes-resources/_index.md +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: Kubernetes resources -weight: 101 -aliases: - - /rancher/v2.x/en/troubleshooting/kubernetes-resources/ ---- - -The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/) clusters. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
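Before running the checks below, it can help to confirm that `kubectl` is pointed at the intended cluster. A minimal sketch, using the Rancher HA kubeconfig filename from the example above:

```
export KUBECONFIG=$PWD/kube_config_cluster.yml
kubectl cluster-info
kubectl get nodes
```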
- -- [Nodes](#nodes) - - [Get nodes](#get-nodes) - - [Get node conditions](#get-node-conditions) -- [Kubernetes leader election](#kubernetes-leader-election) - - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) - - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) -- [Ingress controller](#ingress-controller) - - [Pod details](#pod-details) - - [Pod container logs](#pod-container-logs) - - [Namespace events](#namespace-events) - - [Debug logging](#debug-logging) - - [Check configuration](#check-configuration) -- [Rancher agents](#rancher-agents) - - [cattle-node-agent](#cattle-node-agent) - - [cattle-cluster-agent](#cattle-cluster-agent) -- [Jobs and pods](#jobs-and-pods) - - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) - - [Describe pod](#describe-pod) - - [Pod container logs](#pod-container-logs) - - [Describe job](#describe-job) - - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) - - [Evicted pods](#evicted-pods) - - [Job does not complete](#job-does-not-complete) - -# Nodes - -### Get nodes - -Run the command below and check the following: - -- All nodes in your cluster should be listed, make sure there is not one missing. -- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) -- Check if all nodes report the correct version. -- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) - - -``` -kubectl get nodes -o wide -``` - -Example output: - -``` -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 -``` - -### Get node conditions - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' -``` - -Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. - -``` -kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' -``` - -Example output: - -``` -worker-0: DiskPressure:True -``` - -# Kubernetes leader election - -### Kubernetes Controller Manager leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). 
- -``` -kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -### Kubernetes Scheduler leader - -The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). - -``` -kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' -{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> -``` - -# Ingress Controller - -The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. - -Check if the pods are running on all nodes: - -``` -kubectl -n ingress-nginx get pods -o wide -``` - -Example output: - -``` -kubectl -n ingress-nginx get pods -o wide -NAME READY STATUS RESTARTS AGE IP NODE -default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 -nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 -nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 -``` - -If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. - -### Pod details - -``` -kubectl -n ingress-nginx describe pods -l app=ingress-nginx -``` - -### Pod container logs - -``` -kubectl -n ingress-nginx logs -l app=ingress-nginx -``` - -### Namespace events - -``` -kubectl -n ingress-nginx get events -``` - -### Debug logging - -To enable debug logging: - -``` -kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' -``` - -### Check configuration - -Retrieve generated configuration in each pod: - -``` -kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done -``` - -# Rancher agents - -Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. 
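As a quick first check before looking at the individual agents below, you can list both workloads at once. This assumes the default names Rancher uses (a `cattle-cluster-agent` Deployment and a `cattle-node-agent` DaemonSet in the `cattle-system` namespace):

```
kubectl -n cattle-system get deploy cattle-cluster-agent
kubectl -n cattle-system get ds cattle-node-agent
```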
- -#### cattle-node-agent - -Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 -cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 -cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 -cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 -cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 -cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 -cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 -``` - -Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: - -``` -kubectl -n cattle-system logs -l app=cattle-agent -``` - -#### cattle-cluster-agent - -Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: - -``` -kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide -``` - -Example output: - -``` -NAME READY STATUS RESTARTS AGE IP NODE -cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 -``` - -Check logging of cattle-cluster-agent pod: - -``` -kubectl -n cattle-system logs -l app=cattle-cluster-agent -``` - -# Jobs and Pods - -### Check that pods or jobs have status **Running**/**Completed** - -To check, run the command: - -``` -kubectl get pods --all-namespaces -``` - -If a pod is not in **Running** state, you can dig into the root cause by running: - -### Describe pod - -``` -kubectl describe pod POD_NAME -n NAMESPACE -``` - -### Pod container logs - -``` -kubectl logs POD_NAME -n NAMESPACE -``` - -If a job is not in **Completed** state, you can dig into the root cause by running: - -### Describe job - -``` -kubectl describe job JOB_NAME -n NAMESPACE -``` - -### Logs from the containers of pods of the job - -``` -kubectl logs -l job-name=JOB_NAME -n NAMESPACE -``` - -### Evicted pods - -Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). 
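Evictions are usually a symptom of node pressure, so it is often worth checking the related node conditions first. A rough sketch (the exact `describe` output format depends on your Kubernetes version):

```
kubectl describe nodes | grep -E "Name:|MemoryPressure|DiskPressure|PIDPressure"
```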
- -Retrieve a list of evicted pods (podname and namespace): - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' -``` - -To delete all evicted pods: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done -``` - -Retrieve a list of evicted pods, scheduled node and the reason: - -``` -kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done -``` - -### Job does not complete - -If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.]({{}}/rancher/v2.5/en/istio/setup/enable-istio-in-namespace) - -Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/networking/_index.md b/content/rancher/v2.5/en/troubleshooting/networking/_index.md deleted file mode 100644 index 771719bba7..0000000000 --- a/content/rancher/v2.5/en/troubleshooting/networking/_index.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: Networking -weight: 102 -aliases: - - /rancher/v2.x/en/troubleshooting/networking/ ---- - -The commands/steps listed on this page can be used to check networking related issues in your cluster. - -Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. - -### Double check if all the required ports are opened in your (host) firewall - -Double check if all the [required ports]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. -### Check if overlay network is functioning correctly - -The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. - -To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. 
- -> **Note:** This container [does not support ARM nodes](https://github.com/leodotcloud/swiss-army-knife/issues/18), such as a Raspberry Pi. This will be seen in the pod logs as `exec user process caused: exec format error`. - -1. Save the following file as `overlaytest.yml` - - ``` - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: overlaytest - spec: - selector: - matchLabels: - name: overlaytest - template: - metadata: - labels: - name: overlaytest - spec: - tolerations: - - operator: Exists - containers: - - image: rancherlabs/swiss-army-knife - imagePullPolicy: Always - name: overlaytest - command: ["sh", "-c", "tail -f /dev/null"] - terminationMessagePath: /dev/termination-log - - ``` - -2. Launch it using `kubectl create -f overlaytest.yml` -3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. -4. Run the following script, from the same location. It will have each `overlaytest` container on every host ping each other: - ``` - #!/bin/bash - echo "=> Start network overlay test" - kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read spod shost - do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | - while read tip thost - do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" - RC=$? - if [ $RC -ne 0 ] - then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost - else echo $shost can reach $thost - fi - done - done - echo "=> End network overlay test" - ``` - -5. When this command has finished running, it will output the state of each route: - - ``` - => Start network overlay test - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 - Error from server (NotFound): pods "wk2" not found - FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 - command terminated with exit code 1 - FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 - cp1 can reach cp1 - cp1 can reach wk1 - command terminated with exit code 1 - FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 - wk1 can reach cp1 - wk1 can reach wk1 - => End network overlay test - ``` - If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports]({{}}/rancher/v2.5/en/cluster-provisioning/node-requirements/#networking-requirements) for overlay networking are not opened for `wk2`. -6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. 
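If the overlay test reports failures, a common follow-up is to confirm that the CNI pods themselves are healthy on the affected nodes. The pod names and labels depend on the CNI you deployed (Canal is the default for RKE clusters), so treat this as a sketch rather than an exact command; `POD_NAME` is a placeholder:

```
kubectl -n kube-system get pods -o wide | grep -E "canal|flannel|calico"
kubectl -n kube-system describe pod POD_NAME
```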
- - -### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices - -When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: - -* `websocket: bad handshake` -* `Failed to connect to proxy` -* `read tcp: i/o timeout` - -See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. - -### Resolved issues - -#### Overlay network broken when using Canal/Flannel due to missing node annotations - -| | | -|------------|------------| -| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | -| Resolved in | v2.1.2 | - -To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): - -``` -kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' -``` - -If there is no output, the cluster is not affected. diff --git a/content/rancher/v2.5/en/user-settings/_index.md b/content/rancher/v2.5/en/user-settings/_index.md deleted file mode 100644 index f8ed1a7c76..0000000000 --- a/content/rancher/v2.5/en/user-settings/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: User Settings -weight: 23 -aliases: - - /rancher/v2.5/en/tasks/user-settings/ - - /rancher/v2.x/en/user-settings/ ---- - -Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. - -![User Settings Menu]({{}}/img/rancher/user-settings.png) - -The available user settings are: - -- [API & Keys]({{}}/rancher/v2.5/en/user-settings/api-keys/): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. -- [Cloud Credentials]({{}}/rancher/v2.5/en/user-settings/cloud-credentials/): Manage cloud credentials [used by node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) to [provision nodes for clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters). -- [Node Templates]({{}}/rancher/v2.5/en/user-settings/node-templates): Manage templates [used by Rancher to provision nodes for clusters]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters). -- [Preferences]({{}}/rancher/v2.5/en/user-settings/preferences): Sets superficial preferences for the Rancher UI. -- Log Out: Ends your user session. 
diff --git a/content/rancher/v2.5/en/user-settings/api-keys/_index.md b/content/rancher/v2.5/en/user-settings/api-keys/_index.md deleted file mode 100644 index 555a99ad05..0000000000 --- a/content/rancher/v2.5/en/user-settings/api-keys/_index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: API Keys -weight: 7005 -aliases: - - /rancher/v2.5/en/concepts/api-keys/ - - /rancher/v2.5/en/tasks/user-settings/api-keys/ - - /rancher/v2.x/en/user-settings/api-keys/ ---- - -## API Keys and User Authentication - -If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. - -An API key is also required for using Rancher CLI. - -API Keys are composed of four components: - -- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. -- **Access Key:** The token's username. -- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. -- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. - -## Creating an API Key - -1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. - -2. Click **Add Key**. - -3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. - - The API key won't be valid after expiration. Shorter expiration periods are more secure. - - Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. - - A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints]({{}}/rancher/v2.5/en/overview/architecture/#4-authorized-cluster-endpoint) for more information. - -4. Click **Create**. - - **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. - - Use the **Bearer Token** to authenticate with Rancher CLI. - -5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. - -## What's Next? - -- Enter your API key information into the application that will send requests to the Rancher API. -- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. -- API keys are used for API calls and [Rancher CLI]({{}}/rancher/v2.5/en/cli). - -## Deleting API Keys - -If you need to revoke an API key, delete it. You should delete API keys: - -- That may have been compromised. -- That have expired. - -To delete an API, select the stale key and click **Delete**. 
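As an example of putting a key to use, the Bearer Token can be sent in an `Authorization` header when calling the Rancher API. The endpoint URL and token below are placeholders; substitute the values shown when your key was created:

```
RANCHER_ENDPOINT="https://rancher.example.com/v3"
BEARER_TOKEN="token-xxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

# List the clusters visible to this key
curl -s -H "Authorization: Bearer ${BEARER_TOKEN}" "${RANCHER_ENDPOINT}/clusters"
```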
diff --git a/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md b/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md deleted file mode 100644 index 1c7847e8fa..0000000000 --- a/content/rancher/v2.5/en/user-settings/cloud-credentials/_index.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: Managing Cloud Credentials -weight: 7011 -aliases: - - /rancher/v2.x/en/user-settings/cloud-credentials/ ---- - -When you create a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. - -Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. - -Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers, which are not using them yet. These node drivers will not use cloud credentials. - -You can create cloud credentials in two contexts: - -- [During creation of a node template]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for a cluster. -- In the **User Settings** - -All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. - -## Creating a Cloud Credential from User Settings - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Click **Add Cloud Credential**. -1. Enter a name for the cloud credential. -1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers]({{}}/rancher/v2.5/en/admin-settings/drivers/node-drivers/) in Rancher. -1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. -1. Click **Create**. - -**Result:** The cloud credential is created and can immediately be used to [create node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). - -## Updating a Cloud Credential - -When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. Choose the cloud credential you want to edit and click the **⋮ > Edit**. -1. Update the credential information and click **Save**. - -**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/). - -## Deleting a Cloud Credential - -In order to delete cloud credentials, there must not be any node template associated with it. 
If you are unable to delete the cloud credential, [delete any node templates]({{}}/rancher/v2.5/en/user-settings/node-templates/#deleting-a-node-template) that are still associated to that cloud credential. - -1. From your user settings, select **User Avatar > Cloud Credentials**. -1. You can either individually delete a cloud credential or bulk delete. - - - To individually delete one, choose the cloud credential you want to edit and click the **⋮ > Delete**. - - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. -1. Confirm that you want to delete these cloud credentials. diff --git a/content/rancher/v2.5/en/user-settings/node-templates/_index.md b/content/rancher/v2.5/en/user-settings/node-templates/_index.md deleted file mode 100644 index b33e05d261..0000000000 --- a/content/rancher/v2.5/en/user-settings/node-templates/_index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Managing Node Templates -weight: 7010 -aliases: - - /rancher/v2.x/en/user-settings/node-templates/ ---- - -When you provision a cluster [hosted by an infrastructure provider]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools), [node templates]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: - -- While [provisioning a node pool cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools). -- At any time, from your [user settings](#creating-a-node-template-from-user-settings). - -When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer user from your user settings. - -## Creating a Node Template from User Settings - -1. From your user settings, select **User Avatar > Node Templates**. -1. Click **Add Template**. -1. Select one of the cloud providers available. Then follow the instructions on screen to configure the template. - -**Result:** The template is configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools). - -## Updating a Node Template - -1. From your user settings, select **User Avatar > Node Templates**. -1. Choose the node template that you want to edit and click the **⋮ > Edit**. - - > **Note:** As of v2.2.0, the default `active` [node drivers]({{}}/rancher/v2.5/en/admin-settings/drivers/node-drivers/) and any node driver, that has fields marked as `password`, are required to use [cloud credentials]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/#cloud-credentials). If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. - -1. Edit the required information and click **Save**. - -**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. - -## Cloning Node Templates - -When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. 
Cloning templates saves you the hassle of re-entering access keys for the cloud provider. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Find the template you want to clone. Then select **⋮ > Clone**. -1. Complete the rest of the form. - -**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster]({{}}/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools). - -## Deleting a Node Template - -When you no longer use a node template, you can delete it from your user settings. - -1. From your user settings, select **User Avatar > Node Templates**. -1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.5/en/user-settings/preferences/_index.md b/content/rancher/v2.5/en/user-settings/preferences/_index.md deleted file mode 100644 index a692f70e7d..0000000000 --- a/content/rancher/v2.5/en/user-settings/preferences/_index.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: User Preferences -weight: 7012 -aliases: - - /rancher/v2.x/en/user-settings/preferences/ ---- - -Each user can choose preferences to personalize their Rancher experience. To change preference settings, open the **User Settings** menu and then select **Preferences**. - -The preferences available will differ depending on whether the **User Settings** menu was accessed while on the Cluster Manager UI or the Cluster Explorer UI. - -{{% tabs %}} -{{% tab "Cluster Manager" %}} -## Theme - -Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. - -## My Account - -This section displays the **Name** (your display name) and **Username** (your login) used for your session. To change your login's current password, click the **Change Password** button. - -## Table Row per Page - -On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. - -{{% /tab %}} -{{% tab "Cluster Explorer" %}} -## Theme - -Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. - -## Login Landing Page - -Choose the default page to display after logging in. - -## Date Format - -Choose your preferred format to display dates. By default, dates are displayed in the form `Wed, Jun 9 2021`. - -## Time Format - -Choose your preferred format to display time. By default, the 12-hour format is used. - -## Table Row per Page - -On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. - -## YAML Editor Key Mapping - -Choose the editor used when editing YAML configurations. When Emacs or Vim is chosen, the editor's shortcut commands can also be used. - -## Enable Developer Tools & Features - -Enables developer tools and features to be used. - -## Hide All Type Description Boxes - -Hides all description boxes. - -## Helm Charts - -When deploying applications from the "Apps & Marketplace", choose whether to show only released versions of the Helm chart or to include prerelease versions as well. 
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.6/_index.md b/content/rancher/v2.6/_index.md deleted file mode 100644 index 5060194d87..0000000000 --- a/content/rancher/v2.6/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Rancher 2.6 -weight: 1 -showBreadcrumb: false ---- diff --git a/content/rancher/v2.6/en/_index.md b/content/rancher/v2.6/en/_index.md deleted file mode 100644 index 98bdedcac7..0000000000 --- a/content/rancher/v2.6/en/_index.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "Rancher 2.6" -shortTitle: "Rancher 2.6 (Latest)" -description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -metaTitle: "Rancher 2.6 Docs: What is New?" -metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." -insertOneSix: false -weight: 1 -ctaBanner: 0 -aliases: - - /rancher/v2.x/en/ ---- -Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher 2 exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. - -Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. - -One Rancher server installation can manage thousands of Kubernetes clusters and thousands of nodes from the same user interface. - -Rancher adds significant value on top of Kubernetes, first by centralizing authentication and role-based access control (RBAC) for all of the clusters, giving global admins the ability to control cluster access from one location. - -It then enables detailed monitoring and alerting for clusters and their resources, ships logs to external providers, and integrates directly with Helm via the Application Catalog. If you have an external CI/CD system, you can plug it into Rancher, but if you don't, Rancher even includes Fleet to help you automatically deploy and upgrade workloads. - -Rancher is a _complete_ container management platform for Kubernetes, giving you the tools to successfully run Kubernetes anywhere. diff --git a/content/rancher/v2.6/en/admin-settings/authentication/keycloak-saml/_index.md b/content/rancher/v2.6/en/admin-settings/authentication/keycloak-saml/_index.md deleted file mode 100644 index ca2952111f..0000000000 --- a/content/rancher/v2.6/en/admin-settings/authentication/keycloak-saml/_index.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: Configuring Keycloak (SAML) -description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins -weight: 1200 ---- - -If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. - -## Prerequisites - -- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. -- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. 
See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. - - Setting | Value - ------------|------------ - `Sign Documents` | `ON` 1 - `Sign Assertions` | `ON` 1 - All other `ON/OFF` Settings | `OFF` - `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 - `Client Name` | (e.g. `rancher`) - `Client Protocol` | `SAML` - `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` - - >1: Optionally, you can enable either one or both of these settings. - >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. - - {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} - -- In the new SAML client, create Mappers to expose the users fields - - Add all "Builtin Protocol Mappers" - {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} - - Create a new "Group list" mapper to map the member attribute to a user's groups - {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} - -## Getting the IDP Metadata - -{{% tabs %}} -{{% tab "Keycloak 5 and earlier" %}} -To get the IDP metadata, export a `metadata.xml` file from your Keycloak client. -From the **Installation** tab, choose the **SAML Metadata IDPSSODescriptor** format option and download your file. -{{% /tab %}} -{{% tab "Keycloak 6-13" %}} - -1. From the **Configure** section, click the **Realm Settings** tab. -1. Click the **General** tab. -1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. - -Verify the IDP metadata contains the following attributes: - -``` -xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" -xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" -xmlns:ds="http://www.w3.org/2000/09/xmldsig#" -``` - -Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. - -The following is an example process for Firefox, but will vary slightly for other browsers: - -1. Press **F12** to access the developer console. -1. Click the **Network** tab. -1. From the table, click the row containing `descriptor`. -1. From the details pane, click the **Response** tab. -1. Copy the raw response data. - -The XML obtained contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: - -1. Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. -1. Remove the `` tag from the beginning. -1. Remove the `` from the end of the xml. - -You are left with something similar as the example below: - -``` - -.... - -``` - -{{% /tab %}} -{{% tab "Keycloak 14+" %}} - -1. From the **Configure** section, click the **Realm Settings** tab. -1. Click the **General** tab. -1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. 
- -Verify the IDP metadata contains the following attributes: - -``` -xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" -xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" -xmlns:ds="http://www.w3.org/2000/09/xmldsig#" -``` - -Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. - -The following is an example process for Firefox, but will vary slightly for other browsers: - -1. Press **F12** to access the developer console. -1. Click the **Network** tab. -1. From the table, click the row containing `descriptor`. -1. From the details pane, click the **Response** tab. -1. Copy the raw response data. - -{{% /tab %}} -{{% /tabs %}} - -## Configuring Keycloak in Rancher - - -1. In the top left corner, click **☰ > Users & Authentication**. -1. In the left navigation menu, click **Auth Provider**. -1. Click **Keycloak SAML**. -1. Complete the **Configure Keycloak Account** form. For help with filling the form, see the [configuration reference](#configuration-reference). -1. After you complete the **Configure a Keycloak Account** form, click **Enable**. - - Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. - - >**Note:** You may have to disable your popup blocker to see the IdP login page. - -**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. - -{{< saml_caveats >}} - -## Configuration Reference - -| Field | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Display Name Field | The attribute that contains the display name of users.
Example: `givenName` | -| User Name Field | The attribute that contains the user name/given name. Example: `email` | -| UID Field | An attribute that is unique to every user. Example: `email` | -| Groups Field | Make entries for managing group memberships. Example: `member` | -| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.
Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | -| Rancher API Host | The URL for your Rancher Server. | -| Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | -| IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | - ->**Tip:** You can generate a key/certificate pair using an openssl command. For example: -> -> openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert - -## Annex: Troubleshooting - -If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.6/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. - -### You are not redirected to Keycloak - -When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. - - * Verify your Keycloak client configuration. - * Make sure `Force Post Binding` set to `OFF`. - - -### Forbidden message displayed after IdP login - -You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. - - * Check the Rancher debug log. - * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. - -### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata - -This is usually due to the metadata not being created until a SAML provider is configured. -Try configuring and saving keycloak as your SAML provider and then accessing the metadata. - -### Keycloak Error: "We're sorry, failed to process response" - - * Check your Keycloak log. - * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. - -### Keycloak Error: "We're sorry, invalid requester" - - * Check your Keycloak log. - * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. diff --git a/content/rancher/v2.6/en/admin-settings/branding/_index.md b/content/rancher/v2.6/en/admin-settings/branding/_index.md deleted file mode 100644 index 4e5cff17e2..0000000000 --- a/content/rancher/v2.6/en/admin-settings/branding/_index.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: Custom Branding -weight: 90 ---- - -Rancher v2.6 introduced the ability to customize Rancher’s branding and navigation links. - -- [Changing Brand Settings](#changing-brand-settings) -- [Brand Configuration](#brand-configuration) -- [Custom Navigation Links](#custom-navigation-links) -- [Link Configuration](#link-configuration) -- [Link Examples](#link-examples) - -# Changing Brand Settings - -> **Prerequisite:** You will need to have at least cluster member permissions. - -To configure the brand settings, - -1. Click **☰ > Global settings**. -2. Click **Branding**. - -# Brand Configuration - -### Private Label Company Name - -This option replaces “Rancher” with the value you provide in most places. 
Files that need to have Rancher in the name, such as “rancher-compose.yml”, will not be changed. - -### Support Links - -Use a URL to send new "File an Issue" reports instead of sending users to the GitHub issues page. Optionally show Rancher community support links. - -### Logo - -Upload light and dark logos to replace the Rancher logo in the top-level navigation header. - -### Primary Color - -You can override the primary color used throughout the UI with a custom color of your choice. - -### Fixed Banners - -{{% tabs %}} -{{% tab "Rancher before v2.6.4" %}} -Display a custom fixed banner in the header, footer, or both. -{{% /tab %}} -{{% tab "Rancher v2.6.4+" %}} -Display a custom fixed banner in the header, footer, or both. - -As of Rancher v2.6.4, configuration of fixed banners has moved from the **Branding** tab to the **Banners** tab. - -To configure banner settings, - -1. Click **☰ > Global settings**. -2. Click **Banners**. -{{% /tab %}} -{{% /tabs %}} - -# Custom Navigation Links - -In this section, you'll learn how to configure the links in the left navigation bar of the **Cluster Dashboard**. To get to the cluster dashboard, - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster where you want custom navigation links and click **Explore**. - -It can be useful to add a link for quick access to services installed on a cluster. For example, you could add a link to the Kiali UI for clusters with Istio installed, or you could add a link to the Grafana UI for clusters with Rancher monitoring installed. - -The custom links don't affect who has access to each service. - -Links can be created at the top level and multiple links can be grouped together. - -### Adding a Custom Navigation Link - -> **Prerequisite:** You will need to have at least cluster member or project member permissions. - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster where you would like to add custom navigation links and click **Explore**. -2. In the top navigation menu, click **🔍 (Resource Search)**. -3. Type **Nav** and click **Nav Links**. -4. Click **Create from YAML**. -5. The simplest way to create a navigation link is to add these fields: - - name: linkname - toURL: https://example.com - - For more details on setting up links, including optional fields, see [Link Configuration.](#link-configuration) -6. Click **Create**. - -# Link Configuration - -### `name` - -Display name for the link. Required. - -### `group` - -Name of a group of links that expands when clicked. - -Optional. If not provided, the link appears standalone. - -Groups are displayed separately from standalone links, as shown below: - -![Screenshot of group and standalone link]({{}}/img/rancher/grouped-vs-standalone-links.png) - -### `iconSrc` - -Icon source in base64 format.
- -Below is an example of the Grafana logo in base64 format: - -``` -data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAgAElEQVR4Aey9d5xkZZn3zb/P+3mffZ9nDcDAzHTuqs49PZEhCBhBJc10nO7pHKbD9PR07p5AWlEEZM2ioph3dXGNqLgCxhVBRIYRmNy5cjqnTlWdqu/7ue5zTk+DiNIsCn4suOacrjrxvq/fle/7PofX+ycDZNT/pIAUaUUmaRBKp8FMQ1L9qL6S4+VYUikIeuBrn+LppiuJNG/BvHYd7HbhbXLBZ/dB8AkwFokCHsAH6Kb8IxTHJIyBHwgDMTBT6h7yWAmb1L3sxyEjfxkYxDDQMa0nWV0vyE2slwZ5JtUO8v5JUhgk0EkRw5TnUg9sgJm03lsezkyTIU0C2VrNJU2WsdtTPVQyAmYU4mGIhDESaULAcSG5RjwJEQ8YsxgEOZoOcSxjvXsyKM8nL24QJ4UmF5TnlL7IWLure/G/3lnn/PVu9SrdaQVApO3/CCAZAYjNSLIVBrD/VMwSnsf4/B38ofWthFu3krhhPZmaLJZuyEY7vJPIV++AwEnImJwJ66qTFcMLSswkGWKkiKqtMIICwB890zL/2lwoHGLYnCIMtMqP3Md5N7mMDRDTBkhKAURAqNBs3TOdspjWERgrmkYuJbQMEPUeKdAEIBrIuSIKEiZ+B0ShADz7W/Tjv8TMLOIjybxcJwNJOUgAkjHQSFoAkedMWn2g7rXKV/9rnfZ3DRCRjgoN0ik2A0nDLgMkbYD3aU7dPcRT7ZfhadmEVldA/IZ1RN6TRahxO8f37CTyxU/B4pySvOlYHNM0sZhQRKgwTRrRWALOZc0lWksoY30n0lPkuMBCPYCcKn/Ic632Iy/ivNvy+4k2EOAamDbJ/rIKFYEhx74AWPIWcpRhv8dyu5mQTEEkmUYTjSsaMBWDiA9mjsF/foWTH76Z33zqEL6jD5IgRkTeR64valfOQQSL6My0Ap/c32qvlNJgq339v8Z5fzcAkcayBKrFkNIBCiDOD/Lj8jG2iZWOwvGHefxQNY+3beHk7grCDS7067JhRwn6dZX4d72L3zVei/6Vz0HYB/EQJINKawRTQXVP+UfYQEjdRphITBlTNIrFqBopIsqcsZnaQYww0iv5yA1XkuJ+eRJREStJvrMFhnO8A5S09ewCDoMkCQUw67KxOMQMCBkpkok4JIIQnYGf3k/s7mlO7N3Fw7VX8J2Ot3Pi/rvBXMLMJKxbx+UR5L4CEJ0IKYUbeV0xAUXDZVSrvZIGeHXPff0DRDGE9PRZPhGec8jhBWcr30uXCWEG4Xf/wW+H38ajXVUcaS7GX+dGvyYX6jeQvL6MZO1lzFx7Mc81XkPyM3eC/xlIz5LJzBIz/bbUtUyKZaksUtPUIS06wzK/LGluM6jwrVg9wkCvECDOe51lE2kL5w2drdU+Ths5bSBbMacsVMtGtKDFug5+5Q00Iw2JFOhhmD0C3/goS6M1HKvfiqfxMo7t3MLv2i7F97nDoJ+BpG45IXLysgYRgIhvJPeRu4QVibZ7LX/+rgDiNLTT58IADjM4rPI8HyXpgYc+yXODF3G0ZyPPtZSxUFeM/9p8MrUVJK4rIX5NMfEdm1jauZ1j7Vfj/exNcPoRSB2HxIICiHI+Hb4U00mYSWkP4RAhHTKiUexggfCEPJDiUOepX/5W3tN5R9m3PpZZJ6bdWbJ+kWPkto51JyaVxbBnpYtcT35XwFDXt8Ee8cJ//wj9X6c40fF2TtZU4qspJV5bidawCU/HxWgfHoTwccuhl4srE0saRnwQwwaIZQKa+BH6B0CcfnvVttIBK8jmFId5zjKEmA+WhJTIkeoYYxY+t5/FtmJ8zeUEWzdypqmM07VFhGpK0W9wYVyXh15dQLxnG/NdF/NE00V4PzgAv/0OLB5RbyWKQD2CML5wV1KMbIlmCSgkapQEkcLCNPJ72mJAsdXl+Vb7cRh+mcnlQvKl0IomUfs2mOT28rwCaiErgiW+hXWOaBSnzRSw4/Mw/wR87zN4xht55vqNzNxQQXj3VoyWzRjX5ZPcWUigrozozU0QeMbSNAnIyA0EcaQRE1N8EPWn0hoSDxSSRnntfl73GkTMAsvXsDnCYZAXMERc2dei2i0HVnWMdpro4etYuv58orUujLatLLZsZKapkqXaYqINRZi7XWQ63ASacwh2lhPtvZwjdVs4M94ETz4M8ajFjI5TLlsJLavwsu0GCA84JpX4uEAAVHBYGHa1H3lVuZaQxXgvAIh86QDFDqu+ECDSIstS3AGWnCdmUnwRjv4Y/XMHON51OSerSwjv2kCqdRta81ZiDZWwqwjq8onWFKIfrIPQs5CIKl/ekJvZDyagFJJbWKFuiQFLePwfJtZq+//PnieNLc64lUuwTYoXMITwZowMgbSu3EIjE8NMiKMdhmd/zlzrRjJ12UQb3IQaKojUbiRUW8VSQynzjQV4mtfjbz6fSNN5hBvXsrjbzXzTZjz1V/Bs0/Xw0A9g7qRy3E3DRzARUJpKni0ZSljpEUkcCEBsQR3BYIEIC2mxw+WBV/dx3v950TP5UshpBxskqURG+cvCjtImwqyyDYZ9pDPiMKfR4hHQY5aJdPIIPPg1jrS/nZndW/E0lRJodBHY5SbYUEq4biOx2goi16+D1iLCO/PwDL0HfvdD5X9JFNwXP+vjyL2UMJDnUs8kRpzkjv4BkNX1/l9wlmiOhHL4RIbaDrA0vs0UwifSMVEkuSWJsyTxRACMIKSi8Nj3WWyphLr16PWFaPVlGDs3ou2swldXpgCy0LoWT+t5RJreqEASaMpRDGLs2E6w+u2c7mkgcM/t8IdHID2PSZAQUaJmSrkAypgXXrClaTIj5kZcRXXiKlH4ygAibXA2Yme3wUqAJDMWWDJgGEmiWgzDFL1hCRcjHkWP+kgaPkgHQUyqIz8l+fHDzLa/i0DdVrTaUmL1LsINBTZIignXVRCpK8W3cx3Jdjehnfl4970bHnsA0rpi/QWxLqUf7SiZ2pd/BBPio0kQQyVO/4LO/hsd8ro2sYQxRPYJSJSZJYwhHSBkWoJUABJRqTLRM0m08KKlPYwgiR99kZOdlRi71pOuKSS90wJIrLqK2M4yZTaEGy0N4ml9M96W89Hqs0lVu0nt2Ii+YztPv8XF8ZZ3EPrkAfj9dyF50gaJhscwEP5U0twGrZlKkEhKwDNkZ7gV27yC7ndUxYtfwkymMZMp5L7KF0pJNj2OmYiR0MMkRWsIoyY8MPsYia/fyZmxGzjdsInAjjKM6nIS1aUKJOH6UgK7LAo2FBPc5WKh9kJiYoJWF7I08G5SP7sfMrryLuZsYCjZJYlFp39EWKhIn4TBVy8gXvyN/2e/fd0DREoY4sqvsEo8lHQSCSV8Z5c8RDJJxAeRUKvmm4FEGDQ/S1+6k2NdG4g1ZWPWFJLZWUZi5wbiOypJXF9GfIcbrT6HYNNaPC0X4G2+AF0B
l5LtLCPRUUa8o5hE6yrSzYtwTi0OAFJowXHiG1uBjZOS4m/X6rOA3glIOubtvj8hLbEBjm6CYxvwmkpw2ubjtEfwmxcbyYGcRCPFVhnV7h3bSPbEVuLN6xhp+Qjc/e8Q7wHtXJuJ4SY8m2wYzLiGGkSmlmETASLAyaPZtxne+Y57k1UF+VkvgSOVSpHJZMxkm9GsWpE60YV76TdJd1bhHo9Ag6JLSknaEoxNqVmK4J3YYAEipj1WBCeeYfwNXxSYa4QA0XIHp2UlfstCaM6xADy22gLpRAH+8U2kT1UR75rHWOdquP3FIMviAXhR9LwSjgKIwB9h/C7x7v/O2PHvMrOOnFA4Ls+mghypgJPr4dQGaN0E7evw2yugvQRayuCUkFlkpIPfNJdW54RUErx/760BzbEqK6mOS3qU4LTm4LRF8Noi0LwQjss5FEgqbMqDgHSykmzLSqbaS7nb8Qmc2/8Zkn2QiJuisr7WUwWJd9bcUgasXU6qyKARnValfMdrkFnYMFojnU4jraLWJHuqH5K38O/9IdG+v028qQhOSVrLzKqAxu2g9KSmSpymciPlpe2NVDcAeYbxF0CkgY6X4mvsT5WSPbXaZJmLB/zWCDTJTy4H45NughOr8ZpXku6MMNWZS7Tvx+HO18HrM9u5+dkJ4umMqYov6yHC6HmiLb/JyPHV0LMYWjT5kgtHFkDjAlAlioPFpOvLSB8pIXN8hbH3OVYGRyugqdR2hjpkTlQMp+ZI+u3ja+DYWjhRgd9cjNu6Cqd9MW5HBNrmQdMSOFYIjZq3kaapNOrXb1tAqnMxd04UMNb1d+HG/4DoDSs2BBAzy6p1G9q9V6T3wfJSM2nyV8PECgGi7dlkakmDhCaWieIlJ01KefLSZxlrryB6LA9OFUCr+KECjm6DYy/gN63DbS6DpjI4KbOq2FLTHMdePKPxP1Fh6VQxbnMxmZZSMq35OO0L8NvnQctiOKmMi+ct6bdbF+F2Rkh0LmOi9WOkz/0bmHzd1ghQqkx6ykasDUCifcT6/h13W9cy0TmPZPNyaFkCp8VgEWhZhH8qzyDUbcrHPbUQ9+RCM/egORBOlUFLwZOpOR/eieYEqgCM6tzjZdb8kt3ZnIfTIhVbGHRQBP/UAjiuUKMiGpIgCj4shrbncDqeI9a8gPGmtaS7fgzu7oLkHe1nYBas+07UOO42/UCTR3Yy0ZhYf0V8kLCwtVqB5LGXKqmPt8Dl32SyZzvjrYtJyKIwTFgIHRqbdXBiI37TGiPANEacWgUnCi3jPuv4Gx9WwKvAbS4l07aCdMcy0vKTW/OgdbH9vaObQdSkiOYi/LbnSLevINYmf+Rv41xWTed9wEV8Z8xsnaDxjpC+Cnd3M33lJ5m88Alig99Dov9lol1FjJzIIdO5AbdtB27LDvzW9fgdi6H9OWjRepBiKykkLZ5E7wQefddcNHc6Jd9GpOvkGpC6LavJtpbjti/Ha8+xdujJXDhaaemkgCSALIT2+dC6ALelkHTLh/H6fgZG3gDnnNlsJjl536TFS3M8CveGVpU+lX0upHznvkKN8VZH3Txx5oZZJ+/2fpqp9nyi7QtJN5eQPVlmwdBRbiKhSHM0yyzPh9YlVgCfyoemkrmPvfjmlPwXgbAcmlbjtZSQ6VxKunM5mbYysq1l0Cq+yIWT5ZbEJ61LoXUlbls+XncZE80bmOz6Xrj/e+B2KlZt19QYJ13FAlLjuNNncSaPQ3QPzv0vMXH+3zLe86+Y6vgcsbafJtn8z0m3fJxM1woy3REyHRGy0jDtK6BVKvUJpE55Ij0jQKSZZO/KzhRYTpXjtpTbzmkrwW0rwD+10n5/cjmcXAUGIOqkJQFp0EqMepY0yV74LEx+GZIDZKeUOvFoD0aBRGkpZnvrmXV8Asp37is0q6RBwpc+m5qaInG/iWj3D+C0VxqJnWpbbswbmTlO8xoj0WlbDm3q65XQovcSSvOtQNOYPZOAVPBGjC8AFuC15ZLpyCfZWUC6XQCRv1EZgFPAXGjvwQjmcgMoTq8g1Z7PWPsHmTr3L2F8f1CIW7V9IaI1I1pPnFbRQVMoSTVWr4FzEbyLMNIM9w7BzTfg8m+RufgPiJ17kYnezYx3bCTd+jJOy6s4LS/itm7DbduI37YaX458Rz62g4TYgNRJLeqs3MD0eitIZLIV4bWtxGtbjteabx7EPuhqa8pJKrQKeJIOS+CUACJptBq3tYxsW4mRHnrvyR4WSKQ1TikmLpNPYNY9rIQuResK8ZoWku5YQqy3itjQ98OD/2krhodhLNXaDXK3lOCoORNVC58Jos947gJMSKEdFrLWW9vw+0fM99YjvvFvHTv7+sERb7F+vvG8Wcfp2JCMFgwnPYPU/mBdhCZHwyW11gOTxozhJm8yMdzC5MUv8PDoFtzWZdBZQKYtj3RrLm5HCdmWSpInCvDalwSaejm0CiALLZnx19iHJLBIy4iK8FpFBUbKu225hvnFC9ac17EKEgXgMD7PcmM1CCDpjiIyhgfK8Zqr7LHmd2X5iBdK4FQVXrP4NA+vp5ypzud50PV9pG79PtBrsibcdErp7rM6y3SyOkFBrqQtja9dUc1S0HG0nx3eADgdkGmD1Ammu3+HVOevkez4HIn2Hyba/kmibS8Saysn0bYMvzcHTs+DLmkbRZbmwcn5cCwHjkj654LUrbRBSwlKCXAUVj49n2xPhGRbDpm2cpyWl8k0bSPVtIx0i663AnpLwNiZYvhi/NZSQ15bMV6bfe+3hWZgqOH0d/CZAhIKNhzXdZZC53y8vgWk+0pJnP4E453K/r0KsYytjKCZI8OeWr6kPKQkqewkjjONp6WyBjBxfD+K40Uxc0yGmR9lAAdsOhMrDhfoaKJy9lA89kf4hTHnwvFREFKMHVxKuAmPe1Ib4nF2kCGtPdqDfdqVm5SeJp0cIZoeYcofNxOBYyZbz8wMmN3DmK4hdeHfMNr0UTJtimzm4bcJDCEVojHw2wrx21fhy8poywtoFbSJgvFoXmV8RxMRba7Ab60w45/pLCTVvYrk6aUkexaTOr2ITPdSnI5VZmy9lg34LeusCdW2xJjTbscSAxIDKAnW1gBIBnShQC2E5lJoroDmdbintnGvbjX3jlYy2vMhRs78E+LDvwsZ1TEYeRuAPKFzTeKjQp8qjZX2SKdc/MQUZJWuMQSJdhivgTtfxr/6a2Qv/ATZc/+QB03PM9a0jqlTFSSay0g3y4kuNSFlMTRdq/E6y030IXGqmERTKanWPLI9C/AGFpLpWoXftQE6PoDb+hLptkJSbYtwupbC6VVmYASCuRD6fWO/llrV27nQBid6l+L2rCfd9Sm48QV4cBQS9yFjCiqZ6Q8rQlwyaKGQdiHRbubSKAk8L2FLkqrqvUkPDpg3QIedtbf1qbRG3G4oGWiRsP9DZp7dmu/0gUAi1faWRV76Ssc8FkV4y0rJ8HohSJJBKSEJQbN92SiOPxqUBkrywFHdSNcsozXrPKL7cK/8IomeD5HUOLYXBmNQ8NStAY20T+uKwDKwgi3bUUyqq4Dk6RUGHMmeBaR
6FpDpXozTucIAT/NzApPAkO1YOousJnHb5WMoB68YTpdCt3itFDpk2ZSYuT3a1kLXazjN20l2ljPds5oH3duZOP9ZmPg6+L08vj/II/H2De9kXphCG9peN+uS0QaJaS3JUpEBbUg/aXdl1TpqLdNNaOuvq7ao2d1quPUVvKufJzX0fxEd/D7G+j7CWM8Oome2MN27jrG29Yw2lTHZstSEX+nLh4FKaFday0Y4vRm6NuO0bTKZuYnOJWROL8KXGddeNDdqk6ottoMjqdaeCx1F0FmB172WdO9Wov0fxLn+4zD9v8A7axfOyMIxeTvSIirZM2FaUw5ADKjvQ69erfg5OEcdqzRqFQ5QqoYFR3hAwMxvZeKQmdXqOuHfBhCBRjG5YdIooU04u9Uxb0M6X1pR92h8qmEy3CLBXVOlS1eQTDC/qe2nJ0+QvfILTHa/xlR7LunORfid6rP8OZLOlXmWAxJOXTn4nctxO3NxO/Mtda00n9G5HDpWgpjemNgy11YabZPqqCDVWUqqU8DKM0I1073MaJx01xIe0VLrQ3etItOVZzQe/VXQVwI9EdLdEaY7lzLR/Tzu5Z+H+J+/O0AEjJBCgGgPakfFBVzHbHWVcFLGoTECVuMQMofpYY2N6uA8hPQgJI5A9HX86S/jTf4W0Zs/TvTqP2bq/A8x2fdJpk9vJdFdQbZD4FhrHT1FyLoK8bvKcNu3km7fQrxbEmaZ6UTaCwOAzKHVtY26z8NeZzV0VOJ1VZDpLWaqbwWT59YRv/YDMPoFSPWCmEUZJ1qVSNqYIZOoBI9vhIhhKPVByKN6L0YTQwZvw69kYs0s6Q21TQgQtSEYZrch0+ozk1EcACNYjz5jIpu5YJliYUJIYDrPmNCPcmm0Pj/JQ6a4yzSjpsCBWQKh8Uw8hFvVpPp/gWjnh4h2FpLuysE5LYZeOUdw5Btw6Xy/axm+wBEAxABOQk8CrK3IRkhbQodeJpL8xxzr/LeXmCCB07Eap6MCp7MEp6sQpyufbHcusZYc4go/ty0h2b6UlELAinLJMjGgWwYdEdxmmfMR3IEI8Z4iJrv+Jt7Fn353gMzUGnjrxlOqwOFbASTrW6TiZSFpd1UzZZ/FgEemmeeEeymo5pO83jH8dDskD8LUXnjwP+Dar+Ke/Qzprg+Sbi0j0zwfpyVCtjWC07aEbMdqMt2bSfZUkTxdjttVbBxEOiX5C3jqVuAyWiiUgrpeMV5XAU7PCtKDOUwN5jAyWMjEhe8iO/xvYbIBEhNGEIj39cyBR2Aqk5jl66GgCJk5ZPqA58OPH/Mhws42TC/zaRbjP/Ze380qHxSCT79hflzw0x3J5HvkG1ng6MZ0QmCO6TIKZHqPjtaZGipzmPacvPtnuAN/n2RrCamWZZaxThfA6aDPu/JgDmT6uLsUp6sct7PcmNq0r4Y2BWPWQnOQOd6kQMoaE8o1Gl9zKc2Knsq3CfwZ+Zptpfbc1rXQKv8kIJlSum57CX5HIZ78mI4VZsrCO6G5sgiJpgjxtgjZMxGifUsZ73wJZ+gH3h0g0haimbELNEooCbWb6Zg/kwQeBD89s/zd1uNQ7SbPTN0nHEhqP/UEJGKgTUgd1XkyVUDugXcJEi0w8jrc/E9w9V8YU8fpXkCyPUKiPUK6vYRM90bSfevJ9K3F7Q6BIXA8JWlQu1daEsA6iq0W6cjF61xKtlt+0HxSg88xPfgco70FjPZ+F6kLKox9GOIjNgYc+g1GSfhknCzxRIppVZcPlnTObi13iptnuQvSBobpW/u1AAAgAElEQVTBw0iSmFwSX+wqCqW/PhdApHmCc2bODZjecLaAIAqON5GpxwMBOl3Va6TgtUZf1T5mLC79XvYc2cu/Ded/EPrXQvcC6BDlQFe+te3nAIwQTJ7ModMFZE4X48gX7VDe3xpoXWNzAAWQpg3QpOUVa6BZ5rAm/wSMFdAus2uJpfaV0C5zW2NYZq/TVgkdG6FjA7Svx2+rxGkuI3WqhGRTEfGTinZuxu3cQra3nNRgKfGBKsb7XyN16Sdg/D+/O0A0UaS4dzijOns2VR0cDqM6Vqu8tVN2nAfEuE2Cm6S4Tdps7jhhlLhklzHZxAeu3Wdc+1QYZ1bF0hRvFnomJmCyBy7+PAy9QmZgAfFuAaSMbNdG3L5S/L58/O58mCudXgH986FP6l3Om6RMkYm6eJ0RXEXe+nLw+paT6VtJ7HQhk53rifV+DIZ+Dq5+Be6dhrG7kBizRd/MRpEJprwkDzOquWs1jPJ6JArUZ9Zx12KtoPxpaD4ZgIQH6Q91jCgAkzk3+D441jr5ykLW/Iwt56/fMBQIs3AjJNO+xWrzFVTw0wYoRvkId6lpstNv4D78Wcb7N+D0F0LfGji9Gjplii4BZXP3LA8ETC50Pz35PcvIDkQMub0r8but9raCShpB0UUBpgI6ZGYvg57n8PsieL05NkLasxB65sPpoO3VeNoxpW8x9OVCfwl+/zr8/m3Q9xp+/3dB/8dxej8DF34Xd+jLcPO/woMvkb31X0nf/EtTYhfn3nsHSAgSI/Vm/acCcyILHMFFal2p45pPuYNnViuqHcFjHOkWVQT0Xc9YWcbq0vyC4QchJli4p+IYIknqG79AamgLsf4ckt0FuKfL8HuKoEca4OkHJjxHA8Tgc/j98w3QvM5S/I4AIB3zcZXL02s1TLYzD6e7FG9gLe7AJjI9zxNt+YTdj+LqmzB2DNyztqIHWnSl1XYm99HssiugCCTCgl0LHdQoDRjdcrTtWBvynV3uZ5Yror4K+svCR9saq2qkStbY2YoQVo8BIzxvFkCM/2PGSoWwo5CagOlrZO/tYfziP+Nezxrigwtx+iK2n00EqBDkd3QrfC9aMecx8HtW4PblkO1fhNe7BL9nCfSE11WrlKcV+KdzcXsKSfWWEOtbw1R/FRP9m5ns20ayZysp0entJHu2E+/ZQaz3RaZ7X2Gq50Okzv0d0kM/gHv5x+DmL8Dd34SRL8DEV2ByP4xfIHvnEkQvgn/Njp+CTupETRTO4vU5vBUyZMTOIv09Q2FUJTQVQps4XLoqyRdIOyP9BLBAaEolhYffbyZ55WeZPqsiYItwJSVkTyoPTCZWz6q5Ue9y/L7F+P05RlPQuxxkW0ubGCqC7lXQLSCWQPdq6KrE75FEkgRbSeJUFdnOT8LQ5+D+VyDeaje1N7UxrPYI/TOjZV3XhMiduEs2ljUVFNPOFBlPJULF6KpwKVJwVaVoRCqXak2gpI8xV5Ne0JpAlN0YT6asjpMuMUVrfG2HbPfsMAWmFa71M2Qczd/ESKfDQndX7bzWw68Qv/gjTPR+gMmuzcS7V5s5oWz/KpyBHDzzzHm2j06vhNPqrzn2vcasu5xsy3YTmfT7lsPZCJyP4A3KUY4w2RkhdSFC8sJSYhe3Ebv4aaYv/Sumr/4Hpm7+B6K3fpvYtf9C4voXyd7+U3iwEyZk+nZCesguqXVGzF4hpjq+CnVoBaFK+GjORwWFJbFMirYV7D7jQSEP64c9I0AEs3BCUVPy+kFbKcIgcLbpICfG2MQCi25GIAm26T
XhTlUWETPYpEBpFqPydfhkGu/OHuKX/qlxmJNKpOyQk7jeDpak/BzJ712FGRxjaklNSzqWQKfs10roEkgEkIKABMjlMBCx1CtToxS371WcMz+Ie+XX4OHXINkO/k18Rs0KxaSXJOlmTd1ps/4o0BxZL0nW1NC1GyGEhctsETNVXFHFS81oPwqKGbAEGkmgC0ldpe8eeR2BSacKHdkkmeQkWWkJV7pNY3AP6IP4Trw7nyd1/h+QHqjC61sJfUUwUIHTU066v4jk2RxSZxeSHViO17fKAuNZwBEAhK4PQOfL+D0FeAPzcM5FSJ2PED0zj9GBFSRvvED85qeJ3/4lUvf/B9mxo/iJPvDOABfsgjd3GNwR+1wh0wcaYCaiqh0lAtktv8vUT9beQmHcyFS/j5tKxyr7E/Zx5Bsmld46yfSOf+suQi9Erf7WyIcOp1UP8jmkKozvEah4fWTUh0BkTpF5oOrrsrJkuz8we1joigZX02M4t/6EyfNVTPVG8HsXwflSAwwx+JxI4JA2kLlm7NWF0KsJSGmRSujURFJpICVzQJqrV8eEtNisrNQMb6p7Gan+IjLnN+Fc+wT+7Z/CHf7PkD4JqSugOSP5zDJJg6Wd5tlCFaruMCnlvgmfy9FPOtrfS7P2tuC2ClUn3/Jv0plmyomZIngJ3xZTNf2tPhUJWWHgylTxiEL6ISRuQ6KD2J3PE7v+A8TOvkqmrwp61tgMhe4VZl5C/ldqYBWxMwXEzq4ieXYx2QEJBYEkz7TSpHMhc43+EhhcA4MbcPrKiPYuY2KwgOjlV0ne+imI1sPEdZiSbyrhK6tDGla1q4JAhfrQc021+2wmTiodJZGeJpqaCjYLst0gDn0raQysHBf/2cqKj8xVlR41vRj25tO2spPtTHCYKhEqDdMGdm94E2pnkwFJeEJQh0gAiZJkmltEGTaAMbeoJxs5TfL6DzN1tojU4AI4vwA0WP3LoF8O91O2koS9hdBTDL35oL/NZ7n2MzMLq+/0uX7nORiYB/0LoHeFOTbVtYBk7zwy/RGcs/Pwzi3COZtHZnAj8b6Pw93fgIf/G6ZaIH0zcOS1eUvW1OD1ZDNJBejBxcxhZwVDoWohUspWfEibqD69FnBpjYpMMn1jB9lEGnWNUEmbCIFCh5q3eQDuZUi2wsguYtf+iPGhX+Th4HczMbiGeO9yXK0HklaQ+dS1yKTe+P0ryAzkkzhTSvxsMalzS3DOLLL9HfbXXNv+JXjdETi3DC5WkT2zgdHTVYye+QjZe78M6Xq7kWKgIhXpVoFvdVnMc4m5jonAKZ9Q2M9mfLPSMZNNkHaiyHQN657ZfrLWibpodldbGaUIn0o8CYA2amgrKz4DQGzKxONMbwAgtRXSrJvRoiO7tkKFEe6ZTTWN3hNIBKYA4eoPYVll/RNmB4yslYKxEbMpZOrazzBxMY/xM2LK56B/SQCOubSLoE+OpsKYMtnKLEgGnoPBSHBtgUHREIFQQFF0ZAX+wDK8C4twLi4ge24h7uCSQJLmQW8xfs8asn1byJ79CM7lz+Ld/XUY/7pNy3HugqJ2GqnZYi3U7eFnihQnbAVDL+0bU0m5X9o2IasJSwFJnR5eR+CwHWjb2ChM9cCD1+H2b5C69hmmL36IyaF1TA+VkDxbQuZMAU7vIrPKLq3VmFps1rsAzhZBr8zHMpz+YgMUZyAXd2AZDEg4LXjUP6Zv1D9PQQOLyZ6O4A5GyAxFSFwoIXrxk2Tu/DokjoP/wFTz1opeJ4UpF6qa0Yp6ahMgU+ZUyHgrxxsmnFENto8MCtRZYehbroFsrVC6BBZN+L2ZdFXp0W8FQGZpCv1+KOGsWpR61IYuD5BDZMwsHSB//y1jLR4xm904U2aDeFIqqHoPRvYyeePT3DuziszZlTCwdI60BAYWghzEnnLoqYRe2d5LrcMop3FwCd5gLt5AkSEGVsLgUhhcjndmCdmhRTiXFsOlfLhQCufLYKDISGLNEDunl5DoXs5EVyljp19kcvAfkrr6y3D/yza12u8B9xw4lyGj4toPQflRiRTE3EfgERNkpU60h7N8Ny17jEImjukXTS5pdZ9209I+h8mzkFQm9p/DzX9n0vhj/a8x1VNMtH8B6XMRGFK4VH2QB2dXwJnnyPZFSPdE8Pol2eXbCSRltu0rAFH/SnMsZ2z/mD5RvzwtDS6BcxHivRHGTkeIDr0AD38dsifBVdjcagWlnaeUca6ijYGbq02BTJhTfaF+MTRr7/JQwDwGFplockQUPAoA8tj3gVkq4ATHRd5G/s9i7/DsJ7UhGnU3eh8au0Jq8LL6y3L/W9+H0k+XD7SIjb9oNyYb01fJRy+TxpeZoH23tVdf7AqJe/uYvvxLJBXbVkfPhfqXW80gp3Qgx4R8zaCfmR8AYyXeQB7uQBHuQImlwQLcMyvxzi7EPbuQZJ9MkGI4VwEXVsOFcjhTjNujXKXlMFiC119CuqeIeHehSWNI9pfinFtL8uJ2Jm/8LcZv/xDT93+S9Oj/B9NfhMROSJ+ATD8412xCaHYEU9hPrcwlkXcrAFcbJPeTnfxjEg9+hanhzzJ57buZurKVyaFyEufzyJ5bhX9mFQzkQ48icuUm58wsk+4shDOFcGkV/pUcspeso5wxgQgFLpbbYEWnzivARPsG50PQTwwoaDEHOpODfz5CvC+HidOvkLn4CzDRaKteah2OapYZ5ah6AFpeoK0lFPtXHyjJUoEGmUWWFMOTESVBrKkEV6onVABv5T39Lf4LNUjY6jOT8mN5O9Aguo2QW5+21U1Ir4tC2M4Cy+z8ovAm1M7GXPi5btpcR4FRXctmz5pGE4jZuO21dJLM5D0y9w8x1vd9JAfXkRkswT2z3A7aGZlHOVYTDIpJl+KdEc0C0oDMseXQK6koiRh5pDXOKtS43IJDgAgoa9o88zvuufkGIJwXcFS1cRVRrSfpzCGl+ZUzBTBURqYnD29A70vgSjFcWW4kt8yK6b4Ik5dzGL2Sz/iVtUxce4mpqx8nevUHiV/5KWKXfxnvzn/Bu/0l3Nt/gnvrT8ne/jMyt//MhDWzw3/M9LXfYPL6zzNx7UcYu/JJRi5t4eHFlYwMqdxAhNGBCNGBiIkOcUFh1FwYkLbcAF3roXuNiQhmOpeQ6FtI9sIS3GuL8a9H4FLE9ouWLGi+qUuRw1zokwZdaISE6degj42Qmq3N1e/qx8GVVgsP6rdX2n7vk+ZaTrwvzwQI0hc/B7e/DmMXIWHNJpM5bgAi01xb1N0K9pdX1oXdnFOTnOH+KWFrLRWBJvSPQ/4OzKgQLGL1t5LhQZkzOkfFq81LR+k1l1bnhNz+1l8L7CZdOryp2a0On/23uQd7YzN3Hn4fPokBXPgAw8Qe/jfunP8h7g++SOJSMdmhYGCv5sKFIvzBfOM0Z8+tIHt+Kd65pXBGJlng0A/KtJBJtQDOzTetf24R/tml+GeXPUbezN9Lsccstt+fWYEvmvl+1nmPfW7Ps781D//8PJyh+WSHFpAdWkxmaBmZ83lkzhaTOVNBZnAdqcGNpAY3kxrcSmpgu6HkYNhuJ
XlmM/Gz60meW0PyfCmpoVVkhhaTvTCf7IUIzoUI3lDESGr7fIvs8+q5B6VRVoHpg2Wmb7xzi3HOL8I7vwDf9IeOXwyDmlQNyPTXYnO8NJP61j+72PbhwCIQnVEfLsYZWk7qfB6pc6U4Z8vgrLSVwsjSTCXcaf0oYxd/Bab2WG2onXy1D6MsoWCLd+tiaApAQQlpEjnRllcfTarKyVbQSK2lGR6a4WvDYI/+C3krbB99M4OcZ5wHeeyK78Mfk2TSzTy8818YufpZoldfJHrWJpxxYQWcL8Y7U0j2bKFhnNTQMpzzGkgNYuC3nBWTv090TqCcZ+xw2eJzITGxf07MPFeyjDyXPpDAyJ7LI3s23wgII2gGF4EijGd1P4vInFtC4twK4mfzyZ4rsZpUWqx/vqm1Nn7mXzI9/HVIdmH3qlG6keVPhaslLi1ANBckTaKoXbBNYCBfv5WM920OEEmSGzjJg2Qe/g7pW59lerCS6e75NtJyvgD3bBHOuRLSQ/lkLyzHHVpow8NnckB0LgfOv1+ke3nOzB7PrdW5CnfrOnOlZ3j2czJp7YSiWbszKA2x/LE+VUg4O5hDdnAhvo6/WAgX8412muqvxB/5HfyYIlZK9VAoOmXTxGUkBJFNa5/IDxE47D6Nssol+L/Vr29zgMjXUSRMdmsjjH2BzOVPm5VhSUWmhlbinCsykit7vojs0ErcC2IoSevn4OxCOCfGegYmeaZz9du6HzH6XCiYC5ozOJ7x2c/Jj5A2CEAyWIF7Lh/3/DLcoRz8oQX4Z+bBWaslvbPPkT6zgtT5CjJXXiB54zPgVQPnbA6UP24mVJ1k1gaRAh9aGiPc0MZuSRGA4/8HhHybA0TLSpQHFofMXRvWfPgfyVz4W8QHC3AuLsW5UIBzvtSQO7QKLs7DOqvz4Px8OL8ILryPJI32rHRhIcyZnuHZBUwBQNGsc4vh/AqcoWIyFwpxLy6Ciza3yrSXrB80PbiUiaEPkb3972HyEDAUVBEZA2cSJxkjGU+YZd2O6xt/QuB45HAHfu3b+g3ffH3ybQ0Q9VEyyBq3AbQHkKiD4Z8mfqGM9JX5ZC6uMoMmkLhDeXbQNHBy5gUQMef7CpBFAUDm2M4ZGCGongEgQ4H/pPki9eeF+TgXVpG5mGcBoiiY+vpqBK5F8C+tJHbhg8Su/zKMd9mJUi3ZVl6eF8XNREmnEqb2bzqbQbvnWsfbAsR6JIFtFcaDvsVa5NseIJkw3V7uiJYxpgfgweeJXqkgcT1C5tJynKFC3POleOdnA2TeI8n9zEwWMtvTtrOYc2ixBerTtu/bvetZA2AYcFgweJciuJfmmwwDo60FjqsREzb2br8Gd34fJs7a1BpZyCYY5Zg9610nY0ubellcX4ZVOF0QtoH20DlyQtT+NUCerDbVN4pwKK0lE7VFOUwh5YdfYPL6ahK3IqQvLzKawztXDAKIBtVoEJk2YsqnZepv9vHSHIvnRkbzfbPv52muJ60R9Kf69DGah3dpHt61CPGLS5m8uJXM8Ocg3mUHLVwVocltOeOhoDM8H4ZsBQyFtNTOWkH51wB5MihmfxMCRGsjTDxQc5XOBKS/xtjtSkavRshcXYB3cSWcL7STZBpEqf4LS/AvrMC/mIN/acH7RIvs719cgj8n0r0vep/uXX22CG4sw72cg2f8uQVwcQlcXoh/KULsfITJC0VMX/t7ZLQvYKzT1uCSz+hlTI0CZTYrSjV7rtgu5HoLOKQuNOAijXdIsxniW/D+29zEUjzcIe27NpVewsZVbtJeRu+s5+H1COnr8+CSIlrSHsvh0jy4PM/Yw97lVfiXF5sB1aC+L3Rpsb2HS0ufukU5YJdz3p/7vrwQ70oO8cs5pK6thGv5cKUALmqCdjnOhRyiQ2vI3v8XpB9+hezEOUgZO9gWkvAn8bLKywusgEDGWQyEk8WB5ngrOEKQqP0Wv77NAaKYuGZYlbvl4GeVyKe850OM3N3G6M0QIDKllsIFMVMErszDubyK7JU8vCuL8a/Mf59oocl9EqMJqE/bKm/Kv6JrPAvN/dmz1+YzdT1CbHgp7s0KuFJhtfKFUvwrH8e9qYrpbZAew/fs/IWiUWaxljuFrw2LvgEgdgnFjGllEfNIY4RFML7FwAgv/20NEClnVTPMKn6ueirelF0qljnF6L0PMHYrggaRSznW11CI93IE/6qK2q8kbQAiJps7kzzrud5VK4kFjqemq88CDJ37bM+tvo3fWcz0neVMX80jeqGY5MX1cOP7YOSLEDsNWWUdWjNK/qIAYUBiFnBoCfAjDTLznQ1JfqPfEVhZOk6LpRT+tcgJ2fmb336bA8TD99K4qupoikQoy1MiqZ+Je59i4lYOzjXZxEpHj1jtEURU0tdXkL6+yiTmce053he6ugD/2iLcOZLO5apowbPRHJ9ffZu6XcbEjXxGrkcYv1lK+s6/gIk3ID5uI1WBby1RpjWAqqWmajZmnLQc4zGAyDmXnSxnUl58wP86XuCYmVlXzq6WI2vlvQb8W/f69gaIOtBRRpvWWA/bNdYSKpnrTN39DNO3i3CursLY6gLIFRuPd29ESN1YQuLmCsOc7ws4rj2Hb2gB7vUFFiRzaN8vcKjP3KsrSF5ea9Lqo3eex5n6p5B5E3zt92gDUI7WcHguGT9lSNEqJSEaVaLIVYAVtdb8CjPDNQEcHKcvtRRmJjdLK/VjM6b1tw4ez1zV5Ft5a+/h2gKIBI4q0JlU6LvBwDwgevcnid3egHe1CC4vseBQTP5GBO9WhMRwDonbS3FvLAQ58u8LzYfr8/FuiBbOoV0I156Rruse5vj8V3Nxr+yAG/8ERv4Akk1mrYakuoYl5vlmWWzKyZr14mZfu0BDSFPITJL8F+m91gLNFMkzi5ZmAWSWtlFJpYTZy16ZvZKI37rXt78Gyfi2IIJZTKNUaYFmgqm7v8T07VfIXiuxTBTM5gog7q0IyeH5hrybFjT6/BGJYcQ4Ak9Ilpm5MW/WcW85R98F5z1i+Hl4TzxH54fnBNc357/H999wXd3bbGafHwBoCVxdakkmp55p9rla+xHchwVreB2dn2OvoWMM5cD1HNRv2WtFMPxD8FD+Rp9d3aiSs6aMaYqEaoR4abP2/tEiJIFAzrnW1atVnlWoPULzSu1jDsfbahBpkb82sd5ROEj2hLULg8VVEiiph7jRL5Gd+H4SwyW4dyJwOwK3xAg5cHMZ7vB8nLsR0Hez6XYE//ZC/FvLcG8tnyH97d/KgdvPwXDE0h0da8m9GcG58RzpazmkruaQuLqI+JUFZG8vIDs8D2c4gnMngjdsSeeZ+7n5HNycPze6FcG/E8G9Z1u9N/cWXts8rwCRC0ZQlMD1fNBz6BkeO34+7u0cnGHRQtzh5+zz3iiAaysMOPxrEdwruTg38snciTA9vB7YbXaHxZ0GN2N3AJgpMiGtELzMG9lMoc7Qd/Z7e0w4saHv9X7Wef+nvXMNte266vg65+xzzj7Pe+7Nuff03pt3G1objG1ME7W9IpVQCTVSqpZUKkZMiSik2pp+CKgfzAcpVBFbNdoi
FB/UQCBSIaCxRaGlEUKj1UaTe85+v9+P9V4/+c+51r773ua2+ZDmZF+yYbDW2WudNedce/zHGHOOMcfQDenqlT21pSNMVpP5e7P/eQ2Pi61BjLpo2fyFenN6twKI1yAYfgGv+wCj8gFBykAGJIe7cLhHUlw2zDVj9pTpzffFVcssxQ3C4gaRoXUiAUf/V7IMaRhMIBOVVgxDhUeb+IfbeEebuEdr+KVlw0xBeQ4gApWYV9pLx1ekZSjM0woUrqDisgG/gB6lNGN68/xlOBRAduHidRYcR1skxRU79jmA6DuBIyht4JUFEgsYLp6Bl09YgBw6hC/t4R+ewa+t06v+KPCNWVraLD3t/PE15NVjedSCA0RBbG0SJfkUQLIZn9cmGn+JsP8rDCoHeLWUoSVZ5wEiUBxtw+EpC5qjbZLChmEgaZ2M6SSZDSikPTIm1bmeV1mGSg7Ka1DKQ3ELSlvEpTxROWe0lJ5zuXRfsYx7tGo1wByDZ22a9tUHUUm0bLSeQJqRwBsW9ggLu/Z6phHNuASOdduOjuqrKAW3GY/uS7/XM/W8oLx+OUAO94y3XCBOjhyiQwmNMwT1c4waHzTlyuYBceX5sXD1a9jowgNEkaAJw0srI9IickxNvkwyfJhh5Rx+PZXyxSU4OgFHJ43Ej8s5OHwLXLzJHgsnoZg31y4zu8pLICqtXKJiDor6O2OyHJRWobgOZdEqSWWZuOIQV+fMOPWhoBCNbShsEpdXCGvWTAqrVx6XiCpLhFUdVwirK5cdo3KeqLBvSIBUW7N+q39qR6Q2pS10vWr7Y8ZeEqgtaOLSGmF5E7+Sx6+uEVbWDMiTw004WjX3xUUHX1TZJ27eRdB+xGQ3VMK7q9FryKvH8qhrACADU/4sVLmRTIOoypVqjkw/ybBy/rsBUjhBXF5CTBEfnic+vIm4cNL8PQNCYQcO9y1wdCxsg5hODJWZJjqXmXSZqSTQrBmQJBULkkQAEXOKStI8q1DYgKNdKJwkKl5HXLiOqHiSuHiCqLRDLO90OY8YWeBQfzXXENiiWnrUuUBS2jb3azy2jysWFCnzZyDIwGPGXs4ZcGZjEcAMQKpr+FVpPgFkjeRIgmDJ9F3j8CoOXv0MtD8Cg88DxauCQ6BZ9M+CA0TqYmQAIoetqvVaM0t5tP4VvN9nVLmZoGElp2XOXSjuoh87ruaIi6dJCgck5S0SmUoymaQBCqfg6ACOzkJhH4oblrkFDgMQSdVV4osOiZaOj7R8rIntBklph6SyTVzbJK6vkEiDZSSwGI0iIOVtO4XTcHTatqN2i3tQ3IGS2pQ2kgknU+6SFphpA2mfWo64sk5S3ki119y92f9Iu5W27DjLW8QpeM1zypqPbRFVdghqqwS1HFFV4F414zL3qJ26w7Tu4LbOQ+dRGP9zmsl/0WFw9f4vPEC0dq4sF0EYm4TEBiDBANxvgv8ZhtVb8RsOSU2MLem9C5LQ6Q8e1WQGrVqGETBKYsydOQYVYFKmFrPpXPMMAefoPMHhJkEhT1DcJSjvE1UPiOpvIWyeIWrv4TVX8VoOQdtS3HKImw5JMwWNeZ6kdEoFmWnSMK90FHA1n0iPpRxx3TECQMxrxqL5lsYqyjSXQCYAHp0nKZwnKe4bQOkdWNBdDSA5Qk329ZyWQ9R0GDUdJu3boPtn4B7aBGxX56+Fv7LgALGhCcqDpGTP0iIGICai97/A/3OGldssQIzUFhMKAFsWIA2HSFRPtUl5awYgM58QIPR/lzGbJvGnSApniUoHTKs7TGu7TGvXM629E7f2Ptz6B/DqP4vfuJ9h6f0My+9lXHkX49oPMandilu/Aa9+lrB+ykr8kky3V0tWc2VzHb+2wbSxjltfNeOUttSYYgkFM/fSsvSOAXRydKMBiLRmXNkgrl7SNJdpkPqy1SBVh1CTeD2n5eA3Hfoth3H73baisetjizIuPA6uOoCFB4h2nYmU7yJK4nQeotpuSjL2JOPq7YQtB2RmVVahvA2VzRlAdM1I9NoqSXUTypv2PgFDjNFO/7eYJzw8QVg4RVw5CXrZgg4AAAtoSURBVO1NktEq3bbDoJ/HG78L/Acg/AxET0P0gu1DUAXvJZh+E8ZfgcET0H2cpPVJks4DJK0fgc710DpH3NwnrO/h17bwqmu41WW8umx+B6/x3TSt5Um6dzOu30W3dJpBZQWv48DQgZEDfS3drpKUdqFyGqpnoLZPXN4lKGziyjko8NeXobpDUjtB1FgjauZIGjk7fr23ngNdh2HNodtycMf3wvg/bKzVnMviqly2wBcWHCByKcndZGM7zTZNpThX2Luv3K1fYVK7IwWImECTZwEkTyIp23TMtVAmT32JpLYC1ZRpxDiipv7OQ/mAuHQ9fuU00/oWk7ZDv+vQ7p+mN76bIHwQ+CPgqxBXLfMoM2ZGZiuEijMewvQ5m5zZ+xuYPgbex8F9CNwHYfIxkvFHiEcfJhrdz7DxfobNCwxb72XY+rHLqf4zMHiUuPfbBIMP4vVvZ9o9ybTtGLNO4Pc1udcSdG0DGinVNohKm4SFHMjUayxBfZu4sUvcXCdurUBTpPELaBlAlui0tnHHD8BEmeJTjb3AAPh+XV98gJgANoFEOkSe3DSpq+KzkmcZ1d5NIKmqH7y6DtUtqK0bE0TAkLQUGUbINI00R00T9h2oHED5RpLqzfiNc0xaJ+h3lul2HarNk/T6v8Zw9DmI/xH4lg2YNHn6bbIVw0RZaQOFsmq/ihIuKwI5Ua5ZBVkeXkEXbTkwlQTzXgTvf9KjzkWqN/J/4L4I4+fBfxaSv4LwEdz+Bbr1M3SqDoOqw7TmmFU8CQOjDVtL0MpBbQ2qcyCob84BRPdYs8ocew5xZ4lBbYtu++1Mp58CL417e1ODfD+MHd/1zDeo1V3l3FNu1kRhpAohVV3j+BvG/Ai7S9CUFM1DbRsa6ySadIrqW0SSrGIgMYWOAkhVK0L7UD5PVLkBt3aGcWufQfeAweAWBoMfptv7RfzpU/iT75AombRSYipUNTH1XAh8peyPbck1PyJRhgl5+udJ4eAaQLZMHdnVOFVDMqtyuq65laK/rzjOIgdUU8/klnoK3MeYdO6jU7uJTjXHpOnga5wyFWe0DNIS+jsDQiNP3Ngmaq0TtZcu3avrPYewk6PfOE2v89P43mchTOv4Lf5K7vdk4IXWIBlAxDeKH1X5MlOlT3HRRv0/z6R+D1FvGdrr1sSo70Azbxggbmsyu2fImFIZQBrSNntQvc7Y5V4jz6jt0Ovs0u/dyWT0q0STJ0imXzf17xJhMiviog2NgUK8lSAwxo083GiKH3oEQWTvU6oi5eFWOq8AVB5bKf7NMbT15ZXuX3Xm9RyVZ1ZMplFEc0cpJBNBYMKYhKgaRP8O7ufwex9l1Lwdt3OCsOsQSYvOAJKedx3Ivm+tkbS2iNt54s6y/V7XDUBWCDp5hs23Mug+SBT+A9A3+zNM+9+TxRb74jUEEC33XgmQbzNt/ARxfwU6eWhuQmPbAqTjkHQEkG2o71oTbAYQzTs2jV0uDeO39ph0rmc6uIA3fgS8v4e
gOB93Zwq8yKpTni5PlY5izYtk9qkYp6JOxzY1v8K2Vc5R6wjTtE55Visv3RCUKhS7IGeUi4LzsgC9YC7LoA0X97VXQgGwZhVPCRFeAO8JGD5E3LsLBmegl7NMn40xM7kEGoGklSNpbbwyQDrrBJ0NRq07mPQfI4m+ZnZwqp/XuALJsrsvJsqNBpltotEOs4HdU6BfTltEoheZNt9HPJREzENrywKktW5XZnoOSVvmRg5km18pYVs5osZJovZ7oKeUNV8A7zkIW2memjTwVB2RRSdGjxLCWKtq6o9KgGXFgurEtEzcWKLiLaaSZEzsTYi9MYk/MWSrsGpvvW/J7ItIH64G5klGZTRmjGtSH6kP1nyTSvoOeE9C52EY/DgMdu2YBRBjQq5BffWSpmivQnuDpLNB0l0xq1ZaudJ7STqb+J0dxq27mfY/C8l/ktAzuzmUSf1a/iy+BplJ3WzDjSoHmdJUEB/Rb96HPxRzbEB7DVoyr1ZIeg7RwCHpWk0ibWKo6xB3HWNSuK0bGFcvEDQ/AcOnIaxdmj+oqJFKHqkSkeFKa+jZdTUFUSqZRB8vruFTMRSmZecS7Z8XghPXFoFRIRjZW7Fr615okm9SqgagIhmKo8lIdpUSganZOGIYdEyZumy/t9EiMi81B4vK0Pw76PwOcfcCQec0QTN1LMr7LoAIBKIUIFq+pnNJgFjBsYHbyzPs3sN08NeQaGGhk9by07iv3c9CA0RAMOE+EmLSGjqa8lppYQma+NGniZO7jEc7C/kwXmzXIZSvYOLA2B4j18FzHSbuHsPBXXRbvwxodep/LcOKKWecqMYyu0bnGYlZdG73O1hO1n0Z6SGXwDQ7NzPwbD/qFUcB/pXIlAHQ8oTmX5bsttW0CfVV5L4Mk7/EG/wS7vBtuON1omkOvHW8wSphb90KjqY07J4xtYKuJubWl8JwnUnPYTB8D/CMWYyIgxrE3VQ4vAmQN+wbmAEkE+RGoqcOQwZMkj8k4l6C3qaVlNmk1F3DFUCCJZg6uAP5Ndbo925hNLmPIPhd4lCZxwvWrMmePzO89YVE9fFJUJk3mulYcNgkBjo3XbQYtQARSLxvEbt/QeB+FHf6TsajLYajJcbjNYLhBnRWQaZnW9p225il8UDaJQ+DTaaDJUbjnySOv27BqoDQWJrw+Mb/ejDlYmuQbJIoZsgY+DKAjHD5Y+B+kuGe9TDL6dVeIh6fYNhdI/KW8CcO49Eqo94dTFufhsHTELycMoCYQMtOWWNiCGkDgUNHXTiejwWIACESWNJIm6yrpm/qZ5r1JX4egi8RTn7DLFO3uuuM/FWCqQMCgzzm/TUQYCZ5GG9D6xR0zuIODphMfoFQBUfN+5apl6YsOZ7hvy6tLjRAZHWIKYz5MbNabLoY41tgyjT+PAk/TzLcTwGi5cwNgv4NdJtnGfZPMRnfSOz/FISPgvcv4CoaODVPDP8LfQKD3OFyjeuo744PHJY7sjiCOXCkczKZWppIJ5RT56Wck/JdfBvcv8V1H2Ls3s3QO4fnrs8BxCEZrRBNNmC8B63T0LqFYHAn/vS3CIKL1m+TCST9CNfw55oAiPmNMvEp71qWewmPSfBF4vhjBL2zYPwh20T1A/zunXRq9zDs30sw+TgEXwSesxNQ1S9XbXIpjxkOBAyFr6RfqtHjxkfaB5mZOrUaRWDRUvDYACSmSmKqwk7AV7HxLgT/DclTwOOMph/And5KMsgZEzToOfgDB3+cIx5uQ/skNG8j6X+I2PtT/KBpMi3JspL/xzT8JkDemG9ATJFVHJrZF2ZZNPVOyzoPv0wS/Tpe+3oSrWI194kb74DRzzHuChh/AsE/QfCSRYRQEcuZEZo0v+a5BggyVQQO1dhOwaHvdX5cH7UtzWnUqLqiRGpKhdMzWUOSNHtIGA8J/AmhOzXLyoQaR9XsJ/emjxO5H4bRLdBfwu87TAcOk5FDMNIqV564+TYYPAzBk3hB2zgtzWt+EyDH9cu/unYlMTVFNdktLFpsPIYWgQzTyAR5BpJH8btvNev5tG+G3gWIPwHTL0DUtNaTvG3hBCLtb1fohkgJ6dKJuJ4vEijm6dV19Qdzl/ohJpWWM1pEDskyCSUSswwbGLdJpKGFIUE0IIx7JhvlDPj+C2ZbANMPwehGgvEy3sRhqlU+1yHpO8Stm2D0KYifwY2qxtgMtYqctvuDGdwb46kLbmLNAyQtMGEcaRlA9JL/Dfg9wt7boasVmnfA+D7gD4BnLf9rSiFGk3POVDxSphTFVknKyqxKJ+kCXSaxj1t7aGjqg6ZGM0kuR+nLJLyUAiSe1d2QqIgZEtEhSnzj+Y+MM1UhKl8F/zfBvYPE3cL3HfzQIQwdkoEins/B+DHga0ySkpmeyRUj18yxatDXAUP/Dzulf7mPSc8LAAAAAElFTkSuQmCC -``` - -### `annotation` - -A Kubernetes annotation for the NavLink custom resource. - -### `label` - -A Kubernetes label for the NavLink custom resource. - -### `sideLabel` - -Label that appears in the left navigation bar - -### `target` - -Sets the target property of the link's anchor tag (``), which (depending on browsers) determines if it opens in a new window or in an existing tab. - -The default value is `_self`, which opens the link on the current tab. To open the link in a new window or tab, set the target to `_blank`. 
- -For more information about the target property, see [this page.](https://www.w3schools.com/tags/att_a_target.asp) - -### `toService` - -Has five fields that are constructed to create a URL like the following: `https:///k8s/clusters//k8s/namespace//service/::/proxy/` - -For example, a link to a monitoring service can be set up as follows: - -- name: `rancher-monitoring-grafana` -- namespace: `cattle-monitoring-system` -- path: `proxy/?orgId=1` -- port: `"80"` -- scheme: `http` - -It is required to provide either the `toService` directive or the `toURL` directive. - -### `toUrl` - -Can be any link, even to links outside of the cluster. - -It is required to provide either the `toService` directive or the `toURL` directive. - -# Link Examples - -### Example of Link with `toUrl` - -This example NavLink YAML shows an example of configuring a NavLink to a Grafana dashboard: - -```yaml -apiVersion: ui.cattle.io/v1 -kind: NavLink -metadata: - name: grafana -spec: - group: "Monitoring Dashboards" - toURL: https:///api/v1/namespaces/cattle-monitoring-system/services/http:rancher-monitoring-grafana:80/proxy/?orgId=1 -``` - -Adding the above YAML results in a link to Grafana being created, as shown in the following screenshot: - -![Screenshot of Grafana Link]({{< baseurl >}}/img/rancher/example-grafana-link.png) - -### Example of Link with `toService` - -This example YAML shows an example of `toService` used for the link target: - -```yaml -apiVersion: ui.cattle.io/v1 -kind: NavLink -metadata: - annotations: - key: annotation - labels: - key: label - name: navlinkname -spec: - description: This is a description field # Optional. - group: "group1" # Optional. If not provided, the links appear standalone. - iconSrc: data:image/jpeg;base64,[icon source string is clipped for brevity] - label: This is a label # Optional. - sideLabel: A side label. # Optional. - target: _blank #Optional. _blank opens the link in a new tab or window. - toService: # toService or #toUrl needs to be provided. - name: rancher-monitoring-grafana - namespace: cattle-monitoring-system - path: proxy/?orgId=1 - port: "80" - scheme: http -``` - -Adding the `toService` parameters above results in a link to Grafana being created, as shown in the following screenshot: - -![Screenshot of Grafana Link]({{< baseurl >}}/img/rancher/example-service-link.png) - diff --git a/content/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/_index.md b/content/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/_index.md deleted file mode 100644 index c2767b8a66..0000000000 --- a/content/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/_index.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: Cluster and Project Roles -weight: 1127 ---- - -Cluster and project roles define user authorization inside a cluster or project. - -To manage these roles, - -1. Click **☰ > Users & Authentication**. -1. In the left navigation bar, click **Roles** and go to the **Cluster** or **Project/Namespaces** tab. - -### Membership and Role Assignment - -The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. - -When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. 
Users assigned the `Owner` role can assign other users roles in the cluster or project. - -> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. - -### Cluster Roles - -_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. - -- **Cluster Owner:** - - These users have full control over the cluster and all resources in it. - -- **Cluster Member:** - - These users can view most cluster level resources and create new projects. - -#### Custom Cluster Roles - -Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. - -#### Cluster Role Reference - -The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. - -| Built-in Cluster Role | Owner | Member | -| ---------------------------------- | ------------- | --------------------------------- | -| Create Projects | ✓ | ✓ | -| Manage Cluster Backups             | ✓ | | -| Manage Cluster Catalogs | ✓ | | -| Manage Cluster Members | ✓ | | -| Manage Nodes [(see table below)](#Manage-Nodes-Permissions)| ✓ | | -| Manage Storage | ✓ | | -| View All Projects | ✓ | | -| View Cluster Catalogs | ✓ | ✓ | -| View Cluster Members | ✓ | ✓ | -| View Nodes | ✓ | ✓ | - -#### Manage Nodes Permissions - -The following table lists the permissions available for the `Manage Nodes` role in RKE and RKE2. - -| Manage Nodes Permissions | RKE | RKE2 | -|-----------------------------|-------- |--------- | -| SSH Access | ✓ | ✓ | -| Delete Nodes | ✓ | ✓ | -| Scale Clusters Up and Down | ✓ | * | -***In RKE2, you must have permission to edit a cluster to be able to scale clusters up and down.** -
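As a command-line alternative to the UI lookup described next, the built-in cluster and project roles are backed by RoleTemplate custom resources in the local (Rancher management) cluster, so they can also be inspected with kubectl. The sketch below assumes access to the local cluster, and the RoleTemplate name used for the Manage Nodes role (`nodes-manage`) is an assumption, so confirm it from the list output first.

```
# List the built-in Rancher roles, which are stored as RoleTemplate resources
# in the local (Rancher management) cluster:
kubectl get roletemplates.management.cattle.io

# Inspect the Kubernetes rules granted by one role, for example Manage Nodes.
# The name "nodes-manage" is assumed here; verify it in the list output above.
kubectl get roletemplates.management.cattle.io nodes-manage -o yaml
```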
- -For details on how each cluster role can access Kubernetes resources, you can look them up in the Rancher UI: - -1. In the upper left corner, click **☰ > Users & Authentication**. -1. In the left navigation bar, click **Roles**. -1. Click the **Cluster** tab. -1. Click the name of an individual role. The table shows all of the operations and resources that are permitted by the role. - -> **Note:** ->When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. - -### Giving a Custom Cluster Role to a Cluster Member - -After an administrator [sets up a custom cluster role,]({{}}/rancher/v2.6/en/admin-settings/rbac/default-custom-roles/) cluster owners and admins can then assign those roles to cluster members. - -To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. - -To assign the role to a new cluster member, - -{{% tabs %}} -{{% tab "Rancher before v2.6.4" %}} -1. Click **☰ > Cluster Management**. -1. Go to the cluster where you want to assign a role to a member and click **Explore**. -1. Click **RBAC > Cluster Members**. -1. Click **Add**. -1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create**. -{{% /tab %}} -{{% tab "Rancher v2.6.4+" %}} -1. Click **☰ > Cluster Management**. -1. Go to the cluster where you want to assign a role to a member and click **Explore**. -1. Click **Cluster > Cluster Members**. -1. Click **Add**. -1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. -1. Click **Create**. -{{% /tab %}} -{{% /tabs %}} - -**Result:** The member has the assigned role. - -To assign any custom role to an existing cluster member, - -1. Click **☰ > Users & Authentication**. -1. Go to the member you want to give the role to. Click the **⋮ > Edit Config**. -1. If you have added custom roles, they will show in the **Custom** section. Choose the role you want to assign to the member. -1. Click **Save**. - -**Result:** The member has the assigned role. - -### Project Roles - -_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. - -- **Project Owner:** - - These users have full control over the project and all resources in it. - -- **Project Member:** - - These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. - - >**Note:** - > - >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. - -- **Read Only:** - - These users can view everything in the project but cannot create, update, or delete anything. - - >**Caveat:** - > - >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. 
However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. - -#### Custom Project Roles - -Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. - -#### Project Role Reference - -The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. - -| Built-in Project Role | Owner | Member | Read Only | -| ---------------------------------- | ------------- | ----------------------------- | ------------- | -| Manage Project Members | ✓ | | | -| Create Namespaces | ✓ | ✓ | | -| Manage Config Maps | ✓ | ✓ | | -| Manage Ingress | ✓ | ✓ | | -| Manage Project Catalogs | ✓ | | | -| Manage Secrets | ✓ | ✓ | | -| Manage Service Accounts | ✓ | ✓ | | -| Manage Services | ✓ | ✓ | | -| Manage Volumes | ✓ | ✓ | | -| Manage Workloads | ✓ | ✓ | | -| View Secrets | ✓ | ✓ | | -| View Config Maps | ✓ | ✓ | ✓ | -| View Ingress | ✓ | ✓ | ✓ | -| View Project Members | ✓ | ✓ | ✓ | -| View Project Catalogs | ✓ | ✓ | ✓ | -| View Service Accounts | ✓ | ✓ | ✓ | -| View Services | ✓ | ✓ | ✓ | -| View Volumes | ✓ | ✓ | ✓ | -| View Workloads | ✓ | ✓ | ✓ | - -> **Notes:** -> ->- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. ->- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. ->- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. - -### Defining Custom Roles -As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. - -When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. - -### Default Cluster and Project Roles - -By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). 
However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. - -There are two methods for changing default cluster/project roles: - -- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.6/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. - -- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. - - For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). - ->**Note:** -> ->- Although you can [lock]({{}}/rancher/v2.6/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. ->- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. - -### Configuring Default Roles for Cluster and Project Creators - -You can change the cluster or project role(s) that are automatically assigned to the creating user. - -1. In the upper left corner, click **☰ > Users & Authentication**. -1. In the left navigation bar, click **Roles**. -1. Click the **Cluster** or **Project/Namespaces** tab. -1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit Config**. -1. In the **Cluster Creator Default** or **Project Creator Default** section, enable the role as the default. -1. Click **Save**. - -**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. - -If you want to remove a default role, edit the permission and select **No** from the default roles option. - -### Cluster Membership Revocation Behavior - -When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: - -- Access the projects they hold membership in. -- Exercise any [individual project roles](#project-role-reference) they are assigned. - -If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/content/rancher/v2.6/en/api/_index.md b/content/rancher/v2.6/en/api/_index.md deleted file mode 100644 index d1cc9cc445..0000000000 --- a/content/rancher/v2.6/en/api/_index.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: API -weight: 24 ---- - -## How to use the API - -The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: - -{{% tabs %}} -{{% tab "Rancher v2.6.4+" %}} - -1. Click on your user avatar in the upper right corner. -1. Click **Account & API Keys**. -1. Under the **API Keys** section, find the **API Endpoint** field and click the link. 
The link will look something like `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. - -{{% /tab %}} -{{% tab "Rancher before v2.6.4" %}} - -Go to the URL endpoint at `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. - -{{% /tab %}} -{{% /tabs %}} - -## Authentication - -API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.6/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. - -By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.6/en/api/api-tokens). - -## Making requests - -The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). - -- Every type has a Schema which describes: - - The URL to get to the collection of this type of resources - - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. - - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). - - Every field that filtering is allowed on - - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. - - -- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. - -- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. - -- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. - -- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. - -- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. 
Unknown fields and ones that are not editable are ignored. - -- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. - -- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). - -## Filtering - -Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. - -## Sorting - -Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. - -## Pagination - -API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. - -## Capturing Rancher API Calls - -You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster: - -1. In the Rancher UI, go to **Cluster Management** and click **Create.** -1. Click one of the cluster types. This example uses Digital Ocean. -1. Fill out the form with a cluster name and node template, but don't click **Create**. -1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.** -1. In the developer tools, click the **Network** tab. -1. On the **Network** tab, make sure **Fetch/XHR** is selected. -1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`. -1. Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.** -1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials. diff --git a/content/rancher/v2.6/en/backups/_index.md b/content/rancher/v2.6/en/backups/_index.md deleted file mode 100644 index 45b402c3e1..0000000000 --- a/content/rancher/v2.6/en/backups/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Backups and Disaster Recovery -weight: 5 ---- - -In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. - -The `rancher-backup` operator is used to backup and restore Rancher on any Kubernetes cluster. 
This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -- [Backup and Restore for Rancher installed with Docker](#backup-and-restore-for-rancher-installed-with-docker) -- [How Backups and Restores Work](#how-backups-and-restores-work) -- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) - - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) - - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) - - [RBAC](#rbac) -- [Backing up Rancher](#backing-up-rancher) -- [Restoring Rancher](#restoring-rancher) -- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) -- [Default Storage Location Configuration](#default-storage-location-configuration) - - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) - -# Backup and Restore for Rancher installed with Docker - -For Rancher installed with Docker, refer to [this page](./docker-installs/docker-backups) to perform backups and [this page](./docker-installs/docker-restores) to perform restores. - -# How Backups and Restores Work - -The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. The following cluster-scoped custom resource definitions are added to the cluster: - -- `backups.resources.cattle.io` -- `resourcesets.resources.cattle.io` -- `restores.resources.cattle.io` - -The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. - -When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. - -The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. - -When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. - -The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. - ->**Note:** Refer [here]({{}}/rancher/v2.6/en/backups/migrating-rancher/#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. - -# Installing the rancher-backup Operator - -The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin only feature and available only for the **local** cluster. 
(*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) - ->**NOTE:** There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}/rancher/v2.6/en/deploy-across-clusters/fleet/#troubleshooting) for a workaround. - -### Installing rancher-backup with the Rancher UI - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the `local` cluster and click **Explore**. -1. In the left navigation bar, **Apps & Marketplace > Charts**. -1. Click **Rancher Backups**. -1. Click **Install**. -1. Optional: Configure the default storage location. For help, refer to the [configuration section.](./configuration/storage-config) -1. Click **Install**. - -**Result:** The `rancher-backup` operator is installed. - -From the **Cluster Dashboard,** you can see the `rancher-backup` operator listed under **Deployments**. - -To configure the backup app in Rancher, go to the left navigation menu and click **Rancher Backups**. - -### RBAC - -Only the rancher admins and the local cluster’s cluster-owner can: - -* Install the Chart -* See the navigation links for Backup and Restore CRDs -* Perform a backup or restore by creating a Backup CR and Restore CR respectively -* List backups/restores performed so far - -# Backing up Rancher - -A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](./back-up-rancher) - -# Restoring Rancher - -A restore is performed by creating a Restore custom resource. For a tutorial, refer to [this page.](./restoring-rancher) - -# Migrating Rancher to a New Cluster - -A migration is performed by following [these steps.]({{}}/rancher/v2.6/en/backups/migrating-rancher) - -# Default Storage Location Configuration - -Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. - -For information on configuring these options, refer to [this page.](./configuration/storage-config) - -### Example values.yaml for the rancher-backup Helm Chart - -The example [values.yaml file](./configuration/storage-config/#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. diff --git a/content/rancher/v2.6/en/backups/back-up-rancher/_index.md b/content/rancher/v2.6/en/backups/back-up-rancher/_index.md deleted file mode 100644 index 5f1a34fdd5..0000000000 --- a/content/rancher/v2.6/en/backups/back-up-rancher/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Backing up Rancher -weight: 1 ---- - -In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer the instructions for [single node backups]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) - -The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. - -Note that the rancher-backup operator version 2.x.x is for Rancher v2.6.x. - -> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. 
The Kubernetes version should also be considered when restoring a backup, since the supported apiVersion in the cluster and in the backup file could be different. - -### Prerequisites - -The Rancher version must be v2.5.0 and up. - -Refer [here]({{}}/rancher/v2.6/en/backups/migrating-rancher/#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. - -### 1. Install the Rancher Backups operator - -The backup storage location is an operator-level setting, so it needs to be configured when the Rancher Backups application is installed or upgraded. - -Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the `local` cluster and click **Explore**. The `local` cluster runs the Rancher server. -1. Click **Apps & Marketplace > Charts**. -1. Click **Rancher Backups**. -1. Click **Install**. -1. Configure the default storage location. For help, refer to the [storage configuration section.](../configuration/storage-config) -1. Click **Install**. - ->**NOTE:** There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}/rancher/v2.6/en/deploy-across-clusters/fleet/#troubleshooting) for a workaround. - -### 2. Perform a Backup - -To perform a backup, a custom resource of type Backup must be created. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the `local` cluster and click **Explore**. -1. In the left navigation bar, click **Rancher Backups > Backups**. -1. Click **Create**. -1. Create the Backup with the form, or with the YAML editor. -1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) -1. For using the YAML editor, we can click **Create > Create from YAML**. Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3. The app uses the `credentialSecretNamespace` value to determine where to look for the S3 backup secret: - - ```yaml - apiVersion: resources.cattle.io/v1 - kind: Backup - metadata: - name: s3-recurring-backup - spec: - storageLocation: - s3: - credentialSecretName: s3-creds - credentialSecretNamespace: default - bucketName: rancher-backups - folder: rancher - region: us-west-2 - endpoint: s3.us-west-2.amazonaws.com - resourceSetName: rancher-resource-set - encryptionConfigSecretName: encryptionconfig - schedule: "@every 1h" - retentionCount: 10 - ``` - - > **Note:** When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` - - For help configuring the Backup, refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) - - > **Important:** The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. -1. Click **Create**. - -**Result:** The backup file is created in the storage location configured in the Backup custom resource. 
The name of this file is used when performing a restore. - diff --git a/content/rancher/v2.6/en/backups/docker-installs/docker-backups/_index.md b/content/rancher/v2.6/en/backups/docker-installs/docker-backups/_index.md deleted file mode 100644 index d3e8118e56..0000000000 --- a/content/rancher/v2.6/en/backups/docker-installs/docker-backups/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Backing up Rancher Installed with Docker -shortTitle: Backups -weight: 3 ---- - -After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. - -## Before You Start - -During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher -``` - -In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
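For example, with the sample values from the table above substituted in (container `festive_mestorf`, image tag `v2.0.5`, date `9-27-18`), the commands in the procedure below would look roughly like this. These values are purely illustrative; always use the names, tags, and dates from your own `docker ps` output:

```bash
# Stop the running Rancher container (example name taken from the table above)
docker stop festive_mestorf

# Create a data container from the stopped Rancher container
docker create --volumes-from festive_mestorf --name rancher-data-9-27-18 rancher/rancher:v2.0.5

# Create the backup tarball from the data container
docker run --volumes-from rancher-data-9-27-18 -v $PWD:/backup:z \
  busybox tar pzcvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz /var/lib/rancher
```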
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. - -## Creating a Backup - -This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. - - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` -1.
Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data- rancher/rancher: - ``` - -1. From the data container that you just created (`rancher-data-`), create a backup tarball (`rancher-data-backup--.tar.gz`). Use the following command, replacing each placeholder: - - ``` - docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** A stream of commands runs on the screen. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. - -1. Restart Rancher Server. Replace `` with the name of your Rancher container: - - ``` - docker start - ``` - -**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-restores) if you need to restore backup data. diff --git a/content/rancher/v2.6/en/backups/docker-installs/docker-restores/_index.md b/content/rancher/v2.6/en/backups/docker-installs/docker-restores/_index.md deleted file mode 100644 index 8fa8b8cb30..0000000000 --- a/content/rancher/v2.6/en/backups/docker-installs/docker-restores/_index.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Restoring Backups—Docker Installs -shortTitle: Restores -weight: 3 ---- - -If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. - -## Before You Start - -During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker run --volumes-from -v $PWD:/backup \ -busybox sh -c "rm /var/lib/rancher/* -rf && \ -tar pzxvf /backup/rancher-data-backup--" -``` - -In this command, `` and `-` are environment variables for your Rancher deployment. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version number for your Rancher backup. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
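For example, with the sample values from the table above (container `festive_mestorf`, backup version `v2.0.5`, date `9-27-18`), the restore commands in the procedure below would expand to something like the following. These values are illustrative only; substitute the names and versions from your own environment:

```bash
# Replace the current state data with the backup data, then restart Rancher
docker run --volumes-from festive_mestorf -v $PWD:/backup \
  busybox sh -c "rm /var/lib/rancher/* -rf && \
  tar pzxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"

docker start festive_mestorf
```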
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Restoring Backups - -Using a [backup]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) that you created earlier, restore Rancher to its last known healthy state. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container: - - ``` - docker stop - ``` -1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. - -1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. - - >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. - - ``` - docker run --volumes-from -v $PWD:/backup \ - busybox sh -c "rm /var/lib/rancher/* -rf && \ - tar pzxvf /backup/rancher-data-backup--.tar.gz" - ``` - - **Step Result:** A series of commands should run. - -1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. - - ``` - docker start - ``` - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/content/rancher/v2.6/en/cli/_index.md b/content/rancher/v2.6/en/cli/_index.md deleted file mode 100644 index 553cb44c5c..0000000000 --- a/content/rancher/v2.6/en/cli/_index.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: CLI with Rancher -description: Interact with Rancher using command line interface (CLI) tools from your workstation. -weight: 21 ---- - -- [Rancher CLI](#rancher-cli) - - [Download Rancher CLI](#download-rancher-cli) - - [Requirements](#requirements) - - [CLI Authentication](#cli-authentication) - - [Project Selection](#project-selection) - - [Commands](#commands) - - [Rancher CLI Help](#rancher-cli-help) - - [Limitations](#limitations) -- [kubectl](#kubectl) - - [kubectl Utility](#kubectl-utility) - - [Authentication with kubectl and kubeconfig Tokens with TTL](#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) - -# Rancher CLI - -The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. - -### Download Rancher CLI - -The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -1. In the upper left corner, click **☰**. -1. 
At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. -1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. - -### Requirements - -After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: - -- Your Rancher Server URL, which is used to connect to Rancher Server. -- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.6/en/user-settings/api-keys/). - -### CLI Authentication - -Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): - -```bash -$ ./rancher login https:// --token -``` - -If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. - -### Project Selection - -Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. - -**Example: `./rancher context switch` Output** -``` -User:rancher-cli-directory user$ ./rancher context switch -NUMBER CLUSTER NAME PROJECT ID PROJECT NAME -1 cluster-2 c-7q96s:p-h4tmb project-2 -2 cluster-2 c-7q96s:project-j6z6d Default -3 cluster-1 c-lchzv:p-xbpdt project-1 -4 cluster-1 c-lchzv:project-s2mch Default -Select a Project: -``` - -After you enter a number, the console displays a message that you've changed projects. - -``` -INFO[0005] Setting new context to project project-1 -INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json -``` - -Ensure you can run `rancher kubectl get pods` successfully. - -### Commands - -The following commands are available for use in Rancher CLI. - -| Command | Result | -|---|---| -| `apps, [app]` | Performs operations on catalog applications (i.e., individual [Helm charts](https://docs.helm.sh/developing_charts/)) or Rancher charts. | -| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.6/en/helm-charts/). | -| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.6/en/cluster-provisioning/). | -| `context` | Switches between Rancher [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | -| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.6/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | -| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | -| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). 
| -| `namespaces, [namespace]` |Performs operations on namespaces. | -| `nodes, [node]` |Performs operations on nodes. | -| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/). | -| `ps` | Displays [workloads]({{}}/rancher/v2.6/en/k8s-in-rancher/workloads) in a project. | -| `settings, [setting]` | Shows the current settings for your Rancher Server. | -| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | -| `help, [h]` | Shows a list of commands or help for one command. | - - -### Rancher CLI Help - -Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. - -All commands accept the `--help` flag, which documents each command's usage. - -### Limitations - -The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../helm-charts/). - -# kubectl - -Interact with Rancher using kubectl. - -### kubectl Utility - -Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -Configure kubectl by visiting your cluster in the Rancher Web UI, clicking on `Kubeconfig`, copying contents, and putting them into your `~/.kube/config` file. - -Run `kubectl cluster-info` or `kubectl get pods` successfully. - -### Authentication with kubectl and kubeconfig Tokens with TTL - -_Requirements_ - -If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.6/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher CLI](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like: -`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. - -This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: - -1. Local -2. Active Directory (LDAP only) -3. FreeIPA -4. OpenLDAP -5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth - -When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. -The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../api/api-tokens/#deleting-tokens). -Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. diff --git a/content/rancher/v2.6/en/cluster-admin/_index.md b/content/rancher/v2.6/en/cluster-admin/_index.md deleted file mode 100644 index 7919b01b21..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/_index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Cluster Administration -weight: 8 ---- - -After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. - -This page covers the following topics: - -- [Switching between clusters](#switching-between-clusters) -- [Managing clusters in Rancher](#managing-clusters-in-rancher) -- [Configuring tools](#configuring-tools) - -> This section assumes a basic familiarity with Docker and Kubernetes. 
For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.6/en/overview/concepts) page. - -## Managing Clusters in Rancher - -After clusters have been [provisioned into Rancher]({{}}/rancher/v2.6/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. - -{{% include file="/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table" %}} - -## Configuring Tools - -Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: - -- Alerts -- Notifiers -- Logging -- Monitoring -- Istio Service Mesh -- OPA Gatekeeper - -Tools can be installed through **Apps & Marketplace.** \ No newline at end of file diff --git a/content/rancher/v2.6/en/cluster-admin/certificate-rotation/_index.md b/content/rancher/v2.6/en/cluster-admin/certificate-rotation/_index.md deleted file mode 100644 index c38a4dd0d5..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/certificate-rotation/_index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Certificate Rotation -weight: 2040 ---- - -> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. - -By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. - -Certificates can be rotated for the following services: - -{{% tabs %}} -{{% tab "RKE" %}} - -- etcd -- kubelet (node certificate) -- kubelet (serving certificate, if [enabled]({{}}/rke/latest/en/config-options/services/#kubelet-options)) -- kube-apiserver -- kube-proxy -- kube-scheduler -- kube-controller-manager - -{{% /tab %}} -{{% tab "RKE2" %}} - -- admin -- api-server -- controller-manager -- scheduler -- rke2-controller -- rke2-server -- cloud-controller -- etcd -- auth-proxy -- kubelet -- kube-proxy - -{{% /tab %}} -{{% /tabs %}} - -> **Note:** For users who didn't rotate their webhook certificates, and they have expired after one year, please see this [page]({{}}/rancher/v2.6/en/troubleshooting/expired-webhook-certificates/) for help. - - -### Certificate Rotation - -Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster you want to rotate certificates for amd click **⋮ > Rotate Certificates**. -1. Select which certificates that you want to rotate. - - * Rotate all Service certificates (keep the same CA) - * Rotate an individual service and choose one of the services from the drop-down menu - -1. Click **Save**. - -**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. 
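If you want to verify that a certificate was actually renewed, you can check its expiry date directly on a node. The path below is the typical location for RKE-generated certificates and may differ in your environment:

```bash
# Print the expiry date of the kube-apiserver certificate on an RKE node
# (adjust the certificate path if your setup stores certificates elsewhere)
openssl x509 -in /etc/kubernetes/ssl/kube-apiserver.pem -noout -enddate
```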
- -### Additional Notes - -{{% tabs %}} -{{% tab "RKE" %}} - -Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher launched Kubernetes clusters. - -{{% /tab %}} -{{% tab "RKE2" %}} - -In RKE2, both etcd and control plane nodes are treated as the same `server` concept. As such, when rotating certificates of services specific to either of these components will result in certificates being rotated on both. The certificates will only change for the specified service, but you will see nodes for both components go into an updating state. You may also see worker only nodes go into an updating state. This is to restart the workers after a certificate change to ensure they get the latest client certs. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.6/en/cluster-admin/cleaning-cluster-nodes/_index.md b/content/rancher/v2.6/en/cluster-admin/cleaning-cluster-nodes/_index.md deleted file mode 100644 index d1cf15c658..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/cleaning-cluster-nodes/_index.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Removing Kubernetes Components from Nodes -description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually -weight: 2055 ---- - -This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. - -When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. - -When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. - -## What Gets Removed? - -When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. 
- -| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Registered Nodes][4] | -| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | -| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | -| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | -| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | -| Rancher Deployment | ✓ | ✓ | ✓ | | -| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | -| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | -| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | - -[1]: {{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ -[2]: {{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/ -[3]: {{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/ -[4]: {{}}/rancher/v2.6/en/cluster-provisioning/registered-clusters/ - -## Removing a Node from a Cluster by Rancher UI - -When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -## Removing Rancher Components from a Cluster Manually - -When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. - ->**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. - -### Removing Rancher Components from Registered Clusters - -For registered clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. - -After the registered cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. - -{{% tabs %}} -{{% tab "By UI / API" %}} ->**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. - -After you initiate the removal of a registered cluster using the Rancher UI (or API), the following events occur. - -1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. - -1. 
Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. - -1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. - -**Result:** All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% tab "By Script" %}} -Rather than cleaning registered cluster nodes using the Rancher UI, you can run a script instead. - ->**Prerequisite:** -> ->Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. - -1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: - - ``` - chmod +x user-cluster.sh - ``` - -1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. - - If you don't have an air gap environment, skip this step. - -1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): - - >**Tip:** - > - >Add the `-dry-run` flag to preview the script's outcome without making changes. - ``` - ./user-cluster.sh rancher/rancher-agent: - ``` - -**Result:** The script runs. All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. - -{{% /tab %}} -{{% /tabs %}} - -### Windows Nodes - -To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. - -To run the script, you can use this command in the PowerShell: - -``` -pushd c:\etc\rancher -.\cleanup.ps1 -popd -``` - -**Result:** The node is reset and can be re-added to a Kubernetes cluster. - -### Docker Containers, Images, and Volumes - -Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) - -**To clean all Docker containers, images and volumes:** - -``` -docker rm -f $(docker ps -qa) -docker rmi -f $(docker images -q) -docker volume rm $(docker volume ls -q) -``` - -### Mounts - -Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. - -Mounts | ---------| -`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | -`/var/lib/kubelet` | -`/var/lib/rancher` | - -**To unmount all mounts:** - -``` -for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done -``` - -### Directories and Files - -The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. - ->**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. 
- -Directories | ---------| -`/etc/ceph` | -`/etc/cni` | -`/etc/kubernetes` | -`/opt/cni` | -`/opt/rke` | -`/run/secrets/kubernetes.io` | -`/run/calico` | -`/run/flannel` | -`/var/lib/calico` | -`/var/lib/etcd` | -`/var/lib/cni` | -`/var/lib/kubelet` | -`/var/lib/rancher/rke/log` | -`/var/log/containers` | -`/var/log/kube-audit` | -`/var/log/pods` | -`/var/run/calico` | - -**To clean the directories:** - -``` -rm -rf /etc/ceph \ - /etc/cni \ - /etc/kubernetes \ - /opt/cni \ - /opt/rke \ - /run/secrets/kubernetes.io \ - /run/calico \ - /run/flannel \ - /var/lib/calico \ - /var/lib/etcd \ - /var/lib/cni \ - /var/lib/kubelet \ - /var/lib/rancher/rke/log \ - /var/log/containers \ - /var/log/kube-audit \ - /var/log/pods \ - /var/run/calico -``` - -### Network Interfaces and Iptables - -The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. - -**To restart a node:** - -``` -# using reboot -$ sudo reboot - -# using shutdown -$ sudo shutdown -r now -``` - -If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. - -### Network Interfaces - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. - -Interfaces | ---------| -`flannel.1` | -`cni0` | -`tunl0` | -`caliXXXXXXXXXXX` (random interface names) | -`vethXXXXXXXX` (random interface names) | - -**To list all interfaces:** - -``` -# Using ip -ip address show - -# Using ifconfig -ifconfig -a -``` - -**To remove an interface:** - -``` -ip link delete interface_name -``` - -### Iptables - ->**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. - -Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
- -Chains | ---------| -`cali-failsafe-in` | -`cali-failsafe-out` | -`cali-fip-dnat` | -`cali-fip-snat` | -`cali-from-hep-forward` | -`cali-from-host-endpoint` | -`cali-from-wl-dispatch` | -`cali-fw-caliXXXXXXXXXXX` (random chain names) | -`cali-nat-outgoing` | -`cali-pri-kns.NAMESPACE` (chain per namespace) | -`cali-pro-kns.NAMESPACE` (chain per namespace) | -`cali-to-hep-forward` | -`cali-to-host-endpoint` | -`cali-to-wl-dispatch` | -`cali-tw-caliXXXXXXXXXXX` (random chain names) | -`cali-wl-to-host` | -`KUBE-EXTERNAL-SERVICES` | -`KUBE-FIREWALL` | -`KUBE-MARK-DROP` | -`KUBE-MARK-MASQ` | -`KUBE-NODEPORTS` | -`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | -`KUBE-SERVICES` | -`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | - -**To list all iptables rules:** - -``` -iptables -L -t nat -iptables -L -t mangle -iptables -L -``` diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-autoscaler/amazon/_index.md b/content/rancher/v2.6/en/cluster-admin/cluster-autoscaler/amazon/_index.md deleted file mode 100644 index ffa5b83b76..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/cluster-autoscaler/amazon/_index.md +++ /dev/null @@ -1,580 +0,0 @@ ---- -title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups -weight: 1 ---- - -This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. - -We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. - -- [Prerequisites](#prerequisites) -- [1. Create a Custom Cluster](#1-create-a-custom-cluster) -- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) -- [3. Deploy Nodes](#3-deploy-nodes) -- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) - - [Parameters](#parameters) - - [Deployment](#deployment) -- [Testing](#testing) - - [Generating Load](#generating-load) - - [Checking Scale](#checking-scale) - -# Prerequisites - -These elements are required to follow this guide: - -* The Rancher server is up and running -* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles - -### 1. Create a Custom Cluster - -On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: - -* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag -* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag -* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster - - ```sh - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum - ``` - -### 2. Configure the Cloud Provider - -On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. - -1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. - * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. 
- - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:SetDesiredCapacity", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:DescribeTags", - "autoscaling:DescribeLaunchConfigurations", - "ec2:DescribeLaunchTemplateVersions" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - -2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. - * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] - } - ``` - - * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` - * Security group: 
`K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.6/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--etcd --controlplane" - - sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. - * IAM profile: Provides cloud_provider worker integration. - This profile is called `K8sWorkerProfile`. - - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] - } - ``` - - * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` - * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.6/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster - - ```sh - #!/bin/bash -x - - cat < /etc/sysctl.d/90-kubelet.conf - vm.overcommit_memory = 1 - vm.panic_on_oom = 0 - kernel.panic = 10 - kernel.panic_on_oops = 1 - kernel.keys.root_maxkeys = 1000000 - kernel.keys.root_maxbytes = 25000000 - EOF - sysctl -p /etc/sysctl.d/90-kubelet.conf - - curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh - sudo usermod -aG docker ubuntu - - TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") - PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) - PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) - K8S_ROLES="--worker" - - 
sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} - ``` - -More info is at [RKE clusters on AWS]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) - -### 3. Deploy Nodes - -Once we've configured AWS, let's create VMs to bootstrap our cluster: - -* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/production/) - * IAM role: `K8sMasterRole` - * Security group: `K8sMasterSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * User data: `K8sMasterUserData` - -* worker: Define an ASG on EC2 with the following settings: - * Name: `K8sWorkerAsg` - * IAM role: `K8sWorkerRole` - * Security group: `K8sWorkerSg` - * Tags: - * `kubernetes.io/cluster/: owned` - * `k8s.io/cluster-autoscaler/: true` - * `k8s.io/cluster-autoscaler/enabled: true` - * User data: `K8sWorkerUserData` - * Instances: - * minimum: 2 - * desired: 2 - * maximum: 10 - -Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. - -### 4. Install Cluster-autoscaler - -At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. - -#### Parameters - -This table shows cluster-autoscaler parameters for fine tuning: - -| Parameter | Default | Description | -|---|---|---| -|cluster-name|-|Autoscaled cluster name, if available| -|address|:8085|The address to expose Prometheus metrics| -|kubernetes|-|Kubernetes master location. Leave blank for default| -|kubeconfig|-|Path to kubeconfig file with authorization and master location information| -|cloud-config|-|The path to the cloud provider configuration file. 
Empty string for no configuration file| -|namespace|"kube-system"|Namespace in which cluster-autoscaler run| -|scale-down-enabled|true|Should CA scale down the cluster| -|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| -|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| -|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| -|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| -|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| -|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| -|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| -|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| -|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| -|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| -|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| -|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| -|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format :. Cluster autoscaler will not scale the cluster beyond these numbers| -cloud-provider|-|Cloud provider type| -|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| -|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| -|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| -|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| -|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| -|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| -|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| -|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| -|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: ::| -|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. 
A definition is expressed `:[[=]]`| -|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| -|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| -|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| -|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| -|write-status-configmap|true|Should CA write status information to a configmap| -|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| -|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| -|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| -|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| -|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| -|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| -|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| -|regional|false|Cluster is regional| -|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| -|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| -|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| -|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| -|profiling|false|Is debug/pprof endpoint enabled| - -#### Deployment - -Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: - - -```yml ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler 
-subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler -spec: - replicas: 1 - selector: - matchLabels: - app: cluster-autoscaler - template: - metadata: - labels: - app: cluster-autoscaler - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8085' - spec: - serviceAccountName: cluster-autoscaler - tolerations: - - effect: NoSchedule - operator: "Equal" - value: "true" - key: node-role.kubernetes.io/controlplane - nodeSelector: - node-role.kubernetes.io/controlplane: "true" - containers: - - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --v=4 - - --stderrthreshold=info - - --cloud-provider=aws - - --skip-nodes-with-local-storage=false - - --expander=least-waste - - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs/ca-certificates.crt - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - -``` - -Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): - -```sh -kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml -``` - -**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) - -# Testing - -At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: - -* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. -* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. - -### Generating Load - -We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world -spec: - replicas: 3 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - ports: - - containerPort: 80 - protocol: TCP - resources: - limits: - cpu: 1000m - memory: 1024Mi - requests: - cpu: 1000m - memory: 1024Mi -``` - -Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): - -``` -kubectl -n default apply -f test-deployment.yaml -``` - -### Checking Scale - -Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. - -Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/_index.md b/content/rancher/v2.6/en/cluster-admin/editing-clusters/_index.md deleted file mode 100644 index cab7cdd0d0..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/editing-clusters/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Cluster Configuration -weight: 2025 ---- - -After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. - -For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/cluster-members) - -### Cluster Configuration References - -The cluster configuration options depend on the type of Kubernetes cluster: - -- [RKE Cluster Configuration](./rke-config-reference) -- [RKE2 Cluster Configuration](./rke2-config-reference) -- [K3s Cluster Configuration](./k3s-config-reference) -- [EKS Cluster Configuration](./eks-config-reference) -- [GKE Cluster Configuration](./gke-config-reference) -- [AKS Cluster Configuration](./aks-config-reference) - -### Cluster Management Capabilities by Cluster Type - -The options and settings available for an existing cluster change based on the method that you used to provision it. 
- -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table" %}} - diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/rke-config-reference/_index.md b/content/rancher/v2.6/en/cluster-admin/editing-clusters/rke-config-reference/_index.md deleted file mode 100644 index 27e73a4c40..0000000000 --- a/content/rancher/v2.6/en/cluster-admin/editing-clusters/rke-config-reference/_index.md +++ /dev/null @@ -1,359 +0,0 @@ ---- -title: RKE Cluster Configuration Reference -shortTitle: RKE Cluster Configuration -weight: 1 ---- - -When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) or [RKE2](https://docs.rke2.io/) as the Kubernetes distribution. - -This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. - -- [Overview](#overview) -- [Editing Clusters with a Form in the Rancher UI](#editing-clusters-with-a-form-in-the-rancher-ui) -- [Editing Clusters with YAML](#editing-clusters-with-yaml) -- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) - - [Kubernetes Version](#kubernetes-version) - - [Network Provider](#network-provider) - - [Project Network Isolation](#project-network-isolation) - - [Kubernetes Cloud Providers](#kubernetes-cloud-providers) - - [Private Registries](#private-registries) - - [Authorized Cluster Endpoint](#authorized-cluster-endpoint) - - [Node Pools](#node-pools) - - [NGINX Ingress](#nginx-ingress) - - [Metrics Server Monitoring](#metrics-server-monitoring) - - [Pod Security Policy Support](#pod-security-policy-support) - - [Docker Version on Nodes](#docker-version-on-nodes) - - [Docker Root Directory](#docker-root-directory) - - [Default Pod Security Policy](#default-pod-security-policy) - - [Node Port Range](#node-port-range) - - [Recurring etcd Snapshots](#recurring-etcd-snapshots) - - [Agent Environment Variables](#agent-environment-variables) - - [Updating ingress-nginx](#updating-ingress-nginx) -- [RKE Cluster Config File Reference](#rke-cluster-config-file-reference) - - [Config File Structure in Rancher](#config-file-structure-in-rancher) - - [Default DNS Provider](#default-dns-provider) -- [Rancher Specific Parameters in YAML](#rancher-specific-parameters-in-yaml) - - [docker_root_dir](#docker_root_dir) - - [enable_cluster_monitoring](#enable_cluster_monitoring) - - [enable_network_policy](#enable_network_policy) - - [local_cluster_auth_endpoint](#local_cluster_auth_endpoint) - - [Custom Network Plug-in](#custom-network-plug-in) - -# Overview - -You can configure the Kubernetes options one of two ways: - -- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. -- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -The RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
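As a minimal sketch of that nesting (the values shown are illustrative and taken from the example config file later on this page):

```yaml
# Rancher-specific options stay at the top level of the cluster config
docker_root_dir: /var/lib/docker
enable_network_policy: false
# all RKE options are nested under this directive
rancher_kubernetes_engine_config:
  addon_job_timeout: 30
  ignore_docker_version: true
```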
For more information, see the section about the [cluster config file.](#cluster-config-file) - -In [clusters launched by RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. - -For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). - -The forms in the Rancher UI don't include all advanced options for configuring RKE. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -# Editing Clusters with a Form in the Rancher UI - -To edit your cluster, - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster you want to configure and click **⋮ > Edit Config**. - - -# Editing Clusters with YAML - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. - -RKE clusters (also called RKE1 clusters) are edited differently than RKE2 and K3s clusters. - -To edit an RKE config file directly from the Rancher UI, - -1. Click **☰ > Cluster Management**. -1. Go to the RKE cluster you want to configure. Click and click **⋮ > Edit Config**. This take you to the RKE configuration form. Note: Because cluster provisioning changed in Rancher 2.6, the **⋮ > Edit as YAML** can be used for configuring RKE2 clusters, but it can't be used for editing RKE1 configuration. -1. In the configuration form, scroll down and click **Edit as YAML**. -1. Edit the RKE options under the `rancher_kubernetes_engine_config` directive. - -# Configuration Options in the Rancher UI - -> Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -### Kubernetes Version - -The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). - -For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.6/en/cluster-admin/upgrading-kubernetes). - -### Network Provider - -The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.6/en/faq/networking/cni-providers/). - -> After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. 
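When a cluster is edited as YAML, the provider is selected with the `plugin` field under `network`, nested under `rancher_kubernetes_engine_config`. A minimal sketch, using Canal as in the example config file later on this page:

```yaml
rancher_kubernetes_engine_config:
  network:
    plugin: canal           # one of the providers listed below
    options:
      flannel_backend_type: vxlan
```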
- -Out of the box, Rancher is compatible with the following network providers: - -- [Canal](https://github.com/projectcalico/canal) -- [Flannel](https://github.com/coreos/flannel#flannel) -- [Calico](https://docs.projectcalico.org/v3.11/introduction/) -- [Weave](https://github.com/weaveworks/weave) - -**Notes on Weave:** - -When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). - -### Project Network Isolation - -If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. - -Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. - -### Kubernetes Cloud Providers - -You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers). If you want to use dynamically provisioned [volumes and storage]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. - ->**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. - -### Private Registries - -The cluster-level private registry configuration is only used for provisioning clusters. - -There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.6/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. - -If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. - -The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. - -- **System images** are components needed to maintain the Kubernetes cluster. -- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. 
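As a rough sketch (the registry address and credentials below are placeholders), a cluster-level private registry with credentials is declared in the cluster YAML under `rancher_kubernetes_engine_config`:

```yaml
rancher_kubernetes_engine_config:
  private_registries:
    - url: registry.example.com   # placeholder registry address
      user: registry-user         # placeholder credentials
      password: registry-password
      is_default: true            # pull system and add-on images from this registry
```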
- -For more information on setting up a private registry for components applied during the provisioning of the cluster, see the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/). - -Rancher v2.6 introduced the ability to configure [ECR registries for RKE clusters]({{}}/rke/latest/en/config-options/private-registries/#amazon-elastic-container-registry-ecr-private-registry-setup). - -### Authorized Cluster Endpoint - -Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. - -> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.6/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. - -This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. - -For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.6/en/overview/architecture/#4-authorized-cluster-endpoint) - -We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.]({{}}/rancher/v2.6/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) - -### Node Pools - -For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) - -### NGINX Ingress - -If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. - -### Metrics Server Monitoring - -Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). - -Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. - -### Pod Security Policy Support - -Enables [pod security policies]({{}}/rancher/v2.6/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. - -You must have an existing Pod Security Policy configured before you can use this option. - -### Docker Version on Nodes - -Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. - -If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. - -For details on which Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -### Docker Root Directory - -If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), specify the correct Docker Root Directory in this option. 
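In the cluster YAML this maps to the top-level `docker_root_dir` key (see [docker_root_dir](#docker_root_dir) below). A minimal sketch, where `/opt/docker` is only an example of a non-default path:

```yaml
# Rancher-specific setting; not nested under rancher_kubernetes_engine_config
docker_root_dir: /opt/docker
```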
- -### Default Pod Security Policy - -If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. - -### Node Port Range - -Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. - -### Recurring etcd Snapshots - -Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). - -### Agent Environment Variables - -Option to set environment variables for [rancher agents]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. - -### Updating ingress-nginx - -Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. - -If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. - - - -# RKE Cluster Config File Reference - -Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. - -For the complete reference for configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) - -### Config File Structure in Rancher - -RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. - -{{% accordion id="v2.3.0-cluster-config-file" label="Example Cluster Config File" %}} - -```yaml -# -# Cluster Config -# -docker_root_dir: /var/lib/docker -enable_cluster_alerting: false -enable_cluster_monitoring: false -enable_network_policy: false -local_cluster_auth_endpoint: - enabled: true -# -# Rancher Config -# -rancher_kubernetes_engine_config: # Your RKE template config goes here. - addon_job_timeout: 30 - authentication: - strategy: x509 - ignore_docker_version: true -# -# # Currently only nginx ingress provider is supported. 
-# # To disable ingress controller, set `provider: none` -# # To enable ingress on specific nodes, use the node_selector, eg: -# provider: nginx -# node_selector: -# app: ingress -# - ingress: - provider: nginx - kubernetes_version: v1.15.3-rancher3-1 - monitoring: - provider: metrics-server -# -# If you are using calico on AWS -# -# network: -# plugin: calico -# calico_network_provider: -# cloud_provider: aws -# -# # To specify flannel interface -# -# network: -# plugin: flannel -# flannel_network_provider: -# iface: eth1 -# -# # To specify flannel interface for canal plugin -# -# network: -# plugin: canal -# canal_network_provider: -# iface: eth1 -# - network: - options: - flannel_backend_type: vxlan - plugin: canal -# -# services: -# kube-api: -# service_cluster_ip_range: 10.43.0.0/16 -# kube-controller: -# cluster_cidr: 10.42.0.0/16 -# service_cluster_ip_range: 10.43.0.0/16 -# kubelet: -# cluster_domain: cluster.local -# cluster_dns_server: 10.43.0.10 -# - services: - etcd: - backup_config: - enabled: true - interval_hours: 12 - retention: 6 - safe_timestamp: false - creation: 12h - extra_args: - election-timeout: 5000 - heartbeat-interval: 500 - gid: 0 - retention: 72h - snapshot: false - uid: 0 - kube_api: - always_pull_images: false - pod_security_policy: false - service_node_port_range: 30000-32767 - ssh_agent_auth: false -windows_prefered_cluster: false -``` -{{% /accordion %}} - -### Default DNS provider - -The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. - -| Rancher version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v2.2.5 and higher | v1.13.x and lower | kube-dns | -| v2.2.4 and lower | any | kube-dns | - -# Rancher Specific Parameters in YAML - -Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): - -### docker_root_dir - -See [Docker Root Directory](#docker-root-directory). - -### enable_cluster_monitoring - -Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.6/en/monitoring-alerting/). - -### enable_network_policy - -Option to enable or disable Project Network Isolation. - -Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. - -### local_cluster_auth_endpoint - -See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). - -Example: - -```yaml -local_cluster_auth_endpoint: - enabled: true - fqdn: "FQDN" - ca_certs: |- - -----BEGIN CERTIFICATE----- - ... - -----END CERTIFICATE----- -``` - -### Custom Network Plug-in - -You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. 
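As a rough illustration of the in-line approach described below, an add-on can be embedded directly in the cluster YAML under the RKE `addons` directive. The manifest here is only a placeholder; a real custom network plug-in would supply its own manifests:

```yaml
rancher_kubernetes_engine_config:
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: example-addon-namespace  # placeholder; replace with the plug-in's manifests
```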
- -There are two ways that you can specify an add-on: - -- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) -- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) - -For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.6/en/cluster-provisioning/_index.md b/content/rancher/v2.6/en/cluster-provisioning/_index.md deleted file mode 100644 index 9e9f44c4c8..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Setting up Kubernetes Clusters in Rancher -description: Provisioning Kubernetes Clusters -weight: 7 ---- - -Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. - -This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.6/en/overview/concepts) page. - -For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.6/en/overview/architecture/) page. - -This section covers the following topics: - - - -- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) -- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) -- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) - - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) - - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) -- [Registering Existing Clusters](#registering-existing-clusters) -- [Programmatically Creating Clusters](#programmatically-creating-clusters) - - - -### Cluster Management Capabilities by Cluster Type - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table" %}} - -# Setting up Clusters in a Hosted Kubernetes Provider - -In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. - -If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. - -For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters) - -# Launching Kubernetes with Rancher - -Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. - -In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. 
- -These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. - -If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. - -For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) - -### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider - -Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. - -Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. - -One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. - -The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. - -For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/) - -### Launching Kubernetes on Existing Custom Nodes - -When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. - -You can bring any nodes you want to Rancher and use them to create a cluster. - -These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. - -# Registering Existing Clusters - -The cluster registration feature replaces the feature to import clusters. - -Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. - -When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. - -For more information, see [this page.](./registered-clusters) - -# Programmatically Creating Clusters - -The most common way to programmatically deploy Kubernetes clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) - -EKS, GKE, AKS clusters and RKE clusters can be created or imported with Terraform. 
\ No newline at end of file diff --git a/content/rancher/v2.6/en/cluster-provisioning/node-requirements/_index.md b/content/rancher/v2.6/en/cluster-provisioning/node-requirements/_index.md deleted file mode 100644 index 519e8c3107..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/node-requirements/_index.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: Node Requirements for Rancher Managed Clusters -weight: 1 ---- - -This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. - -> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.6/en/installation/requirements/) - -Make sure the nodes for the Rancher server fulfill the following requirements: - -- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) -- [Hardware Requirements](#hardware-requirements) -- [Networking Requirements](#networking-requirements) -- [Optional: Security Considerations](#optional-security-considerations) - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.6/en/installation/resources/advanced/arm64-platform/) - -For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) - -### Oracle Linux and RHEL Derived Linux Nodes - -Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. - ->**Note:** In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: -> -> ``` - systemctl disable nm-cloud-setup.service nm-cloud-setup.timer - reboot - ``` - -### SUSE Linux Nodes - -SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.6/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. 
- -### Flatcar Container Linux Nodes - -When [Launching Kubernetes with Rancher]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) - -{{% tabs %}} -{{% tab "Canal"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: canal - options: - canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} - -{{% tab "Calico"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: calico - options: - calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} -{{% /tabs %}} - -It is also required to enable the Docker service, you can enable the Docker service using the following command: - -``` -systemctl enable docker.service -``` - -The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.6/en/admin-settings/drivers/#node-drivers). - -### Windows Nodes - -Nodes with Windows Server must run Docker Enterprise Edition. - -Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/) - -# Hardware Requirements - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. - -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) - -# Networking Requirements - -For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. - -The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.6/en/cluster-provisioning/). - -For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) - -Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.6/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). 
- -# Optional: Security Considerations - -If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. - -For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.6/en/security/#rancher-hardening-guide) diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md b/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md deleted file mode 100644 index 3bd7d676fd..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Setting up the Azure Cloud Provider -weight: 2 ---- - -When using the `Azure` cloud provider, you can leverage the following capabilities: - -- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. - -- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. - -- **Network Storage:** Support Azure Files via CIFS mounts. - -The following account types are not supported for Azure Subscriptions: - -- Single tenant accounts (i.e. accounts with no subscriptions). -- Multi-subscription accounts. - -# Prerequisites for RKE and RKE2 - -To set up the Azure cloud provider for both RKE and RKE2, the following credentials need to be configured: - -1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) -2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) -3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) -4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) - -### 1. Set up the Azure Tenant ID - -Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). - -If you want to use the Azure CLI, you can run the command `az account show` to get the information. - -### 2. Set up the Azure Client ID and Azure Client Secret - -Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). - -1. Select **Azure Active Directory**. -1. Select **App registrations**. -1. Select **New application registration**. -1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. -1. Select **Create**. - -In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. - -The next step is to generate the **Azure Client Secret**: - -1. Open your created App registration. -1. In the **Settings** view, open **Keys**. -1. Enter a **Key description**, select an expiration time and select **Save**. -1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. - -### 3. 
Configure App Registration Permissions - -The last thing you will need to do, is assign the appropriate permissions to your App registration. - -1. Go to **More services**, search for **Subscriptions** and open it. -1. Open **Access control (IAM)**. -1. Select **Add**. -1. For **Role**, select `Contributor`. -1. For **Select**, select your created App registration name. -1. Select **Save**. - -### 4. Set up Azure Network Security Group Name - -A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. - -If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. - -You should already assign custom hosts to this Network Security Group during provisioning. - -Only hosts expected to be load balancer back ends need to be in this group. - -# RKE2 Cluster Set-up in Rancher - -1. Choose "Azure" from the Cloud Provider drop-down in the Cluster Configuration section. - -1. * Supply the Cloud Provider Configuration. Note that Rancher will automatically create a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you will need to specify them before creating the cluster. - * You can click on "Show Advanced" to see more of these automatically generated names and update them if - necessary. Your Cloud Provider Configuration **must** match the fields in the Machine Pools section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group. - * An example is provided below. You will modify it as needed. - - {{% accordion id="v2.6.0-cloud-provider-config-file" label="Example Cloud Provider Config" %}} - -```yaml -{ - "cloud":"AzurePublicCloud", - "tenantId": "YOUR TENANTID HERE", - "aadClientId": "YOUR AADCLIENTID HERE", - "aadClientSecret": "YOUR AADCLIENTSECRET HERE", - "subscriptionId": "YOUR SUBSCRIPTIONID HERE", - "resourceGroup": "docker-machine", - "location": "westus", - "subnetName": "docker-machine", - "securityGroupName": "rancher-managed-KA4jV9V2", - "securityGroupResourceGroup": "docker-machine", - "vnetName": "docker-machine-vnet", - "vnetResourceGroup": "docker-machine", - "primaryAvailabilitySetName": "docker-machine", - "routeTableResourceGroup": "docker-machine", - "cloudProviderBackoff": false, - "useManagedIdentityExtension": false, - "useInstanceMetadata": true -} -``` - {{% /accordion %}} - -1. Under the **Cluster Configuration > Advanced** section, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false` - -1. Click the **Create** button to submit the form and create the cluster. diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md b/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md deleted file mode 100644 index 8e7deb9bf6..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/_index.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Creating an Azure Cluster -shortTitle: Azure -weight: 2220 ---- - -In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. - -First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. 
- -Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - ->**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: - -> - Terminate the SSL/TLS on the internal load balancer -> - Use the L7 load balancer - -> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). - -For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) - -For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) - -- [Preparation in Azure](#preparation-in-azure) -- [Creating an Azure Cluster](#creating-an-azure-cluster) - -# Preparation in Azure - -Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. - -To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. - -The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: - -``` -az ad sp create-for-rbac \ - --name="" \ - --role="Contributor" \ - --scopes="/subscriptions/" -``` - -The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, and *The client secret*. This information will be used when you create a node template for Azure. - -# Creating an Azure Cluster - -{{% tabs %}} -{{% tab "RKE" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **Azure**. -1. Enter your Azure credentials. -1. Click **Create**. - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. - -1. Click **☰ > Cluster Management**. -1. Click **RKE1 Configuration > Node Templates**. -1. Click **Add Template**. -1. Click **Azure**. -1. Fill out a node template for Azure. 
For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) - -### 3. Create a cluster with node pools using the node template - -Use Rancher to create a Kubernetes cluster in Azure. - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Click **Azure**. -1. Enter a **Cluster Name**. -1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) -1. In the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Click **Create**. - -{{% /tab %}} -{{% tab "RKE2" %}} - -### 1. Create your cloud credentials - -If you already have a set of cloud credentials to use, skip this section. - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **Azure**. -1. Enter your Azure credentials. -1. Click **Create**. - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create your cluster - -Use Rancher to create a Kubernetes cluster in Azure. - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Toggle the switch to **RKE2/K3s**. -1. Click **Azure**. -1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. -1. Enter a **Cluster Name**. -1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. - 1. For each machine pool, define the machine configuration. Refer to the [Azure machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/) for information on configuration options. -1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Click **Create**. - -{{% /tab %}} -{{% /tabs %}} - -**Result:** - -Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active**. 
- -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md b/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md deleted file mode 100644 index 7d3e103cd1..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/_index.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Creating a DigitalOcean Cluster -shortTitle: DigitalOcean -weight: 2215 ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. - -First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. - -Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -{{% tabs %}} -{{% tab "RKE" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **DigitalOcean**. -1. Enter your Digital Ocean credentials. -1. Click **Create**. - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials - -Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. 
Node templates can be reused for other clusters. - -1. Click **☰ > Cluster Management**. -1. Click **RKE1 Configuration > Node Templates**. -1. Click **Add Template**. -1. Click **DigitalOcean**. -1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) - -### 3. Create a cluster with node pools using the node template - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Click **DigitalOcean**. -1. Enter a **Cluster Name**. -1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) -1. **In the Cluster Configuration** section, choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Click **Create**. - -{{% /tab %}} -{{% tab "RKE2" %}} - -### 1. Create your cloud credentials - -If you already have a set of cloud credentials to use, skip this section. - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **DigitalOcean**. -1. Enter your Digital Ocean credentials. -1. Click **Create**. - -### 2. Create your cluster - -Use Rancher to create a Kubernetes cluster in DigitalOcean. - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Toggle the switch to **RKE2/K3s**. -1. Click **DigitalOcean**. -1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. -1. Enter a **Cluster Name**. -1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. - 1. For each machine pool, define the machine configuration. Refer to the [DigitalOcean machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/) for information on configuration options. -1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Click **Create**. - -{{% /tab %}} -{{% /tabs %}} - -**Result:** - -Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active**. 
- -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces -# Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md b/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md deleted file mode 100644 index 26d8d3c457..0000000000 --- a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/_index.md +++ /dev/null @@ -1,275 +0,0 @@ ---- -title: Creating an Amazon EC2 Cluster -shortTitle: Amazon EC2 -description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher -weight: 2210 ---- -In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. - -First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. - -Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. - -### Prerequisites - -- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. -- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
See our three example JSON policies below: - - [Example IAM Policy](#example-iam-policy) - - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers) or want to pass an IAM Profile to an instance) - - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) -- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. - -# Creating an EC2 Cluster - -The steps to create a cluster differ based on your Rancher version. - -{{% tabs %}} -{{% tab "RKE" %}} - -1. [Create your cloud credentials](#1-create-your-cloud-credentials) -2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) -3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) - -### 1. Create your cloud credentials - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **Amazon**. -1. Enter a name for the cloud credential. -1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key**. -1. Click **Create**. - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create a node template with your cloud credentials and information from EC2 - -Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. - -1. Click **☰ > Cluster Management**. -1. Click **RKE1 Configuration > Node Templates** -1. Click **Add Template**. -1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) -1. Click **Create**. - - >**Note:** If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements]({{}}/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. - -### 3. Create a cluster with node pools using the node template - -Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Click **Amazon EC2**. -1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) -1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. 
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) - - >**Note:** If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements]({{}}/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. -1. Click **Create**. - -{{% /tab %}} -{{% tab "RKE2" %}} - -### 1. Create your cloud credentials - -If you already have a set of cloud credentials to use, skip this section. - -1. Click **☰ > Cluster Management**. -1. Click **Cloud Credentials**. -1. Click **Create**. -1. Click **Amazon**. -1. Enter a name for the cloud credential. -1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. -1. Enter your AWS EC2 **Access Key** and **Secret Key**. -1. Click **Create**. - -**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. - -### 2. Create your cluster - -1. Click **☰ > Cluster Management**. -1. On the **Clusters** page, click **Create**. -1. Toggle the switch to **RKE2/K3s**. -1. Click **Amazon EC2**. -1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. -1. Enter a **Cluster Name**. -1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. - 1. For each machine pool, define the machine configuration. Refer to [the EC2 machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/) for information on configuration options. -1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) -1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. -1. Click **Create**. - -{{% /tab %}} -{{% /tabs %}} - -**Result:** - -Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. - -You can access your cluster after its state is updated to **Active**. - -**Active** clusters are assigned two Projects: - -- `Default`, containing the `default` namespace -- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces - -### Optional Next Steps - -After creating your cluster, you can access it through the Rancher UI. 
As a best practice, we recommend setting up these alternate ways of accessing your cluster: - -- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. -- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. - -# IAM Policies - -### Example IAM Policy - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` - -### Example IAM Policy with PassRole - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "VisualEditor0", - "Effect": "Allow", - "Action": [ - "ec2:AuthorizeSecurityGroupIngress", - "ec2:Describe*", - "ec2:ImportKeyPair", - "ec2:CreateKeyPair", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:DeleteKeyPair", - "ec2:ModifyInstanceMetadataOptions" - ], - "Resource": "*" - }, - { - "Sid": "VisualEditor1", - "Effect": "Allow", - "Action": [ - "iam:PassRole", - "ec2:RunInstances" - ], - "Resource": [ - "arn:aws:ec2:REGION::image/ami-*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", - "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" - ] - }, - { - "Sid": "VisualEditor2", - "Effect": "Allow", - "Action": [ - "ec2:RebootInstances", - "ec2:TerminateInstances", - "ec2:StartInstances", - "ec2:StopInstances" - ], - "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" - } - ] -} -``` -### Example IAM Policy to allow encrypted EBS volumes -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - 
"Action": [ - "kms:Decrypt", - "kms:GenerateDataKeyWithoutPlaintext", - "kms:Encrypt", - "kms:DescribeKey", - "kms:CreateGrant", - "ec2:DetachVolume", - "ec2:AttachVolume", - "ec2:DeleteSnapshot", - "ec2:DeleteTags", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:CreateSnapshot" - ], - "Resource": [ - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", - "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", - "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" - ] - }, - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeTags", - "ec2:DescribeVolumes", - "ec2:DescribeSnapshots" - ], - "Resource": "*" - } - ] -} -``` diff --git a/content/rancher/v2.6/en/deploy-across-clusters/fleet/_index.md b/content/rancher/v2.6/en/deploy-across-clusters/fleet/_index.md deleted file mode 100644 index 1b129ee97f..0000000000 --- a/content/rancher/v2.6/en/deploy-across-clusters/fleet/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Fleet - GitOps at Scale -weight: 1 ---- - -Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. - -Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. - -- [Architecture](#architecture) -- [Accessing Fleet in the Rancher UI](#accessing-fleet-in-the-rancher-ui) -- [Windows Support](#windows-support) -- [GitHub Repository](#github-repository) -- [Using Fleet Behind a Proxy](#using-fleet-behind-a-proxy) -- [Helm Chart Dependencies](#helm-chart-dependencies) -- [Troubleshooting](#troubleshooting) -- [Documentation](#documentation) - -# Architecture - -For information about how Fleet works, see [this page.](./architecture) - -# Accessing Fleet in the Rancher UI - -Fleet comes preinstalled in Rancher and is managed by the **Continous Delivery** option in the Rancher UI. For additional information on Continuous Delivery and other Fleet troubleshooting tips, refer [here](https://fleet.rancher.io/troubleshooting/). - -Users can leverage continuous delivery to deploy their applications to the Kubernetes clusters in the git repository without any manual operation by following **gitops** practice. - -Follow the steps below to access Continuous Delivery in the Rancher UI: - -1. Click **☰ > Continuous Delivery**. - -1. Select your namespace at the top of the menu, noting the following: - - By default,`fleet-default` is selected which includes all downstream clusters that are registered through Rancher. - - You may switch to `fleet-local`, which only contains the `local` cluster, or you may create your own workspace to which you may assign and move clusters. - - You can then manage clusters by clicking on **Clusters** on the left navigation bar. - -1. Click on **Gitrepos** on the left navigation bar to deploy the gitrepo into your clusters in the current workspace. - -1. 
Select your [git repository](https://fleet.rancher.io/gitrepo-add/) and [target clusters/cluster group](https://fleet.rancher.io/gitrepo-structure/). You can also create the cluster group in the UI by clicking on **Cluster Groups** from the left navigation bar. - -1. Once the gitrepo is deployed, you can monitor the application through the Rancher UI. - -# Windows Support - -For details on support for clusters with Windows nodes, see [this page.](./windows) - - -# GitHub Repository - -The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) - - -# Using Fleet Behind a Proxy - -For details on using Fleet behind a proxy, see [this page.](./proxy) - -# Helm Chart Dependencies - -In order for Helm charts with dependencies to deploy successfully, you must run a manual command (as listed below), as it is up to the user to fulfill the dependency list. If you do not do this and proceed to clone your repository and run `helm install`, your installation will fail because the dependencies will be missing. - -The Helm chart in the git repository must include its dependencies in the charts subdirectory. You must either manually run `helm dependencies update $chart` OR run `helm dependencies build $chart` locally, then commit the complete charts directory to your git repository. Note that you will update your commands with the applicable parameters. - -# Troubleshooting - ---- -* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator]({{}}/rancher/v2.6/en/backups/back-up-rancher/#1-install-the-rancher-backups-operator). We will update the community once a permanent solution is in place. - -* **Temporary Workaround:**
-By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify the resourceSet to include extra resources you want to back up, refer to the docs [here](https://github.com/rancher/backup-restore-operator#user-flow). - ---- - -# Documentation - -The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/) diff --git a/content/rancher/v2.6/en/deploy-across-clusters/fleet/architecture/_index.md deleted file mode 100644 index 620747b01c..0000000000 --- a/content/rancher/v2.6/en/deploy-across-clusters/fleet/architecture/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Architecture -weight: 1 ---- - -Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, Kustomize, or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy everything in the cluster. This gives you a high degree of control, consistency, and auditability. Fleet focuses not only on the ability to scale, but also on giving you a high degree of control and visibility into exactly what is installed on the cluster. - -![Architecture]({{}}/img/rancher/fleet-architecture.svg) - diff --git a/content/rancher/v2.6/en/faq/_index.md deleted file mode 100644 index 4fc0889d10..0000000000 --- a/content/rancher/v2.6/en/faq/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: FAQ -weight: 25 ---- - -This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. - -See the [Technical FAQ]({{}}/rancher/v2.6/en/faq/technical/) for frequently asked technical questions. - -
- -**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** - -When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. - -
- -**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** - -Yes. - -
- -**Does Rancher support Windows?** - -As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/) - -
- -**Does Rancher support Istio?** - -As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.6/en/istio/) - -Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). - -
- -**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** - -Secrets management is on our roadmap but we haven't assigned it to a specific release yet. - -
- -**Does Rancher v2.x support RKT containers as well?** - -At this time, we only support Docker. - -
- -**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and registered Kubernetes?** - -Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. - -
- -**Are you planning on supporting Traefik for existing setups?** - -We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. - -
- -**Can I import OpenShift Kubernetes clusters into v2.x?** - -Our goal is to run any upstream Kubernetes cluster. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. - -
- -**Are you going to integrate Longhorn?** - -Yes. Longhorn was integrated into Rancher v2.5+. diff --git a/content/rancher/v2.6/en/faq/dockershim/_index.md b/content/rancher/v2.6/en/faq/dockershim/_index.md deleted file mode 100644 index a9f79c0bdd..0000000000 --- a/content/rancher/v2.6/en/faq/dockershim/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Dockershim -weight: 300 ---- - -The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). - -RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. - -To enable the external Dockershim, configure the following option. - -``` -enable_cri_dockerd: true -``` - -For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. - -### FAQ - -
- -Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? - -A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on Rancher 2.6 or above to have support for RKE with Kubernetes 1.21. See our [support matrix](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/) for details. - -
- -Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? - -A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and is not scheduled for removal upstream until Kubernetes 1.24. It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to Kubernetes 1.21 as you would normally, but should consider enabling the external Dockershim by Kubernetes 1.22. The external Dockershim will need to be enabled before upgrading to Kubernetes 1.24, at which point the existing implementation will be removed. - -For more information on the deprecation and its timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). - -
- -Q: What are my other options if I don’t want to depend on the Dockershim? - -A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 and K3s are two options for doing this. - -
- -Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? - -A: Rancher is exploring the possibility of an in-place upgrade path. Alternatively, you can always migrate workloads from one cluster to another using kubectl, as sketched below. - -
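The kubectl-based migration can be as simple as exporting an application's namespaced resources from the RKE1 cluster and re-applying them on the RKE2 cluster. The following is only a rough sketch: the context and namespace names are hypothetical examples, and persistent data (volumes) must be handled separately, for example with a backup and restore tool.

```bash
# Export an application's resources from the RKE1 cluster
# (context and namespace names are hypothetical examples).
kubectl --context rke1-cluster -n my-app get deploy,svc,configmap,secret -o yaml > my-app.yaml

# You may need to clean cluster-specific fields (status, uid, resourceVersion)
# from the exported manifests before re-applying them.

# Recreate the namespace on the RKE2 cluster and apply the manifests.
kubectl --context rke2-cluster create namespace my-app
kubectl --context rke2-cluster -n my-app apply -f my-app.yaml
```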
diff --git a/content/rancher/v2.6/en/faq/networking/cni-providers/_index.md b/content/rancher/v2.6/en/faq/networking/cni-providers/_index.md deleted file mode 100644 index ea4bbde487..0000000000 --- a/content/rancher/v2.6/en/faq/networking/cni-providers/_index.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -title: Container Network Interface (CNI) Providers -description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you -weight: 2300 ---- - -## What is CNI? - -CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. - -Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. - -![CNI Logo]({{}}/img/rancher/cni-logo.png) - -For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). - -## What Network Models are Used in CNI? - -CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). - -### What is an Encapsulated Network? - -This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. - -In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. - -This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. - -CNI network providers using this network model include Flannel, Canal, Weave, and Cilium. By default, Calico is not using this model, but it can be configured to do so. - -![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) - -### What is an Unencapsulated Network? - -This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. 
Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). - -In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. - -This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. - -CNI network providers using this network model include Calico and Cilium. Cilium may be configured with this model although it is not the default mode. - -![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) - -## What CNI Providers are Provided by Rancher? - -### RKE Kubernetes clusters - -Out-of-the-box, Rancher provides the following CNI network providers for RKE Kubernetes clusters: Canal, Flannel, and Weave. - -You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Canal - -![Canal Logo]({{}}/img/rancher/canal-logo.png) - -Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. - -In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. - -Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (health checks). If using Wireguard, you should open UDP ports `51820` and `51821`. For more details, refer to [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/). - -{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} - -For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) - -#### Flannel - -![Flannel Logo]({{}}/img/rancher/flannel-logo.png) - -Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan). - -Encapsulated traffic is unencrypted by default. Flannel provides two solutions for encryption: - -* [IPSec](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. It is an experimental backend for encryption. -* [WireGuard](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard), which is a more faster-performing alternative to strongSwan. 
- -Kubernetes workers should open UDP port `8472` (VXLAN). See [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) - -For more information, see the [Flannel GitHub Page](https://github.com/flannel-io/flannel). - -#### Weave - -![Weave Logo]({{}}/img/rancher/weave-logo.png) - -Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. - -Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - -For more information, see the following pages: - -- [Weave Net Official Site](https://www.weave.works/) - -### RKE2 Kubernetes clusters - -Out-of-the-box, Rancher provides the following CNI network providers for RKE2 Kubernetes clusters: [Canal](#canal) (see above section), Calico, and Cilium. - -You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. - -#### Calico - -![Calico Logo]({{}}/img/rancher/calico-logo.png) - -Calico enables networking and network policy in Kubernetes clusters across the cloud. By default, Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. - -Calico also provides a stateless IP-in-IP or VXLAN encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. - -Kubernetes workers should open TCP port `179` if using BGP or UDP port `4789` if using VXLAN encapsulation. In addition, TCP port `5473` is needed when using Typha. See [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. - ->**Important:** In Rancher v2.6.3, Calico probes fail on Windows nodes upon RKE2 installation. Note that this issue is resolved in v2.6.4. -> ->- To work around this issue, first navigate to `https:///v3/settings/windows-rke2-install-script`. -> ->- There, change the current setting: `https://raw.githubusercontent.com/rancher/wins/v0.1.3/install.ps1` to this new setting: `https://raw.githubusercontent.com/rancher/rke2/master/windows/rke2-install.ps1`. - -![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) - -For more information, see the following pages: - -- [Project Calico Official Site](https://www.projectcalico.org/) -- [Project Calico GitHub Page](https://github.com/projectcalico/calico) - -#### Cilium - -![Cilium Logo]({{}}/img/rancher/cilium-logo.png) - -Cilium enables networking and network policies (L3, L4, and L7) in Kubernetes. By default, Cilium uses eBPF technologies to route packets inside the node and VXLAN to send packets to other nodes. Unencapsulated techniques can also be configured. - -Cilium recommends kernel versions greater than 5.2 to be able to leverage the full potential of eBPF. 
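If you are unsure whether your nodes meet this recommendation, a quick informal check on each worker node (over SSH, for example) is shown below.

```bash
# Kernel release; ideally greater than 5.2 for full eBPF support.
uname -r

# Cilium also relies on the BPF filesystem, normally mounted at /sys/fs/bpf.
mount | grep /sys/fs/bpf
```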
Kubernetes workers should open UDP port `8472` for VXLAN and TCP port `4240` for health checks. In addition, ICMP 8/0 must be enabled for health checks. For more information, check [Cilium System Requirements](https://docs.cilium.io/en/latest/operations/system_requirements/#firewall-requirements). - -##### Ingress Routing Across Nodes in Cilium -
-By default, Cilium does not allow pods to contact pods on other nodes. To work around this, enable the ingress controller to route requests across nodes with a `CiliumNetworkPolicy`. - -After selecting the Cilium CNI and enabling Project Network Isolation for your new cluster, configure as follows: - -``` -apiVersion: cilium.io/v2 -kind: CiliumNetworkPolicy -metadata: - name: hn-nodes - namespace: default -spec: - endpointSelector: {} - ingress: - - fromEntities: - - remote-node -``` - -## CNI Features by Provider - -The following table summarizes the different features available for each CNI network provider provided by Rancher. - -| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | -| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -| Canal | Encapsulated (VXLAN) | No | Yes | No | K8s API | Yes | Yes | -| Flannel | Encapsulated (VXLAN) | No | No | No | K8s API | Yes | No | -| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | -| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | -| Cilium | Encapsulated (VXLAN) | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | - -- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) - -- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. - -- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. - -- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. - -- External Datastore: CNI network providers with this feature need an external datastore for its data. - -- Encryption: This feature allows cyphered and secure network control and data planes. - -- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. - - -## CNI Community Popularity - -The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2022. - -| Provider | Project | Stars | Forks | Contributors | -| ---- | ---- | ---- | ---- | ---- | -| Canal | https://github.com/projectcalico/canal | 679 | 100 | 21 | -| Flannel | https://github.com/flannel-io/flannel | 7k | 2.5k | 185 | -| Calico | https://github.com/projectcalico/calico | 3.1k | 741 | 224 | -| Weave | https://github.com/weaveworks/weave/ | 6.2k | 635 | 84 | -| Cilium | https://github.com/cilium/cilium | 10.6k | 1.3k | 352 | - -
- -## Which CNI Provider Should I Use? - -It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. - -Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. - -## How can I configure a CNI network provider? - -Please see [Cluster Options]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.6/en/faq/security/_index.md b/content/rancher/v2.6/en/faq/security/_index.md deleted file mode 100644 index 55eb76ee08..0000000000 --- a/content/rancher/v2.6/en/faq/security/_index.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Security -weight: 8007 - ---- - -**Is there a Hardening Guide?** - -The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.6/en/security/) section. - -
- -**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** - -We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.6/en/security/) section. diff --git a/content/rancher/v2.6/en/helm-charts/creating-apps/_index.md b/content/rancher/v2.6/en/helm-charts/creating-apps/_index.md deleted file mode 100644 index b4376f81ef..0000000000 --- a/content/rancher/v2.6/en/helm-charts/creating-apps/_index.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: Creating Apps -weight: 400 ---- - -Rancher's App Marketplace is based on Helm Repositories and Helm Charts. You can add HTTP based standard Helm Repositories as well as any Git Repository which contains charts. - -> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. - -- [Chart types](#chart-types) - - [Helm charts](#helm-charts) - - [Rancher charts](#rancher-charts) -- [Chart directory structure](#chart-directory-structure) -- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) - - [questions.yml](#questions-yml) - - [Min/Max Rancher versions](#min-max-rancher-versions) - - [Question variable reference](#question-variable-reference) -- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) - -# Chart Types - -Rancher supports two different types of charts: Helm charts and Rancher charts. - -### Helm Charts - -Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you' can provide the chart's parameter values in a YAML editor. - -### Rancher Charts - -Rancher charts are native helm charts with two files that enhance user experience: `app-readme.md` and `questions.yaml`. Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) - -Rancher charts add simplified chart descriptions and configuration forms to make the application deployment easy. Rancher users do not need to read through the entire list of Helm variables to understand how to launch an application. - -# Chart Directory Structure - -You can provide Helm Charts in a standard, HTTP based Helm Repository. For more information see the [Chart Repository Guide](https://helm.sh/docs/topics/chart_repository) in the official Helm documentation. - -Alternatively you can organize your charts in a Git Repository and directly add this to the App Marketplace. - -The following table demonstrates the directory structure for a Git repository. The `charts` directory is the top level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. - -``` -/ - │ - ├── charts/ - │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name - │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI - │ │ │ ├── Chart.yaml # Required Helm chart information file. - │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. 
Questions display in Configuration Options.* - │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. - │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. - │ │ │ ├── values.yml # Default configuration values for the chart. - │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. -``` - -# Additional Files for Rancher Charts - -Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. - -- `app-readme.md` - - A file that provides descriptive text in the chart's UI header. - -- `questions.yml` - - A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using a values YAML config, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). - - -
Rancher Chart with questions.yml (top) vs. Helm Chart without (bottom)
- - ![questions.yml]({{}}/img/rancher/rancher-app-2.6.png) - ![values.yaml]({{}}/img/rancher/helm-app-2.6.png) - - -### Chart.yaml annotations - -Rancher supports additional annotations that you can add to the `Chart.yaml` file. These annotations allow you to define application dependencies or configure additional UI defaults: - -| Annotation | Description | Example | -| --------------------------------- | ----------- | ------- | -| catalog.cattle.io/auto-install | If set, will install the specified chart in the specified version before installing this chart | other-chart-name=1.0.0 | -| catalog.cattle.io/display-name | A display name that should be displayed in the App Marketplace instead of the chart name | Display Name of Chart | -| catalog.cattle.io/namespace | A fixed namespace where the chart should be deployed in. If set, this can't be changed by the user | fixed-namespace | -| catalog.cattle.io/release-name | A fixed release name for the Helm installation. If set, this can't be changed by the user | fixed-release-name | -| catalog.cattle.io/requests-cpu | Total amount of CPU that should be unreserverd in the cluster. If less CPU is available, a warning will be shown | 2000m | -| catalog.cattle.io/requests-memory | Total amount of memory that should be unreserverd in the cluster. If less memory is available, a warning will be shown | 2Gi | -| catalog.cattle.io/os | Restricts the OS where this chart can be installed. Possible values: `linux`, `windows`. Default: no restriction | linux | - -### questions.yml - -Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. - -### Min/Max Rancher versions - -For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. - -> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. - -``` -rancher_min_version: 2.3.0 -rancher_max_version: 2.3.99 -``` - -### Question Variable Reference - -This reference contains variables that you can use in `questions.yml` nested under `questions:`. - -| Variable | Type | Required | Description | -| ------------- | ------------- | --- |------------- | -| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | -| label | string | true | Define the UI label. | -| description | string | false | Specify the description of the variable.| -| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| -| required | bool | false | Define if the variable is required or not (true \| false)| -| default | string | false | Specify the default value. | -| group | string | false | Group questions by input value. | -| min_length | int | false | Min character length.| -| max_length | int | false | Max character length.| -| min | int | false | Min integer length. | -| max | int | false | Max integer length. | -| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"| -| valid_chars | string | false | Regular expression for input chars validation. | -| invalid_chars | string | false | Regular expression for invalid input chars validation.| -| subquestions | []subquestion | false| Add an array of subquestions.| -| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | -| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| - ->**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. diff --git a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/_index.md b/content/rancher/v2.6/en/installation/install-rancher-on-k8s/_index.md deleted file mode 100644 index cf9b936c74..0000000000 --- a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/_index.md +++ /dev/null @@ -1,300 +0,0 @@ ---- -title: Install/Upgrade Rancher on a Kubernetes Cluster -description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation -weight: 2 ---- - -In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. - -- [Prerequisites](#prerequisites) -- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) - -# Prerequisites - -- [Kubernetes Cluster](#kubernetes-cluster) -- [Ingress Controller](#ingress-controller) -- [CLI Tools](#cli-tools) - -### Kubernetes Cluster - -Set up the Rancher server's local Kubernetes cluster. - -Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. 
- -For help setting up a Kubernetes cluster, we provide these tutorials: - -- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) -- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) -- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke2) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) -- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/amazon-eks) -- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/aks) -- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/gke) - -### Ingress Controller - -The Rancher UI and API are exposed through an Ingress. This means the Kubernetes cluster that you install Rancher in must contain an Ingress controller. - -For RKE, RKE2, and K3s installations, you don't have to install the Ingress controller manually because one is installed by default. - -For distributions that do not include an Ingress Controller by default, like a hosted Kubernetes cluster such as EKS, GKE, or AKS, you have to deploy an Ingress controller first. Note that the Rancher Helm chart does not set an `ingressClassName` on the ingress by default. Because of this, you have to configure the Ingress controller to also watch ingresses without an `ingressClassName`. - -Examples are included in the **Amazon EKS**, **AKS**, and **GKE** tutorials above. - -### CLI Tools - -The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. - -- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. -- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. 
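Before moving on, it can be worth confirming that both tools are installed and available on your `$PATH`; a minimal check:

```bash
# Print the client versions to confirm both tools are installed and on $PATH.
kubectl version --client
helm version
```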
- -# Install the Rancher Helm Chart - -Rancher is installed using the [Helm](https://helm.sh/) package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm, we can create configurable deployments instead of just using static files. - -For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/). - -To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.6/en/installation/resources/choosing-version) - -To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) - -> **Note:** The installation instructions assume you are using Helm 3. - -To set up Rancher, - -1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) -2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) -3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) -4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) -5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) -6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) -7. [Save your options](#7-save-your-options) - -### 1. Add the Helm Chart Repository - -Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - -{{< release-channel >}} - -``` -helm repo add rancher- https://releases.rancher.com/server-charts/ -``` - -### 2. Create a Namespace for Rancher - -We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: - -``` -kubectl create namespace cattle-system -``` - -### 3. Choose your SSL Configuration - -The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: - -- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. -- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. 
-- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. - - -| Configuration | Helm Chart Option | Requires cert-manager | -| ------------------------------ | ----------------------- | ------------------------------------- | -| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#4-install-cert-manager) | -| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#4-install-cert-manager) | -| Certificates from Files | `ingress.tls.source=secret` | no | - -### 4. Install cert-manager - -**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. - -> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). - -{{% accordion id="cert-manager" label="Click to Expand" %}} - -> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). - -These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). - -``` -# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart: -kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml - -# Add the Jetstack Helm repository -helm repo add jetstack https://charts.jetstack.io - -# Update your local Helm chart repository cache -helm repo update - -# Install the cert-manager Helm chart -helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --version v1.7.1 -``` - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -{{% /accordion %}} - -### 5. Install Rancher with Helm and Your Chosen Certificate Option - -The exact command to install Rancher differs depending on the certificate configuration. 
- -However, irrespective of the certificate configuration, the name of the Rancher installation in the `cattle-system` namespace should always be `rancher`. - -> **Tip for testing and development:** This final command to install Rancher requires a domain name that forwards traffic to Rancher. If you are using the Helm CLI to set up a proof-of-concept, you can use a fake domain name when passing the `hostname` option. An example of a fake domain name would be `.sslip.io`, which would expose Rancher on an IP where it is running. Production installs would require a real domain name. - -{{% tabs %}} -{{% tab "Rancher-generated Certificates" %}} - - -The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. - -Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. - -- Set the `hostname` to the DNS name you pointed at your load balancer. -- Set the `bootstrapPassword` to something unique for the `admin` user. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. -- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set bootstrapPassword=admin -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Let's Encrypt" %}} - -This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. - ->**Note:** You need to have port 80 open as the HTTP-01 challenge can only be done on port 80. - -In the following command, - -- `hostname` is set to the public DNS record, -- Set the `bootstrapPassword` to something unique for the `admin` user. -- `ingress.tls.source` is set to `letsEncrypt` -- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) -- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set bootstrapPassword=admin \ - --set ingress.tls.source=letsEncrypt \ - --set letsEncrypt.email=me@example.org \ - --set letsEncrypt.ingress.class=nginx -``` - -Wait for Rancher to be rolled out: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -{{% /tab %}} -{{% tab "Certificates from Files" %}} -In this option, Kubernetes secrets are created from your own certificates for Rancher to use. - -When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. 
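If you want a quick local sanity check before installing, you can inspect the certificate with OpenSSL. This is only a sketch and assumes the server certificate is available on your workstation as a PEM file named `tls.crt`:

```
# Show the certificate subject (includes the Common Name)
openssl x509 -noout -subject -in tls.crt

# Show the Subject Alternative Names, if present
openssl x509 -noout -text -in tls.crt | grep -A1 "Subject Alternative Name"
```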
- -Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. - -> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.6/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) - -- Set the `hostname`. -- Set the `bootstrapPassword` to something unique for the `admin` user. -- Set `ingress.tls.source` to `secret`. -- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set bootstrapPassword=admin \ - --set ingress.tls.source=secret -``` - -If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: - -``` -helm install rancher rancher-/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set bootstrapPassword=admin \ - --set ingress.tls.source=secret \ - --set privateCA=true -``` - -Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. -{{% /tab %}} -{{% /tabs %}} - -The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. - -- [HTTP Proxy]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) -- [Private container image Registry]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) -- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -See the [Chart Options]({{}}/rancher/v2.6/en/installation/resources/chart-options/) for the full list of options. - - -### 6. Verify that the Rancher Server is Successfully Deployed - -After adding the secrets, check if Rancher was rolled out successfully: - -``` -kubectl -n cattle-system rollout status deploy/rancher -Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... -deployment "rancher" successfully rolled out -``` - -If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: - -``` -kubectl -n cattle-system get deploy rancher -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -rancher 3 3 3 3 3m -``` - -It should show the same count for `DESIRED` and `AVAILABLE`. - -### 7. Save Your Options - -Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. - -### Finishing Up - -That's it. You should have a functional Rancher server. - -In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. - -Doesn't work? 
Take a look at the [Troubleshooting]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) Page diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/_index.md deleted file mode 100644 index a739fd0798..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/_index.md +++ /dev/null @@ -1,244 +0,0 @@ ---- -title: 4. Install Rancher -weight: 400 ---- - -This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. - -### Privileged Access for Rancher - -When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. - -# Docker Instructions - -If you want to continue the air gapped installation using Docker commands, skip the rest of this page and follow the instructions on [this page.](./docker-install-commands) - -# Kubernetes Instructions - -Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. - -This section describes installing Rancher: - -- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) -- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) -- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) -- [4. Install Rancher](#4-install-rancher) - -# 1. Add the Helm Chart Repository - -From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. - -1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. - -2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). - {{< release-channel >}} - ``` - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. - ```plain - helm fetch rancher-/rancher - ``` - - If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: - ```plain - helm fetch rancher-stable/rancher --version=v2.4.8 - ``` - -# 2. Choose your SSL Configuration - -Rancher Server is designed to be secure by default and requires SSL/TLS configuration. 
- -When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. - -> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). - -| Configuration | Chart option | Description | Requires cert-manager | -| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | -| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | -| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | - -# Helm Chart Options for Air Gap Installations - -When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. - -| Chart Option | Chart Value | Description | -| ----------------------- | -------------------------------- | ---- | -| `certmanager.version` | "" | Configure proper Rancher TLS issuer depending of running cert-manager version. | -| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -# 3. Render the Rancher Helm Template - -Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. - -# Option A: Default Self-Signed Certificate - - -By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. - -> **Note:** -> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). - -### 1. Add the cert-manager repo - -From a system connected to the internet, add the cert-manager repo to Helm: - -```plain -helm repo add jetstack https://charts.jetstack.io -helm repo update -``` - -### 2. Fetch the cert-manager chart - -Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - -**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. - -```plain -helm fetch jetstack/cert-manager --version v1.7.1 -``` - -### 3. Render the cert-manager template - -Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - -```plain -helm template cert-manager ./cert-manager-v1.7.1.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller \ - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ - --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl -``` - -### 4. 
Download the cert-manager CRD - -Download the required CRD file for cert-manager: - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml - ``` - -### 5. Render the Rancher template - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - - -Placeholder | Description -------------|------------- -`` | The version number of the output tarball. -`` | The DNS name you pointed at your load balancer. -`` | The DNS name for your private registry. -`` | Cert-manager version running on k8s cluster. - -```plain -helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set certmanager.version= \ - --set rancherImage=/rancher/rancher \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` - -# Option B: Certificates From Files using Kubernetes Secrets - - -### 1. Create secrets - -Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. - -### 2. Render the Rancher template - -Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------- | -| `` | The version number of the output tarball. | -| `` | The DNS name you pointed at your load balancer. | -| `` | The DNS name for your private registry. | - -```plain - helm template rancher ./rancher-.tgz --output-dir . \ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: - -```plain - helm template rancher ./rancher-.tgz --output-dir . 
\ - --no-hooks \ # prevent files for Helm hooks from being generated - --namespace cattle-system \ - --set hostname= \ - --set rancherImage=/rancher/rancher \ - --set ingress.tls.source=secret \ - --set privateCA=true \ - --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher - --set useBundledSystemChart=true # Use the packaged Rancher system charts -``` - -**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` - -Then refer to [Adding TLS Secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. - -# 4. Install Rancher - -Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. - -Use `kubectl` to create namespaces and apply the rendered manifests. - -If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. - -### For Self-Signed Certificate Installs, Install Cert-manager - -{{% accordion id="install-cert-manager" label="Click to expand" %}} - -If you are using self-signed certificates, install cert-manager: - -1. Create the namespace for cert-manager. -```plain -kubectl create namespace cert-manager -``` - -1. Create the cert-manager CustomResourceDefinitions (CRDs). -```plain -kubectl apply -f cert-manager/cert-manager-crd.yaml -``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Launch cert-manager. -```plain -kubectl apply -R -f ./cert-manager -``` - -{{% /accordion %}} - -### Install Rancher with kubectl - -```plain -kubectl create namespace cattle-system -kubectl -n cattle-system apply -R -f ./rancher -``` -The installation is complete. - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.6/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. - -# Additional Resources - -These resources could be helpful when installing Rancher: - -- [Rancher Helm chart options]({{}}/rancher/v2.6/en/installation/resources/chart-options/) -- [Adding TLS secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) -- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md deleted file mode 100644 index 97aeffdcca..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/_index.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Docker Install Commands -weight: 1 ---- - -The Docker installation is for Rancher users who want to test out Rancher. - -Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. 
Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. - -The backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.]({{}}/rancher/v2.6/en/backups/migrating-rancher) - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -| Environment Variable Key | Environment Variable Value | Description | -| -------------------------------- | -------------------------------- | ---- | -| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | -| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | - -> **Do you want to..**. -> -> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.6/en/installation/resources/custom-ca-root-certificate/). -> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). - -Choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. - -Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | - -Privileged access is [required.](#privileged-access-for-rancher) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. 
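As one possible way to produce such a certificate (a minimal sketch, assuming OpenSSL 1.1.1 or later and the example hostname `rancher.my.org`), you can generate a private key and a matching self-signed certificate in PEM format with a single command:

```
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout key.pem -out cert.pem \
  -subj "/CN=rancher.my.org" \
  -addext "subjectAltName=DNS:rancher.my.org"
```

The resulting `cert.pem` and `key.pem` files can then be mounted into the Rancher container as shown in the install command below.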
- -> **Prerequisites:** -> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. -> -> - The certificate files must be in PEM format. -> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | The path to the certificate authority's certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | - -Privileged access is [required.](#privileged-access-for-rancher) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. - -> **Prerequisite:** The certificate files must be in PEM format. - -After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. - -| Placeholder | Description | -| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `` | The path to the directory containing your certificate files. | -| `` | The path to your full certificate chain. | -| `` | The path to the private key for your certificate. | -| `` | Your private registry URL and port. | -| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. 
- -Privileged access is [required.](#privileged-access-for-rancher) - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` - -{{% /accordion %}} - - - -> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.6/en/faq/telemetry/) during the initial login. - diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md deleted file mode 100644 index e86238ca77..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/launch-kubernetes/_index.md +++ /dev/null @@ -1,356 +0,0 @@ ---- -title: '3. Install Kubernetes (Skip for Docker Installs)' -weight: 300 ---- - -> Skip this section if you are installing Rancher on a single node with Docker. - -This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.6/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. - -Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. - -The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are shown below. - -{{% tabs %}} -{{% tab "K3s" %}} - -In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. - -### Installation Outline - -1. [Prepare Images Directory](#1-prepare-images-directory) -2. [Create Registry YAML](#2-create-registry-yaml) -3. [Install K3s](#3-install-k3s) -4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) - -### 1. Prepare Images Directory -Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. - -Place the tar file in the `images` directory before starting K3s on each node, for example: - -```sh -sudo mkdir -p /var/lib/rancher/k3s/agent/images/ -sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ -``` - -### 2. Create Registry YAML -Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. - -The registries.yaml file should look like this before plugging in the necessary information: - -``` ---- -mirrors: - customreg: - endpoint: - - "https://ip-to-server:5000" -configs: - customreg: - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: - key_file: - ca_file: -``` - -Note, at this time only secure registries are supported with K3s (SSL with custom CA). - -For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) - -### 3. Install K3s - -Rancher needs to be installed on a supported Kubernetes version. 
To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -Obtain the K3s binary from the [releases](https://github.com/k3s-io/k3s/releases) page, matching the same version used to get the airgap images tar. -Also obtain the K3s install script at https://get.k3s.io - -Place the binary in `/usr/local/bin` on each node. -Place the install script anywhere on each node, and name it `install.sh`. - -Install K3s on each server: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh -``` - -Install K3s on each agent: - -``` -INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh -``` - -Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. -The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` - ->**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. - -### 4. Save and Start Using the kubeconfig File - -When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: - -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### Note on Upgrading - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. 
Download the new air-gap images (tar file) from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. -2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. -3. Restart the K3s service (if not restarted automatically by installer). -{{% /tab %}} -{{% tab "RKE2" %}} - -In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. - -### Installation Outline - -1. [Create RKE2 configuration](#1-create-rke2-configuration) -2. [Create Registry YAML](#2-create-registry-yaml) -3. [Install RKE2](#3-install-rke2) -4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) - -### 1. Create RKE2 configuration -Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. - -On the first server the minimum config is: - -``` -token: my-shared-secret -tls-san: - - loadbalancer-dns-domain.com -``` - -On each other server the config file should contain the same token and tell RKE2 to connect to the existing first server: - -``` -server: https://ip-of-first-server:9345 -token: my-shared-secret -tls-san: - - loadbalancer-dns-domain.com -``` - -For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/ha/). - ->**Note:** RKE2 additionally provides a `resolv-conf` option for kubelets, which may help with configuring DNS in air-gap networks. - -### 2. Create Registry YAML -Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. - -The registries.yaml file should look like this before plugging in the necessary information: - -``` ---- -mirrors: - customreg: - endpoint: - - "https://ip-to-server:5000" -configs: - customreg: - auth: - username: xxxxxx # this is the registry username - password: xxxxxx # this is the registry password - tls: - cert_file: - key_file: - ca_file: -``` - -For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration/) - -### 3. Install RKE2 - -Rancher needs to be installed on a supported Kubernetes version. 
To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -Download the install script, rke2, rke2-images, and sha256sum archives from the release and upload them into a directory on each server: - -``` -mkdir /tmp/rke2-artifacts && cd /tmp/rke2-artifacts/ -wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2-images.linux-amd64.tar.zst -wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2.linux-amd64.tar.gz -wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/sha256sum-amd64.txt -curl -sfL https://get.rke2.io --output install.sh -``` - -Next, run install.sh using the directory on each server, as in the example below: - -``` -INSTALL_RKE2_ARTIFACT_PATH=/tmp/rke2-artifacts sh install.sh -``` - -Then enable and start the service on all servers: - -`` -systemctl enable rke2-server.service -systemctl start rke2-server.service -`` - -For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap/). - -### 4. Save and Start Using the kubeconfig File - -When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. - -To use this `kubeconfig` file, - -1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl), a Kubernetes command-line tool. -2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. -3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `rke2.yaml`: - -``` -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: [CERTIFICATE-DATA] - server: [LOAD-BALANCER-DNS]:6443 # Edit this line - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - password: [PASSWORD] - username: admin -``` - -**Result:** You can now use `kubectl` to manage your RKE2 cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: - -``` -kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces -``` - -For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. - -### Note on Upgrading - -Upgrading an air-gap environment can be accomplished in the following manner: - -1. 
Download the new air-gap artifacts and install script from the [releases](https://github.com/rancher/rke2/releases) page for the version of RKE2 you will be upgrading to. -2. Run the script again just as you had done in the past with the same environment variables. -3. Restart the RKE2 service. -{{% /tab %}} -{{% tab "RKE" %}} -We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. - -### 1. Install RKE - -Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) - -### 2. Create an RKE Config File - -From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. - -This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. - -Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the three nodes you created. - -> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). - -
**RKE Options**
- -| Option | Required | Description | -| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | -| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | -| `user` | ✓ | A user that can run Docker commands. | -| `role` | ✓ | List of Kubernetes roles assigned to the node. | -| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | -| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | - -> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. - -```yaml -nodes: - - address: 10.10.3.187 # node air gap network IP - internal_address: 172.31.7.22 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.254 # node air gap network IP - internal_address: 172.31.13.132 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - - address: 10.10.3.89 # node air gap network IP - internal_address: 172.31.3.216 # node intra-cluster IP - user: rancher - role: ['controlplane', 'etcd', 'worker'] - ssh_key_path: /home/user/.ssh/id_rsa - -private_registries: - - url: # private registry url - user: rancher - password: '*********' - is_default: true -``` - -### 3. Run RKE - -After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: - -``` -rke up --config ./rancher-cluster.yml -``` - -### 4. Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `rancher-cluster.yml`: The RKE cluster configuration file. -- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. -- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ -{{% /tab %}} -{{% /tabs %}} - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Issues or errors? - -See the [Troubleshooting]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) page. - -### [Next: Install Rancher](../install-rancher) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md deleted file mode 100644 index 18c8817f76..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/_index.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: '2. Collect and Publish Images to your Private Registry' -weight: 200 ---- - -This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. - -By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.6/en/cluster-provisioning/) or launch any tools in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. - -Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. - -The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. - -> **Prerequisites:** -> -> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. -> -> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. - -{{% tabs %}} -{{% tab "Linux Only Clusters" %}} - -For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. - -1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) -2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) -3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) -4. [Populate the private registry](#4-populate-the-private-registry) - -### Prerequisites - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -If you will use ARM64 hosts, the registry must support manifests. 
As of April 2020, Amazon Elastic Container Registry does not support manifests. - -### 1. Find the required assets for your Rancher version - -1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets**. Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: - -| Release File | Description | -| ---------------- | -------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - -### 2. Collect the cert-manager image - -> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. - -In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. - -**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - - > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v1.7.1 - helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - -### 4. Populate the private registry - -Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. 
The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.sh --image-list ./rancher-images.txt --registry - ``` -{{% /tab %}} -{{% tab "Linux and Windows Clusters" %}} - -For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. - -# Windows Steps - -The Windows images need to be collected and pushed from a Windows server workstation. - -1. Find the required assets for your Rancher version -2. Save the images to your Windows Server workstation -3. Prepare the Docker daemon -4. Populate the private registry - -### Prerequisites - -These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - -Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. - -2. From the release's "Assets" section, download the following files: - -| Release File | Description | -|----------------------------|------------------| -| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | -| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | -| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Save the images to your Windows Server workstation - -1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. - -1. Run `rancher-save-images.ps1` to create a tarball of all the required images: - ```plain - ./rancher-save-images.ps1 - ``` - - **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. - - - -### 3. Prepare the Docker daemon - -Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. - - ``` - { - ... 
- "allow-nondistributable-artifacts": [ - ... - "" - ] - ... - } - ``` - - - -### 4. Populate the private registry - -Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. - -The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. - -1. Using `powershell`, log into your private registry if required: - ```plain - docker login - ``` - -1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - ```plain - ./rancher-load-images.ps1 --registry - ``` - -# Linux Steps - -The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. - -1. Find the required assets for your Rancher version -2. Collect all the required images -3. Save the images to your Linux workstation -4. Populate the private registry - -### Prerequisites - -You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. - -These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. - -The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. - - - -### 1. Find the required assets for your Rancher version - -1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets**. - -2. From the release's **Assets** section, download the following files: - -| Release File | Description | -|----------------------------| -------------------------- | -| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | -| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | -| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | -| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | - - - -### 2. Collect all the required images - -**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. - -1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: - > **Note:** Recent changes to cert-manager require an upgrade. 
If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - helm fetch jetstack/cert-manager --version v0.12.0 - helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt - ``` - -2. Sort and unique the images list to remove any overlap between the sources: - ```plain - sort -u rancher-images.txt -o rancher-images.txt - ``` - - - -### 3. Save the images to your workstation - -1. Make `rancher-save-images.sh` an executable: - ``` - chmod +x rancher-save-images.sh - ``` - -1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: - ```plain - ./rancher-save-images.sh --image-list ./rancher-images.txt - ``` - -**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. - - - -### 4. Populate the private registry - -Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. - -The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. - -1. Log into your private registry if required: - ```plain - docker login - ``` - -1. Make `rancher-load-images.sh` an executable: - ``` - chmod +x rancher-load-images.sh - ``` - -1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: - -```plain -./rancher-load-images.sh --image-list ./rancher-images.txt \ - --windows-image-list ./rancher-windows-images.txt \ - --registry -``` - - -{{% /tab %}} -{{% /tabs %}} - -### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/launch-kubernetes/) - -### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md deleted file mode 100644 index 19cc815977..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/prepare-nodes/_index.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: '1. Set up Infrastructure and Private Registry' -weight: 100 ---- - -In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). - -An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. - -The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. 
For more information on each installation option, refer to [this page.]({{}}/rancher/v2.6/en/installation/) - -Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. - -{{% tabs %}} -{{% tab "K3s" %}} -We recommend setting up the following infrastructure for a high-availability installation: - -- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. -- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. -- **A load balancer** to direct traffic to the two nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node) for setting up nodes as instances in Amazon EC2. - -### 2. Set up External Datastore - -The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. - -For a high-availability K3s installation, you will need to set up one of the following external databases: - -* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) -* [MySQL](https://www.mysql.com/) (certified against version 5.7) -* [etcd](https://etcd.io/) (certified against version 3.3.15) - -When you install Kubernetes, you will pass in details for K3s to connect to the database. - -For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds) for setting up a MySQL database on Amazon's RDS service. - -For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) - -### 3. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. 
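As a rough illustration of that mechanism, the sketch below shows the general shape of such an Ingress resource. This is a hypothetical example, not the exact object a Rancher install creates; the hostname is a placeholder for the DNS record you set up in a later step.

```
# Illustrative sketch only — field values are assumptions, not the chart's exact output
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rancher
  namespace: cattle-system
spec:
  rules:
    - host: rancher.example.com       # the Rancher server hostname
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: rancher         # Service in front of the Rancher pods
                port:
                  number: 80
```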
- -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 4. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 5. Set up a Private Docker Registry - -Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. 
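For orientation, a K3s `registries.yaml` for this scenario typically looks something like the sketch below; the registry address, credentials, and CA path are placeholders for your environment.

```
# /etc/rancher/k3s/registries.yaml — illustrative values only
mirrors:
  docker.io:
    endpoint:
      - "https://registry.example.com:5000"
configs:
  "registry.example.com:5000":
    auth:
      username: myuser                           # only if the registry requires authentication
      password: mypassword
    tls:
      ca_file: /etc/ssl/certs/registry-ca.pem    # only if the registry uses a self-signed certificate
```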
- -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) -{{% /tab %}} -{{% tab "RKE" %}} - -To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: - -- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. -- **A load balancer** to direct front-end traffic to the three nodes. -- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. -- **A private Docker registry** to distribute Docker images to your machines. - -These nodes must be in the same region/data center. You may place these servers in separate availability zones. - -### Why three nodes? - -In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. - -The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. - -### 1. Set up Linux Nodes - -These hosts will be disconnected from the internet, but require being able to connect with your private registry. - -Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/) for setting up nodes as instances in Amazon EC2. - -### 2. Set up the Load Balancer - -You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. - -When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. - -When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. - -For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: - -- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. 
The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. -- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) - -For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) - -For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) - -> **Important:** -> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. - -### 3. Set up the DNS Record - -Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. - -Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. - -You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. - -For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) - -### 4. Set up a Private Docker Registry - -Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. - -In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file]({{}}/rke/latest/en/config-options/private-registries/) with details from this registry. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) - -{{% /tab %}} -{{% tab "Docker" %}} -> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. 
-> -> The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.6/en/backups/migrating-rancher) - -### 1. Set up a Linux Node - -This host will be disconnected from the Internet, but needs to be able to connect to your private registry. - -Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) - -For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/) for setting up nodes as instances in Amazon EC2. - -### 2. Set up a Private Docker Registry - -Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. - -If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) - -{{% /tab %}} -{{% /tabs %}} - -### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md deleted file mode 100644 index 4e27d10330..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/_index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Rolling Back Rancher Installed with Docker -weight: 1015 ---- - -If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades). Rolling back restores: - -- Your previous version of Rancher. -- Your data backup created before upgrade. - -## Before You Start - -During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: - -``` -docker pull rancher/rancher: -``` - -In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. - -Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | ------------------------------------------------------- | -| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.0.5` | The version of Rancher that the backup is for. | -| `` | `9-27-18` | The date that the data container or backup was created. | -
- -You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -## Rolling Back Rancher - -If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. - ->**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. - -1. Using a remote Terminal connection, log into the node running your Rancher Server. - -1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. - - For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. - - ``` - docker pull rancher/rancher: - ``` - -1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - You can obtain the name for your Rancher container by entering `docker ps`. - -1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. - - If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades), it will have a name similar to (`rancher-data-backup--.tar.gz`). - -1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. - - ``` - docker run --volumes-from rancher-data \ - -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ - && tar zxvf /backup/rancher-data-backup--.tar.gz" - ``` - -1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. - ``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: - ``` - Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) - - >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. - -1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. - -**Result:** Rancher is rolled back to its version and data state before upgrade. 
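If you also want a quick sanity check from the node before opening the browser, commands along these lines can help; the container name and hostname are examples, and `/ping` is Rancher's health check endpoint, which returns `pong` when the server is up.

```plain
docker ps --format 'table {{.Names}}\t{{.Image}}\t{{.Status}}'   # confirm the expected image tag is running
docker logs --tail 20 festive_mestorf                            # container name is an example
curl -sk https://rancher.example.com/ping                        # hostname is an example
```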
diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md b/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md deleted file mode 100644 index 3b03de02a2..0000000000 --- a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/_index.md +++ /dev/null @@ -1,360 +0,0 @@ ---- -title: Upgrading Rancher Installed with Docker -weight: 1010 ---- - -The following instructions will guide you through upgrading a Rancher server that was installed with Docker. - -> **Docker installs are not supported in production environments.** These instructions are provided for testing and development purposes only. If you have already deployed a Docker install in production and need to upgrade to a new Rancher version, we recommend [migrating to the Helm chart install]({{}}/rancher/v2.6/en/backups/migrating-rancher/) before upgrading. - -# Prerequisites - -- **Review the [known upgrade issues]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues)** section in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums](https://forums.rancher.com/c/announcements/12). Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. -- **For [air gap installs only,]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version**. Follow the guide to [populate your private registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. - -# Placeholder Review - -During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). - -Here's an **example** of a command with a placeholder: - -``` -docker stop -``` - -In this command, `` is the name of your Rancher container. - -# Get Data for Upgrade Commands - -To obtain the data to replace the placeholders, run: - -``` -docker ps -``` - -Write down or copy this information before starting the upgrade. - -Terminal `docker ps` Command, Displaying Where to Find `` and `` -![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) - -| Placeholder | Example | Description | -| -------------------------- | -------------------------- | --------------------------------------------------------- | -| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | -| `` | `festive_mestorf` | The name of your Rancher container. | -| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | -| `` | `2018-12-19` | The date that the data container or backup was created. | -
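If you prefer not to read through the full `docker ps` table, a formatted query such as the one below prints just the container name and image tag; the output shown in the comments is illustrative.

```plain
docker ps --format 'table {{.Names}}\t{{.Image}}'
# NAMES             IMAGE
# festive_mestorf   rancher/rancher:v2.1.3
```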
- -You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. - -# Upgrade Outline - -During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: - -- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) -- [2. Create a backup tarball](#2-create-a-backup-tarball) -- [3. Pull the new Docker image](#3-pull-the-new-docker-image) -- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) -- [5. Verify the Upgrade](#5-verify-the-upgrade) -- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) - -# 1. Create a copy of the data from your Rancher server container - -1. Using a remote Terminal connection, log into the node running your Rancher server. - -1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. - - ``` - docker stop - ``` - -1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. - - ``` - docker create --volumes-from --name rancher-data rancher/rancher: - ``` - -# 2. Create a backup tarball - -1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup--.tar.gz`). - - This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. - - - ``` - docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher - ``` - - **Step Result:** When you enter this command, a series of commands should run. - -1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. - - ``` - [rancher@ip-10-0-0-50 ~]$ ls - rancher-data-backup-v2.1.3-20181219.tar.gz - ``` - -1. Move your backup tarball to a safe location external from your Rancher server. - -# 3. Pull the New Docker Image - -Pull the image of the Rancher version that you want to upgrade to. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker pull rancher/rancher: -``` - -# 4. Start the New Rancher Server Container - -Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. - ->**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
- -If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/proxy/) - -If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) - -If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) - -To see the command to use when starting the new Rancher server container, choose from the following options: - -- Docker Upgrade -- Docker Upgrade for Air Gap Installs - -{{% tabs %}} -{{% tab "Docker Upgrade" %}} - -Select which option you had installed Rancher server - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: -``` - -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) - -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. - -Placeholder | Description -------------|------------- - `` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - --privileged \ - rancher/rancher: -``` - -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) - -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. 
Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. - ->**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - --privileged \ - rancher/rancher: \ - --no-cacerts -``` - -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) -{{% /accordion %}} - -### Option D: Let's Encrypt Certificate - -{{% accordion id="option-d" label="Click to expand" %}} - ->**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). - -If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. - ->**Reminder of the Cert Prerequisites:** -> ->- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). ->- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. - -Placeholder | Description -------------|------------- -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. -`` | The domain address that you had originally started with - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --privileged \ - rancher/rancher: \ - --acme-domain -``` - -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) - -{{% /accordion %}} - -{{% /tab %}} -{{% tab "Docker Air Gap Upgrade" %}} - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. 
- -When starting the new Rancher server container, choose from the following options: - -### Option A: Default Self-Signed Certificate - -{{% accordion id="option-a" label="Click to expand" %}} - -If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. - -Placeholder | Description -------------|------------- -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to to upgrade to. - -``` - docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` - -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) -{{% /accordion %}} - -### Option B: Bring Your Own Certificate: Self-Signed - -{{% accordion id="option-b" label="Click to expand" %}} - -If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. - ->**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | The path to the certificate authority's certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -v //:/etc/rancher/ssl/cacerts.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged \ - /rancher/rancher: -``` -Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) -{{% /accordion %}} - -### Option C: Bring Your Own Certificate: Signed by Recognized CA - -{{% accordion id="option-c" label="Click to expand" %}} - -If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. - - >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. 
Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -Placeholder | Description -------------|------------- -`` | The path to the directory containing your certificate files. -`` | The path to your full certificate chain. -`` | The path to the private key for your certificate. -`` | Your private registry URL and port. -`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. - -> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. - -``` -docker run -d --volumes-from rancher-data \ - --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - --no-cacerts \ - -v //:/etc/rancher/ssl/cert.pem \ - -v //:/etc/rancher/ssl/key.pem \ - -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher - -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts - --privileged - /rancher/rancher: -``` -privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) -{{% /accordion %}} -{{% /tab %}} -{{% /tabs %}} - -**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. - -# 5. Verify the Upgrade - -Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. - ->**Having network issues in your user clusters following upgrade?** -> -> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). - - -# 6. Clean up Your Old Rancher Server Container - -Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. - -# Rolling Back - -If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/). diff --git a/content/rancher/v2.6/en/installation/requirements/_index.md b/content/rancher/v2.6/en/installation/requirements/_index.md deleted file mode 100644 index 122f954de3..0000000000 --- a/content/rancher/v2.6/en/installation/requirements/_index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Installation Requirements -description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup -weight: 1 ---- - -This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. - -> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/) which will run your apps and services. 
- -Make sure the node(s) for the Rancher server fulfill the following requirements: - -- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) - - [RKE Specific Requirements](#rke-specific-requirements) - - [K3s Specific Requirements](#k3s-specific-requirements) - - [RKE2 Specific Requirements](#rke2-specific-requirements) - - [Installing Docker](#installing-docker) -- [Hardware Requirements](#hardware-requirements) -- [CPU and Memory](#cpu-and-memory) - - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) - - [K3s Kubernetes](#k3s-kubernetes) - - [RKE2 Kubernetes](#rke2-kubernetes) - - [Docker](#docker) -- [Ingress](#ingress) -- [Disks](#disks) -- [Networking Requirements](#networking-requirements) - - [Node IP Addresses](#node-ip-addresses) - - [Port Requirements](#port-requirements) -- [Dockershim Support](#dockershim-support) - -For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.6/en/best-practices/rancher-server/deployment-types/) - -The Rancher UI works best in Firefox or Chrome. - -# Operating Systems and Container Runtime Requirements - -Rancher should work with any modern Linux distribution. - -Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for Kubernetes installs. - -Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -All supported operating systems are 64-bit x86. - -The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. - -Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes v1.19, v1.20 and v1.21, firewalld must be turned off. - -If you don't feel comfortable doing so you might check suggestions in the [respective issue](https://github.com/rancher/rancher/issues/28840). Some users were successful [creating a separate firewalld zone with a policy of ACCEPT for the Pod CIDR](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822). - -If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.6/en/installation/resources/advanced/arm64-platform/) - -### RKE Specific Requirements - -For the container runtime, RKE should work with any modern Docker version. - -Note that the following sysctl setting must be applied: - -``` -net.bridge.bridge-nf-call-iptables=1 -``` - -### K3s Specific Requirements - -For the container runtime, K3s should work with any modern version of Docker or containerd. - -Rancher needs to be installed on a supported Kubernetes version. 
To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. - -If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. - -If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. - - - -### RKE2 Specific Requirements - -For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) - -Docker is not required for RKE2 installs. - -### Installing Docker - -Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.6/en/installation/requirements/installing-docker) to install Docker with one command. - -# Hardware Requirements - -The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. - -# CPU and Memory - -Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. - -### RKE and Hosted Kubernetes - -These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. - -These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | ---------- | ------------ | -------| ------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | - -Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. - -### K3s Kubernetes - -These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/) - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | -| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | -| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | -| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | -| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | -| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | -| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | - -Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. 
- - -### RKE2 Kubernetes - -These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 2 | 5 GB | -| Medium | Up to 15 | Up to 200 | 3 | 9 GB | - -### Docker - -These CPU and memory requirements apply to a host with a [single-node]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker) installation of Rancher. - -| Deployment Size | Clusters | Nodes | vCPUs | RAM | -| --------------- | -------- | --------- | ----- | ---- | -| Small | Up to 5 | Up to 50 | 1 | 4 GB | -| Medium | Up to 15 | Up to 200 | 2 | 8 GB | - -# Ingress - -Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. - -The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. - -For RKE and K3s installations, you don't have to install the Ingress manually because it is installed by default. - -For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the ingress. - -- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/amazon-eks) -- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/aks) -- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/gke) - -# Disks - -Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. - -# Networking Requirements - -This section describes the networking requirements for the node(s) where the Rancher server is installed. - -> If a server containing Rancher has the `X-Frame-Options=DENY` header, some pages in the new Rancher UI will not be able to render after upgrading from the legacy UI. This is because some legacy pages are embedded as iFrames in the new UI. - -### Node IP Addresses - -Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. - -### Port Requirements - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.6/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. - -# Dockershim Support - -For more information on Dockershim support, refer to [this page]({{}}/rancher/v2.6/en/installation/requirements/dockershim/). 
diff --git a/content/rancher/v2.6/en/installation/requirements/dockershim/_index.md b/content/rancher/v2.6/en/installation/requirements/dockershim/_index.md deleted file mode 100644 index e4c3490c4b..0000000000 --- a/content/rancher/v2.6/en/installation/requirements/dockershim/_index.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Dockershim -weight: 300 ---- - -The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). - -RKE clusters now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community external Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. - -To enable the external Dockershim, configure the following option. - -``` -enable_cri_dockerd: true -``` - -For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. - -### FAQ - -
- -Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? - -A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on a version of Rancher that supports RKE 1.21. See our support matrix for details. - -
- -Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? - -A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work, and it is not removed until a later Kubernetes release. For information on the timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). It will only emit a warning about the upcoming removal, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to 1.21 as you would normally. - -
- -Q: What are my other options if I don’t want to depend on the Dockershim? - -A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 or K3s are two options for doing this. - -
- -Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? - -A: Today, you can stand up a new cluster and migrate workloads to a new RKE2 cluster that uses containerd. Rancher is exploring the possibility of an in-place upgrade path. - -
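To make the `enable_cri_dockerd` option above concrete, the following is a minimal sketch of where the flag sits in an RKE `cluster.yml`. The node address, SSH user, and Kubernetes version are hypothetical placeholders, not values taken from this repository.

```
# Sketch only: write a minimal cluster.yml that keeps Docker as the CRI runtime
# via the external Dockershim, then provision the cluster with the RKE CLI.
cat > cluster.yml <<'EOF'
nodes:
  - address: 203.0.113.10          # placeholder node IP
    user: rancher                  # placeholder SSH user
    role: [controlplane, worker, etcd]

kubernetes_version: v1.21.x-rancher1-1   # placeholder; use a validated version

# Option described above: enable the external Dockershim (cri-dockerd).
enable_cri_dockerd: true
EOF

rke up --config cluster.yml
```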
diff --git a/content/rancher/v2.6/en/installation/requirements/ports/_index.md b/content/rancher/v2.6/en/installation/requirements/ports/_index.md deleted file mode 100644 index a9eaaf1bd8..0000000000 --- a/content/rancher/v2.6/en/installation/requirements/ports/_index.md +++ /dev/null @@ -1,317 +0,0 @@ ---- -title: Port Requirements -description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes -weight: 300 ---- - -To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. - -- [Rancher Nodes](#rancher-nodes) - - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) - - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) - - [Ports for Rancher Server Nodes on RKE2](#ports-for-rancher-server-nodes-on-rke2) - - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) -- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) - - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) - - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) - - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) - - [Ports for Registered Clusters](#ports-for-registered-clusters) -- [Other Port Considerations](#other-port-considerations) - - [Commonly Used Ports](#commonly-used-ports) - - [Local Node Traffic](#local-node-traffic) - - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) - - [Opening SUSE Linux Ports](#opening-suse-linux-ports) - -# Rancher Nodes - -The following table lists the ports that need to be open to and from nodes that are running the Rancher server. - -The port requirements differ based on the Rancher server architecture. - -Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. - -> **Notes:** -> -> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). -> - Kubernetes recommends TCP 30000-32767 for node port services. -> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. -> - Rancher nodes may also need outbound access to an external S3 location which is used for storing cluster backups (Minio for example). - -### Ports for Rancher Server Nodes on K3s - -{{% accordion label="Click to expand" %}} - -The K3s server needs port 6443 to be accessible by the nodes. - -The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. 
Run your nodes behind a firewall/security group that disables access to port 8472. - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| TCP | 443 |
  • server nodes
  • agent nodes
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | -| TCP | 6443 | K3s server nodes | Kubernetes API -| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. -| TCP | 10250 | K3s server and agent nodes | kubelet - -
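As a quick sanity check that the inbound rules above are in place on a K3s-based Rancher install, the commands below verify that the expected ports are listening on a server node and reachable from elsewhere. This is a rough sketch, not part of the official requirements; the host name is a placeholder.

```
# On a K3s server node: confirm the Kubernetes API and kubelet ports are listening.
sudo ss -tlnp | grep -E ':(6443|10250)'

# From an agent node or admin workstation: confirm the ports are reachable.
# Replace k3s-server.example.com with your server node's address (placeholder).
nc -zv k3s-server.example.com 6443
nc -zv k3s-server.example.com 443
```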
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RKE - -{{% accordion label="Click to expand" %}} - -Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. - -The following tables break down the port requirements for traffic between the Rancher nodes: - -
Rules for traffic between Rancher nodes
- -| Protocol | Port | Description | -|-----|-----|----------------| -| TCP | 443 | Rancher agents | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| TCP | 6443 | Kubernetes apiserver | -| TCP | 8443 | Nginx Ingress's Validating Webhook | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 10250 | Metrics server communication with all nodes | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | - -The following tables break down the port requirements for inbound and outbound traffic: - -
Inbound Rules for Rancher Nodes
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | -| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | -| TCP | 443 |
  • Load Balancer/Reverse Proxy
  • IPs of all cluster nodes and other API/UI clients
| HTTPS traffic to Rancher UI/API | -| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | - -
Outbound Rules for Rancher Nodes
- -| Protocol | Port | Destination | Description | -|-----|-----|----------------|---| -| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | -| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | -| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | -| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | - -{{% /accordion %}} - -### Ports for Rancher Server Nodes on RKE2 - -{{% accordion label="Click to expand" %}} - -The RKE2 server needs port 6443 and 9345 to be accessible by other nodes in the cluster. - -All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. - -If you wish to utilize the metrics server, you will need to open port 10250 on each node. - -**Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. - -
Inbound Rules for RKE2 Server Nodes
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 9345 | RKE2 agent nodes | Kubernetes API -| TCP | 6443 | RKE2 agent nodes | Kubernetes API -| UDP | 8472 | RKE2 server and agent nodes | Required only for Flannel VXLAN -| TCP | 10250 | RKE2 server and agent nodes | kubelet -| TCP | 2379 | RKE2 server nodes | etcd client port -| TCP | 2380 | RKE2 server nodes | etcd peer port -| TCP | 30000-32767 | RKE2 server and agent nodes | NodePort port range -| TCP | 5473 | Calico-node pod connecting to typha pod | Required when deploying with Calico -| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | -| HTTPS | 8443 |
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. | - -Typically all outbound traffic is allowed. -{{% /accordion %}} - -### Ports for Rancher Server in Docker - -{{% accordion label="Click to expand" %}} - -The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: - -
Inbound Rules for Rancher Node
- -| Protocol | Port | Source | Description -|-----|-----|----------------|---| -| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used -| TCP | 443 |
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl - -
Outbound Rules for Rancher Node
- -| Protocol | Port | Source | Description | -|-----|-----|----------------|---| -| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | -| TCP | 443 | git.rancher.io | Rancher catalog | -| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | -| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | - -{{% /accordion %}} - -# Downstream Kubernetes Cluster Nodes - -Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. - -The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.6/en/cluster-provisioning/). - -The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.6/en/cluster-provisioning). - -
Port Requirements for the Rancher Management Plane
- -![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) - ->**Tip:** -> ->If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. - -### Ports for Rancher Launched Kubernetes Clusters using Node Pools - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/). - ->**Note:** ->The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. - -{{< ports-iaas-nodes >}} - -{{% /accordion %}} - -### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/). - -{{< ports-custom-nodes >}} - -{{% /accordion %}} - -### Ports for Hosted Kubernetes Clusters - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - -### Ports for Registered Clusters - -Note: Registered clusters were called imported clusters before Rancher v2.5. - -{{% accordion label="Click to expand" %}} - -The following table depicts the port requirements for [registered clusters]({{}}/rancher/v2.6/en/cluster-provisioning/registered-clusters/). - -{{< ports-imported-hosted >}} - -{{% /accordion %}} - - -# Other Port Considerations - -### Commonly Used Ports - -These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. - -{{% include file="/rancher/v2.6/en/installation/requirements/ports/common-ports-table" %}} - ----- - -### Local Node Traffic - -Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). -These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. - -However, this traffic may be blocked when: - -- You have applied strict host firewall policies on the node. -- You are using nodes that have multiple interfaces (multihomed). - -In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. - -### Rancher AWS EC2 Security Group - -When using the [AWS EC2 node driver]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. 
- -| Type | Protocol | Port Range | Source/Destination | Rule Type | -|-----------------|:--------:|:-----------:|------------------------|:---------:| -| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | -| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | -| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | -| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | -| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | -| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | -| All traffic | All | All | 0.0.0.0/0 | Outbound | - -### Opening SUSE Linux Ports - -SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed to add the host to a custom cluster, follow the steps for your SUSE version: - -{{% tabs %}} -{{% tab "SLES 15 / openSUSE Leap 15" %}} -1. SSH into the instance. -1. Start YaST in text mode: -``` -sudo yast2 -``` - -1. Navigate to **Security and Users** > **Firewall** > **Zones:public** > **Ports**. To navigate within the interface, follow the instructions [here](https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha-yast-text.html#sec-yast-cli-navigate). -1. To open the required ports, enter them into the **TCP Ports** and **UDP Ports** fields. In this example, ports 9796 and 10250 are also opened for monitoring. The resulting fields should look similar to the following: -```yaml -TCP Ports -22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767 -UDP Ports -8472, 30000-32767 -``` - -1. When all required ports are entered, select **Accept**. - -{{% /tab %}} -{{% tab "SLES 12 / openSUSE Leap 42" %}} -1. SSH into the instance. -1. Edit `/etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: - ``` - FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" - FW_SERVICES_EXT_UDP="8472 30000:32767" - FW_ROUTE=yes - ``` -1. Restart the firewall with the new ports: - ``` - SuSEfirewall2 - ``` -{{% /tab %}} -{{% /tabs %}} - -**Result:** The node has the open ports required to be added to a custom cluster.
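On distributions that use firewalld instead of the SUSE tooling above (for example RHEL-family hosts or newer SLES releases), a rough equivalent is sketched below. This assumes firewalld is the active firewall and mirrors the port list from the SLES example, including the optional monitoring ports 9796 and 10250; it is not an officially documented procedure.

```
# Sketch: open the custom-cluster ports with firewalld, mirroring the SUSE example.
for port in 22 80 443 2376 2379 2380 6443 9099 9796 10250 10254; do
  sudo firewall-cmd --permanent --add-port=${port}/tcp
done
sudo firewall-cmd --permanent --add-port=30000-32767/tcp
sudo firewall-cmd --permanent --add-port=8472/udp
sudo firewall-cmd --permanent --add-port=30000-32767/udp

# Apply the permanent rules and review the result.
sudo firewall-cmd --reload
sudo firewall-cmd --list-ports
```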
diff --git a/content/rancher/v2.6/en/installation/requirements/ports/common-ports-table/index.md b/content/rancher/v2.6/en/installation/requirements/ports/common-ports-table/index.md deleted file mode 100644 index 4819129eb2..0000000000 --- a/content/rancher/v2.6/en/installation/requirements/ports/common-ports-table/index.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -headless: true ---- -| Protocol | Port | Description | -|:--------: |:----------------: |---------------------------------------------------------------------------------- | -| TCP | 22 | Node driver SSH provisioning | -| TCP | 179 | Calico BGP Port | -| TCP | 2376 | Node driver Docker daemon TLS port | -| TCP | 2379 | etcd client requests | -| TCP | 2380 | etcd peer communication | -| UDP | 8472 | Canal/Flannel VXLAN overlay networking | -| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | -| TCP | 8443 | Rancher webhook | -| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | -| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | -| TCP | 9443 | Rancher webhook | -| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | -| TCP | 6783 | Weave Port | -| UDP | 6783-6784 | Weave UDP Ports | -| TCP | 10250 | Metrics server communication with all nodes API | -| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | -| TCP/UDP | 30000-
32767 | NodePort port range | diff --git a/content/rancher/v2.6/en/installation/resources/advanced/single-node-install-external-lb/_index.md b/content/rancher/v2.6/en/installation/resources/advanced/single-node-install-external-lb/_index.md deleted file mode 100644 index 91972e9aa5..0000000000 --- a/content/rancher/v2.6/en/installation/resources/advanced/single-node-install-external-lb/_index.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer -weight: 252 ---- - -For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. - -A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. - -This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. - -## Requirements for OS, Docker, Hardware, and Networking - -Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.6/en/installation/requirements/) - -## Installation Outline - - - -- [1. Provision Linux Host](#1-provision-linux-host) -- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) -- [3. Configure Load Balancer](#3-configure-load-balancer) - - - -## 1. Provision Linux Host - -Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.6/en/installation/requirements) to launch your Rancher Server. - -## 2. Choose an SSL Option and Install Rancher - -For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. - -> **Do you want to..**. -> -> - Complete an Air Gap Installation? -> - Record all transactions with the Rancher API? -> -> See [Advanced Options](#advanced-options) below before continuing. - -Choose from the following options: - -{{% accordion id="option-a" label="Option A-Bring Your Own Certificate: Self-Signed" %}} -If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. - -> **Prerequisites:** -> Create a self-signed certificate. -> -> - The certificate files must be in PEM format. - -**To Install Rancher Using a Self-Signed Cert:** - -1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ - rancher/rancher:latest - ``` - -{{% /accordion %}} -{{% accordion id="option-b" label="Option B-Bring Your Own Certificate: Signed by Recognized CA" %}} -If your cluster is public facing, it's best to use a certificate signed by a recognized CA. - -> **Prerequisites:** -> -> - The certificate files must be in PEM format. 
- -**To Install Rancher Using a Cert Signed by a Recognized CA:** - -If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. You do, however, need to make sure that no default CA certificate is generated and stored; you can do this by passing the `--no-cacerts` parameter to the container. - -1. Enter the following command. - - ``` - docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - rancher/rancher:latest --no-cacerts - ``` - - {{% /accordion %}} - -## 3. Configure Load Balancer - -When using a load balancer in front of your Rancher container, the container does not need to redirect traffic from port 80 to port 443. Passing the `X-Forwarded-Proto: https` header disables this redirect. - -The load balancer or proxy has to be configured to support the following: - -- **WebSocket** connections -- **SPDY** / **HTTP/2** protocols -- Passing / setting the following headers: - - | Header | Value | Description | - |--------|-------|-------------| - | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. - | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. - | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. - | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. -### Example NGINX configuration - -This NGINX configuration is tested on NGINX 1.14. - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). - -- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. -- Replace both occurrences of `FQDN` to the DNS name for Rancher. -- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. - -``` -worker_processes 4; -worker_rlimit_nofile 40000; - -events { - worker_connections 8192; -} - -http { - upstream rancher { - server rancher-server:80; - } - - map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; - } - - server { - listen 443 ssl http2; - server_name FQDN; - ssl_certificate /certs/fullchain.pem; - ssl_certificate_key /certs/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } - } - - server { - listen 80; - server_name FQDN; - return 301 https://$server_name$request_uri; - } -} -``` - -
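One way to sanity-check a configuration like the example above is to validate the syntax and then confirm the redirect and HTTPS behavior from a client. This is a sketch; the configuration path and `rancher.example.com` host name are placeholders for your own values.

```
# Validate the NGINX configuration (path is a placeholder for your config file).
sudo nginx -t -c /etc/nginx/nginx.conf

# Reload NGINX if the test passes.
sudo nginx -s reload

# The port 80 server block above should answer with a 301 redirect to HTTPS.
curl -sI http://rancher.example.com | head -n 5

# Rancher should answer over HTTPS through the load balancer
# (keep -k only if the certificate is self-signed).
curl -skI https://rancher.example.com | head -n 5
```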
- -## What's Next? - -- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.6/en/backups/docker-installs/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. -- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.6/en/cluster-provisioning/). - -
- -## FAQ and Troubleshooting - -For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) - -## Advanced Options - -### API Auditing - -If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.6/en/installation/resources/advanced/api-audit-log) feature by adding the flags below into your install command. - - -e AUDIT_LEVEL=1 \ - -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ - -e AUDIT_LOG_MAXAGE=20 \ - -e AUDIT_LOG_MAXBACKUP=20 \ - -e AUDIT_LOG_MAXSIZE=100 \ - -### Air Gap - -If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. - -**Example:** - - /rancher/rancher:latest - -### Persistent Data - -Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. - -You can bind mount a host volume to this location to preserve data on the host it is running on: - -``` -docker run -d --restart=unless-stopped \ - -p 80:80 -p 443:443 \ - -v /opt/rancher:/var/lib/rancher \ - --privileged \ - rancher/rancher:latest -``` - -As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) - -This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). - -> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). - -``` -upstream rancher { - server rancher-server:80; -} - -map $http_upgrade $connection_upgrade { - default Upgrade; - '' close; -} - -server { - listen 443 ssl http2; - server_name rancher.yourdomain.com; - ssl_certificate /etc/your_certificate_directory/fullchain.pem; - ssl_certificate_key /etc/your_certificate_directory/privkey.pem; - - location / { - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://rancher; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. - proxy_read_timeout 900s; - proxy_buffering off; - } -} - -server { - listen 80; - server_name rancher.yourdomain.com; - return 301 https://$server_name$request_uri; -} -``` - -
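Putting the advanced options together, the following sketch combines the API audit flags above with a single-node install, the persistent-data bind mount, and a bind-mounted log directory. The `/opt/rancher-auditlog` host path is a hypothetical choice added for illustration so the audit log can be read from the host.

```
# Sketch: single-node install with API auditing enabled and the audit log
# bind-mounted to the host. Host paths here are illustrative placeholders.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /opt/rancher:/var/lib/rancher \
  -v /opt/rancher-auditlog:/var/log/auditlog \
  -e AUDIT_LEVEL=1 \
  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
  -e AUDIT_LOG_MAXAGE=20 \
  -e AUDIT_LOG_MAXBACKUP=20 \
  -e AUDIT_LOG_MAXSIZE=100 \
  --privileged \
  rancher/rancher:latest

# Follow the audit log from the host.
tail -f /opt/rancher-auditlog/rancher-api-audit.log
```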
- diff --git a/content/rancher/v2.6/en/installation/resources/choosing-version/_index.md b/content/rancher/v2.6/en/installation/resources/choosing-version/_index.md deleted file mode 100644 index db2a8afef3..0000000000 --- a/content/rancher/v2.6/en/installation/resources/choosing-version/_index.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: Choosing a Rancher Version -weight: 1 ---- - -This section describes how to choose a Rancher version. - -For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. - -For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image**. - -{{% tabs %}} -{{% tab "Helm Charts" %}} - -When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. - -Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. - -### Helm Chart Repositories - -Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. - -| Type | Command to Add the Repo | Description of the Repo | -| -------------- | ------------ | ----------------- | -| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | -| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | -| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | - -
-Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). - -> **Note:** All charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. - -### Helm Chart Versions - -Rancher Helm chart versions match the Rancher version (i.e., the `appVersion`). Once you've added the repo, you can search it to show the available versions with the following command:
-    `helm search repo --versions` - -If you have several repos, you can specify the repo name, for example: `helm search repo rancher-stable/rancher --versions`
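As a concrete sketch of the search step, using the stable repository URL from the table above (the repo must be added before it can be searched):

```
# Add the stable chart repository listed above and refresh the local index.
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update

# List every chart version available in the rancher-stable repository.
helm search repo rancher-stable/rancher --versions
```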
-For more information, see https://helm.sh/docs/helm/helm_search_repo/ - -To fetch a specific version of your chosen chart, define the `--version` parameter as in the following example:
-    `helm fetch rancher-stable/rancher --version=2.4.8` - -### Switching to a Different Helm Chart Repository - -After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. - -> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. - -{{< release-channel >}} - -1. List the current Helm chart repositories. - - ```plain - helm repo list - - NAME URL - stable https://charts.helm.sh/stable - rancher- https://releases.rancher.com/server-charts/ - ``` - -2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. - - ```plain - helm repo remove rancher- - ``` - -3. Add the Helm chart repository that you want to start installing Rancher from. - - ```plain - helm repo add rancher- https://releases.rancher.com/server-charts/ - ``` - -4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades) from the new Helm chart repository. -{{% /tab %}} -{{% tab "Docker Images" %}} -When performing [Docker installs]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. - -### Server Tags - -Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. - -| Tag | Description | -| -------------------------- | ------ | -| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | -| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | -| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | - -> **Notes:** -> -> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. -> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/aks/_index.md b/content/rancher/v2.6/en/installation/resources/k8s-tutorials/aks/_index.md deleted file mode 100644 index 89f32b12f6..0000000000 --- a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/aks/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Installing Rancher on Azure Kubernetes Service -shortTitle: AKS -weight: 3 ---- - -This page covers how to install Rancher on Microsoft's Azure Kubernetes Service (AKS). 
- -The guide uses command line tools to provision an AKS cluster with an ingress. If you prefer to provision your cluster using the Azure portal, refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal). - -If you already have an AKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -# Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. -- [Microsoft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow the instructions to create a Microsoft Azure tenant. -- Make sure your subscription has sufficient quota for at least 2 vCPUs. For details on Rancher server resource requirements, refer to [this section.]({{}}/rancher/v2.6/en/installation/requirements/#rke-and-hosted-kubernetes) -- When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). - -# 1. Prepare your Workstation - -Install the following command line tools on your workstation: - -- The Azure CLI, **az:** For help, refer to these [installation steps.](https://docs.microsoft.com/en-us/cli/azure/) -- **kubectl:** For help, refer to these [installation steps.](https://kubernetes.io/docs/tasks/tools/#kubectl) -- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) - -# 2. Create a Resource Group - -After installing the CLI, you will need to log in with your Azure account. - -``` -az login -``` - -Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) to hold all relevant resources for your cluster. Use a location that applies to your use case. - -``` -az group create --name rancher-rg --location eastus -``` - -# 3. Create the AKS Cluster - -To create an AKS cluster, run the following command. Use a VM size that applies to your use case. Refer to [this article](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for available sizes and options. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version.
- -``` -az aks create \ - --resource-group rancher-rg \ - --name rancher-server \ - --kubernetes-version 1.20.7 \ - --node-count 3 \ - --node-vm-size Standard_D2_v3 -``` - -The cluster will take some time to be deployed. - -# 4. Get Access Credentials - -After the cluster is deployed, get the access credentials. - -``` -az aks get-credentials --resource-group rancher-rg --name rancher-server -``` - -This command merges your cluster's credentials into the existing kubeconfig and allows `kubectl` to interact with the cluster. - -# 5. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. Installing an Ingress requires allocating a public IP address. Ensure you have sufficient quota, otherwise it will fail to assign the IP address. Limits for public IP addresses are applicable at a regional level per subscription. - -The following command installs an `nginx-ingress-controller` with a Kubernetes load balancer service. - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -# 6. Get Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) - AGE -ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP - 67s -``` - -Save the `EXTERNAL-IP`. - -# 7. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the `EXTERNAL-IP` that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the [Azure DNS documentation](https://docs.microsoft.com/en-us/azure/dns/) - -# 8. Install the Rancher Helm Chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/amazon-eks/_index.md b/content/rancher/v2.6/en/installation/resources/k8s-tutorials/amazon-eks/_index.md deleted file mode 100644 index f8da88bd61..0000000000 --- a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/amazon-eks/_index.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: Installing Rancher on Amazon EKS -shortTitle: Amazon EKS -weight: 3 ---- - -This page covers two ways to install Rancher on EKS. - -The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. - -The second is a guide for installing an EKS cluster with an ingress by using command line tools. 
This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) -- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) - -# Automated Quickstart using AWS Best Practices - -Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) - -The quick start guide provides three options for deploying Rancher on EKS: - -- **Deploy Rancher into a new VPC and new Amazon EKS cluster**. This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. -- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. -- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. - -Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: - -- A highly available architecture that spans three Availability Zones.* -- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* -- In the public subnets: - - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* - - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* -- In the private subnets: - - Kubernetes nodes in an Auto Scaling group.* - - A Network Load Balancer (not shown) for accessing the Rancher console. -- Rancher deployment using AWS Systems Manager automation. -- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* -- An Amazon Route 53 DNS record for accessing the Rancher deployment. - -\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. - -# Creating an EKS Cluster for the Rancher Server - -In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. - -> **Prerequisites:** -> -> - You should already have an AWS account. -> - It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. -> - The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) - -### 1. 
Prepare your Workstation - -Install the following command line tools on your workstation: - -- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) -- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) -- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) -- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) - -### 2. Configure the AWS CLI - -To configure the AWS CLI, run the following command: - -``` -aws configure -``` - -Then enter the following values: - -| Value | Description | -|-------|-------------| -| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | -| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | -| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | -| Default output format | Enter `json`. | - -### 3. Create the EKS Cluster - -To create an EKS cluster, run the following command. Use the AWS region that applies to your use case. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. - -``` -eksctl create cluster \ - --name rancher-server \ - --version 1.20 \ - --region us-west-2 \ - --nodegroup-name ranchernodes \ - --nodes 3 \ - --nodes-min 1 \ - --nodes-max 4 \ - --managed -``` - -The cluster will take some time to be deployed with CloudFormation. - -### 4. Test the Cluster - -To test the cluster, run: - -``` -eksctl get cluster -``` - -The result should look like the following: - -``` -eksctl get cluster -2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 -2021-03-18 15:09:35 [ℹ] using region us-west-2 -NAME REGION EKSCTL CREATED -rancher-server-cluster us-west-2 True -``` - -### 5. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. - -The following command installs an `nginx-ingress-controller` with a LoadBalancer service. This will result in an ELB (Elastic Load Balancer) in front of NGINX: - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -### 6. Get Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) - AGE -ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP - 27m -``` - -Save the `EXTERNAL-IP`. - -### 7. 
Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) - -### 8. Install the Rancher Helm Chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/gke/_index.md b/content/rancher/v2.6/en/installation/resources/k8s-tutorials/gke/_index.md deleted file mode 100644 index 0aab07851c..0000000000 --- a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/gke/_index.md +++ /dev/null @@ -1,182 +0,0 @@ ---- -title: Installing Rancher on a Google Kubernetes Engine Cluster -shortTitle: GKE -weight: 3 ---- - -In this section, you'll learn how to install Rancher using Google Kubernetes Engine. - -If you already have a GKE Kubernetes cluster, skip to the step about [installing an ingress.](#7-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) - -# Prerequisites - -- You will need a Google account. -- You will need a Google Cloud billing account. You can manage your Cloud Billing accounts using the Google Cloud Console. For more information about the Cloud Console, visit [General guide to the console.](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599) -- You will need a cloud quota for at least one in-use IP address and at least 2 CPUs. For more details about hardware requirements for the Rancher server, refer to [this section.]({{}}/rancher/v2.6/en/installation/requirements/#rke-and-hosted-kubernetes) - -# 1. Enable the Kubernetes Engine API - -Take the following steps to enable the Kubernetes Engine API: - -1. Visit the [Kubernetes Engine page](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886) in the Google Cloud Console. -1. Create or select a project. -1. Open the project and enable the Kubernetes Engine API for the project. Wait for the API and related services to be enabled. This can take several minutes. -1. Make sure that billing is enabled for your Cloud project. For information on how to enable billing for your project, refer to the [Google Cloud documentation.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project) - -# 2. Open the Cloud Shell - -Cloud Shell is a shell environment for managing resources hosted on Google Cloud. Cloud Shell comes preinstalled with the `gcloud` command-line tool and kubectl command-line tool. 
The `gcloud` tool provides the primary command-line interface for Google Cloud, and `kubectl` provides the primary command-line interface for running commands against Kubernetes clusters. - -The following sections describe how to launch the cloud shell from the Google Cloud Console or from your local workstation. - -### Cloud Shell - -To launch the shell from the [Google Cloud Console,](https://console.cloud.google.com) go to the upper-right corner of the console and click the terminal button. When hovering over the button, it is labeled **Activate Cloud Shell**. - -### Local Shell - -To install `gcloud` and `kubectl`, perform the following steps: - -1. Install the Cloud SDK by following [these steps.](https://cloud.google.com/sdk/docs/install) The Cloud SDK includes the `gcloud` command-line tool. The steps vary based on your OS. -1. After installing Cloud SDK, install the `kubectl` command-line tool by running the following command: - - ``` - gcloud components install kubectl - ``` - In a later step, `kubectl` will be configured to use the new GKE cluster. -1. [Install Helm 3](https://helm.sh/docs/intro/install/) if it is not already installed. -1. Enable Helm experimental [support for OCI images](https://github.com/helm/community/blob/master/hips/hip-0006.md) with the `HELM_EXPERIMENTAL_OCI` variable. Add the following line to `~/.bashrc` (or `~/.bash_profile` in macOS, or wherever your shell stores environment variables): - - ``` - export HELM_EXPERIMENTAL_OCI=1 - ``` -1. Run the following command to load your updated `.bashrc` file: - - ``` - source ~/.bashrc - ``` - If you are running macOS, use this command: - ``` - source ~/.bash_profile - ``` - - - -# 3. Configure the gcloud CLI - - Set up default gcloud settings using one of the following methods: - -- Using gcloud init, if you want to be walked through setting defaults. -- Using gcloud config, to individually set your project ID, zone, and region. - -{{% tabs %}} -{{% tab "Using gloud init" %}} - -1. Run gcloud init and follow the directions: - - ``` - gcloud init - ``` - If you are using SSH on a remote server, use the --console-only flag to prevent the command from launching a browser: - - ``` - gcloud init --console-only - ``` -2. Follow the instructions to authorize gcloud to use your Google Cloud account and select the new project that you created. - -{{% /tab %}} -{{% tab "Using gcloud config" %}} -{{% /tab %}} -{{% /tabs %}} - -# 4. Confirm that gcloud is configured correctly - -Run: - -``` -gcloud config list -``` - -The output should resemble the following: - -``` -[compute] -region = us-west1 # Your chosen region -zone = us-west1-b # Your chosen zone -[core] -account = -disable_usage_reporting = True -project = - -Your active configuration is: [default] -``` - -# 5. Create a GKE Cluster - -The following command creates a three-node cluster. - -Replace `cluster-name` with the name of your new cluster. - -When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. - -``` -gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=1.20.8-gke.900 -``` - -# 6. 
Get Authentication Credentials - -After creating your cluster, you need to get authentication credentials to interact with the cluster: - -``` -gcloud container clusters get-credentials cluster-name -``` - -This command configures `kubectl` to use the cluster you created. - -# 7. Install an Ingress - -The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. - -The following command installs an `nginx-ingress-controller` with a LoadBalancer service: - -``` -helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx -helm repo update -helm upgrade --install \ - ingress-nginx ingress-nginx/ingress-nginx \ - --namespace ingress-nginx \ - --set controller.service.type=LoadBalancer \ - --version 3.12.0 \ - --create-namespace -``` - -# 8. Get the Load Balancer IP - -To get the address of the load balancer, run: - -``` -kubectl get service ingress-nginx-controller --namespace=ingress-nginx -``` - -The result should look similar to the following: - -``` -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s -``` - -Save the `EXTERNAL-IP`. - -# 9. Set up DNS - -External traffic to the Rancher server will need to be directed at the load balancer you created. - -Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. - -There are many valid ways to set up the DNS. For help, refer to the Google Cloud documentation about [managing DNS records.](https://cloud.google.com/dns/docs/records) - -# 10. Install the Rancher Helm chart - -Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. - -Use the DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md b/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md deleted file mode 100644 index 24eb4b4ca4..0000000000 --- a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/_index.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Setting up Amazon ELB Network Load Balancer -weight: 5 ---- - -This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. - -These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. - -This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. - -Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. 
This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. - -# Setting up the Load Balancer - -Configuring an Amazon NLB is a multistage process: - -1. [Create Target Groups](#1-create-target-groups) -2. [Register Targets](#2-register-targets) -3. [Create Your NLB](#3-create-your-nlb) -4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) - -# Requirements - -These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. - -# 1. Create Target Groups - -Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. - -Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. - -Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. - -1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. -1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. -1. Click **Create target group** to create the first target group, regarding TCP port 443. - -> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) - -### Target Group (TCP port 443) - -Configure the first target group according to the table below. - -| Option | Setting | -|-------------------|-------------------| -| Target Group Name | `rancher-tcp-443` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `443` | -| VPC | Choose your VPC | - -Health check settings: - -| Option | Setting | -|---------------------|-----------------| -| Protocol | TCP | -| Port | `override`,`80` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -Click **Create target group** to create the second target group, regarding TCP port 80. - -### Target Group (TCP port 80) - -Configure the second target group according to the table below. - -| Option | Setting | -|-------------------|------------------| -| Target Group Name | `rancher-tcp-80` | -| Target type | `instance` | -| Protocol | `TCP` | -| Port | `80` | -| VPC | Choose your VPC | - - -Health check settings: - -| Option |Setting | -|---------------------|----------------| -| Protocol | TCP | -| Port | `traffic port` | -| Healthy threshold | `3` | -| Unhealthy threshold | `3` | -| Timeout | `6 seconds` | -| Interval | `10 seconds` | - -# 2. Register Targets - -Next, add your Linux nodes to both target groups. - -Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. - -{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} - -Select the instances (Linux nodes) you want to add, and click **Add to registered**. - -
-**Screenshot Add targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} - -
-**Screenshot Added targets to target group TCP port 443**
- -{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} - -When the instances are added, click **Save** on the bottom right of the screen. - -Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. - -# 3. Create Your NLB - -Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). - -1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). - -2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. - -3. Click **Create Load Balancer**. - -4. Choose **Network Load Balancer** and click **Create**. Then complete each form. - -- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) -- [Step 2: Configure Routing](#step-2-configure-routing) -- [Step 3: Register Targets](#step-3-register-targets) -- [Step 4: Review](#step-4-review) - -### Step 1: Configure Load Balancer - -Set the following fields in the form: - -- **Name:** `rancher` -- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. -- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. -- **Availability Zones:** Select Your **VPC** and **Availability Zones**. - -### Step 2: Configure Routing - -1. From the **Target Group** drop-down, choose **Existing target group**. -1. From the **Name** drop-down, choose `rancher-tcp-443`. -1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. - -### Step 3: Register Targets - -Since you registered your targets earlier, all you have to do is click **Next: Review**. - -### Step 4: Review - -Look over the load balancer details and click **Create** when you're satisfied. - -After AWS creates the NLB, click **Close**. - -# 4. Add listener to NLB for TCP port 80 - -1. Select your newly created NLB and select the **Listeners** tab. - -2. Click **Add listener**. - -3. Use `TCP`:`80` as **Protocol** : **Port** - -4. Click **Add action** and choose **Forward to..**. - -5. From the **Forward to** drop-down, choose `rancher-tcp-80`. - -6. Click **Save** in the top right of the screen. - -# Health Check Paths for NGINX Ingress and Traefik Ingresses - -K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. - -For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. - -- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. -- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. - -To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/content/rancher/v2.6/en/installation/resources/upgrading-cert-manager/_index.md b/content/rancher/v2.6/en/installation/resources/upgrading-cert-manager/_index.md deleted file mode 100644 index f3b21f8aed..0000000000 --- a/content/rancher/v2.6/en/installation/resources/upgrading-cert-manager/_index.md +++ /dev/null @@ -1,243 +0,0 @@ ---- -title: Upgrading Cert-Manager -weight: 4 ---- - -Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: - -1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) -1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. -1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) - -To address these changes, this guide will do two things: - -1. Document the procedure for upgrading cert-manager -1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data - -> **Important:** -> If you are currently running the cert-manager whose version is 1.5 or below, and want to upgrade both Rancher and cert-manager to a new version (1.6+ in the case of cert-manager), then you need to re-install both Rancher and cert-manager due to the API change in cert-manager 1.6. This will also be necessary if you are upgrading from a version of cert manager below 0.11 to a version of cert-manager above 0.11. Follow the steps below: - -> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server -> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager -> 3. Install the newer version of Rancher and cert-manager - -> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. - -# Upgrade Cert-Manager - -The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. 
Do not change the namespace cert-manager is running in or this can cause issues. - -In order to upgrade cert-manager, follow these instructions: - -### Option A: Upgrade cert-manager with Internet Access - -{{% accordion id="normal" label="Click to expand" %}} -1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) - - ```plain - helm uninstall cert-manager - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed - - ```plain - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager if needed - - ```plain - kubectl create namespace cert-manager - ``` - -1. Add the Jetstack Helm repository - - ```plain - helm repo add jetstack https://charts.jetstack.io - ``` - -1. Update your local Helm chart repository cache - - ```plain - helm repo update - ``` - -1. Install the new version of cert-manager - - ```plain - helm install \ - cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --version v0.12.0 - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Option B: Upgrade cert-manager in an Air Gap Environment - -{{% accordion id="airgap" label="Click to expand" %}} - -### Prerequisites - -Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. - -1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry) with the images needed for the upgrade. - -1. 
From a system connected to the internet, add the cert-manager repo to Helm - - ```plain - helm repo add jetstack https://charts.jetstack.io - helm repo update - ``` - -1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). - - ```plain - helm fetch jetstack/cert-manager --version v0.12.0 - ``` - -1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. - - The Helm 3 command is as follows: - - ```plain - helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ - --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - - The Helm 2 command is as follows: - - ```plain - helm template ./cert-manager-v0.12.0.tgz --output-dir . \ - --name cert-manager --namespace cert-manager \ - --set image.repository=/quay.io/jetstack/cert-manager-controller - --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook - --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector - ``` - -1. Download the required CRD file for cert-manager (old and new) - - ```plain - curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml - curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml - ``` - -### Install cert-manager - -1. Back up existing resources as a precaution - - ```plain - kubectl get -o yaml --all-namespaces \ - issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml - ``` - - > **Important:** - > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) - -1. Delete the existing cert-manager installation - - ```plain - kubectl -n cert-manager \ - delete deployment,sa,clusterrole,clusterrolebinding \ - -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' - ``` - - Delete the CustomResourceDefinition using the link to the version vX.Y you installed - - ```plain - kubectl delete -f cert-manager/cert-manager-crd-old.yaml - ``` - -1. Install the CustomResourceDefinition resources separately - - ```plain - kubectl apply -f cert-manager/cert-manager-crd.yaml - ``` - - > **Note:** - > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. 
This is a benign error and occurs due to the way kubectl performs resource validation. - -1. Create the namespace for cert-manager - - ```plain - kubectl create namespace cert-manager - ``` - -1. Install cert-manager - - ```plain - kubectl -n cert-manager apply -R -f ./cert-manager - ``` - -1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) - - ```plain - kubectl apply -f cert-manager-backup.yaml - ``` - -{{% /accordion %}} - -### Verify the Deployment - -Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: - -``` -kubectl get pods --namespace cert-manager - -NAME READY STATUS RESTARTS AGE -cert-manager-5c6866597-zw7kh 1/1 Running 0 2m -cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m -cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m -``` - -## Cert-Manager API change and data migration - ---- -_New in v2.6.4_ - -Rancher now supports cert-manager versions 1.6.2 and 1.7.1. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. To read more, see the [cert-manager docs]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#4-install-cert-manager). For instructions on upgrading cert-manager from version 1.5 to 1.6, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/). For instructions on upgrading cert-manager from version 1.6 to 1.7, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/). - ---- - -Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. - -Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. - -Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. - -We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). - -Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). 
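As a point of reference for the new format: with a 1.x release of cert-manager, a minimal ACME `Issuer` in the `cert-manager.io` API group using the newer `solvers` style might look like the sketch below. The issuer name, namespace, email, and ingress class are placeholders, not values required by Rancher:

```
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt-example          # placeholder name
  namespace: cattle-system           # placeholder; use the namespace your certificates live in
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com         # placeholder contact email
    privateKeySecretRef:
      name: letsencrypt-example-account-key
    solvers:
      - http01:
          ingress:
            class: nginx             # placeholder ingress class
```

Resources still written against the old `certmanager.k8s.io` API group, or using the pre-0.8 challenge solver configuration, need to be converted to this style as part of the upgrade.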
- -More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). - diff --git a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md b/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md deleted file mode 100644 index 5b1141b1f7..0000000000 --- a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/_index.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: Managing HPAs with kubectl -weight: 3029 ---- - -This section describes HPA management with `kubectl`. This document has instructions for how to: - -- Create an HPA -- Get information on HPAs -- Delete an HPA -- Configure your HPAs to scale with CPU or memory utilization -- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics - - -You can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. - -##### Basic kubectl Command for Managing HPAs - -If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: - -- Creating HPA - - - With manifest: `kubectl create -f ` - - - Without manifest (Just support CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` - -- Getting HPA info - - - Basic: `kubectl get hpa hello-world` - - - Detailed description: `kubectl describe hpa hello-world` - -- Deleting HPA - - - `kubectl delete hpa hello-world` - -##### HPA Manifest Definition Example - -The HPA manifest is the config file used for managing an HPA with `kubectl`. - -The following snippet demonstrates use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. - -```yml -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi -``` - - -Directive | Description ----------|----------| - `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | - `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-word` deployment. | - `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | - `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. - `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. - `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more that 100Mi of memory. -
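Note that `autoscaling/v2beta1` has since been superseded upstream. If your cluster already serves the stable `autoscaling/v2` API, the same HPA can be written as in the sketch below; this is an equivalent form under that assumption, not a Rancher-specific requirement:

```yml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
    # Scale up when average CPU utilization across pods exceeds 50% of the request
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50
    # Scale up when average memory usage across pods exceeds 100Mi
    - type: Resource
      resource:
        name: memory
        target:
          type: AverageValue
          averageValue: 100Mi
```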
- -##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) - -Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. - -Run the following commands to check if metrics are available in your installation: - -``` -$ kubectl top nodes -NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% -node-controlplane 196m 9% 1623Mi 42% -node-etcd 80m 4% 1090Mi 28% -node-worker 64m 3% 1146Mi 29% -$ kubectl -n kube-system top pods -NAME CPU(cores) MEMORY(bytes) -canal-pgldr 18m 46Mi -canal-vhkgr 20m 45Mi -canal-x5q5v 17m 37Mi -canal-xknnz 20m 37Mi -kube-dns-7588d5b5f5-298j2 0m 22Mi -kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi -metrics-server-97bc649d5-jxrlt 0m 12Mi -$ kubectl -n kube-system logs -l k8s-app=metrics-server -I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true -I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 -I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version -I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 -I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink -I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) -I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi -[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ -I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 -``` - - -##### Configuring HPA to Scale Using Custom Metrics with Prometheus - -You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. - -For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: - -- Prometheus is deployed in the cluster. -- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. -- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` - -Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. - -For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). - -1. Initialize Helm in your cluster. 
- ``` - # kubectl -n kube-system create serviceaccount tiller - kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller - helm init --service-account tiller - ``` - -1. Clone the `banzai-charts` repo from GitHub: - ``` - # git clone https://github.com/banzaicloud/banzai-charts - ``` - -1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. - ``` - # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system - ``` - -1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. - - 1. Check that the service pod is `Running`. Enter the following command. - ``` - # kubectl get pods -n kube-system - ``` - From the resulting output, look for a status of `Running`. - ``` - NAME READY STATUS RESTARTS AGE - ... - prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h - ... - ``` - 1. Check the service logs to make sure the service is running correctly by entering the command that follows. - ``` - # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system - ``` - Then review the log output to confirm the service is running. - {{% accordion id="prometheus-logs" label="Prometheus Adaptor Logs" %}} - ... - I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds - I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: - I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT - I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json - I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 - I0724 10:18:45.696766 1 request.go:836] Response Body: 
{"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} - I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.xip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK - I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} - I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] - I0724 10:18:51.727845 1 request.go:836] Request Body: 
{"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} - ... - {{% /accordion %}} - - - -1. Check that the metrics API is accessible from kubectl. - - - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. - ``` - # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. - {{% accordion id="custom-metrics-api-response" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","
singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} - - - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. - ``` - # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 - ``` - If the API is accessible, you should receive output that's similar to what follows. 
- {{% accordion id="custom-metrics-api-response-rancher" label="API Response" %}} - {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/
memory_failcnt","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} - {{% /accordion %}} diff --git a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md b/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md deleted file mode 100644 index 93e13be752..0000000000 --- a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/_index.md +++ /dev/null @@ -1,491 +0,0 @@ ---- -title: Testing HPAs with kubectl -weight: 3031 ---- - -This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/). - -For HPA to work correctly, service deployments should have resources request definitions for containers. Follow this hello-world example to test if HPA is working correctly. - -1. Configure `kubectl` to connect to your Kubernetes cluster. - -2. Copy the `hello-world` deployment manifest below. -{{% accordion id="hello-world" label="Hello World Manifest" %}} -``` -apiVersion: apps/v1beta2 -kind: Deployment -metadata: - labels: - app: hello-world - name: hello-world - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: hello-world - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - type: RollingUpdate - template: - metadata: - labels: - app: hello-world - spec: - containers: - - image: rancher/hello-world - imagePullPolicy: Always - name: hello-world - resources: - requests: - cpu: 500m - memory: 64Mi - ports: - - containerPort: 80 - protocol: TCP - restartPolicy: Always ---- -apiVersion: v1 -kind: Service -metadata: - name: hello-world - namespace: default -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: hello-world -``` -{{% /accordion %}} - -1. Deploy it to your cluster. - - ``` - # kubectl create -f - ``` - -1. 
Copy one of the HPAs below based on the metric type you're using: -{{% accordion id="service-deployment-resource-metrics" label="Hello World HPA: Resource Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 1000Mi -``` -{{% /accordion %}} -{{% accordion id="service-deployment-custom-metrics" label="Hello World HPA: Custom Metrics" %}} -``` -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: hello-world - namespace: default -spec: - scaleTargetRef: - apiVersion: extensions/v1beta1 - kind: Deployment - name: hello-world - minReplicas: 1 - maxReplicas: 10 - metrics: - - type: Resource - resource: - name: cpu - targetAverageUtilization: 50 - - type: Resource - resource: - name: memory - targetAverageValue: 100Mi - - type: Pods - pods: - metricName: cpu_system - targetAverageValue: 20m -``` -{{% /accordion %}} - -1. View the HPA info and description. Confirm that metric data is shown. - {{% accordion id="hpa-info-resource-metrics" label="Resource Metrics" %}} -1. Enter the following commands. - ``` - # kubectl get hpa - NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE - hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m - # kubectl describe hpa - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 1253376 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - {{% accordion id="hpa-info-custom-metrics" label="Custom Metrics" %}} -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive the output that follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 3514368 / 100Mi - "cpu_system" on pods: 0 / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - ``` - {{% /accordion %}} - - -1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). - -1. 
Test that pod autoscaling works as intended.
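   For example, a minimal way to drive load with Hey is to port-forward the `hello-world` Service and point Hey at it; the port, duration, and concurrency below are illustrative values, not requirements:

   ```
   # Terminal 1: expose the hello-world Service locally
   kubectl port-forward svc/hello-world 8080:80

   # Terminal 2: generate sustained load for 5 minutes with 50 concurrent workers
   hey -z 5m -c 50 http://localhost:8080/

   # Terminal 3 (optional): watch the HPA react while the load runs
   kubectl get hpa hello-world --watch
   ```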

- **To Test Autoscaling Using Resource Metrics:** - {{% accordion id="observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to two pods based on CPU Usage. - -1. View your HPA. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10928128 / 100Mi - resource cpu on pods (as a percentage of request): 56% (280m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm you've scaled to two pods. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-upscale-3-pods-cpu-cooldown" label="Upscale to 3 pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 9424896 / 100Mi - resource cpu on pods (as a percentage of request): 66% (333m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - ``` -2. Enter the following command to confirm three pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. 
- ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-f46kh 0/1 Running 0 1m - hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m - hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h - ``` - {{% /accordion %}} - {{% accordion id="observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 10070016 / 100Mi - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` - {{% /accordion %}} -
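   Before running the custom metrics scenarios that follow, it can be worth confirming that the `cpu_system` metric referenced by the HPA is actually served by the custom metrics API. A hedged example of such a check, following the same API path pattern shown in the adapter logs earlier (adjust the namespace and label selector to match your deployment):

   ```
   # Query the custom metrics API directly for the cpu_system metric of the hello-world pods
   kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/cpu_system?labelSelector=app%3Dhello-world"
   ```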
-**To Test Autoscaling Using Custom Metrics:** - {{% accordion id="custom-observe-upscale-2-pods-cpu" label="Upscale to 2 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale two pods based on CPU usage. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8159232 / 100Mi - "cpu_system" on pods: 7m / 20m - resource cpu on pods (as a percentage of request): 64% (321m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm two pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` - {{% /accordion %}} -{{% accordion id="observe-upscale-3-pods-cpu-cooldown-2" label="Upscale to 3 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows: - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - ``` -1. Enter the following command to confirm three pods are running. 
- ``` - # kubectl get pods - ``` - You should receive output similar to what follows: - ``` - # kubectl get pods - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="observe-upscale-4-pods" label="Upscale to 4 Pods: CPU Usage Up to Target" %}} -Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive output similar to what follows. - ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8374272 / 100Mi - "cpu_system" on pods: 27m / 20m - resource cpu on pods (as a percentage of request): 71% (357m) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - ``` -1. Enter the following command to confirm four pods are running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m - hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m - hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} -{{% accordion id="custom-metrics-observe-downscale-1-pod" label="Downscale to 1 Pod: All Metrics Below Target" %}} -Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. - -1. Enter the following command. - ``` - # kubectl describe hpa - ``` - You should receive similar output to what follows. 
- ``` - Name: hello-world - Namespace: default - Labels: - Annotations: - CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 - Reference: Deployment/hello-world - Metrics: ( current / target ) - resource memory on pods: 8101888 / 100Mi - "cpu_system" on pods: 8m / 20m - resource cpu on pods (as a percentage of request): 0% (0) / 50% - Min replicas: 1 - Max replicas: 10 - Conditions: - Type Status Reason Message - ---- ------ ------ ------- - AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 - ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource - ScalingLimited False DesiredWithinRange the desired count is within the acceptable range - Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target - Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target - Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target - ``` -1. Enter the following command to confirm a single pods is running. - ``` - # kubectl get pods - ``` - You should receive output similar to what follows. - ``` - NAME READY STATUS RESTARTS AGE - hello-world-54764dfbf8-q6l82 1/1 Running 0 6h - ``` -{{% /accordion %}} diff --git a/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md b/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md deleted file mode 100644 index 9ee0922040..0000000000 --- a/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/_index.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: "Layer 4 and Layer 7 Load Balancing" -description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" -weight: 3041 ---- -Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. - -## Layer-4 Load Balancer - -Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. - -Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. - -> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. - -### Support for Layer-4 Load Balancing - -Support for layer-4 load balancer varies based on the underlying cloud provider. 
-
-Cluster Deployment | Layer-4 Load Balancer Support
-----------------------------------------------|--------------------------------
-Amazon EKS | Supported by AWS cloud provider
-Google GKE | Supported by GCE cloud provider
-Azure AKS | Supported by Azure cloud provider
-RKE on EC2 | Supported by AWS cloud provider
-RKE on DigitalOcean | Limited NGINX or third-party Ingress*
-RKE on vSphere | Limited NGINX or third-party Ingress*
-RKE on Custom Hosts (e.g. bare-metal servers) | Limited NGINX or third-party Ingress*
-Third-party MetalLB | Limited NGINX or third-party Ingress*
-
-\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/)
-
-## Layer-7 Load Balancer
-
-A layer-7 load balancer (or ingress controller) supports host- and path-based load balancing and SSL termination. It forwards only HTTP and HTTPS traffic and therefore listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller.
-
-### Support for Layer-7 Load Balancing
-
-Support for layer-7 load balancing varies based on the underlying cloud provider.
-
-Cluster Deployment | Layer-7 Load Balancer Support
-----------------------------------------------|--------------------------------
-Amazon EKS | Supported by AWS cloud provider
-Google GKE | Supported by GKE cloud provider
-Azure AKS | Not Supported
-RKE on EC2 | Nginx Ingress Controller
-RKE on DigitalOcean | Nginx Ingress Controller
-RKE on vSphere | Nginx Ingress Controller
-RKE on Custom Hosts (e.g. bare-metal servers) | Nginx Ingress Controller
-
-### Host Names in Layer-7 Load Balancer
-
-Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer.
-
-Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. The Google Load Balancer provides a single routable IP address, while the Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following:
-
-1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposed by the layer-7 load balancer.
-2. Ask Rancher to generate an xip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name that ends in a.b.c.d.xip.io.
-
-The benefit of using xip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate.
-
-## Related Links
-
-- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/)
diff --git a/content/rancher/v2.6/en/logging/custom-resource-config/_index.md b/content/rancher/v2.6/en/logging/custom-resource-config/_index.md
deleted file mode 100644
index 71a5cfda34..0000000000
--- a/content/rancher/v2.6/en/logging/custom-resource-config/_index.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: Custom Resource Configuration
-weight: 5
----
-
-The following Custom Resource Definitions are used to configure logging:
-
-- [Flow and ClusterFlow](./flows)
-- [Output and ClusterOutput](./outputs)
\ No newline at end of file
diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/_index.md b/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/_index.md
deleted file mode 100644
index f0b2f96fbc..0000000000
--- a/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/_index.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Advanced Configuration
-weight: 500
----
-
-### Alertmanager
-
-For information on configuring the Alertmanager custom resource, see [this page.](./alertmanager)
-
-### Prometheus
-
-For information on configuring the Prometheus custom resource, see [this page.](./prometheus)
-
-### PrometheusRules
-
-For information on configuring the PrometheusRules custom resource, see [this page.](./prometheusrules)
\ No newline at end of file
diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/receiver/_index.md b/content/rancher/v2.6/en/monitoring-alerting/configuration/receiver/_index.md
deleted file mode 100644
index 3a9daaeb6c..0000000000
--- a/content/rancher/v2.6/en/monitoring-alerting/configuration/receiver/_index.md
+++ /dev/null
@@ -1,310 +0,0 @@
----
-title: Receiver Configuration
-shortTitle: Receivers
-weight: 1
----
-
-The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus.
-
-> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../how-monitoring-works/#3-how-alertmanager-works)
-
-- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui)
-- [Receiver Configuration](#receiver-configuration)
-  - [Slack](#slack)
-  - [Email](#email)
-  - [PagerDuty](#pagerduty)
-  - [Opsgenie](#opsgenie)
-  - [Webhook](#webhook)
-  - [Custom](#custom)
-  - [Teams](#teams)
-  - [SMS](#sms)
-- [Configuring Multiple Receivers](#configuring-multiple-receivers)
-- [Example Alertmanager Config](../examples/#example-alertmanager-config)
-- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts)
-- [Trusted CA for Notifiers](#trusted-ca-for-notifiers)
-
-# Creating Receivers in the Rancher UI
-
-> **Prerequisites:**
->
->- The monitoring application needs to be installed.
->- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise, you will only be able to make changes by modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to the kinds of Alertmanager configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement.
-
-To create notification receivers in the Rancher UI,
-
-{{% tabs %}}
-{{% tab "Rancher v2.6.5+" %}}
-
-1. Go to the cluster where you want to create receivers. Click **Monitoring -> Alerting -> AlertManagerConfigs**.
-1. Click **Create**.
-1. Click **Add Receiver**.
-1. Enter a **Name** for the receiver.
-1. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below.
-1. Click **Create**.
-
-{{% /tab %}}
-{{% tab "Rancher before v2.6.5" %}}
-
-1. Go to the cluster where you want to create receivers. Click **Monitoring** and click **Receiver**.
-2. Enter a name for the receiver.
-3. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below.
-4. Click **Create**.
-
-{{% /tab %}}
-{{% /tabs %}}
-
-**Result:** Alerts can be configured to send notifications to the receiver(s).
-
-# Receiver Configuration
-
-The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver)
-
-### Native vs. Non-native Receivers
-
-By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI.
-
-For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI.
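Under the hood, a non-native integration is wired into the Alertmanager Config Secret as an ordinary webhook receiver whose URL points at the driver's Service. The snippet below is an illustrative sketch only, reusing the prom2teams endpoint format shown later on this page; `ns-1` stands for the namespace in which the Alerting Drivers app is installed.

```yaml
receivers:
- name: teams-receiver
  webhook_configs:
  # Alertmanager posts the alert to the prom2teams driver,
  # which then forwards it to the configured Teams webhook.
  - url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1
    send_resolved: true
```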
- -Currently the Rancher Alerting Drivers app provides access to the following integrations: -- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver -- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver - -The following types of receivers can be configured in the Rancher UI: - -- Slack -- Email -- PagerDuty -- Opsgenie -- Webhook -- Custom -- Teams -- SMS - -The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. - -# Slack - -| Field | Type | Description | -|------|--------------|------| -| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | -| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | -| Proxy URL | String | Proxy for the webhook notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -# Email - -| Field | Type | Description | -|------|--------------|------| -| Default Recipient Address | String | The email address that will receive notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -SMTP options: - -| Field | Type | Description | -|------|--------------|------| -| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | -| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | -| Use TLS | Bool | Use TLS for encryption. | -| Username | String | Enter a username to authenticate with the SMTP server. | -| Password | String | Enter a password to authenticate with the SMTP server. | - -# PagerDuty - -| Field | Type | Description | -|------|------|-------| -| Integration Type | String | `Events API v2` or `Prometheus`. | -| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | -| Proxy URL | String | Proxy for the PagerDuty notifications. | -| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -# Opsgenie - -| Field | Description | -|------|-------------| -| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | -| Proxy URL | Proxy for the Opsgenie notifications. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - -Opsgenie Responders: - -| Field | Type | Description | -|-------|------|--------| -| Type | String | Schedule, Team, User, or Escalation. 
For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | -| Send To | String | Id, Name, or Username of the Opsgenie recipient. | - -# Webhook - -| Field | Description | -|-------|--------------| -| URL | Webhook URL for the app of your choice. | -| Proxy URL | Proxy for the webhook notification. | -| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | - - - -# Custom - -The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. - -# Teams - -### Enabling the Teams Receiver for Rancher Managed Clusters - -The Teams receiver is not a native receiver and must be enabled before it can be used. You can enable the Teams receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the Teams option selected. - -1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Apps & Marketplace**. -1. Click the **Alerting Drivers** app. -1. Click the **Helm Deploy Options** tab. -1. Select the **Teams** option and click **Install**. -1. Take note of the namespace used as it will be required in a later step. - -### Configure the Teams Receiver - -The Teams receiver can be configured by updating its ConfigMap. For example, the following is a minimal Teams receiver configuration. - -```yaml -[Microsoft Teams] -teams-instance-1: https://your-teams-webhook-url -``` - -When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). - -Use the example below as the URL where: - -- `ns-1` is replaced with the namespace where the `rancher-alerting-drivers` app is installed - -```yaml -url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1 -``` - - - -# SMS - -### Enabling the SMS Receiver for Rancher Managed Clusters - -The SMS receiver is not a native receiver and must be enabled before it can be used. You can enable the SMS receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the SMS option selected. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster where you want to install `rancher-alerting-drivers` and click **Explore**. -1. In the left navigation bar, click -1. Click the **Alerting Drivers** app. -1. Click the **Helm Deploy Options** tab -1. Select the **SMS** option and click **Install**. -1. Take note of the namespace used as it will be required in a later step. - -### Configure the SMS Receiver - -The SMS receiver can be configured by updating its ConfigMap. For example, the following is a minimal SMS receiver configuration. - -```yaml -providers: - telegram: - token: 'your-token-from-telegram' - -receivers: -- name: 'telegram-receiver-1' - provider: 'telegram' - to: - - '123456789' -``` - -When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). - -Use the example below as the name and URL, where: - -- the name assigned to the receiver, e.g. `telegram-receiver-1`, must match the name in the `receivers.name` field in the ConfigMap, e.g. 
`telegram-receiver-1` -- `ns-1` in the URL is replaced with the namespace where the `rancher-alerting-drivers` app is installed - -```yaml -name: telegram-receiver-1 -url http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert -``` - - - - -# Configuring Multiple Receivers - -By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. - -It is also possible to send alerts to multiple notification systems. One way is to configure the Receiver using custom YAML, in which case you can add the configuration for multiple notification systems, as long as you are sure that both systems should receive the same messages. - -You can also set up multiple receivers by using the `continue` option for a route, so that the alerts sent to a receiver continue being evaluated in the next level of the routing tree, which could contain another receiver. - - -# Example Alertmanager Configs - -### Slack -To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: - -```yaml -route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 3h - receiver: 'slack-notifications' -receivers: -- name: 'slack-notifications' - slack_configs: - - send_resolved: true - text: '{{ template "slack.rancher.text" . }}' - api_url: -templates: -- /etc/alertmanager/config/*.tmpl -``` - -### PagerDuty -To set up notifications via PagerDuty, use the example below from the [PagerDuty documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) as a guideline. This example sets up a route that captures alerts for a database service and sends them to a receiver linked to a service that will directly notify the DBAs in PagerDuty, while all other alerts will be directed to a default receiver with a different PagerDuty integration key. - -The following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret. The `service_key` should be updated to use your PagerDuty integration key and can be found as per the "Integrating with Global Event Routing" section of the PagerDuty documentation. For the full list of configuration options, refer to the [Prometheus documentation](https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config). - -```yaml -route: - group_by: [cluster] - receiver: 'pagerduty-notifications' - group_interval: 5m - routes: - - match: - service: database - receiver: 'database-notifcations' - -receivers: -- name: 'pagerduty-notifications' - pagerduty_configs: - - service_key: 'primary-integration-key' - -- name: 'database-notifcations' - pagerduty_configs: - - service_key: 'database-integration-key' -``` - -# Example Route Config for CIS Scan Alerts - -While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. 
- -For example, the following example route configuration could be used with a Slack receiver named `test-cis`: - -```yaml -spec: - receiver: test-cis - group_by: -# - string - group_wait: 30s - group_interval: 30s - repeat_interval: 30s - match: - job: rancher-cis-scan -# key: string - match_re: - {} -# key: string -``` - -For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.]({{}}/rancher/v2.6/en/cis-scans/#enabling-alerting-for-rancher-cis-benchmark) - - -# Trusted CA for Notifiers - -If you need to add a trusted CA to your notifier, follow the steps in [this section.](../helm-chart-options/#trusted-ca-for-notifiers) \ No newline at end of file diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/route/_index.md b/content/rancher/v2.6/en/monitoring-alerting/configuration/route/_index.md deleted file mode 100644 index 4366f20a9a..0000000000 --- a/content/rancher/v2.6/en/monitoring-alerting/configuration/route/_index.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Route Configuration -shortTitle: Routes -weight: 5 ---- - -The route configuration is the section of the Alertmanager custom resource that controls how the alerts fired by Prometheus are grouped and filtered before they reach the receiver. - -When a Route is changed, the Prometheus Operator regenerates the Alertmanager custom resource to reflect the changes. - -For more information about configuring routes, refer to the [official Alertmanager documentation.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) - -> This section assumes familiarity with how monitoring components work together. For more information, see [this section.]({{}}/rancher/v2.6/en/monitoring-alerting/how-monitoring-works) - -- [Route Restrictions](#route-restrictions) -- [Route Configuration](#route-configuration) - - [Receiver](#receiver) - - [Grouping](#grouping) - - [Matching](#matching) - -# Route Restrictions - -Alertmanager proxies alerts for Prometheus based on its receivers and a routing tree that filters alerts to certain receivers based on labels. - -Alerting drivers proxy alerts for Alertmanager to non-native receivers, such as Microsoft Teams and SMS. - -In the Rancher UI for configuring routes and receivers, you can configure routing trees with one root and then a depth of one more level, for a tree with a depth of two. But if you use a `continue` route when configuring Alertmanager directly, you can make the tree deeper. - -Each receiver is for one or more notification providers. So if you know that every alert for Slack should also go to PagerDuty, you can configure both in the same receiver. - -# Route Configuration - -### Note on Labels and Annotations - -Labels should be used for identifying information that can affect the routing of notifications. Identifying information about the alert could consist of a container name, or the name of the team that should be notified. - -Annotations should be used for information that does not affect who receives the alert, such as a runbook url or error message. - - -### Receiver -The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. - -### Grouping - -{{% tabs %}} -{{% tab "Rancher v2.6.5+" %}} - -> **Note** As of Rancher v2.6.5 `Group By` now accepts a list of strings instead of key-value pairs. 
See the [upstream documentation](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#route) for details. - -| Field | Default | Description | -|-------|--------------|---------| -| Group By | N/a | List of labels to group by. Labels must not be repeated (unique list). Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list. | -| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | -| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | -| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | - -{{% /tab %}} -{{% tab "Rancher before v2.6.5" %}} - -| Field | Default | Description | -|-------|--------------|---------| -| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | -| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | -| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | -| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | - -{{% /tab %}} -{{% /tabs %}} - - - -### Matching - -The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: - -```yaml -match: - [ : , ... ] -``` - -The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: - -```yaml -match_re: - [ : , ... 
] -``` diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/_index.md b/content/rancher/v2.6/en/monitoring-alerting/guides/_index.md deleted file mode 100644 index 1ef6fc5cce..0000000000 --- a/content/rancher/v2.6/en/monitoring-alerting/guides/_index.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Monitoring Guides -shortTitle: Guides -weight: 4 ---- - -- [Enable monitoring](./enable-monitoring) -- [Uninstall monitoring](./uninstall) -- [Monitoring workloads](./monitoring-workloads) -- [Customizing Grafana dashboards](./customize-grafana) -- [Persistent Grafana dashboards](./persist-grafana) -- [Debugging high memory usage](./memory-usage) -- [Migrating from Monitoring V1 to V2](./migrating) \ No newline at end of file diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/persist-grafana/_index.md b/content/rancher/v2.6/en/monitoring-alerting/guides/persist-grafana/_index.md deleted file mode 100644 index 4e36acf334..0000000000 --- a/content/rancher/v2.6/en/monitoring-alerting/guides/persist-grafana/_index.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Persistent Grafana Dashboards -weight: 6 ---- - -To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. - -- [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) -- [Known Issues](#known-issues) - -# Creating a Persistent Grafana Dashboard - -{{% tabs %}} -{{% tab "Rancher v2.5.8+" %}} - -> **Prerequisites:** -> -> - The monitoring application needs to be installed. -> - To create the persistent dashboard, you must have at least the **Manage Config Maps** Rancher RBAC permissions assigned to you in the project or namespace that contains the Grafana Dashboards. This correlates to the `monitoring-dashboard-edit` or `monitoring-dashboard-admin` Kubernetes native RBAC Roles exposed by the Monitoring chart. -> - To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.]({{}}/rancher/v2.6/en/monitoring-alerting/rbac/#users-with-rancher-cluster-manager-based-permissions) - -### 1. Get the JSON model of the dashboard that you want to persist - -To create a persistent dashboard, you will need to get the JSON model of the dashboard you want to persist. You can use a premade dashboard or build your own. - -To use a premade dashboard, go to [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards), open up its detail page, and click on the **Download JSON** button to get the JSON model for the next step. - -To use your own dashboard: - -1. Click on the link to open Grafana. On the cluster detail page, click **Monitoring**. -1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. - - > **Note:** Regardless of who has the password, in order to access the Grafana instance, you still need at least the Manage Services or View Monitoring permissions in the project that Rancher Monitoring is deployed into. Alternative credentials can also be supplied on deploying or upgrading the chart. -1. Create a dashboard using Grafana's UI. 
Once complete, go to the dashboard's settings by clicking on the gear icon in the top navigation menu. In the left navigation menu, click **JSON Model**. -1. Copy the JSON data structure that appears. - -### 2. Create a ConfigMap using the Grafana JSON model - -Create a ConfigMap in the namespace that contains your Grafana Dashboards (e.g. cattle-dashboards by default). - -The ConfigMap should look like this: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - grafana_dashboard: "1" - name: - namespace: cattle-dashboards # Change if using a non-default namespace -data: - .json: |- - -``` - -By default, Grafana is configured to watch all ConfigMaps with the `grafana_dashboard` label within the `cattle-dashboards` namespace. - -To specify that you would like Grafana to watch for ConfigMaps across all namespaces, refer to [this section.](#configuring-namespaces-for-the-grafana-dashboard-configmap) - -To create the ConfigMap in the Rancher UI, - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. -1. Click **More Resources > Core > ConfigMaps**. -1. Click **Create**. -1. Set up the key-value pairs similar to the example above. When entering the value for `.json`, click **Read from File** to upload the JSON data model as the value. -1. Click **Create**. - -**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. - -Dashboards that are persisted using ConfigMaps cannot be deleted or edited from the Grafana UI. - -If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. - -### Configuring Namespaces for the Grafana Dashboard ConfigMap - -To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: - -``` -grafana.sidecar.dashboards.searchNamespace=ALL -``` - -Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. - -{{% /tab %}} -{{% tab "Rancher before v2.5.8" %}} -> **Prerequisites:** -> -> - The monitoring application needs to be installed. -> - You must have the cluster-admin ClusterRole permission. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. On the **Clusters** page, go to the cluster where you want to configure the Grafana namespace and click **Explore**. -1. In the left navigation bar, click **Monitoring**. -1. Click **Grafana**. -1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. - - > **Note:** Regardless of who has the password, cluster administrator permission in Rancher is still required to access the Grafana instance. -1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. -1. In the left navigation menu, click **JSON Model**. -1. Copy the JSON data structure that appears. -1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. 
Paste the JSON into the ConfigMap in the format shown in the example below: - - ```yaml - apiVersion: v1 - kind: ConfigMap - metadata: - labels: - grafana_dashboard: "1" - name: - namespace: cattle-dashboards - data: - .json: |- - - ``` - -**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. - -Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. - -To prevent the persistent dashboard from being deleted when Monitoring v2 is uninstalled, add the following annotation to the `cattle-dashboards` namespace: - -``` -helm.sh/resource-policy: "keep" -``` - -{{% /tab %}} -{{% /tabs %}} - -# Known Issues - -For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the `cattle-dashboards` namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. - -This annotation will be added by default in the new monitoring chart released by Rancher v2.5.8, but it still needs to be manually applied for users of earlier Rancher versions. diff --git a/content/rancher/v2.6/en/monitoring-alerting/how-monitoring-works/_index.md b/content/rancher/v2.6/en/monitoring-alerting/how-monitoring-works/_index.md deleted file mode 100644 index 30b19cdbc6..0000000000 --- a/content/rancher/v2.6/en/monitoring-alerting/how-monitoring-works/_index.md +++ /dev/null @@ -1,256 +0,0 @@ ---- -title: How Monitoring Works -weight: 1 ---- - -1. [Architecture Overview](#1-architecture-overview) -2. [How Prometheus Works](#2-how-prometheus-works) -3. [How Alertmanager Works](#3-how-alertmanager-works) -4. [Monitoring V2 Specific Components](#4-monitoring-v2-specific-components) -5. [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics) - -# 1. Architecture Overview - -_**The following sections describe how data flows through the Monitoring V2 application:**_ - -### Prometheus Operator - -Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created. When the Prometheus configuration resources are created, Prometheus Operator calls the Prometheus API to sync the new configuration. As the diagram at the end of this section shows, the Prometheus Operator acts as the intermediary between Prometheus and Kubernetes, calling the Prometheus API to synchronize Prometheus with the monitoring-related resources in Kubernetes. - -### ServiceMonitors and PodMonitors - -ServiceMonitors and PodMonitors declaratively specify targets, such as Services and Pods, that need to be monitored. - -- Targets are scraped on a recurring schedule based on the configured Prometheus scrape interval, and the metrics that are scraped are stored into the Prometheus Time Series Database (TSDB). - -- In order to perform the scrape, ServiceMonitors and PodMonitors are defined with label selectors that determine which Services or Pods should be scraped and endpoints that determine how the scrape should happen on the given target, e.g., scrape/metrics in TCP 10252, proxying through IP addr x.x.x.x. - -- Out of the box, Monitoring V2 comes with certain pre-configured exporters that are deployed based on the type of Kubernetes cluster that it is deployed on. 
For more information, see [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics). - -### How PushProx Works - -- Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called **PushProx**. The Kubernetes components that expose metrics to Prometheus through PushProx are the following: -`kube-controller-manager`, `kube-scheduler`, `etcd`, and `kube-proxy`. - -- For each PushProx exporter, we deploy one PushProx client onto all target nodes. For example, a PushProx client is deployed onto all controlplane nodes for kube-controller-manager, all etcd nodes for kube-etcd, and all nodes for kubelet. - -- We deploy exactly one PushProx proxy per exporter. The process for exporting metrics is as follows: - -1. The PushProx Client establishes an outbound connection with the PushProx Proxy. -1. The client then polls the proxy for scrape requests that have come into the proxy. -1. When the proxy receives a scrape request from Prometheus, the client sees it as a result of the poll. -1. The client scrapes the internal component. -1. The internal component responds by pushing metrics back to the proxy. - - -
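Expressed in plain Prometheus scrape-configuration terms, the net effect of this flow is that Prometheus sends its scrape request to the proxy instead of connecting to the node directly. The snippet below is only an illustrative sketch (the chart generates the equivalent configuration through ServiceMonitors rather than a hand-written scrape config), and the job name, proxy address, and node address are assumptions.

```yaml
scrape_configs:
- job_name: kube-controller-manager
  # Prometheus sends the scrape request to the PushProx proxy...
  proxy_url: http://pushprox-kube-controller-manager-proxy:8080
  static_configs:
  # ...and the polling client on the controlplane node performs the actual
  # scrape of the component listening on the host network (e.g. TCP 10252).
  - targets: ['172.16.0.10:10252']
```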

-Process for Exporting Metrics with PushProx:
- -![Process for Exporting Metrics with PushProx]({{}}/img/rancher/pushprox-process.svg) - -### PrometheusRules - -PrometheusRules allow users to define rules for what metrics or time series database queries should result in alerts being fired. Rules are evaluated on an interval. - -- **Recording rules** create a new time series based on existing series that have been collected. They are frequently used to precompute complex queries. -- **Alerting rules** run a particular query and fire an alert from Prometheus if the query evaluates to a non-zero value. - -### Alert Routing - -Once Prometheus determines that an alert needs to be fired, alerts are forwarded to **Alertmanager**. - -- Alerts contain labels that come from the PromQL query itself and additional labels and annotations that can be provided as part of specifying the initial PrometheusRule. - -- Before receiving any alerts, Alertmanager will use the **routes** and **receivers** specified in its configuration to form a routing tree on which all incoming alerts are evaluated. Each node of the routing tree can specify additional grouping, labeling, and filtering that needs to happen based on the labels attached to the Prometheus alert. A node on the routing tree (usually a leaf node) can also specify that an alert that reaches it needs to be sent out to a configured Receiver, e.g., Slack, PagerDuty, SMS, etc. Note that Alertmanager will send an alert first to **alertingDriver**, then alertingDriver will send or forward alert to the proper destination. - -- Routes and receivers are also stored in the Kubernetes API via the Alertmanager Secret. When the Secret is updated, Alertmanager is also updated automatically. Note that routing occurs via labels only (not via annotations, etc.). - -
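As a concrete illustration, the routing-tree portion of an Alertmanager configuration might look like the following sketch. The receiver names and label matchers are assumptions for illustration, not defaults shipped with the chart.

```yaml
route:
  # Root node: anything not matched by a child route goes to the default receiver
  receiver: default-receiver
  group_by: ['cluster', 'alertname']
  routes:
  # Leaf node: alerts labeled severity=critical page the on-call rotation
  - match:
      severity: critical
    receiver: oncall-pagerduty
  # Leaf node: alerts labeled team=front-end are sent to the team's Slack channel
  - match:
      team: front-end
    receiver: front-end-slack
```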
-How data flows through the monitoring application:
- - -# 2. How Prometheus Works - -### Storing Time Series Data - -After collecting metrics from exporters, Prometheus stores the time series in a local on-disk time series database. Prometheus optionally integrates with remote systems, but `rancher-monitoring` uses local storage for the time series database. - -Once stored, users can query this TSDB using PromQL, the query language for Prometheus. - -PromQL queries can be visualized in one of two ways: - -1. By supplying the query in Prometheus's Graph UI, which will show a simple graphical view of the data. -1. By creating a Grafana Dashboard that contains the PromQL query and additional formatting directives that label axes, add units, change colors, use alternative visualizations, etc. - -### Defining Rules for Prometheus - -Rules define queries that Prometheus needs to execute on a regular `evaluationInterval` to perform certain actions, such as firing an alert (alerting rules) or precomputing a query based on others existing in its TSDB (recording rules). These rules are encoded in PrometheusRules custom resources. When PrometheusRule custom resources are created or updated, the Prometheus Operator observes the change and calls the Prometheus API to synchronize the set of rules that Prometheus is currently evaluating on a regular interval. - -A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: - -- The name of the new alert or record -- A PromQL expression for the new alert or record -- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) -- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. - -On evaluating a [rule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#rule), Prometheus will execute the provided PromQL query, add additional provided labels (or annotations - only for alerting rules), and execute the appropriate action for the rule. For example, an Alerting Rule that adds `team: front-end` as a label to the provided PromQL query will append that label to the fired alert, which will allow Alertmanager to forward the alert to the correct Receiver. - -### Alerting and Recording Rules - -Prometheus doesn't maintain the state of whether alerts are active. It fires alerts repetitively at every evaluation interval, relying on Alertmanager to group and filter the alerts into meaningful notifications. - -The `evaluation_interval` constant defines how often Prometheus evaluates its alerting rules against the time series database. Similar to the `scrape_interval`, the `evaluation_interval` also defaults to one minute. - -The rules are contained in a set of rule files. Rule files include both alerting rules and recording rules, but only alerting rules result in alerts being fired after their evaluation. - -For recording rules, Prometheus runs a query, then stores it as a time series. This synthetic time series is useful for storing the results of an expensive or time-consuming query so that it can be queried more quickly in the future. - -Alerting rules are more commonly used. Whenever an alerting rule evaluates to a positive number, Prometheus fires an alert. 
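Putting those fields together, a PrometheusRule that defines a single RuleGroup containing one recording rule and one alerting rule might look like the following sketch. The metric, threshold, and namespace are illustrative assumptions only.

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: front-end-rules
  namespace: cattle-monitoring-system
spec:
  groups:
  - name: front-end.rules
    rules:
    # Recording rule: precompute an expensive query as a new series
    - record: job:http_requests_total:rate5m
      expr: sum by (job) (rate(http_requests_total[5m]))
    # Alerting rule: fire when the precomputed series crosses a threshold
    - alert: HighRequestRate
      expr: job:http_requests_total:rate5m > 100
      for: 5m
      labels:
        severity: warning
        team: front-end
      annotations:
        summary: "Request rate for {{ $labels.job }} has been above 100 req/s for 5 minutes."
```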
- -The Rule file adds labels and annotations to alerts before firing them, depending on the use case: - -- Labels indicate information that identifies the alert and could affect the routing of the alert. For example, if when sending an alert about a certain container, the container ID could be used as a label. - -- Annotations denote information that doesn't affect where an alert is routed, for example, a runbook or an error message. - -# 3. How Alertmanager Works - -The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of the following tasks: - -- Deduplicating, grouping, and routing alerts to the correct receiver integration such as email, PagerDuty, or OpsGenie - -- Silencing and inhibition of alerts - -- Tracking alerts that fire over time - -- Sending out the status of whether an alert is currently firing, or if it is resolved - -### Alerts Forwarded by alertingDrivers - -When alertingDrivers are installed, this creates a `Service` that can be used as the receiver's URL for Teams or SMS, based on the alertingDriver's configuration. The URL in the Receiver points to the alertingDrivers; so the Alertmanager sends alert first to alertingDriver, then alertingDriver forwards or sends alert to the proper destination. - -### Routing Alerts to Receivers - -Alertmanager coordinates where alerts are sent. It allows you to group alerts based on labels and fire them based on whether certain labels are matched. One top-level route accepts all alerts. From there, Alertmanager continues routing alerts to receivers based on whether they match the conditions of the next route. - -While the Rancher UI forms only allow editing a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager Secret. - -### Configuring Multiple Receivers - -By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. - -By editing custom YAML in the Alertmanager or Receiver configuration, you can also send alerts to multiple notification systems. For more information, see the section on configuring [Receivers.](../configuration/receiver/#configuring-multiple-receivers) - -# 4. Monitoring V2 Specific Components - -Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. - -Prometheus Operator will automatically update your Prometheus configuration based on the live state of the resources and configuration options that are edited in the Rancher UI. - -### Resources Deployed by Default - -By default, a set of resources curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring/Alerting stack. 
- -The resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). - -### Default Exporters - -Monitoring V2 deploys three default exporters that provide additional metrics for Prometheus to store: - -1. `node-exporter`: exposes hardware and OS metrics for Linux hosts. For more information on `node-exporter`, refer to the [upstream documentation](https://prometheus.io/docs/guides/node-exporter/). - -1. `windows-exporter`: exposes hardware and OS metrics for Windows hosts (only deployed on Windows clusters). For more information on `windows-exporter`, refer to the [upstream documentation](https://github.com/prometheus-community/windows_exporter). - -1. `kube-state-metrics`: expose additional metrics that track the state of resources contained in the Kubernetes API (e.g., pods, workloads, etc.). For more information on `kube-state-metrics`, refer to the [upstream documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs). - -ServiceMonitors and PodMonitors will scrape these exporters, as defined [here](#defining-what-metrics-are-scraped). Prometheus stores these metrics, and you can query the results via either Prometheus's UI or Grafana. - -See the [architecture](#1-architecture-overview) section for more information on recording rules, alerting rules, and Alertmanager. - -### Components Exposed in the Rancher UI - -When the monitoring application is installed, you will be able to edit the following components in the Rancher UI: - -| Component | Type of Component | Purpose and Common Use Cases for Editing | -|--------------|------------------------|---------------------------| -| ServiceMonitor | Custom resource | Sets up Kubernetes Services to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | -| PodMonitor | Custom resource | Sets up Kubernetes Pods to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | -| Receiver | Configuration block (part of Alertmanager) | Modifies information on where to send an alert (e.g., Slack, PagerDuty, etc.) and any necessary information to send the alert (e.g., TLS certs, proxy URLs, etc.). Automatically updates the Alertmanager custom resource. | -| Route | Configuration block (part of Alertmanager) | Modifies the routing tree that is used to filter, label, and group alerts based on labels and send them to the appropriate Receiver. Automatically updates the Alertmanager custom resource. | -| PrometheusRule | Custom resource | Defines additional queries that need to trigger alerts or define materialized views of existing series that are within Prometheus's TSDB. Automatically updates the Prometheus custom resource. 
| - -### PushProx - -PushProx allows Prometheus to scrape metrics across a network boundary, which prevents users from having to expose metrics ports for internal Kubernetes components on each node in a Kubernetes cluster. - -Since the metrics for Kubernetes components are generally exposed on the host network of nodes in the cluster, PushProx deploys a DaemonSet of clients that sit on the hostNetwork of each node and make an outbound connection to a single proxy that is sitting on the Kubernetes API. Prometheus can then be configured to proxy scrape requests through the proxy to each client, which allows it to scrape metrics from the internal Kubernetes components without requiring any inbound node ports to be open. - -Refer to [Scraping Metrics with PushProx](#scraping-metrics-with-pushprox) for more. - -# 5. Scraping and Exposing Metrics - -### Defining what Metrics are Scraped - -ServiceMonitors and PodMonitors define targets that are intended for Prometheus to scrape. The [Prometheus custom resource](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md#prometheus) tells Prometheus which ServiceMonitors or PodMonitors it should use to find out where to scrape metrics from. - -The Prometheus Operator observes the ServiceMonitors and PodMonitors. When it observes that they are created or updated, it calls the Prometheus API to update the scrape configuration in the Prometheus custom resource and keep it in sync with the scrape configuration in the ServiceMonitors or PodMonitors. This scrape configuration tells Prometheus which endpoints to scrape metrics from and how it will label the metrics from those endpoints. - -Prometheus scrapes all of the metrics defined in its scrape configuration at every `scrape_interval`, which is one minute by default. - -The scrape configuration can be viewed as part of the Prometheus custom resource that is exposed in the Rancher UI. - -### How the Prometheus Operator Sets up Metrics Scraping - -The Prometheus Deployment or StatefulSet scrapes metrics, and the configuration of Prometheus is controlled by the Prometheus custom resources. The Prometheus Operator watches for Prometheus and Alertmanager resources, and when they are created, the Prometheus Operator creates a Deployment or StatefulSet for Prometheus or Alertmanager with the user-defined configuration. - -When the Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created, it knows that the scrape configuration needs to be updated in Prometheus. It updates Prometheus by first updating the configuration and rules files in the volumes of Prometheus's Deployment or StatefulSet. Then it calls the Prometheus API to sync the new configuration, resulting in the Prometheus Deployment or StatefulSet to be modified in place. - -### How Kubernetes Component Metrics are Exposed - -Prometheus scrapes metrics from deployments known as [exporters,](https://prometheus.io/docs/instrumenting/exporters/) which export the time series data in a format that Prometheus can ingest. In Prometheus, time series consist of streams of timestamped values belonging to the same metric and the same set of labeled dimensions. - -### Scraping Metrics with PushProx - -Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called PushProx. 
For detailed information on PushProx, refer [here](#how-pushprox-works) and to the above [architecture](#1-architecture-overview) section. - -### Scraping Metrics - -The following Kubernetes components are directly scraped by Prometheus: - -- kubelet* -- ingress-nginx** -- coreDns/kubeDns -- kube-api-server - -\* You can optionally use `hardenedKubelet.enabled` to use a PushProx, but that is not the default. - -** For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. - - -### Scraping Metrics Based on Kubernetes Distribution - -Metrics are scraped differently based on the Kubernetes distribution. For help with terminology, refer [here](#terminology). For details, see the table below: - -
How Metrics are Exposed to Prometheus
-
-| Kubernetes Component | RKE | RKE2 | KubeADM | K3s |
-|-----|-----|-----|-----|-----|
-| kube-controller-manager | rkeControllerManager.enabled | rke2ControllerManager.enabled | kubeAdmControllerManager.enabled | k3sServer.enabled |
-| kube-scheduler | rkeScheduler.enabled | rke2Scheduler.enabled | kubeAdmScheduler.enabled | k3sServer.enabled |
-| etcd | rkeEtcd.enabled | rke2Etcd.enabled | kubeAdmEtcd.enabled | Not available |
-| kube-proxy | rkeProxy.enabled | rke2Proxy.enabled | kubeAdmProxy.enabled | k3sServer.enabled |
-| kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet |
-| ingress-nginx* | Collects metrics directly exposed by kubelet, exposed by rkeIngressNginx.enabled | Collects metrics directly exposed by kubelet, exposed by rke2IngressNginx.enabled | Not available | Not available |
-| coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns |
-| kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server |
-
-\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component.
-
-### Terminology
-
-- **kube-scheduler:** The internal Kubernetes component that uses information in the pod spec to decide on which node to run a pod.
-- **kube-controller-manager:** The internal Kubernetes component that is responsible for node management (detecting if a node fails), pod replication, and endpoint creation.
-- **etcd:** The internal Kubernetes component that is the distributed key/value store which Kubernetes uses for persistent storage of all cluster information.
-- **kube-proxy:** The internal Kubernetes component that watches the API server for changes to pods and services in order to keep the network configuration up to date.
-- **kubelet:** The internal Kubernetes component that watches the API server for pods on a node and makes sure they are running.
-- **ingress-nginx:** An Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer.
-- **coreDns/kubeDns:** The internal Kubernetes component responsible for DNS.
-- **kube-api-server:** The main internal Kubernetes component that is responsible for exposing APIs for the other master components.
diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/rbac/_index.md b/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/rbac/_index.md
deleted file mode 100644
index d656246d98..0000000000
--- a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/rbac/_index.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: Role-Based Access Control
-shortTitle: RBAC
-weight: 2
----
-
-This section describes the expectations for Role-Based Access Control (RBAC) for Prometheus Federator.
-
-As described in the section on [namespaces](../_index.md#namespaces), Prometheus Federator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings:
-
-- ClusterRoleBindings
-- RoleBindings in the Project Release Namespace
-
-On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the binding points to matches a ClusterRole with the name provided under:
-
-- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin`
-- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit`
-- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view`
-
-By default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles).
-
-> **Note:** For Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates.
-
-If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels:
-
-- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}`
-- `helm.cattle.io/project-helm-chart-role-aggregate-from: `
-
-By default, `rancher-project-monitoring`, the underlying chart deployed by Prometheus Federator, creates three default Roles per Project Release Namespace that grant `admin`, `edit`, and `view` users permission to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack, following the principle of least privilege. However, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control the assignment of those RBAC roles to users in their Project Registration namespaces.
\ No newline at end of file
diff --git a/content/rancher/v2.6/en/overview/_index.md b/content/rancher/v2.6/en/overview/_index.md
deleted file mode 100644
index 22d719374d..0000000000
--- a/content/rancher/v2.6/en/overview/_index.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Overview
-weight: 1
----
-
-Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams.
-
-# Run Kubernetes Everywhere
-
-Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS.
Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. - -# Meet IT Requirements - -Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: - -- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. -- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. -- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. - -# Empower DevOps Teams - -Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. - -The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. - -![Platform]({{}}/img/rancher/platform.png) - -# Features of the Rancher API Server - -The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: - -### Authorization and Role-Based Access Control - -- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.6/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. -- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.6/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.6/en/admin-settings/pod-security-policies/) policies. - -### Working with Kubernetes - -- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.6/en/cluster-admin/upgrading-kubernetes) -- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.6/en/helm-charts/) that make it easy to repeatedly deploy applications. -- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.6/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.6/en/k8s-in-rancher/) -- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.6/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. -- **Istio:** Our [integration with Istio]({{}}/rancher/v2.6/en/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. 
Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. - -### Working with Cloud Infrastructure - -- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.6/en/cluster-admin/nodes/) in all clusters. -- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) in the cloud. - -### Cluster Visibility - -- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. -- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. -- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. - -# Editing Downstream Clusters with Rancher - -The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. - -After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/) - -The following table summarizes the options and settings available for each cluster type: - -{{% include file="/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table" %}} diff --git a/content/rancher/v2.6/en/pipelines/_index.md b/content/rancher/v2.6/en/pipelines/_index.md deleted file mode 100644 index 5c9a2e868d..0000000000 --- a/content/rancher/v2.6/en/pipelines/_index.md +++ /dev/null @@ -1,266 +0,0 @@ ---- -title: Pipelines -weight: 10 ---- - -> As of Rancher v2.5, Git-based deployment pipelines are now deprecated. We recommend handling pipelines with Rancher Continuous Delivery powered by [Fleet]({{}}/rancher/v2.6/en/deploy-across-clusters/fleet). To get to Fleet in Rancher, click ☰ > Continuous Delivery. -> ->**Notice:** -> -> - Pipelines in Kubernetes 1.21+ are no longer supported. -> - Fleet does not replace Rancher pipelines; the distinction is that Rancher pipelines are now powered by Fleet. - -Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. - -Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. - -After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: - -- Build your application from code to image. -- Validate your builds. -- Deploy your build images to your cluster. -- Run unit tests. -- Run regression tests. 
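-
-As a rough illustration of how the steps above map onto a pipeline definition, the following is a minimal, hypothetical sketch of a `.rancher-pipeline.yml`. The image names, paths, and commands are placeholders; the individual step types are documented in the configuration reference below:
-
-```yaml
-# Hypothetical sketch: build and test, publish an image, then deploy, in three stages
-stages:
-- name: Build and test
-  steps:
-  - runScriptConfig:
-      image: golang
-      shellScript: go build ./... && go test ./...
-- name: Publish image
-  steps:
-  - publishImageConfig:
-      dockerfilePath: ./Dockerfile
-      buildContext: .
-      tag: repo/my-app:dev
-- name: Deploy
-  steps:
-  - applyYamlConfig:
-      path: ./deployment.yaml
-```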
-
->**Note:** Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of, and is not a replacement for, enterprise-grade Jenkins or other CI tools your team uses.
-
-This section covers the following topics:
-
-- [Concepts](#concepts)
-- [How Pipelines Work](#how-pipelines-work)
-- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines)
-- [Setting up Pipelines](#setting-up-pipelines)
-  - [Configure version control providers](#1-configure-version-control-providers)
-  - [Configure repositories](#2-configure-repositories)
-  - [Configure the pipeline](#3-configure-the-pipeline)
-- [Pipeline Configuration Reference](#pipeline-configuration-reference)
-- [Running your Pipelines](#running-your-pipelines)
-- [Triggering a Pipeline](#triggering-a-pipeline)
-  - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository)
-
-# Concepts
-
-For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.6/en/pipelines/concepts)
-
-# How Pipelines Work
-
-After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently.
-
-A pipeline is configured from a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` file to the repository.
-
-Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g., GitHub, GitLab, or Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.6/en/pipelines/example-repos/) to view some common pipeline deployments.
-
-When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it:
-
- - **Jenkins:**
-
-   The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked.
-
-   >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine.
-
- - **Docker Registry:**
-
-   Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can configure it to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry.
-
- - **Minio:**
-
-   Minio storage is used to store the logs for pipeline executions.
-
-   >**Note:** The managed Jenkins instance works statelessly, so you don't need to worry about its data persistence. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.6/en/pipelines/storage).
-
-# Roles-based Access Control for Pipelines
-
-If you can access a project, you can enable repositories to start building pipelines.
- -Only [administrators]({{}}/rancher/v2.6/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. - -Project members can only configure repositories and pipelines. - -# Setting up Pipelines - -### Prerequisite - -> **Prerequisite:** Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported. -> -> 1. In the upper left corner, click **☰ > Global Settings**. -> 1. Click **Feature Flags**. -> 1. Go to the `legacy` feature flag and click **⋮ > Activate**. - -1. [Configure version control providers](#1-configure-version-control-providers) -2. [Configure repositories](#2-configure-repositories) -3. [Configure the pipeline](#3-configure-the-pipeline) - -### 1. Configure Version Control Providers - -Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider: - -- GitHub -- GitLab -- Bitbucket - -Select your provider's tab below and follow the directions. - -{{% tabs %}} -{{% tab "GitHub" %}} - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Click the **Configuration** tab. -1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to set up an OAuth App in Github. -1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. -1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "GitLab" %}} - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Click the **Configuration** tab. -1. Click **GitLab**. -1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. -1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. -1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. -1. Click **Authenticate**. - ->**Note:** -> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. -> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. -{{% /tab %}} -{{% tab "Bitbucket Cloud" %}} - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. 
In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Click the **Configuration** tab. -1. Click **Bitbucket** and leave **Use Bitbucket Cloud** selected by default. -1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. -1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. -1. Click **Authenticate**. - -{{% /tab %}} -{{% tab "Bitbucket Server" %}} - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Click the **Configuration** tab. -1. Click **Bitbucket** and choose the **Use private Bitbucket Server setup** option. -1. Follow the directions displayed to **Setup a Bitbucket Server application**. -1. Enter the host address of your Bitbucket server installation. -1. Click **Authenticate**. - ->**Note:** -> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: -> -> 1. Setup Rancher server with a certificate from a trusted CA. -> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). -> -{{% /tab %}} -{{% /tabs %}} - -**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. - -### 2. Configure Repositories - -After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Click on **Configure Repositories**. - -1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. - -1. For each repository that you want to set up a pipeline, click on **Enable**. - -1. When you're done enabling all your repositories, click on **Done**. - -**Results:** You have a list of repositories that you can start configuring pipelines for. - -### 3. Configure the Pipeline - -Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. - -1. In the upper left corner, click **☰ > Cluster Management**. -1. 
Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Find the repository that you want to set up a pipeline for. -1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.6/en/pipelines/config) - - * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. - * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. - -1. Select which `branch` to use from the list of branches. - -1. Optional: Set up notifications. - -1. Set up the trigger rules for the pipeline. - -1. Enter a **Timeout** for the pipeline. - -1. When all the stages and steps are configured, click **Done**. - -**Results:** Your pipeline is now configured and ready to be run. - - -# Pipeline Configuration Reference - -Refer to [this page]({{}}/rancher/v2.6/en/pipelines/config) for details on how to configure a pipeline to: - -- Run a script -- Build and publish images -- Publish catalog templates -- Deploy YAML -- Deploy a catalog app - -The configuration reference also covers how to configure: - -- Notifications -- Timeouts -- The rules that trigger a pipeline -- Environment variables -- Secrets - - -# Running your Pipelines - -Run your pipeline for the first time. Find your pipeline and select the vertical **⋮ > Run**. - -During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: - -- `docker-registry` -- `jenkins` -- `minio` - -This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. - -# Triggering a Pipeline - -When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. - -Available Events: - -* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. -* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. -* **Tag**: When a tag is created in the repository, the pipeline is triggered. - -> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.6/en/pipelines/example-repos/). 
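-
-For reference, these events correspond to the `event` trigger conditions that can also be expressed in `.rancher-pipeline.yml`. A minimal, hypothetical sketch follows; the branch and stage names are placeholders, and the full syntax is covered in the [pipeline configuration reference]({{}}/rancher/v2.6/en/pipelines/config):
-
-```yaml
-# Hypothetical sketch: only run this stage for push and tag events on master
-stages:
-- name: Build something
-  when:
-    branch: master
-    event: [ push, tag ]
-  steps:
-  - runScriptConfig:
-      image: busybox
-      shellScript: date -R
-```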
- -### Modifying the Event Triggers for the Repository - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. Find the repository where you want to modify the event triggers. Select the vertical **⋮ > Setting**. -1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. -1. Click **Save**. diff --git a/content/rancher/v2.6/en/pipelines/config/_index.md b/content/rancher/v2.6/en/pipelines/config/_index.md deleted file mode 100644 index 86b10606e4..0000000000 --- a/content/rancher/v2.6/en/pipelines/config/_index.md +++ /dev/null @@ -1,643 +0,0 @@ ---- -title: Pipeline Configuration Reference -weight: 1 ---- - -In this section, you'll learn how to configure pipelines. - -- [Step Types](#step-types) -- [Step Type: Run Script](#step-type-run-script) -- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) -- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) -- [Step Type: Deploy YAML](#step-type-deploy-yaml) -- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) -- [Notifications](#notifications) -- [Timeouts](#timeouts) -- [Triggers and Trigger Rules](#triggers-and-trigger-rules) -- [Environment Variables](#environment-variables) -- [Secrets](#secrets) -- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) -- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) - - [Executor Quota](#executor-quota) - - [Resource Quota for Executors](#resource-quota-for-executors) - - [Custom CA](#custom-ca) -- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) -- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) - -# Step Types - -Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. - -Step types include: - -- [Run Script](#step-type-run-script) -- [Build and Publish Images](#step-type-build-and-publish-images) -- [Publish Catalog Template](#step-type-publish-catalog-template) -- [Deploy YAML](#step-type-deploy-yaml) -- [Deploy Catalog App](#step-type-deploy-catalog-app) - - - -### Configuring Steps By UI - -If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. - -1. Add stages to your pipeline execution by clicking **Add Stage**. - - 1. Enter a **Name** for each stage of your pipeline. - 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. - -1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. - -### Configuring Steps by YAML - -For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. 
- -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - - name: Publish my image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: rancher/rancher:v2.0.0 - # Optionally push to remote registry - pushRemote: true - registry: reg.example.com -``` -# Step Type: Run Script - -The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configuring Script by UI - -1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. - -1. Click **Add**. - -### Configuring Script by YAML -```yaml -# example -stages: -- name: Build something - steps: - - runScriptConfig: - image: golang - shellScript: go build -``` -# Step Type: Build and Publish Images - -The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. - -The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. - -### Configuring Building and Publishing Images by UI -1. From the **Step Type** drop-down, choose **Build and Publish**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | - Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | - Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | - Build Context

(**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). - -### Configuring Building and Publishing Images by YAML - -You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: - -Variable Name | Description -------------------------|------------------------------------------------------------ -PLUGIN_DRY_RUN | Disable docker push -PLUGIN_DEBUG | Docker daemon executes in debug mode -PLUGIN_MIRROR | Docker daemon registry mirror -PLUGIN_INSECURE | Docker daemon allows insecure registries -PLUGIN_BUILD_ARGS | Docker build args, a comma separated list - -
- -```yaml -# This example shows an environment variable being used -# in the Publish Image step. This variable allows you to -# publish an image to an insecure registry: - -stages: -- name: Publish Image - steps: - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - pushRemote: true - registry: example.com - env: - PLUGIN_INSECURE: "true" -``` - -# Step Type: Publish Catalog Template - -The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a git hosted chart repository. It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. - -### Configuring Publishing a Catalog Template by UI - -1. From the **Step Type** drop-down, choose **Publish Catalog Template**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | - Catalog Template Name | The name of the template. For example, wordpress. | - Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | - Protocol | You can choose to publish via HTTP(S) or SSH protocol. | - Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | - Git URL | The Git URL of the chart repository that the template will be published to. | - Git Branch | The Git branch of the chart repository that the template will be published to. | - Author Name | The author name used in the commit message. | - Author Email | The author email used in the commit message. | - - -### Configuring Publishing a Catalog Template by YAML - -You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: - -* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. -* CatalogTemplate: The name of the template. -* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. -* GitUrl: The git URL of the chart repository that the template will be published to. -* GitBranch: The git branch of the chart repository that the template will be published to. -* GitAuthor: The author name used in the commit message. -* GitEmail: The author email used in the commit message. -* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. - -```yaml -# example -stages: -- name: Publish Wordpress Template - steps: - - publishCatalogConfig: - path: ./charts/wordpress/latest - catalogTemplate: wordpress - version: ${CICD_GIT_TAG} - gitUrl: git@github.com:myrepo/charts.git - gitBranch: master - gitAuthor: example-user - gitEmail: user@example.com - envFrom: - - sourceName: publish-keys - sourceKey: DEPLOY_KEY -``` - -# Step Type: Deploy YAML - -This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. - -### Configure Deploying YAML by UI - -1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. - -1. Enter the **YAML Path**, which is the path to the manifest file in the source code. - -1. Click **Add**. - -### Configure Deploying YAML by YAML - -```yaml -# example -stages: -- name: Deploy - steps: - - applyYamlConfig: - path: ./deployment.yaml -``` - -# Step Type :Deploy Catalog App - -The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. - -### Configure Deploying Catalog App by UI - -1. From the **Step Type** drop-down, choose **Deploy Catalog App**. - -1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. - - Field | Description | - ---------|----------| - Catalog | The catalog from which the app template will be used. | - Template Name | The name of the app template. For example, wordpress. | - Template Version | The version of the app template you want to deploy. | - Namespace | The target namespace where you want to deploy the app. | - App Name | The name of the app you want to deploy. | - Answers | Key-value pairs of answers used to deploy the app. | - - -### Configure Deploying Catalog App by YAML - -You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. - -Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: - -* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. -* Version: The version of the template you want to deploy. -* Answers: Key-value pairs of answers used to deploy the app. -* Name: The name of the app you want to deploy. -* TargetNamespace: The target namespace where you want to deploy the app. - -```yaml -# example -stages: -- name: Deploy App - steps: - - applyAppConfig: - catalogTemplate: cattle-global-data:library-mysql - version: 0.3.8 - answers: - persistence.enabled: "false" - name: testmysql - targetNamespace: test -``` - -# Timeouts - -By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. - -### Configuring Timeouts by UI - -Enter a new value in the **Timeout** field. 
- -### Configuring Timeouts by YAML - -In the `timeout` section, enter the timeout value in minutes. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -# timeout in minutes -timeout: 30 -``` - -# Notifications - -You can enable notifications to any notifiers based on the build status of a pipeline. Before enabling notifications, Rancher recommends setting up notifiers so it will be easy to add recipients immediately. - -### Configuring Notifications by UI - -1. Within the **Notification** section, turn on notifications by clicking **Enable**. - -1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. - -1. If you don't have any existing notifiers, Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. - - > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. - -1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. - -### Configuring Notifications by YAML - -In the `notification` section, you will provide the following information: - -* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. - * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. - * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. -* **Condition:** Select which conditions of when you want the notification to be sent. -* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. - -```yaml -# Example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls -notification: - recipients: - - # Recipient - recipient: "#mychannel" - # ID of Notifier - notifier: "c-wdcsr:n-c9pg7" - - recipient: "test@example.com" - notifier: "c-wdcsr:n-lkrhd" - # Select which statuses you want the notification to be sent - condition: ["Failed", "Success", "Changed"] - # Ability to override the default message (Optional) - message: "my-message" -``` - -# Triggers and Trigger Rules - -After you configure a pipeline, you can trigger it using different methods: - -- **Manually:** - - After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. 
- -- **Automatically:** - - When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. - - To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. - -Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: - -- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. - -- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. - -If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. - -Wildcard character (`*`) expansion is supported in `branch` conditions. - -This section covers the following topics: - -- [Configuring pipeline triggers](#configuring-pipeline-triggers) -- [Configuring stage triggers](#configuring-stage-triggers) -- [Configuring step triggers](#configuring-step-triggers) -- [Configuring triggers by YAML](#configuring-triggers-by-yaml) - -### Configuring Pipeline Triggers - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. -1. Click on **Show Advanced Options**. -1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. - - 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. - - 1. **Optional:** Add more branches that trigger a build. - -1. Click **Done**. - -### Configuring Stage Triggers - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. -1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. -1. Click **Show advanced options**. -1. In the **Trigger Rules** section, configure rules to run or skip the stage. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the stage and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the stage. | - | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - -### Configuring Step Triggers - -1. 
In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. -1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. -1. Click **Show advanced options**. -1. In the **Trigger Rules** section, configure rules to run or skip the step. - - 1. Click **Add Rule**. - - 1. Choose the **Type** that triggers the step and enter a value. - - | Type | Value | - | ------ | -------------------------------------------------------------------- | - | Branch | The name of the branch that triggers the step. | - | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | - -1. Click **Save**. - - -### Configuring Triggers by YAML - -```yaml -# example -stages: - - name: Build something - # Conditions for stages - when: - branch: master - event: [ push, pull_request ] - # Multiple steps run concurrently - steps: - - runScriptConfig: - image: busybox - shellScript: date -R - # Conditions for steps - when: - branch: [ master, dev ] - event: push -# branch conditions for the pipeline -branch: - include: [ master, feature/*] - exclude: [ dev ] -``` - -# Environment Variables - -When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. - -### Configuring Environment Variables by UI - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. -1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. -1. Click **Show advanced options**. -1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. -1. Add your environment variable(s) into either the script or file. -1. Click **Save**. - -### Configuring Environment Variables by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} - env: - FIRST_KEY: VALUE - SECOND_KEY: VALUE2 -``` - -# Secrets - -If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.6/en/k8s-in-rancher/secrets/). - -### Prerequisite -Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. -
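-
-A minimal sketch of such a secret is shown below, with an illustrative name, key, and namespace placeholder; the names intentionally match the `sourceName` and `sourceKey` used in the YAML example later in this section:
-
-```yaml
-# Hypothetical example: a secret holding a sensitive value for use in pipeline steps
-apiVersion: v1
-kind: Secret
-metadata:
-  name: my-secret
-  namespace: <pipeline-namespace>   # replace with the namespace where your pipeline build pods run
-type: Opaque
-stringData:
-  secret-key: my-sensitive-value
-```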
- ->**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). - -### Configuring Secrets by UI - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. -1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. -1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. -1. Click **Show advanced options**. -1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. -1. Click **Save**. - -### Configuring Secrets by YAML - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: echo ${ALIAS_ENV} - # environment variables from project secrets - envFrom: - - sourceName: my-secret - sourceKey: secret-key - targetKey: ALIAS_ENV -``` - -# Pipeline Variable Substitution Reference - -For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. - -Variable Name | Description -------------------------|------------------------------------------------------------ -`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). -`CICD_GIT_URL` | URL of the Git repository. -`CICD_GIT_COMMIT` | Git commit ID being executed. -`CICD_GIT_BRANCH` | Git branch of this event. -`CICD_GIT_REF` | Git reference specification of this event. -`CICD_GIT_TAG` | Git tag name, set on tag event. -`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). -`CICD_PIPELINE_ID` | Rancher ID for the pipeline. -`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. -`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. -`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. -`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
-
[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) - -# Global Pipeline Execution Settings - -After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. - -### Changing Pipeline Settings - -> **Prerequisite:** Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported. -> -> 1. In the upper left corner, click **☰ > Global Settings**. -> 1. Click **Feature Flags**. -> 1. Go to the `legacy` feature flag and click **⋮ > Activate**. - -To edit these settings: - -1. In the upper left corner, click **☰ > Cluster Management**. -1. Go to the cluster where you want to configure pipelines and click **Explore**. -1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. -1. In the left navigation bar, click **Legacy > Project > Pipelines**. - -- [Executor Quota](#executor-quota) -- [Resource Quota for Executors](#resource-quota-for-executors) -- [Custom CA](#custom-ca) - -### Executor Quota - -Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. - -### Resource Quota for Executors - -Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. - -Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. - -To configure compute resources for pipeline-step containers: - -You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. - -In a step, you will provide the following information: - -* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. -* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. -* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. -* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. - -```yaml -# example -stages: - - name: Build something - steps: - - runScriptConfig: - image: busybox - shellScript: ls - cpuRequest: 100m - cpuLimit: 1 - memoryRequest:100Mi - memoryLimit: 1Gi - - publishImageConfig: - dockerfilePath: ./Dockerfile - buildContext: . - tag: repo/app:v1 - cpuRequest: 100m - cpuLimit: 1 - memoryRequest:100Mi - memoryLimit: 1Gi -``` - ->**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. 
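To confirm what a running build pod was actually given, you can inspect its containers with kubectl. The namespace and pod name below are placeholders that you would look up while a pipeline execution is in progress; this is a generic kubectl query, not a Rancher-specific command.

```bash
# List build pods, then print each container's resource requests and limits.
kubectl get pods -n <pipeline-namespace>
kubectl get pod <build-pod-name> -n <pipeline-namespace> \
  -o jsonpath='{range .spec.containers[*]}{.name}{"\t"}{.resources}{"\n"}{end}'
```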
- -### Custom CA - -If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. - -1. Click **Edit cacerts**. - -1. Paste in the CA root certificates and click **Save cacerts**. - -**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. - -# Persistent Data for Pipeline Components - -The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.6/en/pipelines/storage) - -# Example rancher-pipeline.yml - -An example pipeline configuration file is on [this page.]({{}}/rancher/v2.6/en/pipelines/example) diff --git a/content/rancher/v2.6/en/pipelines/storage/_index.md b/content/rancher/v2.6/en/pipelines/storage/_index.md deleted file mode 100644 index 5e81c0595a..0000000000 --- a/content/rancher/v2.6/en/pipelines/storage/_index.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: Configuring Persistent Data for Pipeline Components -weight: 600 ---- - -The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. - -This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/how-storage-works/) - ->**Prerequisites (for both parts A and B):** -> ->[Persistent volumes]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) must be available for the cluster. - -### A. Configuring Persistent Data for Docker Registry - -1. Click **☰ > Cluster Management**. -1. Go to the cluster that you created and click **Explore**. -1. Click **Workload**. - -1. Find the `docker-registry` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the dropdown. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} - -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. - -1. Click **Upgrade**. - -### B. Configuring Persistent Data for Minio - -1. Click **☰ > Cluster Management**. -1. Go to the cluster that you created and click **Explore**. -1. Click **Workload**. -1. Go to the `minio` workload and select **⋮ > Edit**. - -1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: - - - **Add Volume > Add a new persistent volume (claim)** - - **Add Volume > Use an existing persistent volume (claim)** - -1. Complete the form that displays to choose a persistent volume for the internal Docker registry. -{{% tabs %}} - -{{% tab "Add a new persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Select a volume claim **Source**: - - - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. - - - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% tab "Use an existing persistent volume" %}} -
-1. Enter a **Name** for the volume claim. - -1. Choose a **Persistent Volume Claim** from the drop-down. - -1. From the **Customize** section, choose the read/write access for the volume. - -1. Click **Define**. - -{{% /tab %}} -{{% /tabs %}} - -1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. - -1. Click **Upgrade**. - -**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/amazon-aws-qs/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/amazon-aws-qs/_index.md deleted file mode 100644 index 8b2c2b1d16..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/amazon-aws-qs/_index.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: Rancher AWS Quick Start Guide -description: Read this step by step Rancher AWS guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 100 ---- -The following steps will quickly deploy a Rancher server on AWS in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Amazon AWS will incur charges. - -- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. -- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. -- [IAM Policy created](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start): Defines the permissions an account attached with this policy has. -- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. - -### Example IAM Policy - -The AWS module just creates an EC2 KeyPair, an EC2 SecurityGroup and an EC2 instance. A simple policy would be: - -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "ec2:*", - "Resource": "*" - } - ] -} -``` - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the AWS folder containing the terraform files by executing `cd quickstart/rancher/aws`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `aws_access_key` - Amazon AWS Access Key - - `aws_secret_key` - Amazon AWS Secret Key - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. 
-See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. -Suggestions include: - - `aws_region` - Amazon AWS region, choose the closest instead of the default (`us-east-1`) - - `prefix` - Prefix for all created resources - - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget - - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/aws`. - -##### Result - -Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -## What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/aws` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/digital-ocean-qs/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/digital-ocean-qs/_index.md deleted file mode 100644 index 2764b2ff6b..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/digital-ocean-qs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Rancher DigitalOcean Quick Start Guide -description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 120 ---- -The following steps will quickly deploy a Rancher server on DigitalOcean in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to DigitalOcean will incur charges. - -- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. 
-- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/rancher/do`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `do_token` - DigitalOcean access key - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. -Suggestions include: - - `do_region` - DigitalOcean region, choose the closest instead of the default (`nyc1`) - - `prefix` - Prefix for all created resources - - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 15 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/do`. - -#### Result - -Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/do` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. 
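For reference, the end-to-end workflow in this guide condenses to the commands below. Editing `terraform.tfvars` to set `do_token` and `rancher_server_admin_password` still happens in your editor of choice; the inline comments only summarize the steps above.

```bash
git clone https://github.com/rancher/quickstart
cd quickstart/rancher/do
mv terraform.tfvars.example terraform.tfvars   # then edit the required variables
terraform init
terraform apply --auto-approve                 # prints rancher_server_url on completion
terraform destroy --auto-approve               # run this when you are done experimenting
```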
diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/google-gcp-qs/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/google-gcp-qs/_index.md deleted file mode 100644 index 8d4970acad..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/google-gcp-qs/_index.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Rancher GCP Quick Start Guide -description: Read this step by step Rancher GCP guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 130 ---- -The following steps will quickly deploy a Rancher server on GCP in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Google GCP will incur charges. - -- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. -- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. -- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the GCP folder containing the terraform files by executing `cd quickstart/rancher/gcp`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `gcp_account_json` - GCP service account file path and file name - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. -Suggestions include: - - `gcp_region` - Google GCP region, choose the closest instead of the default (`us-east4`) - - `gcp_zone` - Google GCP zone, choose the closest instead of the default (`us-east4-a`) - - `prefix` - Prefix for all created resources - - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. 
- - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/gcp`. - -#### Result - -Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.6/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/gcp` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/hetzner-cloud-qs/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/hetzner-cloud-qs/_index.md deleted file mode 100644 index f60d65d397..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/hetzner-cloud-qs/_index.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: Rancher Hetzner Cloud Quick Start Guide -description: Read this step by step Rancher Hetzner Cloud guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 140 ---- -The following steps will quickly deploy a Rancher server on Hetzner Cloud in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Hetzner Cloud will incur charges. - -- [Hetzner Cloud Account](https://www.hetzner.com): You will require an account on Hetzner as this is where the server and cluster will run. -- [Hetzner API Access Key](https://docs.hetzner.cloud/#getting-started): Use these instructions to create a Hetzner Cloud API Key if you don't have one. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to Hetzner. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the Hetzner folder containing the terraform files by executing `cd quickstart/rancher/hcloud`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `hcloud_token` - Hetzner API access key - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. 
-See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Hetzner Quickstart Readme](https://github.com/rancher/quickstart/tree/master/hcloud) for more information. -Suggestions include: - - `prefix` - Prefix for all created resources - - `instance_type` - Instance type, minimum required is `cx21` - - `hcloud_location` - Hetzner Cloud location, choose the closest instead of the default (`fsn1`) - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 15 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/hcloud`. - -#### Result - -Two Kubernetes clusters are deployed into your Hetzner account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/hcloud` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md deleted file mode 100644 index 9b727971c6..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/microsoft-azure-qs/_index.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: Rancher Azure Quick Start Guide -description: Read this step by step Rancher Azure guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. -weight: 115 ---- - -The following steps will quickly deploy a Rancher server on Azure in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - ->**Note** ->Deploying to Microsoft Azure will incur charges. - -- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. -- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. 
-- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. -- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. -- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. - - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the Azure folder containing the terraform files by executing `cd quickstart/rancher/azure`. - -3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. - -4. Edit `terraform.tfvars` and customize the following variables: - - `azure_subscription_id` - Microsoft Azure Subscription ID - - `azure_client_id` - Microsoft Azure Client ID - - `azure_client_secret` - Microsoft Azure Client Secret - - `azure_tenant_id` - Microsoft Azure Tenant ID - - `rancher_server_admin_password` - Admin password for created Rancher server - -5. **Optional:** Modify optional variables within `terraform.tfvars`. -See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. -Suggestions include: - - `azure_location` - Microsoft Azure region, choose the closest instead of the default (`East US`) - - `prefix` - Prefix for all created resources - - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget - - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster - - `windows_admin_password` - The admin password of the windows worker node - -6. Run `terraform init`. - -7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: - - ``` - Apply complete! Resources: 16 added, 0 changed, 0 destroyed. - - Outputs: - - rancher_node_ip = xx.xx.xx.xx - rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io - workload_node_ip = yy.yy.yy.yy - ``` - -8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). -9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/azure`. - -#### Result - -Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.6/en/quick-start-guide/workload). 
- -## Destroying the Environment - -1. From the `quickstart/rancher/azure` folder, execute `terraform destroy --auto-approve`. - -2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md deleted file mode 100644 index 2831579706..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-manual-setup/_index.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Helm CLI Quick Start -weight: 300 ---- - -These instructions capture a quick way to set up a proof-of-concept Rancher installation. - -These instructions assume you have a Linux virtual machine that you will communicate with from your local workstation. Rancher will be installed on the Linux machine. You will need to retrieve the IP address of that machine so that you can access Rancher from your local workstation. Rancher is designed to manage Kubernetes clusters remotely, so any Kubernetes cluster that Rancher manages in the future will also need to be able to reach this IP address. - -We don't recommend installing Rancher locally because it creates a networking problem. Installing Rancher on localhost does not allow Rancher to communicate with downstream Kubernetes clusters, so on localhost you wouldn't be able to test Rancher's cluster provisioning or cluster management functionality. - -Your Linux machine can be anywhere. It could be an Amazon EC2 instance, a Digital Ocean droplet, or an Azure virtual machine, to name a few examples. Other Rancher docs often use 'node' as a generic term for all of these. One possible way to deploy a Linux machine is by setting up an Amazon EC2 instance as shown in [this tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/). - -The full installation requirements are [here]({{}}/rancher/v2.6/en/installation/requirements/). - - -## Install K3s on Linux - -Install a K3s cluster by running this command on the Linux machine: - -``` -curl -sfL https://get.k3s.io | sh -s - server -``` - -Save the IP of the Linux machine. - -## Save the kubeconfig to your workstation - -The kubeconfig file is important for accessing the Kubernetes cluster. Copy the file at `/etc/rancher/k3s/k3s.yaml` from the Linux machine and save it to your local workstation in the directory `~/.kube/config`. One way to do this is by using the `scp` tool and run this command on your local machine: - -{{% tabs %}} -{{% tab "Mac and Linux" %}} - -``` -scp root@:/etc/rancher/k3s/k3s.yaml ~/.kube/config -``` - -{{% /tab %}} -{{% tab "Windows" %}} - -By default, "scp" is not a recognized command, so we need to install a module first. - -In Windows Powershell: - -``` -Find-Module Posh-SSH -Install-Module Posh-SSH - -## Get the remote kubeconfig file -scp root@:/etc/rancher/k3s/k3s.yaml $env:USERPROFILE\.kube\config -``` - -{{% /tab %}} -{{% /tabs %}} - -## Edit the Rancher server URL in the kubeconfig - -In the kubeconfig file, you will need to change the value of the `server` field to `:6443`. The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443. This edit is needed so that when you run Helm or kubectl commands from your local workstation, you will be able to communicate with the Kubernetes cluster that Rancher will be installed on. 
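The change can also be scripted rather than made by hand. The sketch below is for Mac and Linux, where `203.0.113.10` stands in for your Linux machine's IP address; K3s normally writes `server: https://127.0.0.1:6443` into the copied kubeconfig.

```bash
# Point the kubeconfig at the Linux machine instead of localhost, keeping a backup.
sed -i.bak 's|https://127.0.0.1:6443|https://203.0.113.10:6443|' ~/.kube/config

# Verify the workstation can now reach the K3s API server.
kubectl get nodes
```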
- -{{% tabs %}} -{{% tab "Mac and Linux" %}} - -One way to open the kubeconfig file for editing is to use Vim: - -``` -vi ~/.kube/config -``` - -Press `i` to put Vim in insert mode. To save your work, press `Esc`. Then press `:wq` and press `Enter`. - -{{% /tab %}} -{{% tab "Windows" %}} - -In Windows Powershell, you can use `notepad.exe` for editing the kubeconfig file: - -``` -notepad.exe $env:USERPROFILE\.kube\config -``` - -Once edited, either press `ctrl+s` or go to `File > Save` to save your work. - - -{{% /tab %}} -{{% /tabs %}} - -## Install Rancher with Helm - -Then from your local workstation, run the following commands. You will need to have [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) and [helm.](https://helm.sh/docs/intro/install/) installed. - -``` -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest - -kubectl create namespace cattle-system - -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.crds.yaml - -helm repo add jetstack https://charts.jetstack.io - -helm repo update - -helm install cert-manager jetstack/cert-manager \ - --namespace cert-manager \ - --create-namespace \ - --version v1.7.1 - -# Windows Powershell -helm install cert-manager jetstack/cert-manager ` - --namespace cert-manager ` - --create-namespace ` - --version v1.7.1 -``` - -The final command to install Rancher is below. The command requires a domain name that forwards traffic to the Linux machine. For the sake of simplicity in this tutorial, you can use a fake domain name to create your proof-of-concept. An example of a fake domain name would be `.sslip.io`. - -``` -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=.sslip.io \ - --set replicas=1 \ - --set bootstrapPassword= - -# Windows Powershell -helm install rancher rancher-latest/rancher ` - --namespace cattle-system ` - --set hostname=.sslip.io ` - --set replicas=1 ` - --set bootstrapPassword= -``` -``` - -Now if you navigate to `.sslip.io` in a web browser, you should see the Rancher UI. - -To make these instructions simple, we used a fake domain name and self-signed certificates to do this installation. Therefore, you will probably need to add a security exception to your web browser to see the Rancher UI. Note that for production installs, you would need a high-availability setup with a load balancer, a real domain name and real certificates. - -These instructions also left out the full installation requirements and other installation options. If you have any issues with these steps, refer to the full [Helm CLI installation docs.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/) - -To launch new Kubernetes clusters with your new Rancher server, you may need to set up cloud credentials in Rancher. 
For more information, see [Launching Kubernetes clusters with Rancher.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-vagrant/_index.md b/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-vagrant/_index.md deleted file mode 100644 index 9def38cea9..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/deployment/quickstart-vagrant/_index.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: Vagrant Quick Start -weight: 200 ---- -The following steps quickly deploy a Rancher Server with a single node cluster attached. - ->**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). - -## Prerequisites - -- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. -- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. -- At least 4GB of free RAM. - -### Note -- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: - - `vagrant plugin install vagrant-vboxmanage` - - `vagrant plugin install vagrant-vbguest` - -## Getting Started - -1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. - -2. Go into the folder containing the Vagrantfile by executing `cd quickstart/rancher/vagrant`. - -3. **Optional:** Edit `config.yaml` to: - - - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) - - Change the password of the `admin` user for logging into Rancher. (`default_password`) - -4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. - -5. Once provisioning finishes, go to `https://192.168.56.101` in the browser. The default user/password is `admin/adminPassword`. - -**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. - -### What's Next? - -Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). - -## Destroying the Environment - -1. From the `quickstart/rancher/vagrant` folder execute `vagrant destroy -f`. - -2. Wait for the confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/workload/_index.md b/content/rancher/v2.6/en/quick-start-guide/workload/_index.md deleted file mode 100644 index a3be7493b6..0000000000 --- a/content/rancher/v2.6/en/quick-start-guide/workload/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Deploying Workloads -weight: 200 ---- - -These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. 
- -- [Workload with Ingress](./quickstart-deploy-workload-ingress) -- [Workload with NodePort](./quickstart-deploy-workload-nodeport) diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-benchmark-2.6/_index.md b/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-benchmark-2.6/_index.md deleted file mode 100644 index 73d8f57c74..0000000000 --- a/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-benchmark-2.6/_index.md +++ /dev/null @@ -1,3100 +0,0 @@ ---- -title: RKE CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 -weight: 101 -aliases: - - /rancher/v2.6/en/security/hardening-guides/1.6-benchmark-2.6/ ---- - -### RKE CIS v1.6 Kubernetes Benchmark - Rancher v2.6 with Kubernetes v1.18 to v1.23 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). - -#### Overview - -This document is a companion to the [Rancher v2.6 RKE security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: - -| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | -| ----------------------- | --------------- | --------------------- | ------------------- | -| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6 | CIS v1.6 | Kubernetes v1.18 up to v1.23 | - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. 
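As a concrete example of the pattern the audits below follow: most checks read the arguments of the relevant component's container directly on the node, and some post-process live cluster state with kubectl and jq. The jq line here is a generic illustration, not a specific benchmark check.

```bash
# Inspect the arguments RKE passed to a component's container (run on the relevant node).
/bin/ps -ef | grep kube-apiserver | grep -v grep

# Checks that read live cluster state pipe kubectl output through jq.
kubectl get nodes -o json | jq -r '.items[].metadata.name'
```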
- -### Controls -## 1.1 Master Node Configuration Files -### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. - -### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. -All configuration is passed in as arguments at container run time. - -### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - -### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - -### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - -### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - -### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. - -### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. -All configuration is passed in as arguments at container run time. - -### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 - -**Audit:** - -```bash -stat -c permissions=%a -``` - -### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown root:root - -**Audit:** - -```bash -stat -c %U:%G -``` - -### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, -chmod 700 /var/lib/etcd - -**Audit:** - -```bash -stat -c %a /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'700' is equal to '700' -``` - -**Returned Value**: - -```console -700 -``` - -### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). -For example, chown etcd:etcd /var/lib/etcd - -A system service account is required for etcd data directory ownership. -Refer to Rancher's hardening guide for more details on how to configure this ownership. - -**Audit:** - -```bash -stat -c %U:%G /node/var/lib/etcd -``` - -**Expected Result**: - -```console -'etcd:etcd' is present -``` - -**Returned Value**: - -```console -etcd:etcd -``` - -### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. - -### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. - -### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - -### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. -All configuration is passed in as arguments at container run time. - -### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - -### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. -All configuration is passed in as arguments at container run time. - -### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown -R root:root /etc/kubernetes/pki/ - -**Audit Script:** `check_files_owner_in_dir.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to ensure the owner is set to root:root for -# the given directory and all the files in it -# -# inputs: -# $1 = /full/path/to/directory -# -# outputs: -# true/false - -INPUT_DIR=$1 - -if [[ "${INPUT_DIR}" == "" ]]; then - echo "false" - exit -fi - -if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then - echo "false" - exit -fi - -statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) -while read -r statInfoLine; do - f=$(echo ${statInfoLine} | cut -d' ' -f1) - p=$(echo ${statInfoLine} | cut -d' ' -f2) - - if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then - if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "root:root" ]]; then - echo "false" - exit - fi - fi -done <<< "${statInfoLines}" - - -echo "true" -exit - -``` - -**Audit Execution:** - -```bash -./check_files_owner_in_dir.sh /node/etc/kubernetes/ssl -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod -R 644 /etc/kubernetes/pki/*.crt - -**Audit Script:** `check_files_permissions.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` - -**Audit Execution:** - -```bash -./check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod -R 600 /etc/kubernetes/ssl/*key.pem - -**Audit Script:** `check_files_permissions.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` - -**Audit Execution:** - -```bash -./check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -## 1.2 API Server -### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---anonymous-auth=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --basic-auth-file= parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--basic-auth-file' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --token-auth-file= parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--token-auth-file' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --kubelet-https parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-https' is not present OR '--kubelet-https' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the -kubelet client certificate and key parameters as below. ---kubelet-client-certificate= ---kubelet-client-key= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the ---kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. ---kubelet-certificate-authority= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-certificate-authority' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. -One such example could be as below. ---authorization-mode=RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' not have 'AlwaysAllow' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes Node. ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'Node' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes RBAC, -for example: ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'RBAC' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set the desired limits in a configuration file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameters. ---enable-admission-plugins=...,EventRateLimit,... ---admission-control-config-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --enable-admission-plugins parameter, or set it to a -value that does not include AlwaysAdmit. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -AlwaysPullImages. ---enable-admission-plugins=...,AlwaysPullImages,... - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -SecurityContextDeny, unless PodSecurityPolicy is already in place. ---enable-admission-plugins=...,SecurityContextDeny,... 
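-
-The remediation text for 1.2.12 and 1.2.13 refers to the kubeadm-style static pod manifest at /etc/kubernetes/manifests/kube-apiserver.yaml. As a rough sketch only (the surrounding fields and the plugin list below are illustrative and not taken from this benchmark run), the flag would be appended to the kube-apiserver command line like this:
-
-```yaml
-# Illustrative fragment of a kube-apiserver static pod manifest (assumed kubeadm-style layout)
-spec:
-  containers:
-  - name: kube-apiserver
-    command:
-    - kube-apiserver
-    # ...existing flags...
-    - --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,AlwaysPullImages,SecurityContextDeny
-```
-
-On RKE-provisioned clusters the same flag is typically supplied through the cluster configuration (for example `services.kube-api.extra_args`) rather than by editing a static pod manifest directly.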
- -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and ensure that the --disable-admission-plugins parameter is set to a -value that does not include ServiceAccount. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API 
server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --disable-admission-plugins parameter to -ensure it does not include NamespaceLifecycle. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. 
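-
-For illustration only (this object is not part of the benchmark output, and the policy name is a placeholder), a minimal restrictive PodSecurityPolicy could look like the following sketch:
-
-```yaml
-# Hypothetical example of a restrictive PodSecurityPolicy (policy/v1beta1)
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: restricted-example
-spec:
-  privileged: false
-  allowPrivilegeEscalation: false
-  requiredDropCapabilities:
-    - ALL
-  runAsUser:
-    rule: MustRunAsNonRoot
-  seLinux:
-    rule: RunAsAny
-  supplementalGroups:
-    rule: MustRunAs
-    ranges:
-      - min: 1
-        max: 65535
-  fsGroup:
-    rule: MustRunAs
-    ranges:
-      - min: 1
-        max: 65535
-  volumes:
-    - configMap
-    - emptyDir
-    - projected
-    - secret
-    - persistentVolumeClaim
-```
-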
-Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes PodSecurityPolicy: ---enable-admission-plugins=...,PodSecurityPolicy,... -Then restart the API Server. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure 
NodeRestriction plug-in on kubelets. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes NodeRestriction. ---enable-admission-plugins=...,NodeRestriction,... - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file 
/etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and remove the --insecure-bind-address parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--insecure-bind-address' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---insecure-port=0 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'0' is equal to '0' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and either remove the --secure-port parameter or -set it to a different (non-zero) desired port. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -6443 is greater than 0 OR '--secure-port' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.21 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-path parameter to a suitable path and -file where you would like audit logs to be written, for example: ---audit-log-path=/var/log/apiserver/audit.log - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-log-path' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: ---audit-log-maxage=30 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -30 is greater or equal to 30 -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate -value. ---audit-log-maxbackup=10 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -10 is greater or equal to 10 -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. -For example, to set it as 100 MB: ---audit-log-maxsize=100 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -100 is greater or equal to 100 -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -and set the below parameter as appropriate and if needed. -For example, ---request-timeout=300s - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--request-timeout' is not present OR '--request-timeout' is not present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---service-account-lookup=true -Alternatively, you can delete the --service-account-lookup parameter from this file so -that the default takes effect. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-lookup' is not present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --service-account-key-file parameter -to the public key file for service accounts: ---service-account-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-key-file' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate and key file parameters. ---etcd-certfile= ---etcd-keyfile= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the TLS certificate and private key file parameters. ---tls-cert-file= ---tls-private-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the client certificate authority file. ---client-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--client-ca-file' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the etcd certificate authority file parameter. ---etcd-cafile= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-cafile' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure an EncryptionConfig file. -Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--encryption-provider-config' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure an EncryptionConfig file. -In this file, choose aescbc, kms or secretbox as the encryption provider. - -**Audit Script:** `check_encryption_provider_config.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to check that the encryption provider config is set to aescbc -# -# outputs: -# true/false - -# TODO: Figure out the file location from the kube-apiserver commandline args -ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" - -if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then - echo "false" - exit -fi - -for provider in "$@" -do - if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then - echo "true" - exit - fi -done - -echo "false" -exit - -``` - -**Audit Execution:** - -```bash -./check_encryption_provider_config.sh aescbc -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -- aescbc: true -``` - -### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM -_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM -_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM -_SHA384 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -## 1.3 Controller Manager -### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, -for example: ---terminated-pod-gc-threshold=10 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--terminated-pod-gc-threshold' is present -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -### 1.3.2 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node to set the below parameter. ---use-service-account-credentials=true - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'true' is not equal to 'false' -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --service-account-private-key-file parameter -to the private key file for service accounts. 
---service-account-private-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-private-key-file' is present -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --root-ca-file parameter to the certificate bundle file`. ---root-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--root-ca-file' is present -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. ---feature-gates=RotateKubeletServerCertificate=true - -Cluster provisioned by RKE handles certificate rotation directly through RKE. - -### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true -``` - -## 1.4 Scheduler -### 1.4.1 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -``` - -## 2 Etcd Node Configuration Files -### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. 
-Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml -on the master node and set the below parameters. ---cert-file= ---key-file= - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--cert-file' is present AND '--key-file' is present -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---client-cert-auth="true" - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 1 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameters. ---peer-client-file= ---peer-key-file= - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-cert-file' is present AND '--peer-key-file' is present -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 5 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and set the below parameter. ---peer-client-cert-auth=true - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-client-cert-auth' is present OR 'true' is equal to 'true' -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master -node and either remove the --peer-auto-tls parameter or set it to false. ---peer-auto-tls=false - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-auto-tls' is not present OR '--peer-auto-tls' is present -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) - - -**Result:** pass - -**Remediation:** -[Manual test] -Follow the etcd documentation and create a dedicated certificate authority setup for the -etcd service. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the -master node and set the below parameter. ---trusted-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--trusted-ca-file' is present -``` - -**Returned Value**: - -```console -etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 3 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -## 3.1 Authentication and Authorization -### 3.1.1 Client certificate authentication should not be used for users (Manual) - - -**Result:** warn - -**Remediation:** -Alternative mechanisms provided by Kubernetes such as the use of OIDC should be -implemented in place of client certificates. - -## 3.2 Logging -### 3.2.1 Ensure that a minimal audit policy is created (Automated) - - -**Result:** pass - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-policy-file' is present -``` - -**Returned Value**: - -```console -root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json -``` - -### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) - - -**Result:** warn - -**Remediation:** -Consider modification of the audit policy in use on the cluster to include these items, at a -minimum. - -## 4.1 Worker Node Configuration Files -### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - -### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service. -All configuration is passed in as arguments at container run time. - -### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, -chmod 644 $proxykubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present -``` - -**Returned Value**: - -```console -600 -``` - -### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present -``` - -### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node. -For example, -chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on each worker node.
-For example, -chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the following command to modify the file permissions of the ---client-ca-file chmod 644 - -**Audit Script:** `check_cafile_permissions.sh` - -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi - -``` - -**Audit Execution:** - -```bash -./check_cafile_permissions.sh -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the following command to modify the ownership of the --client-ca-file. -chown root:root - -**Audit Script:** `check_cafile_ownership.sh` - -```bash -#!/usr/bin/env bash - -CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') -if test -z $CAFILE; then CAFILE=$kubeletcafile; fi -if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi - -``` - -**Audit Execution:** - -```bash -./check_cafile_ownership.sh -``` - -**Expected Result**: - -```console -'root:root' is not present -``` - -### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chmod 644 /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. - -### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chown root:root /var/lib/kubelet/config.yaml - -Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet. -All configuration is passed in as arguments at container run time. - -## 4.2 Kubelet -### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to -false. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---anonymous-auth=false -Based on your system, restart the kubelet service.
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If -using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---authorization-mode=Webhook -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---client-ca-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set readOnlyPort to 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---read-only-port=0 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a -value other than 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---streaming-connection-idle-timeout=5m -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf -``` - -### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set protectKernelDefaults: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---protect-kernel-defaults=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove the --make-iptables-util-chains argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present OR '' is not present -``` - -### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) - - -**Result:** Not Applicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and remove the --hostname-override argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors - -### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set tlsCertFile to the location -of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile -to the location of the corresponding private key file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameters in KUBELET_CERTIFICATE_ARGS variable. ---tls-cert-file= ---tls-private-key-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present AND '' is not present -``` - -### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to add the line rotateCertificates: true or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS -variable. -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'--rotate-certificates' is not present OR '--rotate-certificates' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf -``` - -### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. ---feature-gates=RotateKubeletServerCertificate=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -Clusters provisioned by RKE handles certificate rotation directly through RKE. - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set TLSCipherSuites: to -TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -or to a subset of these values. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the --tls-cipher-suites parameter as follows, or to a subset of these values. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/kubelet/config.yaml -``` - -**Expected Result**: - -```console -'' is not present -``` - -## 5.1 RBAC and Service Accounts -### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) - - -**Result:** warn - -**Remediation:** -Identify all clusterrolebindings to the cluster-admin role. Check if they are used and -if they need this role or if they could use a role with fewer privileges. -Where possible, first bind users to a lower privileged role and then remove the -clusterrolebinding to the cluster-admin role : -kubectl delete clusterrolebinding [name] - -### 5.1.2 Minimize access to secrets (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove get, list and watch access to secret objects in the cluster. - -### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) - - -**Result:** warn - -**Remediation:** -Where possible replace any use of wildcards in clusterroles and roles with specific -objects or actions. - -### 5.1.4 Minimize access to create pods (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove create access to pod objects in the cluster. - -### 5.1.5 Ensure that default service accounts are not actively used. (Automated) - - -**Result:** pass - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. -Modify the configuration of each default service account to include this value -automountServiceAccountToken: false - -**Audit Script:** `check_for_default_sa.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) -if [[ ${count_sa} -gt 0 ]]; then - echo "false" - exit -fi - -for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") -do - for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') - do - read kind name <<<$(IFS=","; echo $result) - resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) - if [[ ${resource_count} -gt 0 ]]; then - echo "false" - exit - fi - done -done - - -echo "true" -``` - -**Audit Execution:** - -```bash -./check_for_default_sa.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) - - -**Result:** warn - -**Remediation:** -Modify the definition of pods and service accounts which do not need to mount service -account tokens to disable it. - -## 5.2 Pod Security Policies -### 5.2.1 Minimize the admission of privileged containers (Manual) - - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that -the .spec.privileged field is omitted or set to false. 
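The check below is an illustrative sketch and not part of the benchmark's scored audit: it adapts the same `kubectl`/`jq` pattern used by the automated Pod Security Policy checks that follow to count policies that do not admit privileged containers, i.e. whose `.spec.privileged` field is omitted or set to `false`.

```bash
# Count PSPs that do not admit privileged containers; a count greater than 0
# means at least one such policy exists in the cluster.
kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.privileged == null) or (.spec.privileged == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
```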
- -### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostPID field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostIPC field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostNetwork field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.allowPrivilegeEscalation field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.6 Minimize the admission of root containers (Manual) - - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of -UIDs not including 0. - -### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) - - -**Result:** warn - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - -### 5.2.8 Minimize the admission of containers with added capabilities (Manual) - - -**Result:** warn - -**Remediation:** -Ensure that allowedCapabilities is not present in PSPs for the cluster unless -it is set to an empty array. - -### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) - - -**Result:** warn - -**Remediation:** -Review the use of capabilities in applications running on your cluster.
Where a namespace -contains applications which do not require any Linux capabilities to operate, consider adding -a PSP which forbids the admission of containers which do not drop all capabilities. - -## 5.3 Network Policies and CNI -### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) - - -**Result:** warn - -**Remediation:** -If the CNI plugin in use does not support network policies, consideration should be given to -making use of a different plugin, or finding an alternate mechanism for restricting traffic -in the Kubernetes cluster. - -### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create NetworkPolicy objects as you need them. - -**Audit Script:** `check_for_network_policies.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do - policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') - if [[ ${policy_count} -eq 0 ]]; then - echo "false" - exit - fi -done - -echo "true" - -``` - -**Audit Execution:** - -```bash -./check_for_network_policies.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -## 5.4 Secrets Management -### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) - - -**Result:** warn - -**Remediation:** -If possible, rewrite application code to read secrets from mounted secret files, rather than -from environment variables. - -### 5.4.2 Consider external secret storage (Manual) - - -**Result:** warn - -**Remediation:** -Refer to the secrets management options offered by your cloud provider or a third-party -secrets management solution. - -## 5.5 Extensible Admission Control -### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and set up image provenance. - -## 5.7 General Policies -### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) - - -**Result:** warn - -**Remediation:** -Follow the documentation and create namespaces for objects in your deployment as you need -them. - -### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) - - -**Result:** warn - -**Remediation:** -Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you -would need to enable alpha features in the apiserver by passing "--feature- -gates=AllAlpha=true" argument. -Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS -parameter to "--feature-gates=AllAlpha=true" -KUBE_API_ARGS="--feature-gates=AllAlpha=true" -Based on your system, restart the kube-apiserver service. For example: -systemctl restart kube-apiserver.service -Use annotations to enable the docker/default seccomp profile in your pod definitions. An -example is as below: -apiVersion: v1 -kind: Pod -metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default -spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - -### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and apply security contexts to your pods. 
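As a minimal sketch only (the pod name, container name, and image below are placeholders, not taken from this guide), a pod definition that applies a restrictive security context might look like this:

```bash
# Illustrative pod with a restrictive security context; names and image are placeholders.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: secured-pod
spec:
  containers:
  - name: secured-container
    image: registry.example.com/myapp:1.0
    securityContext:
      runAsNonRoot: true
      runAsUser: 1000
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]
EOF
```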
For a -suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker -Containers. - -### 5.7.4 The default namespace should not be used (Automated) - - -**Result:** pass - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - -**Audit Script:** `check_for_default_ns.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) -if [[ ${count} -gt 0 ]]; then - echo "false" - exit -fi - -echo "true" - - -``` - -**Audit Execution:** - -```bash -./check_for_default_ns.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-benchmark-2.6/_index.md b/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-benchmark-2.6/_index.md deleted file mode 100644 index 39c4c3409f..0000000000 --- a/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-benchmark-2.6/_index.md +++ /dev/null @@ -1,3326 +0,0 @@ ---- -title: RKE2 CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 -weight: 101 ---- - -### CIS v1.6 Kubernetes Benchmark - Rancher v2.6 RKE2 with Kubernetes v1.21 up to v1.23 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). - -#### Overview - -This document is a companion to the [Rancher v2.6 RKE2 security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher with RKE2 provisioned clusters, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: - -| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | -| ----------------------- | --------------- | --------------------- | ------------------- | -| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6.5+ | CIS v1.6 | Kubernetes v1.21 up to v1.23 | - -Because Rancher and RKE2 install Kubernetes services as containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -RKE2 launches control plane components as static pods, managed by the kubelet, and uses containerd as the container runtime. 
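For orientation (a sketch assuming the default RKE2 data directory referenced throughout this guide), the rendered static pod manifests and the processes they produce can be inspected as follows:

```bash
# Static pod manifests rendered by RKE2 (default data directory assumed)
ls -l /var/lib/rancher/rke2/agent/pod-manifests/

# Running control plane processes, as inspected by many of the audits in this guide
/bin/ps -ef | grep -E 'kube-apiserver|kube-controller-manager|kube-scheduler|etcd' | grep -v grep
```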
Configuration is defined by arguments passed to the container at the time of initialization or via configuration file. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE2 nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. - -### Controls - ---- -## 1.1 Master Node Configuration Files -### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the -master node. -For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' -``` - -**Expected Result**: - -```console -'644' is equal to '644' -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is present -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' -``` - -**Expected Result**: - -```console -'644' is equal to '644' -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 - -**Audit:** - -```bash -stat -c %a -``` - -### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chown root:root - -**Audit:** - -```bash -stat -c %U:%G -``` - -### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). For example, -chmod 700 /var/lib/etcd - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/server/db/etcd -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -700 -``` - -### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) - - -**Result:** pass - -**Remediation:** -On the etcd server node, get the etcd data directory, passed as an argument --data-dir, -from the below command: -ps -ef | grep etcd -Run the below command (based on the etcd data directory found above). -For example, chown etcd:etcd /var/lib/etcd - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd -``` - -**Expected Result**: - -```console -'etcd:etcd' is present -``` - -**Returned Value**: - -```console -etcd:etcd -``` - -### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 /etc/kubernetes/admin.conf - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/server/cred/admin.kubeconfig -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root /etc/kubernetes/admin.conf - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod 644 scheduler - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root scheduler - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod 644 controllermanager - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown root:root controllermanager - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chown -R root:root /etc/kubernetes/pki/ - -**Audit:** - -```bash -stat -c %U:%G /var/lib/rancher/rke2/server/tls -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) - - -**Result:** warn - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. -For example, -chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt - -**Audit Script:** `check_files_permissions.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` - -**Audit Execution:** - -```bash -./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.crt -``` - -### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the master node. 
-For example, -chmod -R 600 /etc/kubernetes/pki/*.key - -**Audit Script:** `check_files_permissions.sh` - -```bash -#!/usr/bin/env bash - -# This script is used to ensure the file permissions are set to 644 or -# more restrictive for all files in a given directory or a wildcard -# selection of files -# -# inputs: -# $1 = /full/path/to/directory or /path/to/fileswithpattern -# ex: !(*key).pem -# -# $2 (optional) = permission (ex: 600) -# -# outputs: -# true/false - -# Turn on "extended glob" for use of '!' in wildcard -shopt -s extglob - -# Turn off history to avoid surprises when using '!' -set -H - -USER_INPUT=$1 - -if [[ "${USER_INPUT}" == "" ]]; then - echo "false" - exit -fi - - -if [[ -d ${USER_INPUT} ]]; then - PATTERN="${USER_INPUT}/*" -else - PATTERN="${USER_INPUT}" -fi - -PERMISSION="" -if [[ "$2" != "" ]]; then - PERMISSION=$2 -fi - -FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) - -while read -r fileInfo; do - p=$(echo ${fileInfo} | cut -d' ' -f2) - - if [[ "${PERMISSION}" != "" ]]; then - if [[ "$p" != "${PERMISSION}" ]]; then - echo "false" - exit - fi - else - if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then - echo "false" - exit - fi - fi -done <<< "${FILES_PERMISSIONS}" - - -echo "true" -exit - -``` - -**Audit Execution:** - -```bash -./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.key -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -**Returned Value**: - -```console -true -``` - -## 1.2 API Server -### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---anonymous-auth=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and remove the --basic-auth-file= parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--basic-auth-file' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and configure alternate mechanisms for authentication. Then, -edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and remove the --token-auth-file= parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--token-auth-file' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and remove the --kubelet-https parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-https' is not present OR '--kubelet-https' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the -apiserver and kubelets. Then, edit API server pod specification file -/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the -kubelet client certificate and key parameters as below. ---kubelet-client-certificate= ---kubelet-client-key= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and setup the TLS connection between -the apiserver and kubelets. Then, edit the API server pod specification file -/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the ---kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. ---kubelet-certificate-authority= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--kubelet-certificate-authority' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. -One such example could be as below. ---authorization-mode=RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' not have 'AlwaysAllow' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes Node. ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'Node' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --authorization-mode parameter to a value that includes RBAC, -for example: ---authorization-mode=Node,RBAC - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'Node,RBAC' has 'RBAC' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and set the desired limits in a configuration file. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -and set the below parameters. ---enable-admission-plugins=...,EventRateLimit,... ---admission-control-config-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and either remove the --enable-admission-plugins parameter, or set it to a -value that does not include AlwaysAdmit. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NodeRestriction,PodSecurityPolicy' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -AlwaysPullImages. ---enable-admission-plugins=...,AlwaysPullImages,... - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - - -**Result:** warn - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to include -SecurityContextDeny, unless PodSecurityPolicy is already in place. ---enable-admission-plugins=...,SecurityContextDeny,... - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create ServiceAccount objects as per your environment. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and ensure that the --disable-admission-plugins parameter is set to a -value that does not include ServiceAccount. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --disable-admission-plugins parameter to -ensure it does not include NamespaceLifecycle. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create Pod Security Policy objects as per your environment. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes PodSecurityPolicy: ---enable-admission-plugins=...,PodSecurityPolicy,... -Then restart the API Server. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NodeRestriction,PodSecurityPolicy' has 'PodSecurityPolicy' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --enable-admission-plugins parameter to a -value that includes NodeRestriction. ---enable-admission-plugins=...,NodeRestriction,... - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'NodeRestriction,PodSecurityPolicy' has 'NodeRestriction' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and remove the --insecure-bind-address parameter. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--insecure-bind-address' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---insecure-port=0 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'0' is equal to '0' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and either remove the --secure-port parameter or -set it to a different (non-zero) desired port. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -6443 is greater than 0 OR '--secure-port' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.21 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --audit-log-path parameter to a suitable path and -file where you would like audit logs to be written, for example: ---audit-log-path=/var/log/apiserver/audit.log - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--audit-log-path' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
-```
-
-### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
-on the master node and set the --audit-log-maxage parameter to 30, or to an appropriate number of days:
---audit-log-maxage=30
-
-**Audit:**
-
-```bash
-/bin/ps -ef | grep kube-apiserver | grep -v grep
-```
-
-**Expected Result**:
-
-```console
-30 is greater or equal to 30
-```
-
-**Returned Value**:
-
-```console
-root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate -value. ---audit-log-maxbackup=10 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -10 is greater or equal to 10 -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. -For example, to set it as 100 MB: ---audit-log-maxsize=100 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -100 is greater or equal to 100 -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
-```
-
-### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated)
-
-
-**Result:** pass
-
-**Remediation:**
-Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
-and set the --request-timeout parameter as appropriate, if needed.
-For example:
---request-timeout=300s
-
-**Audit:**
-
-```bash
-/bin/ps -ef | grep kube-apiserver | grep -v grep
-```
-
-**Expected Result**:
-
-```console
-'--request-timeout' is not present OR '--request-timeout' is not present
-```
-
-**Returned Value**:
-
-```console
-root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---service-account-lookup=true -Alternatively, you can delete the --service-account-lookup parameter from this file so -that the default takes effect. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-lookup' is not present OR '--service-account-lookup' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --service-account-key-file parameter -to the public key file for service accounts: ---service-account-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-key-file' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the etcd certificate and key file parameters. ---etcd-certfile= ---etcd-keyfile= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-certfile' is present AND '--etcd-keyfile' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the TLS certificate and private key file parameters. ---tls-cert-file= ---tls-private-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection on the apiserver. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the client certificate authority file. ---client-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--client-ca-file' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the etcd certificate authority file parameter. ---etcd-cafile= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--etcd-cafile' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml -on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--encryption-provider-config' is present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) - - -**Result:** pass - -**Remediation:** -Follow the Kubernetes documentation and configure a EncryptionConfig file. -In this file, choose aescbc, kms or secretbox as the encryption provider. - -**Audit:** - -```bash -/bin/sh -c 'if grep aescbc /var/lib/rancher/rke2/server/cred/encryption-config.json; then echo 0; fi' -``` - -**Expected Result**: - -```console -'0' is present -``` - -**Returned Value**: - -```console -{"kind":"EncryptionConfiguration","apiVersion":"apiserver.config.k8s.io/v1","resources":[{"resources":["secrets"],"providers":[{"aescbc":{"keys":[{"name":"aescbckey","secret":"ZP3yNnlCjzcKMBXfmNBmpGbiY+oXne+WP6EM42lZIbE="}]}},{"identity":{}}]}]} 0 -``` - -### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) - - -**Result:** pass - -**Remediation:** -Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml -on the master node and set the below parameter. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM -_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM -_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM -_SHA384 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep -``` - -**Expected Result**: - -```console -'--tls-cipher-suites' is not present -``` - -**Returned Value**: - -```console -root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -## 1.3 Controller Manager -### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, -for example: ---terminated-pod-gc-threshold=10 - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--terminated-pod-gc-threshold' is present -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.3.2 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node to set the below parameter. ---use-service-account-credentials=true - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'true' is not equal to 'false' -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and set the --service-account-private-key-file parameter -to the private key file for service accounts. ---service-account-private-key-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--service-account-private-key-file' is present -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and set the --root-ca-file parameter to the certificate bundle file`. ---root-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'--root-ca-file' is present -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. ---feature-gates=RotateKubeletServerCertificate=true - -### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -/bin/ps -ef | grep kube-controller-manager | grep -v grep -``` - -**Expected Result**: - -```console -'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true -``` - -## 1.4 Scheduler -### 1.4.1 Ensure that the --profiling argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml file -on the master node and set the below parameter. ---profiling=false - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 -``` - -### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) - - -**Result:** pass - -**Remediation:** -Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml -on the master node and ensure the correct value for the --bind-address parameter - -**Audit:** - -```bash -/bin/ps -ef | grep kube-scheduler | grep -v grep -``` - -**Expected Result**: - -```console -'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present -``` - -**Returned Value**: - -```console -root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 -``` - -## 2 Etcd Node Configuration Files -### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Follow the etcd service documentation and configure TLS encryption. -Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml -on the master node and set the below parameters. 
---cert-file= ---key-file= - -### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master -node and set the below parameter. ---client-cert-auth="true" - -### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master -node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--auto-tls' is not present OR '--auto-tls' is not present -``` - -**Returned Value**: - -```console -etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 0 15:07 ? 
00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Follow the etcd service documentation and configure peer TLS encryption as appropriate -for your etcd cluster. -Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the -master node and set the below parameters. ---peer-client-file= ---peer-key-file= - -### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master -node and set the below parameter. ---peer-client-cert-auth=true - -### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) - - -**Result:** pass - -**Remediation:** -Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master -node and either remove the --peer-auto-tls parameter or set it to false. ---peer-auto-tls=false - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--peer-auto-tls' is not present OR '--peer-auto-tls' is not present -``` - -**Returned Value**: - -```console -etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 
--service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 6 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) - - -**Result:** pass - -**Remediation:** -[Manual test] -Follow the etcd documentation and create a dedicated certificate authority setup for the -etcd service. -Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the -master node and set the below parameter. ---trusted-ca-file= - -**Audit:** - -```bash -/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep -``` - -**Expected Result**: - -```console -'--trusted-ca-file' is not present -``` - -**Returned Value**: - -```console -etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key 
--service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 3 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json -``` - -## 3.1 Authentication and Authorization -### 3.1.1 Client certificate authentication should not be used for users (Manual) - - -**Result:** warn - -**Remediation:** -Alternative mechanisms provided by Kubernetes such as the use of OIDC should be -implemented in place of client certificates. - -## 3.2 Logging -### 3.2.1 Ensure that a minimal audit policy is created (Automated) - - -**Result:** pass - -**Remediation:** -Create an audit policy file for your cluster. - -**Audit:** - -```bash -/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file -``` - -**Expected Result**: - -```console -'audit-policy-file' is equal to 'audit-policy-file' -``` - -**Returned Value**: - -```console -audit-policy-file -``` - -### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) - - -**Result:** warn - -**Remediation:** -Consider modification of the audit policy in use on the cluster to include these items, at a -minimum. - -## 4.1 Worker Node Configuration Files -### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -**Audit:** - -```bash -/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' -``` - -**Expected Result**: - -```console -'permissions' is not present -``` - -### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) - - -**Result:** Not Applicable - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - -### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %a /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'permissions' is present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present -``` - -**Returned Value**: - -```console -644 -``` - -### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Manual) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. 
-For example, chown root:root /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'root:root' is not present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present -``` - -**Returned Value**: - -```console -root:root -``` - -### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'644' is equal to '644' -``` - -**Returned Value**: - -```console -permissions=644 -``` - -### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the below command (based on the file location on your system) on the each worker node. -For example, -chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) - - -**Result:** warn - -**Remediation:** -Run the following command to modify the file permissions of the ---client-ca-file chmod 644 - -**Audit:** - -```bash -stat -c %a /var/lib/rancher/rke2/server/tls/server-ca.crt -``` - -### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the following command to modify the ownership of the --client-ca-file. 
-chown root:root - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/client-ca.crt; then stat -c %U:%G /var/lib/rancher/rke2/agent/client-ca.crt; fi' -``` - -**Expected Result**: - -```console -'root:root' is equal to 'root:root' -``` - -**Returned Value**: - -```console -root:root -``` - -### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) - - -**Result:** pass - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'644' is equal to '644' -``` - -**Returned Value**: - -```console -permissions=644 -``` - -### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) - - -**Result:** pass - -**Remediation:** -Run the following command (using the config file location identified in the Audit step) -chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig - -**Audit:** - -```bash -/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' -``` - -**Expected Result**: - -```console -'root:root' is present -``` - -**Returned Value**: - -```console -root:root -``` - -## 4.2 Kubelet -### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to -false. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---anonymous-auth=false -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'false' is equal to 'false' -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ?
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If -using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---authorization-mode=Webhook -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'' is not present -``` - -**Returned Value**: - -```console -apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key -``` - -### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to -the location of the client CA file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_AUTHZ_ARGS variable. ---client-ca-file= -Based on your system, restart the kubelet service. 
For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'' is not present -``` - -**Returned Value**: - -```console -apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key -``` - -### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set readOnlyPort to 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---read-only-port=0 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'0' is equal to '0' AND '--read-only-port' is present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a -value other than 0. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
---streaming-connection-idle-timeout=5m -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--streaming-connection-idle-timeout' is not present OR '--streaming-connection-idle-timeout' is present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set protectKernelDefaults: true. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. ---protect-kernel-defaults=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'' is not present -``` - -**Returned Value**: - -```console -apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key -``` - -### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. 
-If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove the --make-iptables-util-chains argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--make-iptables-util-chains' is not present OR '--make-iptables-util-chains' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) - - -**Result:** warn - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and remove the --hostname-override argument from the -KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--event-qps' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set tlsCertFile to the location -of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile -to the location of the corresponding private key file. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the below parameters in KUBELET_CERTIFICATE_ARGS variable. ---tls-cert-file= ---tls-private-key-file= -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--tls-cert-file' is present AND '--tls-private-key-file' is present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to add the line rotateCertificates: true or -remove it altogether to use the default value. -If using command line arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS -variable. -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--rotate-certificates' is not present OR '--rotate-certificates' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) - - -**Result:** pass - -**Remediation:** -Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. ---feature-gates=RotateKubeletServerCertificate=true -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'RotateKubeletServerCertificate' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) - - -**Result:** pass - -**Remediation:** -If using a Kubelet config file, edit the file to set TLSCipherSuites: to -TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -or to a subset of these values. -If using executable arguments, edit the kubelet service file -/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and -set the --tls-cipher-suites parameter as follows, or to a subset of these values. ---tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 -Based on your system, restart the kubelet service. For example: -systemctl daemon-reload -systemctl restart kubelet.service - -**Audit:** - -```bash -/bin/ps -fC kubelet -``` - -**Audit Config:** - -```bash -/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig -``` - -**Expected Result**: - -```console -'--tls-cipher-suites' is not present -``` - -**Returned Value**: - -```console -UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key -``` - -## 5.1 RBAC and Service Accounts -### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) - - -**Result:** warn - -**Remediation:** -Identify all clusterrolebindings to the cluster-admin role. Check if they are used and -if they need this role or if they could use a role with fewer privileges. -Where possible, first bind users to a lower privileged role and then remove the -clusterrolebinding to the cluster-admin role : -kubectl delete clusterrolebinding [name] - -### 5.1.2 Minimize access to secrets (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove get, list and watch access to secret objects in the cluster. - -### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) - - -**Result:** warn - -**Remediation:** -Where possible replace any use of wildcards in clusterroles and roles with specific -objects or actions. - -### 5.1.4 Minimize access to create pods (Manual) - - -**Result:** warn - -**Remediation:** -Where possible, remove create access to pod objects in the cluster. - -### 5.1.5 Ensure that default service accounts are not actively used. (Automated) - - -**Result:** pass - -**Remediation:** -Create explicit service accounts wherever a Kubernetes workload requires specific access -to the Kubernetes API server. 
-Modify the configuration of each default service account to include this value -automountServiceAccountToken: false - -**Audit Script:** `check_for_default_sa.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) -if [[ ${count_sa} -gt 0 ]]; then - echo "false" - exit -fi - -for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") -do - for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') - do - read kind name <<<$(IFS=","; echo $result) - resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) - if [[ ${resource_count} -gt 0 ]]; then - echo "false" - exit - fi - done -done - - -echo "true" -``` - -**Audit Execution:** - -```bash -./check_for_default_sa.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) - - -**Result:** warn - -**Remediation:** -Modify the definition of pods and service accounts which do not need to mount service -account tokens to disable it. - -## 5.2 Pod Security Policies -### 5.2.1 Minimize the admission of privileged containers (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that -the .spec.privileged field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp global-restricted-psp -o json | jq -r '.spec.runAsUser.rule' -``` - -**Expected Result**: - -```console -'MustRunAsNonRoot' is equal to 'MustRunAsNonRoot' -``` - -**Returned Value**: - -```console -MustRunAsNonRoot -``` - -### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostPID field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostIPC field is omitted or set to false. 
- -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.hostNetwork field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.allowPrivilegeEscalation field is omitted or set to false. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.6 Minimize the admission of root containers (Automated) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of -UIDs not including 0. - -**Audit:** - -```bash -kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) - - -**Result:** pass - -**Remediation:** -Create a PSP as described in the Kubernetes documentation, ensuring that the -.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. - -**Audit:** - -```bash -kubectl get psp global-restricted-psp -o json | jq -r .spec.requiredDropCapabilities[] -``` - -**Expected Result**: - -```console -'ALL' is equal to 'ALL' -``` - -**Returned Value**: - -```console -ALL -``` - -### 5.2.8 Minimize the admission of containers with added capabilities (Manual) - - -**Result:** warn - -**Remediation:** -Ensure that allowedCapabilities is not present in PSPs for the cluster unless -it is set to an empty array. - -### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) - - -**Result:** warn - -**Remediation:** -Review the use of capabilites in applications runnning on your cluster. Where a namespace -contains applicaions which do not require any Linux capabities to operate consider adding -a PSP which forbids the admission of containers which do not drop all capabilities. 
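
Taken together, the checks in section 5.2 describe the shape of the restricted policy the benchmark expects. The following is a minimal illustrative sketch of a PodSecurityPolicy that would satisfy checks 5.2.1 through 5.2.7; the name and volume list are placeholders, and this is not the exact `global-restricted-psp` object deployed on a hardened RKE2 cluster, so compare it against the policy actually installed before relying on it.

```yaml
# Illustrative only: a restricted PodSecurityPolicy consistent with checks 5.2.1 - 5.2.7.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-restricted-psp      # placeholder name
spec:
  privileged: false                 # 5.2.1 no privileged containers
  allowPrivilegeEscalation: false   # 5.2.5
  hostPID: false                    # 5.2.2
  hostIPC: false                    # 5.2.3
  hostNetwork: false                # 5.2.4
  requiredDropCapabilities:
    - ALL                           # 5.2.7 drops NET_RAW along with everything else
  runAsUser:
    rule: MustRunAsNonRoot          # 5.2.6 no root containers
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  volumes:                          # placeholder volume whitelist
    - configMap
    - emptyDir
    - projected
    - secret
    - downwardAPI
    - persistentVolumeClaim
```

Leaving `allowedCapabilities` out entirely also keeps such a policy aligned with checks 5.2.8 and 5.2.9.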
- -## 5.3 Network Policies and CNI -### 5.3.1 Ensure that the CNI in use supports Network Policies (Automated) - - -**Result:** pass - -**Remediation:** -If the CNI plugin in use does not support network policies, consideration should be given to -making use of a different plugin, or finding an alternate mechanism for restricting traffic -in the Kubernetes cluster. - -**Audit:** - -```bash -kubectl get pods -n kube-system -l k8s-app=canal -o json | jq .items[] | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' -``` - -**Expected Result**: - -```console -1 is greater than 0 -``` - -**Returned Value**: - -```console ---count=1 -``` - -### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) - - -**Result:** pass - -**Remediation:** -Follow the documentation and create NetworkPolicy objects as you need them. - -**Audit Script:** `check_for_rke2_network_policies.sh` - -```bash -#!/bin/bash - -set -eE - -handle_error() { - echo "false" -} - -trap 'handle_error' ERR - -for namespace in kube-system kube-public default; do - policy_count=$(/var/lib/rancher/rke2/bin/kubectl get networkpolicy -n ${namespace} -o json | jq -r '.items | length') - if [ ${policy_count} -eq 0 ]; then - echo "false" - exit - fi -done - -echo "true" - -``` - -**Audit Execution:** - -```bash -./check_for_rke2_network_policies.sh -``` - -**Expected Result**: - -```console -'true' is equal to 'true' -``` - -**Returned Value**: - -```console -true -``` - -## 5.4 Secrets Management -### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) - - -**Result:** warn - -**Remediation:** -if possible, rewrite application code to read secrets from mounted secret files, rather than -from environment variables. - -### 5.4.2 Consider external secret storage (Manual) - - -**Result:** warn - -**Remediation:** -Refer to the secrets management options offered by your cloud provider or a third-party -secrets management solution. - -## 5.5 Extensible Admission Control -### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and setup image provenance. - -## 5.6 The v1.5.1 guide skips 5.6 and goes from 5.5 to 5.7. We are including it here merely for explanation. -## 5.7 General Policies -### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) - - -**Result:** warn - -**Remediation:** -Follow the documentation and create namespaces for objects in your deployment as you need -them. - -### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) - - -**Result:** warn - -**Remediation:** -Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you -would need to enable alpha features in the apiserver by passing "--feature- -gates=AllAlpha=true" argument. -Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS -parameter to "--feature-gates=AllAlpha=true" -KUBE_API_ARGS="--feature-gates=AllAlpha=true" -Based on your system, restart the kube-apiserver service. For example: -systemctl restart kube-apiserver.service -Use annotations to enable the docker/default seccomp profile in your pod definitions. 
An -example is as below: -apiVersion: v1 -kind: Pod -metadata: - name: trustworthy-pod - annotations: - seccomp.security.alpha.kubernetes.io/pod: docker/default -spec: - containers: - - name: trustworthy-container - image: sotrustworthy:latest - -### 5.7.3 Apply Security Context to Your Pods and Containers (Automated) - - -**Result:** warn - -**Remediation:** -Follow the Kubernetes documentation and apply security contexts to your pods. For a -suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker -Containers. - -### 5.7.4 The default namespace should not be used (Manual) - - -**Result:** warn - -**Remediation:** -Ensure that namespaces are created to allow for appropriate segregation of Kubernetes -resources and that all new resources are created in a specific namespace. - diff --git a/content/rke/_index.md b/content/rke/_index.md deleted file mode 100644 index e7b5e9c8e4..0000000000 --- a/content/rke/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: RKE -weight: 1 -showBreadcrumb: false ---- diff --git a/content/rke/latest/_index.md b/content/rke/latest/_index.md deleted file mode 100644 index f0de8adf0b..0000000000 --- a/content/rke/latest/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: RKE -showBreadcrumb: false ---- diff --git a/content/rke/latest/en/_index.md b/content/rke/latest/en/_index.md deleted file mode 100644 index 9ff72d7c3a..0000000000 --- a/content/rke/latest/en/_index.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Overview of RKE -shortTitle: RKE -description: RKE solves Kubernetes installation complexity. With RKE, Kubernetes installation is simplified, regardless of what OSs and platforms you’re running. -weight: 1 ---- - -Rancher Kubernetes Engine (RKE) is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. It works on bare-metal and virtualized servers. RKE solves the problem of installation complexity, a common issue in the Kubernetes community. With RKE, the installation and operation of Kubernetes is both simplified and easily automated, and it's entirely independent of the operating system and platform you're running. As long as you can run a supported version of Docker, you can deploy and run Kubernetes with RKE. diff --git a/content/rke/latest/en/cert-mgmt/_index.md b/content/rke/latest/en/cert-mgmt/_index.md deleted file mode 100644 index 5ca6b8301f..0000000000 --- a/content/rke/latest/en/cert-mgmt/_index.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Certificate Management -weight: 150 ---- - -_Available as of v0.2.0_ - -> **Note:** This is not "TLS Certificates management in Kubernetes". Refer the [Kubernetes documentation](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) and RKE [cluster.yaml example]({{}}/rke/latest/en/example-yamls/) for more details. - -Certificates are an important part of Kubernetes clusters and are used for all Kubernetes cluster components. RKE has a `rke cert` command to help work with certificates. 
- -* [Ability to generate certificate sign requests for the Kubernetes components](#generating-certificate-signing-requests-csrs-and-keys) -* [Rotate Auto-Generated Certificates](#certificate-rotation) - -## Generating Certificate Signing Requests (CSRs) and Keys - -If you want to create and sign the certificates by a real Certificate Authority (CA), you can use RKE to [generate a set of Certificate Signing Requests (CSRs) and keys]({{}}/rke/latest/en/installation/certs/#generating-certificate-signing-requests-csrs-and-keys). - -You can use the CSRs and keys to sign the certificates by a real CA. After the certificates are signed, these custom certificates can be used by RKE to as [custom certificates]({{}}/rke/latest/en/installation/certs/) for the Kubernetes cluster. - -## Certificate Rotation - -By default, Kubernetes clusters require certificates and RKE will automatically generate certificates for the clusters. Rotating these certificates are important before the certificates expire as well as if a certificate is compromised. - -After the certificates are rotated, the Kubernetes components are automatically restarted. Certificates can be rotated for the following services: - -- etcd -- kubelet (node certificate) -- kubelet (serving certificate, if [enabled]({{}}/rke/latest/en/config-options/services/#kubelet-options)) -- kube-apiserver -- kube-proxy -- kube-scheduler -- kube-controller-manager - -RKE has the ability to rotate the auto-generated certificates with some simple commands: - -* Rotating all service certificates while using the same CA -* Rotating a certificate on an individual service while using the same CA -* Rotating the CA and all service certificates - -Whenever you're trying to rotate certificates, the `cluster.yml` that was used to deploy the Kubernetes cluster is required. You can reference a different location for this file by using the `--config` option when running `rke cert rotate`. - -### Rotating all Service Certificates while using the same CA - -To rotate the service certificates for all the Kubernetes services, run the following command, i.e. `rke cert rotate`. After all the service certificates are rotated, these services will automatically be restarted to start using the new certificate. - -``` -$ rke cert rotate -INFO[0000] Initiating Kubernetes cluster -INFO[0000] Rotating Kubernetes cluster certificates -INFO[0000] [certificates] Generating Kubernetes API server certificates -INFO[0000] [certificates] Generating Kube Controller certificates -INFO[0000] [certificates] Generating Kube Scheduler certificates -INFO[0001] [certificates] Generating Kube Proxy certificates -INFO[0001] [certificates] Generating Node certificate -INFO[0001] [certificates] Generating admin certificates and kubeconfig -INFO[0001] [certificates] Generating Kubernetes API server proxy client certificates -INFO[0001] [certificates] Generating etcd-xxxxx certificate and key -INFO[0001] [certificates] Generating etcd-yyyyy certificate and key -INFO[0002] [certificates] Generating etcd-zzzzz certificate and key -INFO[0002] Successfully Deployed state file at [./cluster.rkestate] -INFO[0002] Rebuilding Kubernetes cluster with rotated certificates -..... -INFO[0050] [worker] Successfully restarted Worker Plane.. -``` - -### Rotating a Certificate on an Individual Service while using the same CA - -To rotate the certificate for an individual Kubernetes service, use the `--service` option when rotating certificates to specify the service. 
After the specified Kubernetes service has had its certificate rotated, it is automatically restarted to start using the new certificate. - -Example of rotating the certificate for only the `kubelet`: - -``` -$ rke cert rotate --service kubelet -INFO[0000] Initiating Kubernetes cluster -INFO[0000] Rotating Kubernetes cluster certificates -INFO[0000] [certificates] Generating Node certificate -INFO[0000] Successfully Deployed state file at [./cluster.rkestate] -INFO[0000] Rebuilding Kubernetes cluster with rotated certificates -..... -INFO[0033] [worker] Successfully restarted Worker Plane.. -``` - -### Rotating the CA and all service certificates - -If the CA certificate needs to be rotated, you are required to rotate all the services certificates as they need to be signed with the newly rotated CA certificate. To include rotating the CA with the service certificates, add the `--rotate-ca` option. After the CA and all the service certificates are rotated, these services will automatically be restarted to start using the new certificate. - -Rotating the CA certificate will result in restarting other system pods, that will also use the new CA certificate. This includes: - -- Networking pods (canal, calico, flannel, and weave) -- Ingress Controller pods -- KubeDNS pods - -``` -$ rke cert rotate --rotate-ca -INFO[0000] Initiating Kubernetes cluster -INFO[0000] Rotating Kubernetes cluster certificates -INFO[0000] [certificates] Generating CA kubernetes certificates -INFO[0000] [certificates] Generating Kubernetes API server aggregation layer requestheader client CA certificates -INFO[0000] [certificates] Generating Kubernetes API server certificates -INFO[0000] [certificates] Generating Kube Controller certificates -INFO[0000] [certificates] Generating Kube Scheduler certificates -INFO[0000] [certificates] Generating Kube Proxy certificates -INFO[0000] [certificates] Generating Node certificate -INFO[0001] [certificates] Generating admin certificates and kubeconfig -INFO[0001] [certificates] Generating Kubernetes API server proxy client certificates -INFO[0001] [certificates] Generating etcd-xxxxx certificate and key -INFO[0001] [certificates] Generating etcd-yyyyy certificate and key -INFO[0001] [certificates] Generating etcd-zzzzz certificate and key -INFO[0001] Successfully Deployed state file at [./cluster.rkestate] -INFO[0001] Rebuilding Kubernetes cluster with rotated certificates -``` diff --git a/content/rke/latest/en/config-options/_index.md b/content/rke/latest/en/config-options/_index.md deleted file mode 100644 index 3fe0a5ab2e..0000000000 --- a/content/rke/latest/en/config-options/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: Kubernetes Configuration Options -description: There are a lot of different Kubernetes Configuration options you can choose from when setting up your cluster.yml for RKE -weight: 200 ---- - -When setting up your `cluster.yml` for RKE, there are a lot of different options that can be configured to control the behavior of how RKE launches Kubernetes. - -There are several options that can be configured in cluster configuration option. There are several [example yamls]({{}}/rke/latest/en/example-yamls/) that contain all the options. 
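Before diving into the individual sections below, here is a minimal, hedged sketch of a `cluster.yml` that touches several of the cluster-level options covered on this page (addresses, paths, and names are placeholders):

```yaml
# Illustrative cluster.yml; each option is explained in its own section below
cluster_name: mycluster          # written into the generated kubeconfig
ignore_docker_version: true      # skip the Docker version check on hosts
ssh_key_path: ~/.ssh/test        # cluster-level SSH key; nodes can override it
ssh_agent_auth: false            # set to true to authenticate via a local ssh-agent
nodes:
  - address: 1.1.1.1             # placeholder node
    user: root
    role: [controlplane, worker, etcd]
```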
- -### Configuring Nodes -* [Nodes]({{}}/rke/latest/en/config-options/nodes/) -* [Ignoring unsupported Docker versions](#supported-docker-versions) -* [Private Registries]({{}}/rke/latest/en/config-options/private-registries/) -* [Cluster Level SSH Key Path](#cluster-level-ssh-key-path) -* [SSH Agent](#ssh-agent) -* [Bastion Host]({{}}/rke/latest/en/config-options/bastion-host/) - -### Configuring Kubernetes Cluster -* [Cluster Name](#cluster-name) -* [Kubernetes Version](#kubernetes-version) -* [Prefix Path](#prefix-path) -* [System Images]({{}}/rke/latest/en/config-options/system-images/) -* [Services]({{}}/rke/latest/en/config-options/services/) -* [Extra Args and Binds and Environment Variables]({{}}/rke/latest/en/config-options/services/services-extras/) -* [External Etcd]({{}}/rke/latest/en/config-options/services/external-etcd/) -* [Authentication]({{}}/rke/latest/en/config-options/authentication/) -* [Authorization]({{}}/rke/latest/en/config-options/authorization/) -* [Rate Limiting]({{}}/rke/latest/en/config-options/rate-limiting/) -* [Cloud Providers]({{}}/rke/latest/en/config-options/cloud-providers/) -* [Audit Log]({{}}/rke/latest/en/config-options/audit-log) -* [Add-ons]({{}}/rke/latest/en/config-options/add-ons/) - * [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/) - * [DNS providers]({{}}/rke/latest/en/config-options/add-ons/dns/) - * [Ingress Controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) - * [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/) - * [User-Defined Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) - * [Add-ons Job Timeout](#add-ons-job-timeout) - - -## Cluster Level Options - -### Cluster Name - -By default, the name of your cluster will be `local`. If you want a different name, you would use the `cluster_name` directive to change the name of your cluster. The name will be set in your cluster's generated kubeconfig file. - -```yaml -cluster_name: mycluster -``` - -### Supported Docker Versions - -By default, RKE will check the installed Docker version on all hosts and fail with an error if the version is not supported by Kubernetes. The list of supported Docker versions is set specifically for each Kubernetes version in kontainer-driver-metadata depending on the RKE version used, as shown below. To override this behavior, set this option to `true`. Refer to the following: - -- For RKE v1.3.x, see this [link](https://github.com/rancher/kontainer-driver-metadata/blob/release-v2.6/rke/k8s_docker_info.go). -- For RKE v1.2.x, see this [link](https://github.com/rancher/kontainer-driver-metadata/blob/release-v2.5/rke/k8s_docker_info.go). - -The default value is `false`. - -```yaml -ignore_docker_version: true -``` - -### Kubernetes Version - -For information on upgrading Kubernetes, refer to the [upgrade section.]({{}}/rke/latest/en/upgrades/) - -Rolling back to previous Kubernetes versions is not supported. - -### Prefix Path - -For some operating systems including ROS, and CoreOS, RKE stores its resources to a different prefix path, this prefix path is by default for these operating systems is: -``` -/opt/rke -``` -So `/etc/kubernetes` will be stored in `/opt/rke/etc/kubernetes` and `/var/lib/etcd` will be stored in `/opt/rke/var/lib/etcd` etc. 
- -To change the default prefix path for any cluster, you can use the following option in the cluster configuration file `cluster.yml`: -``` -prefix_path: /opt/custom_path -``` - -### Cluster Level SSH Key Path - -RKE connects to host(s) using `ssh`. Typically, each node will have an independent path for each ssh key, i.e. `ssh_key_path`, in the `nodes` section, but if you have a SSH key that is able to access **all** hosts in your cluster configuration file, you can set the path to that ssh key at the top level. Otherwise, you would set the ssh key path in the [nodes]({{}}/rke/latest/en/config-options/nodes/). - -If ssh key paths are defined at the cluster level and at the node level, the node-level key will take precedence. - -```yaml -ssh_key_path: ~/.ssh/test -``` - -### SSH Agent - -RKE supports using ssh connection configuration from a local ssh agent. The default value for this option is `false`. If you want to set using a local ssh agent, you would set this to `true`. - -```yaml -ssh_agent_auth: true -``` - -If you want to use an SSH private key with a passphrase, you will need to add your key to `ssh-agent` and have the environment variable `SSH_AUTH_SOCK` configured. - -``` -$ eval "$(ssh-agent -s)" -Agent pid 3975 -$ ssh-add /home/user/.ssh/id_rsa -Enter passphrase for /home/user/.ssh/id_rsa: -Identity added: /home/user/.ssh/id_rsa (/home/user/.ssh/id_rsa) -$ echo $SSH_AUTH_SOCK -/tmp/ssh-118TMqxrXsEx/agent.3974 -``` - -### Add-ons Job Timeout - -You can define [add-ons]({{}}/rke/latest/en/config-options/add-ons/) to be deployed after the Kubernetes cluster comes up, which uses Kubernetes [jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/). RKE will stop attempting to retrieve the job status after the timeout, which is in seconds. The default timeout value is `30` seconds. - -### cri-dockerd - -Kubernetes will remove code in the kubelet that interacts with Docker (dockershim) in a future Kubernetes release. For more information, see [Dockershim Deprecation FAQ: When will dockershim be removed?](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). The component that replaces this code is called `cri-dockerd` and can be enabled using the following configuration: - -``` -enable_cri_dockerd: true -``` diff --git a/content/rke/latest/en/config-options/add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/_index.md deleted file mode 100644 index e449e079d7..0000000000 --- a/content/rke/latest/en/config-options/add-ons/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Add-Ons -weight: 260 ---- - -RKE supports configuring pluggable add-ons in the cluster YML. Add-ons are used to deploy several cluster components including: - -* [Network plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/) -* [Ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) -* [DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) -* [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/) - -These add-ons require images that can be found under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each add-on, but these can be overridden by changing the image tag in `system_images`. 
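As a concrete illustration, overriding a single add-on image might look like the sketch below; the key name and tag are illustrative, and the exact image list for your RKE version can be printed with `rke config --system-images` (assumed to be available in your RKE release):

```yaml
system_images:
  # illustrative override of the ingress controller add-on image tag
  ingress: rancher/nginx-ingress-controller:<tag-for-your-rke-version>
```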
- -There are a few things worth noting: - -* In addition to these pluggable add-ons, you can specify an add-on that you want deployed after the cluster deployment is complete. -* As of v0.1.8, RKE will update an add-on if it is the same name. -* Before v0.1.8, update any add-ons by using `kubectl edit`. - -- [Critical and Non-Critical Add-ons](#critical-and-non-critical-add-ons) -- [Add-on Deployment Jobs](#add-on-deployment-jobs) -- [Add-on Placement](#add-on-placement) -- [Tolerations](#tolerations) - -# Critical and Non-Critical Add-ons - -As of version v0.1.7, add-ons are split into two categories: - -- **Critical add-ons:** If these add-ons fail to deploy for any reason, RKE will error out. All system add-ons, such as the [network plug-in]({{}}/rke/latest/en/config-options/add-ons/network-plugins/), KubeDNS, and [ingress controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/), are considered critical. - -- **Non-critical add-ons:** If these add-ons fail to deploy, RKE will only log a warning and continue deploying any other add-ons. [User-defined add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) are considered non-critical. - -# Add-on Deployment Jobs - -RKE uses Kubernetes jobs to deploy add-ons. In some cases, add-ons deployment takes longer than expected. As of with version v0.1.7, RKE provides an option to control the job check timeout in seconds. This timeout is set at the cluster level. - -```yaml -addon_job_timeout: 30 -``` - -# Add-on Placement - -_Applies to v0.2.3 and higher_ - -| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | -| ------------------ | ------------------------------------------ | ------------ | ----------- | -| Calico | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`
- `NoExecute:Exists`<br/>- `CriticalAddonsOnly:Exists` |
-| Flannel | `beta.kubernetes.io/os:NotIn:windows` | none | - `operator:Exists` |
-| Canal | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`<br/>- `NoExecute:Exists`<br/>- `CriticalAddonsOnly:Exists` |
-| Weave | `beta.kubernetes.io/os:NotIn:windows` | none | - `NoSchedule:Exists`<br/>- `NoExecute:Exists` |
-| CoreDNS | `node-role.kubernetes.io/worker:Exists` | `beta.kubernetes.io/os:linux` | - `NoSchedule:Exists`<br/>- `NoExecute:Exists`<br/>- `CriticalAddonsOnly:Exists` |
-| kube-dns | - `beta.kubernetes.io/os:NotIn:windows`<br/>- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`<br/>- `NoExecute:Exists`<br/>- `CriticalAddonsOnly:Exists` |
-| nginx-ingress | - `beta.kubernetes.io/os:NotIn:windows`<br/>- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`<br/>- `NoExecute:Exists` |
-| metrics-server | - `beta.kubernetes.io/os:NotIn:windows`<br/>- `node-role.kubernetes.io/worker` `Exists` | none | - `NoSchedule:Exists`
- `NoExecute:Exists` | - -# Tolerations - -_Available as of v1.2.4_ - -Tolerations can be configured per add-on and apply to Deployment resources. The configured tolerations will replace the existing tolerations so make sure you configure all the tolerations you need. See the specific add-on doc pages for more information. diff --git a/content/rke/latest/en/config-options/add-ons/dns/_index.md b/content/rke/latest/en/config-options/add-ons/dns/_index.md deleted file mode 100644 index 2c37998036..0000000000 --- a/content/rke/latest/en/config-options/add-ons/dns/_index.md +++ /dev/null @@ -1,268 +0,0 @@ ---- -title: DNS providers -weight: 262 ---- - -- [Available DNS Providers](#available-dns-providers) -- [Disabling deployment of a DNS Provider](#disabling-deployment-of-a-dns-provider) -- [CoreDNS](#coredns) - - [Scheduling CoreDNS](#scheduling-coredns) - - [Upstream nameservers](#coredns-upstream-nameservers) - - [Priority Class Name](#coredns-priority-class-name) - - [Tolerations](#coredns-tolerations) -- [kube-dns](#kube-dns) - - [Scheduling kube-dns](#scheduling-kube-dns) - - [Upstream nameservers](#kube-dns-upstream-nameservers) - - [Priority Class Name](#kube-dns-priority-class-name) - - [Tolerations](#kube-dns-tolerations) -- [NodeLocal DNS](#nodelocal-dns) - - [Configuring NodeLocal DNS](#configuring-nodelocal-dns) - - [Priority Class Name](#nodelocal-priority-class-name) - - [Removing NodeLocal DNS](#removing-nodelocal-dns) - -# Available DNS Providers - -RKE provides the following DNS providers that can be deployed as add-ons: - - * [CoreDNS](https://coredns.io) - * [kube-dns](https://github.com/kubernetes/dns) - -| RKE version | Kubernetes version | Default DNS provider | -|-------------|--------------------|----------------------| -| v0.2.5 and higher | v1.14.0 and higher | CoreDNS | -| v0.2.5 and higher | v1.13.x and lower | kube-dns | -| v0.2.4 and lower | any | kube-dns | - -CoreDNS was made the default in RKE v0.2.5 when using Kubernetes 1.14 and higher. If you are using an RKE version lower than v0.2.5, kube-dns will be deployed by default. - -> **Note:** If you switch from one DNS provider to another, the existing DNS provider will be removed before the new one is deployed. - -# Disabling Deployment of a DNS Provider - -_Available as of v0.2.0_ - -You can disable the default DNS provider by specifying `none` to the dns `provider` directive in the cluster configuration. Be aware that this will prevent your pods from doing name resolution in your cluster. - -```yaml -dns: - provider: none -``` - -# CoreDNS - -_Available as of v0.2.5_ - -CoreDNS can only be used on Kubernetes v1.12.0 and higher. - -RKE will deploy CoreDNS as a Deployment with the default replica count of 1. The pod consists of 1 container: `coredns`. RKE will also deploy coredns-autoscaler as a Deployment, which will scale the coredns Deployment by using the number of cores and nodes. Please see [Linear Mode](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler#linear-mode) for more information about this logic. - -The images used for CoreDNS are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with CoreDNS, but these can be overridden by changing the image tag in `system_images`. 
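To confirm what RKE deployed, you can inspect both Deployments in the `kube-system` namespace, for example:

```
kubectl -n kube-system get deploy coredns coredns-autoscaler
```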
- -### Scheduling CoreDNS - -If you only want the CoreDNS pod to be deployed on specific nodes, you can set a `node_selector` in the `dns` section. The label in the `node_selector` would need to match the label on the nodes for the CoreDNS pod to be deployed. - -```yaml -nodes: -- address: 1.1.1.1 - role: [controlplane,worker,etcd] - user: root - labels: - app: dns - -dns: - provider: coredns - node_selector: - app: dns -``` - - -### CoreDNS Upstream nameservers - -By default, CoreDNS will use the host configured nameservers (usually residing at `/etc/resolv.conf`) to resolve external queries. If you want to configure specific upstream nameservers to be used by CoreDNS, you can use the `upstreamnameservers` directive. - -When you set `upstreamnameservers`, the `provider` also needs to be set. - -```yaml -dns: - provider: coredns - upstreamnameservers: - - 1.1.1.1 - - 8.8.4.4 -``` - - -### CoreDNS Priority Class Name - -_Available as of RKE v1.2.6+_ - -The [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) is set by configuring a priority class name under `options`: - -```yaml -dns: - options: - coredns_autoscaler_priority_class_name: system-cluster-critical - coredns_priority_class_name: system-cluster-critical - provider: coredns -``` - -### CoreDNS Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `coredns` and the `coredns-autoscaler` Deployment. - -```yaml -dns: - provider: coredns - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 -``` - -To check for applied tolerations on the `coredns` and `coredns-autoscaler` Deployment, use the following commands: - -``` -kubectl -n kube-system get deploy coredns -o jsonpath='{.spec.template.spec.tolerations}' -kubectl -n kube-system get deploy coredns-autoscaler -o jsonpath='{.spec.template.spec.tolerations}' -``` - -# kube-dns - -RKE will deploy kube-dns as a Deployment with the default replica count of 1. The pod consists of 3 containers: `kubedns`, `dnsmasq` and `sidecar`. RKE will also deploy kube-dns-autoscaler as a Deployment, which will scale the kube-dns Deployment by using the number of cores and nodes. Please see [Linear Mode](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler#linear-mode) for more information about this logic. - -The images used for kube-dns are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with kube-dns, but these can be overridden by changing the image tag in `system_images`. - -### Scheduling kube-dns - -_Available as of v0.2.0_ - -If you only want the kube-dns pod to be deployed on specific nodes, you can set a `node_selector` in the `dns` section. The label in the `node_selector` would need to match the label on the nodes for the kube-dns pod to be deployed. - -```yaml -nodes: -- address: 1.1.1.1 - role: [controlplane,worker,etcd] - user: root - labels: - app: dns - -dns: - provider: kube-dns - node_selector: - app: dns -``` - -### kube-dns Upstream nameservers - -_Available as of v0.2.0_ - -By default, kube-dns will use the host configured nameservers (usually residing at `/etc/resolv.conf`) to resolve external queries. 
If you want to configure specific upstream nameservers to be used by kube-dns, you can use the `upstreamnameservers` directive. - -When you set `upstreamnameservers`, the `provider` also needs to be set. - -```yaml -dns: - provider: kube-dns - upstreamnameservers: - - 1.1.1.1 - - 8.8.4.4 -``` - -### kube-dns Priority Class Name - -_Available as of RKE v1.2.6+_ - -The [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) is set by configuring a priority class name under `options`: - -```yaml -dns: - options: - kube_dns_autoscaler_priority_class_name: system-cluster-critical - kube_dns_priority_class_name: system-cluster-critical - provider: kube-dns -``` - - -### kube-dns Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `kube-dns` and the `kube-dns-autoscaler` Deployment. - -```yaml -dns: - provider: kube-dns - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - -``` - -To check for applied tolerations on the `coredns` and `coredns-autoscaler` Deployment, use the following commands: - -``` -kubectl get deploy kube-dns -n kube-system -o jsonpath='{.spec.template.spec.tolerations}' -kubectl get deploy kube-dns-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.tolerations}' -``` - - - -# NodeLocal DNS - -_Available as of v1.1.0_ - -> **Note:** The option to enable NodeLocal DNS is available for: -> -> * Kubernetes v1.15.11 and up -> * Kubernetes v1.16.8 and up -> * Kubernetes v1.17.4 and up - -NodeLocal DNS is an additional component that can be deployed on each node to improve DNS performance. It is not a replacement for the `provider` parameter, you will still need to have one of the available DNS providers configured. See [Using NodeLocal DNSCache in Kubernetes clusters](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) for more information on how NodeLocal DNS works. - -Enable NodeLocal DNS by configuring an IP address. - -### Configuring NodeLocal DNS - -The `ip_address` parameter is used to configure what link-local IP address will be configured one each host to listen on, make sure this IP address is not already configured on the host. - -```yaml -dns: - provider: coredns - nodelocal: - ip_address: "169.254.20.10" -``` - -> **Note:** When enabling NodeLocal DNS on an existing cluster, pods that are currently running will not be modified, the updated `/etc/resolv.conf` configuration will take effect only for pods started after enabling NodeLocal DNS. - -### NodeLocal Priority Class Name - -_Available as of RKE v1.2.6+_ - -The [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) is set by configuring a priority class name under `options`: - -```yaml -dns: - options: - nodelocal_autoscaler_priority_class_name: system-cluster-critical - nodelocal_priority_class_name: system-cluster-critical - provider: coredns # a DNS provider must be configured -``` - -### Removing NodeLocal DNS - -By removing the `ip_address` value, NodeLocal DNS will be removed from the cluster. - -> **Warning:** When removing NodeLocal DNS, a disruption to DNS can be expected. 
The updated `/etc/resolv.conf` configuration will take effect only for pods that are started after removing NodeLocal DNS. In general pods using the default `dnsPolicy: ClusterFirst` will need to be re-deployed. diff --git a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md b/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md deleted file mode 100644 index 7992d4482e..0000000000 --- a/content/rke/latest/en/config-options/add-ons/ingress-controllers/_index.md +++ /dev/null @@ -1,206 +0,0 @@ ---- -title: K8s Ingress Controllers -description: By default, RKE deploys the NGINX ingress controller. Learn how to schedule and disable default k8s ingress controllers, and how to configure NGINX controller -weight: 262 ---- - -- [Default Ingress](#default-ingress) -- [Scheduling Ingress Controllers](#scheduling-ingress-controllers) -- [Ingress Priority Class Name](#ingress-priority-class-name) -- [Tolerations](#tolerations) -- [Disabling the Default Ingress Controller](#disabling-the-default-ingress-controller) -- [Configuring NGINX Ingress Controller](#configuring-nginx-ingress-controller) -- [Disabling NGINX Ingress Default Backend](#disabling-nginx-ingress-default-backend) -- [Configuring an NGINX Default Certificate](#configuring-an-nginx-default-certificate) - -### Default Ingress - -By default, RKE deploys the NGINX ingress controller on all schedulable nodes. - -> **Note:** As of v0.1.8, only workers are considered schedulable nodes, but before v0.1.8, worker and controlplane nodes were considered schedulable nodes. - -RKE will deploy the ingress controller as a DaemonSet with `hostNetwork: true`, so ports `80`, and `443` will be opened on each node where the controller is deployed. - -> **Note:** As of v1.1.11, the network options of the ingress controller are configurable. See [Configuring network options](#configuring-network-options). - -The images used for ingress controller is under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with the ingress controller, but these can be overridden by changing the image tag in `system_images`. - -### Scheduling Ingress Controllers - -If you only wanted ingress controllers to be deployed on specific nodes, you can set a `node_selector` for the ingress. The label in the `node_selector` would need to match the label on the nodes for the ingress controller to be deployed. - -```yaml -nodes: -- address: 1.1.1.1 - role: [controlplane,worker,etcd] - user: root - labels: - app: ingress - -ingress: - provider: nginx - node_selector: - app: ingress -``` - -### Ingress Priority Class Name - -_Available as of RKE v1.2.6+_ - -The [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) is set by configuring a priority class name: - -```yaml -ingress: - provider: nginx - ingress_priority_class_name: system-cluster-critical -``` - -### Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `default-http-backend` Deployment. 
- -```yaml -ingress: - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 -``` - -To check for applied tolerations `default-http-backend` Deployment, use the following commands: - -``` -kubectl -n ingress-nginx get deploy default-http-backend -o jsonpath='{.spec.template.spec.tolerations}' -``` - -### Disabling the Default Ingress Controller - -You can disable the default controller by specifying `none` to the ingress `provider` directive in the cluster configuration. - -```yaml -ingress: - provider: none -``` -### Configuring NGINX Ingress Controller - -For the configuration of NGINX, there are configuration options available in Kubernetes. There are a [list of options for the NGINX config map](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md) , [command line extra_args](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/cli-arguments.md) and [annotations](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). - -```yaml -ingress: - provider: nginx - options: - map-hash-bucket-size: "128" - ssl-protocols: SSLv2 - extra_args: - enable-ssl-passthrough: "" -``` - -### Disabling NGINX Ingress Default Backend - -As of v0.20.0, you can disable the [default backend service](https://kubernetes.github.io/ingress-nginx/user-guide/default-backend/) for the ingress controller. This is possible because `ingress-nginx` will fall back to a local 404 page, and does not require a backend service. The service can be enabled/disabled with a boolean value. - -```yaml -ingress: - default_backend: false -``` - -> **What happens if the field is omitted?** The value of `default_backend` will default to `true`. This maintains behavior with older versions of `rke`. However, a future version of `rke` will change the default value to `false`. - -### Configuring network options - -{{% tabs %}} -{{% tab "v1.3.x" %}} -For Kubernetes v1.21 and up, the NGINX ingress controller no longer runs in `hostNetwork: true` but uses hostPorts for port `80` and port `443`. This was done so the admission webhook can be configured to be accessed using ClusterIP so it can only be reached inside the cluster. If you want to change the mode and/or the ports, see the options below. -{{% /tab %}} -{{% tab "v1.1.11 and up & v1.2.x" %}} -By default, the nginx ingress controller is configured using `hostNetwork: true` on the default ports `80` and `443`. If you want to change the mode and/or the ports, see the options below. 
-{{% /tab %}} -{{% /tabs %}} - -Configure the nginx ingress controller using `hostPort` and override the default ports: - -```yaml -ingress: - provider: nginx - network_mode: hostPort - http_port: 9090 - https_port: 9443 - extra_args: - http-port: 8080 - https-port: 8443 -``` - -Configure the nginx ingress controller using `hostNetwork`: - -```yaml -ingress: - provider: nginx - network_mode: hostNetwork -``` - -Configure the nginx ingress controller with no network mode which will make it run on the overlay network (for example, if you want to expose the nginx ingress controller using a `LoadBalancer`) and override the default ports: - -```yaml -ingress: - provider: nginx - network_mode: none - extra_args: - http-port: 8080 - https-port: 8443 -``` - -### Configuring an NGINX Default Certificate - -When configuring an ingress object with TLS termination, you must provide it with a certificate used for encryption/decryption. Instead of explicitly defining a certificate each time you configure an ingress, you can set up a custom certificate that's used by default. - -Setting up a default certificate is especially helpful in environments where a wildcard certificate is used, as the certificate can be applied in multiple subdomains. - ->**Prerequisites:** -> ->- Access to the `cluster.yml` used to create the cluster. ->- The PEM encoded certificate you will use as the default certificate. - -1. Obtain or generate your certificate key pair in a PEM encoded form. - -2. Generate a Kubernetes secret from your PEM encoded certificate with the following command, substituting your certificate for `mycert.cert` and `mycert.key`. - - ``` - kubectl -n ingress-nginx create secret tls ingress-default-cert --cert=mycert.cert --key=mycert.key -o yaml --dry-run=true > ingress-default-cert.yaml - ``` -3. Include the contents of `ingress-default-cert.yml` inline with your RKE `cluster.yml` file. For example: - - ```yaml - addons: |- - --- - apiVersion: v1 - data: - tls.crt: [ENCODED CERT] - tls.key: [ENCODED KEY] - kind: Secret - metadata: - creationTimestamp: null - name: ingress-default-cert - namespace: ingress-nginx - type: kubernetes.io/tls - ``` -4. Define your ingress resource with the following `default-ssl-certificate` argument, which references the secret we created earlier under `extra_args` in your `cluster.yml`: - - ```yaml - ingress: - provider: "nginx" - extra_args: - default-ssl-certificate: "ingress-nginx/ingress-default-cert" - ``` - -5. **Optional:** If you want to apply the default certificate to ingresses in a cluster that already exists, you must delete the NGINX ingress controller pods to have Kubernetes schedule new pods with the newly configured `extra_args`. - - ``` - kubectl delete pod -l app=ingress-nginx -n ingress-nginx - ``` diff --git a/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md b/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md deleted file mode 100644 index 0171477036..0000000000 --- a/content/rke/latest/en/config-options/add-ons/metrics-server/_index.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Metrics Server -weight: 263 ---- - -By default, RKE deploys [Metrics Server](https://github.com/kubernetes-incubator/metrics-server) to provide metrics on resources in your cluster. - -RKE will deploy Metrics Server as a Deployment. - -The image used for Metrics Server is under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). 
For each Kubernetes version, there is a default image associated with the Metrics Server, but these can be overridden by changing the image tag in `system_images`. - -- [Tolerations](#tolerations) -- [Priority Class Name](#metrics-server-priority-class-name) -- [Disabling the Metrics Server](#disabling-the-metrics-server) - -### Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `metrics-server` Deployment. - -```yaml -monitoring: - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 -``` - -To check for applied tolerations on the `metrics-server` Deployment, use the following commands: - -``` -kubectl -n kube-system get deploy metrics-server -o jsonpath='{.spec.template.spec.tolerations}' -``` - -### Metrics Server Priority Class Name - -_Available as of RKE v1.2.6+_ - -The [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#pod-priority) is set by configuring a priority class name: - -```yaml -monitoring: - provider: metrics-server - metrics_server_priority_class_name: system-cluster-critical -``` - -### Disabling the Metrics Server - -_Available as of v0.2.0_ - -You can disable the default controller by specifying `none` to the monitoring `provider` directive in the cluster configuration. - -```yaml -monitoring: - provider: none -``` diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md deleted file mode 100644 index 1648aac99c..0000000000 --- a/content/rke/latest/en/config-options/add-ons/network-plugins/_index.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -title: Network Plug-ins -weight: 261 ---- - -RKE provides the following network plug-ins that are deployed as add-ons: - -- Flannel -- Calico -- Canal -- Weave - -> After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn’t allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. - -- [Changing the Default Network Plug-in](#changing-the-default-network-plug-in) -- [Disabling Deployment of a Network Plug-in](#disabling-deployment-of-a-network-plug-in) -- [Network Plug-in Options](#network-plug-in-options) -- [Canal](#canal) - - [Canal Network Plug-in Options](#canal-network-plug-in-options) - - [Canal Interface](#canal-interface) - - [Canal Network Plug-in Tolerations](#canal-network-plug-in-tolerations) -- [Flannel](#flannel) - - [Flannel Network Plug-in Options](#flannel-network-plug-in-options) - - [Flannel Interface](#flannel-interface) -- [Calico](#calico) - - [Calico Network Plug-in Options](#calico-network-plug-in-options) - - [Calico Cloud Provider](#calico-cloud-provider) - - [Calico Network Plug-in Tolerations](#calico-network-plug-in-tolerations) -- [Weave](#weave) - - [Weave Network Plug-in Options](#weave-network-plug-in-options) -- [Custom Network Plug-ins](#custom-network-plug-ins) - -# Changing the Default Network Plug-in - -By default, the network plug-in is `canal`. If you want to use another network plug-in, you need to specify which network plug-in to enable at the cluster level in the `cluster.yml`. 
- -```yaml -# Setting the flannel network plug-in -network: - plugin: flannel -``` - -The images used for network plug-ins are under the [`system_images` directive]({{}}/rke/latest/en/config-options/system-images/). For each Kubernetes version, there are default images associated with each network plug-in, but these can be overridden by changing the image tag in `system_images`. - -# Disabling Deployment of a Network Plug-in - -You can disable deploying a network plug-in by specifying `none` to the network `plugin` directive in the cluster configuration. - -```yaml -network: - plugin: none -``` - -# Network Plug-in Options - -Besides the different images that could be used to deploy network plug-ins, certain network plug-ins support additional options that can be used to customize the network plug-in. - -- [Canal](#canal) -- [Flannel](#flannel) -- [Calico](#calico) -- [Weave](#weave) - -# Canal - -### Canal Network Plug-in Options - -```yaml -network: - plugin: canal - options: - canal_iface: eth1 - canal_flannel_backend_type: vxlan - canal_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ - canal_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ -``` - -### Canal Interface - -By setting the `canal_iface`, you can configure the interface to use for inter-host communication. - -The `canal_flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. - -### Canal Network Plug-in Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `calico-kube-controllers` Deployment. - -```yaml -network: - plugin: canal - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 -``` - -To check for applied tolerations on the `calico-kube-controllers` Deployment, use the following command: - -``` -kubectl -n kube-system get deploy calico-kube-controllers -o jsonpath='{.spec.template.spec.tolerations}' -``` - -# Flannel -### Flannel Network Plug-in Options - -```yaml -network: - plugin: flannel - options: - flannel_iface: eth1 - flannel_backend_type: vxlan - flannel_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ - flannel_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ -``` - -### Flannel Interface - -By setting the `flannel_iface`, you can configure the interface to use for inter-host communication. -The `flannel_backend_type` option allows you to specify the type of [flannel backend](https://github.com/coreos/flannel/blob/master/Documentation/backends.md) to use. By default the `vxlan` backend is used. - - -# Calico - -### Calico Network Plug-in Options - -```yaml -network: - plugin: calico - options: - calico_cloud_provider: aws - calico_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ - calico_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ -``` -### Calico Cloud Provider - -Calico currently only supports 2 cloud providers, AWS or GCE, which can be set using `calico_cloud_provider`. 
- -**Valid Options** - -- `aws` -- `gce` - -### Calico Network Plug-in Tolerations - -_Available as of v1.2.4_ - -The configured tolerations apply to the `calico-kube-controllers` Deployment. - -```yaml -network: - plugin: calico - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 -``` - -To check for applied tolerations on the `calico-kube-controllers` Deployment, use the following command: - -``` -kubectl -n kube-system get deploy calico-kube-controllers -o jsonpath='{.spec.template.spec.tolerations}' -``` - -# Weave -### Weave Network Plug-in Options - -```yaml -network: - plugin: weave - options: - weave_autoscaler_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ - weave_priority_class_name: system-cluster-critical # Available as of RKE v1.2.6+ - weave_network_provider: - password: "Q]SZOQ5wp@n$oijz" -``` - -### Weave Encryption - -Weave encryption can be enabled by passing a string password to the network provider config. - -# Custom Network Plug-ins - -It is possible to add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. In the `addons` field, you can add the add-on manifest of a cluster that has the network plugin-that you want, as shown in [this example.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md b/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md deleted file mode 100644 index d942be998a..0000000000 --- a/content/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example/_index.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: Custom Network Plug-in Example -weight: 1 ---- - -The below example shows how to configure a custom network plug-in with an in-line add-on to the `cluster.yml`. - -First, to edit the network plug-ins, change the `network` section of the YAML from: - -``` -network: - options: - flannel_backend_type: "vxlan" - plugin: "canal" -``` -to: -``` -network: - plugin: none -``` - -Then, in the `addons` section of the `cluster.yml`, you can add the add-on manifest of a cluster that has the network plugin-that you want. 
In the below example, we are replacing the Canal plugin with a Flannel plugin by adding the add-on manifest for the cluster through the `addons` field: - -``` -addons: |- - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: flannel - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel - subjects: - - kind: ServiceAccount - name: flannel - namespace: kube-system - --- - kind: ClusterRole - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: flannel - rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch - --- - kind: ConfigMap - apiVersion: v1 - metadata: - name: kube-flannel-cfg - namespace: "kube-system" - labels: - tier: node - app: flannel - data: - cni-conf.json: | - { - "name":"cbr0", - "cniVersion":"0.3.1", - "plugins":[ - { - "type":"flannel", - "delegate":{ - "forceAddress":true, - "isDefaultGateway":true - } - }, - { - "type":"portmap", - "capabilities":{ - "portMappings":true - } - } - ] - } - net-conf.json: | - { - "Network": "10.42.0.0/16", - "Backend": { - "Type": "vxlan" - } - } - --- - apiVersion: extensions/v1beta1 - kind: DaemonSet - metadata: - name: kube-flannel - namespace: "kube-system" - labels: - tier: node - k8s-app: flannel - spec: - template: - metadata: - labels: - tier: node - k8s-app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - serviceAccountName: flannel - containers: - - name: kube-flannel - image: rancher/coreos-flannel:v0.10.0-rancher1 - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 300m - memory: 500M - requests: - cpu: 150m - memory: 64M - command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: install-cni - image: rancher/flannel-cni:v0.3.0-rancher1 - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: kube-flannel-cfg - key: cni-conf.json - - name: CNI_CONF_NAME - value: "10-flannel.conflist" - volumeMounts: - - name: cni - mountPath: /host/etc/cni/net.d - - name: host-cni-bin - mountPath: /host/opt/cni/bin/ - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - - operator: Exists - effect: NoExecute - - key: node.kubernetes.io/not-ready - effect: NoSchedule - operator: Exists - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg - - name: host-cni-bin - hostPath: - path: /opt/cni/bin - updateStrategy: - rollingUpdate: - maxUnavailable: 20% - type: RollingUpdate - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: flannel - namespace: kube-system -``` -**Result:** The cluster is up with the custom network plug-in. 
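A quick way to sanity-check the result is to verify that the DaemonSet from the manifest above is running and that the nodes report `Ready`, for example:

```
kubectl -n kube-system get ds kube-flannel
kubectl get nodes
```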
\ No newline at end of file diff --git a/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md b/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md deleted file mode 100644 index fb874b9b13..0000000000 --- a/content/rke/latest/en/config-options/add-ons/user-defined-add-ons/_index.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: User-Defined Add-Ons -weight: 263 ---- - -Besides the [network plug-in]({{}}/rke/latest/en/config-options/add-ons/network-plugins) and [ingress controllers]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/), you can define any add-on that you want deployed after the Kubernetes cluster is deployed. - -There are two ways that you can specify an add-on. - -- [In-line Add-ons](#in-line-add-ons) -- [Referencing YAML Files for Add-ons](#referencing-yaml-files-for-add-ons) - -> **Note:** When using user-defined add-ons, you *must* define a namespace for *all* your resources, otherwise they will end up in the `kube-system` namespace. - -RKE uploads the YAML manifest as a configmap to the Kubernetes cluster. Then, it runs a Kubernetes job that mounts the configmap and deploys the add-on using `kubectl apply -f`. - -RKE only adds additional add-ons when using `rke up` multiple times. RKE does **not** support removing of cluster add-ons when doing `rke up` with a different list of add-ons. - -As of v0.1.8, RKE will update an add-on if it is the same name. - -Before v0.1.8, update any add-ons by using `kubectl edit`. - -## In-line Add-ons - -To define an add-on directly in the YAML file, make sure to use the YAML's block indicator `|-` as the `addons` directive is a multi-line string option. It's possible to specify multiple YAML resource definitions by separating them using the `---` directive. - -```yaml -addons: |- - --- - apiVersion: v1 - kind: Pod - metadata: - name: my-nginx - namespace: default - spec: - containers: - - name: my-nginx - image: nginx - ports: - - containerPort: 80 -``` - -## Referencing YAML files for Add-ons -Use the `addons_include` directive to reference a local file or a URL for any user-defined add-ons. - -```yaml -addons_include: - - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/operator.yaml - - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/ceph/cluster.yaml - - /opt/manifests/example.yaml - - ./nginx.yaml -``` diff --git a/content/rke/latest/en/config-options/audit-log/_index.md b/content/rke/latest/en/config-options/audit-log/_index.md deleted file mode 100644 index 1ddb802989..0000000000 --- a/content/rke/latest/en/config-options/audit-log/_index.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Audit Log -weight: 251 ---- - -Kubernetes auditing provides a security-relevant chronological set of records about a cluster. Kube-apiserver performs auditing. Each request on each stage of its execution generates an event, which is then pre-processed according to a certain policy and written to a backend. The policy determines what’s recorded and the backends persist the records. - -You might want to configure the audit log as part of compliance with the CIS (Center for Internet Security) Kubernetes Benchmark controls. 
- -For configuration details, refer to the [official Kubernetes documentation.](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/) - -### Enabled by default - -In RKE v1.1.0 and higher and when using specific Kubernetes versions, audit log is enabled by default. See the table below to check when audit log is enabled by default. - -| RKE version | Kubernetes version | audit log Enabled | -|-------------|--------------------|----------------------| -| v1.1.0 and higher | v1.17.4 and higher (v1.17.x) | Yes | -| v1.1.0 and higher | v1.16.8 and higher (v1.16.x) | Yes | -| v1.1.0 and higher | v1.15.11 and higher (v1.15.x) | Yes | - -### Example Configurations - -The audit log can be enabled by default using the following configuration in `cluster.yml`: - -```yaml -services: - kube-api: - audit_log: - enabled: true -``` - -When the audit log is enabled, you should be able to see the default values at `/etc/kubernetes/audit-policy.yaml` (This is located at `/etc/kubernetes/audit.yaml` before RKE v1.1.0): - -```yaml -# Minimum Configuration: Capture event metadata. -... -rules: -- level: Metadata -... -``` - -When the audit log is enabled, default values are also set for the audit log path, maximum age, maximum number of backups, maximum size in megabytes, and format. To see the default values, run: - -``` -ps -ef | grep kube-apiserver -``` - -The default values for audit log were changed in RKE v1.1.0 to the following: - -```yaml ---audit-log-maxage=30 # The maximum number of days to retain old audit log files ---audit-log-maxbackup=10 # The maximum number of audit log files to retain ---audit-log-path=/var/log/kube-audit/audit-log.json # The log file path that log backend uses to write audit events ---audit-log-maxsize=100 # The maximum size in megabytes of the audit log file before it gets rotated ---audit-policy-file=/etc/kubernetes/audit-policy.yaml # The file containing your audit log rules ---audit-log-format=json # The log file format - -``` - -The default values for the audit log before RKE v1.1.0 are: - -```yaml ---audit-log-maxage=5 # The maximum number of days to retain old audit log files ---audit-log-maxbackup=5 # The maximum number of audit log files to retain ---audit-log-path=/var/log/kube-audit/audit-log.json # The log file path that log backend uses to write audit events ---audit-log-maxsize=100 # The maximum size in megabytes of the audit log file before it gets rotated ---audit-policy-file=/etc/kubernetes/audit.yaml # The file containing your audit log rules ---audit-log-format=json # The log file format - -``` - -To customize the audit log, the `configuration` directive is used. - -A rules policy is passed to kube-apiserver using the `--audit-policy-file` or the `policy` directive in the `cluster.yml`. Below is an example `cluster.yml` with custom values and an audit log policy nested under the `configuration` directive. This example audit log policy is taken from the official [Kubernetes documentation:](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy) - -```yaml -services: - kube-api: - audit_log: - enabled: true - configuration: - max_age: 6 - max_backup: 6 - max_size: 110 - path: /var/log/kube-audit/audit-log.json - format: json - policy: - apiVersion: audit.k8s.io/v1 # This is required. 
- kind: Policy - omitStages: - - "RequestReceived" - rules: - # Log pod changes at RequestResponse level - - level: RequestResponse - resources: - - group: "" - # Resource "pods" doesn't match requests to any subresource of pods, - # which is consistent with the RBAC policy. - resources: ["pods"] - # Log "pods/log", "pods/status" at Metadata level - - level: Metadata - resources: - - group: "" - resources: ["pods/log", "pods/status"] - - # Don't log requests to a configmap called "controller-leader" - - level: None - resources: - - group: "" - resources: ["configmaps"] - resourceNames: ["controller-leader"] - - # Don't log watch requests by the "system:kube-proxy" on endpoints or services - - level: None - users: ["system:kube-proxy"] - verbs: ["watch"] - resources: - - group: "" # core API group - resources: ["endpoints", "services"] - - # Don't log authenticated requests to certain non-resource URL paths. - - level: None - userGroups: ["system:authenticated"] - nonResourceURLs: - - "/api*" # Wildcard matching. - - "/version" - - # Log the request body of configmap changes in kube-system. - - level: Request - resources: - - group: "" # core API group - resources: ["configmaps"] - # This rule only applies to resources in the "kube-system" namespace. - # The empty string "" can be used to select non-namespaced resources. - namespaces: ["kube-system"] - - # Log configmap and secret changes in all other namespaces at the Metadata level. - - level: Metadata - resources: - - group: "" # core API group - resources: ["secrets", "configmaps"] - - # Log all other resources in core and extensions at the Request level. - - level: Request - resources: - - group: "" # core API group - - group: "extensions" # Version of group should NOT be included. - - # A catch-all rule to log all other requests at the Metadata level. - - level: Metadata - # Long-running requests like watches that fall under this rule will not - # generate an audit event in RequestReceived. - omitStages: - - "RequestReceived" -``` diff --git a/content/rke/latest/en/config-options/authentication/_index.md b/content/rke/latest/en/config-options/authentication/_index.md deleted file mode 100644 index efc2817a39..0000000000 --- a/content/rke/latest/en/config-options/authentication/_index.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Authentication -weight: 235 ---- - -RKE supports x509 authentication strategy. You can additionally define a list of SANs (Subject Alternative Names) to add to the Kubernetes API Server PKI certificates. As an example, this allows you to connect to your Kubernetes cluster API Server through a load balancer instead of a single node. - -```yaml -authentication: - strategy: x509 - sans: - - "10.18.160.10" - - "my-loadbalancer-1234567890.us-west-2.elb.amazonaws.com" -``` - -RKE also supports the webhook authentication strategy. You can enable both x509 and webhook strategies by using a `|` separator in the configuration. Contents of the webhook config file should be provided, see [Kubernetes webhook documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) for information on the file format. Additionally, a cache timeout for webhook authentication responses can be set. - -```yaml -authentication: - strategy: x509|webhook - webhook: - config_file: "...." 
- cache_timeout: 5s -``` diff --git a/content/rke/latest/en/config-options/authorization/_index.md b/content/rke/latest/en/config-options/authorization/_index.md deleted file mode 100644 index 6d40ca8954..0000000000 --- a/content/rke/latest/en/config-options/authorization/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Authorization -weight: 240 ---- - -Kubernetes supports multiple [Authorization Modules](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#authorization-modules). Currently, RKE only supports the [RBAC module](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). - -By default, RBAC is already enabled. If you wanted to turn off RBAC support, **which isn't recommended**, you set the authorization mode to `none` in your `cluster.yml`. - -```yaml -authorization: - # Use `mode: none` to disable authorization - mode: rbac -``` diff --git a/content/rke/latest/en/config-options/bastion-host/_index.md b/content/rke/latest/en/config-options/bastion-host/_index.md deleted file mode 100644 index d2710e8c42..0000000000 --- a/content/rke/latest/en/config-options/bastion-host/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Bastion/Jump Host Configuration -weight: 220 ---- - -Since RKE uses `ssh` to connect to [nodes]({{}}/rke/latest/en/config-options/nodes/), you can configure the `cluster.yml` so RKE will use a bastion host. Keep in mind that the [port requirements]({{}}/rke/latest/en/os/#ports) for the RKE node move to the configured bastion host. Our private SSH key(s) only needs to reside on the host running RKE. You do not need to copy your private SSH key(s) to the bastion host. - -```yaml -bastion_host: - address: x.x.x.x - user: ubuntu - port: 22 - ssh_key_path: /home/user/.ssh/bastion_rsa - # or - # ssh_key: |- - # -----BEGIN RSA PRIVATE KEY----- - # - # -----END RSA PRIVATE KEY----- - # Optionally using SSH certificates - # ssh_cert_path: /home/user/.ssh/id_rsa-cert.pub - # or - # ssh_cert: |- - # ssh-rsa-cert-v01@openssh.com AAAAHHNza... -``` - -## Bastion Host Options - -### Address - -The `address` directive will be used to set the hostname or IP address of the bastion host. RKE must be able to connect to this address. - -### SSH Port - -You specify which `port` to be used when connecting to the bastion host. The default port is `22`. - -### SSH Users - -You specify the `user` to be used when connecting to this node. - -### SSH Key Path - -You specify the path, i.e. `ssh_key_path`, for the SSH private key to be used when connecting to the bastion host. - -### SSH Key - -Instead of setting the path to the SSH key, you can specify the actual key, i.e. `ssh_key`, to be used to connect to the bastion host. - -### SSH Certificate Path - -You specify the path, i.e. `ssh_cert_path`, for the signed SSH certificate to be used when connecting to the bastion host. - -### SSH Certificate - -Instead of setting the path to the signed SSH certificate, you can specify the actual certificate, i.e. `ssh_cert`, to be used to connect to the bastion host. 
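Before running `rke up`, it can help to confirm that the jump path works with plain OpenSSH. A hedged sketch using the placeholder values from the example above (the node user and address are whatever you configured under `nodes`):

```
ssh -J ubuntu@x.x.x.x:22 <node-user>@<node-address> docker version
```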
diff --git a/content/rke/latest/en/config-options/cloud-providers/_index.md b/content/rke/latest/en/config-options/cloud-providers/_index.md deleted file mode 100644 index f9a58253e1..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/_index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Cloud Providers -weight: 250 ---- - -RKE supports the ability to set your specific cloud provider for your Kubernetes cluster. There are specific cloud configurations for these cloud providers. -To enable a cloud provider its name as well as any required configuration options must be provided under the `cloud_provider` directive in the cluster YML. - -* [AWS]({{}}/rke/latest/en/config-options/cloud-providers/aws) -* [Azure]({{}}/rke/latest/en/config-options/cloud-providers/azure) -* [OpenStack]({{}}/rke/latest/en/config-options/cloud-providers/openstack) -* [vSphere]({{}}/rke/latest/en/config-options/cloud-providers/vsphere) - -Outside of this list, RKE also supports the ability to handle any [custom cloud provider]({{}}/rke/latest/en/config-options/cloud-providers/custom). diff --git a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md b/content/rke/latest/en/config-options/cloud-providers/aws/_index.md deleted file mode 100644 index 95df8b4be4..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/aws/_index.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: AWS Cloud Provider -weight: 251 ---- - -To enable the AWS cloud provider, there are no RKE configuration options. You only need to set the name as `aws`. In order to use the AWS cloud provider, all cluster nodes must have already been configured with an [appropriate IAM role](#iam-requirements) and your AWS resources must be [tagged with a cluster ID](#tagging-aws-resources). - -```yaml -cloud_provider: - name: aws -``` - -## IAM Requirements - -In a cluster with the AWS cloud provider enabled, nodes must have at least the `ec2:Describe*` action. - -In order to use Elastic Load Balancers (ELBs) and EBS volumes with Kubernetes, the node(s) will need to have the an IAM role with appropriate permissions. 
- -IAM policy for nodes with the `controlplane` role: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeTags", - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVolumes", - "ec2:CreateSecurityGroup", - "ec2:CreateTags", - "ec2:CreateVolume", - "ec2:ModifyInstanceAttribute", - "ec2:ModifyVolume", - "ec2:AttachVolume", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateRoute", - "ec2:DeleteRoute", - "ec2:DeleteSecurityGroup", - "ec2:DeleteVolume", - "ec2:DetachVolume", - "ec2:RevokeSecurityGroupIngress", - "ec2:DescribeVpcs", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:AttachLoadBalancerToSubnets", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:CreateLoadBalancerPolicy", - "elasticloadbalancing:CreateLoadBalancerListeners", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancerListeners", - "elasticloadbalancing:DescribeLoadBalancers", - "elasticloadbalancing:DescribeLoadBalancerAttributes", - "elasticloadbalancing:DetachLoadBalancerFromSubnets", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", - "elasticloadbalancing:ModifyLoadBalancerAttributes", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", - "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", - "elasticloadbalancing:AddTags", - "elasticloadbalancing:CreateListener", - "elasticloadbalancing:CreateTargetGroup", - "elasticloadbalancing:DeleteListener", - "elasticloadbalancing:DeleteTargetGroup", - "elasticloadbalancing:DescribeListeners", - "elasticloadbalancing:DescribeLoadBalancerPolicies", - "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:ModifyListener", - "elasticloadbalancing:ModifyTargetGroup", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", - "iam:CreateServiceLinkedRole", - "kms:DescribeKey" - ], - "Resource": [ - "*" - ] - } - ] -} -``` - -IAM policy for nodes with the `etcd` or `worker` role: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances", - "ec2:DescribeRegions", - "ecr:GetAuthorizationToken", - "ecr:BatchCheckLayerAvailability", - "ecr:GetDownloadUrlForLayer", - "ecr:GetRepositoryPolicy", - "ecr:DescribeRepositories", - "ecr:ListImages", - "ecr:BatchGetImage" - ], - "Resource": "*" - } - ] -} -``` - -## Tagging AWS Resources - -The AWS cloud provider uses tagging to discover and manage resources, the following resources are not automatically tagged by Kubernetes or RKE: - -- **VPC**: The VPC used by the cluster -- **Subnet**: The subnets used by the cluster -- **EC2 instances**: All nodes launched for the cluster -- **Security Groups**: The security group(s) used by nodes in the cluster - - >**Note:** If creating a `LoadBalancer` service and there is more than one security group attached to nodes, you must tag only one of the security groups as `owned` so that Kubernetes knows which group to add and remove rules. A single untagged security group is allowed, however, sharing this between clusters is not recommended. 
- -[AWS Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) - -You must tag with one of the following: - -| Key | Value | -|---|---| -| kubernetes.io/cluster/`` | shared | - -`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster only. - -If you do not share resources between clusters, you can change the tag to: - -| Key | Value | -|---|---| -| kubernetes.io/cluster/`` | owned | - -## Tagging for Load Balancers - -When provisioning a `LoadBalancer` service Kubernetes will attempt to discover the correct subnets, this is also achieved by tags and requires adding additional subnet tags to ensure internet-facing and internal ELBs are created in the correct subnets. - -[AWS Documentation: Subnet tagging for load balancers](https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html#subnet-tagging-for-load-balancers) diff --git a/content/rke/latest/en/config-options/cloud-providers/azure/_index.md b/content/rke/latest/en/config-options/cloud-providers/azure/_index.md deleted file mode 100644 index 16b1e43bd8..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/azure/_index.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Azure Cloud Provider -weight: 252 ---- - -To enable the Azure cloud provider, besides setting the name as `azure`, there are specific configuration options that must be set. Additionally, the Azure node name must also match the Kubernetes node name. - -```yaml -cloud_provider: - name: azure - azureCloudProvider: - aadClientId: xxxxxxxxx - aadClientSecret: xxxxxxxxx - location: xxxxxxxxx - resourceGroup: xxxxxxxxx - subnetName: xxxxxxxxx - subscriptionId: xxxxxxxxx - vnetName: xxxxxxxxx - tenantId: xxxxxxxxx - securityGroupName: xxxxxxxxx -``` - -## Overriding the hostname - -Since the Azure node name must match the Kubernetes node name, you override the Kubernetes name on the node by setting the `hostname_override` for each node. If you do not set the `hostname_override`, the Kubernetes node name will be set as the `address`, which will cause the Azure cloud provider to fail. - -```yaml -nodes: - - address: x.x.x.x - hostname_override: azure-rke1 - user: ubuntu - role: - - controlplane - - etcd - - worker -``` - -## Azure Configuration Options - -Besides the minimum set of options, there are many other options that are supported in RKE: - -| Azure Configuration Options | Type | Required | Description | -|:----------------------------: |:------: |:---------:|:-----------:| -| tenantId | string | * | The Azure Active Directory (Azure AD) tenant ID for the subscription that the cluster is deployed in. | -| subscriptionId | string | * | The ID of the Azure subscription that the cluster is deployed in. | -| aadClientId | string | * | The client ID for an Azure AD application with RBAC access to talk to Azure Resource Manager APIs. This is used for [service principal](https://github.com/Azure/aks-engine/blob/master/docs/topics/service-principals.md) authentication. | -| aadClientSecret | string | * | The client secret for an Azure AD application with RBAC access to talk to Azure Resource Manager APIs. 
This is used for [service principal](https://github.com/Azure/aks-engine/blob/master/docs/topics/service-principals.md) authentication. | -| cloud | string | | The cloud environment identifier. Takes values from [here](https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13). | -| resourceGroup | string | | The name of the resource group that the Vnet is deployed in. | -| location | string | | The location of the resource group that the cluster is deployed in. | -| vnetName | string | | The name of the virtual network that the cluster is deployed in. | -| vnetResourceGroup | string | | The name of the resource group that the virtual network is deployed in. | -| subnetName | string | | The name of the subnet that the cluster is deployed in. | -| securityGroupName | string | | The name of the security group attached to the cluster's subnet. | -| routeTableName | string | | The name of the route table attached to the subnet that the cluster is deployed in. | -| primaryAvailabilitySetName | string | | The name of the availability set that should be used as the load balancer backend. If this is set, the Azure cloud provider will only add nodes from that availability set to the load balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then the cloud provider will try to add all nodes to a single backend pool which is forbidden. In other words, if you use multiple agent pools (availability sets), you **must** set this field. | -| vmType | string | | The type of Azure nodes. Candidate values are: `vmss` and `standard`. If not set, it will be default to `standard`. Set to `vmss` if the cluster is running on [Azure virtual machine scale sets](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview) instead of standard machines. | -| primaryScaleSetName | string | | The name of the scale set that should be used as the load balancer backend. If this is set, the Azure cloud provider will only add nodes from that scale set to the load balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then the cloud provider will try to add all nodes to a single backend pool which is forbidden. In other words, if you use multiple agent pools (scale sets), you **must** set this field. | -| aadClientCertPath | string | | The path of a client certificate for an Azure AD application with RBAC access to talk to Azure Resource Manager APIs. This is used for [client certificate authentication](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-service-to-service). | -| aadClientCertPassword | string | | The password of the client certificate for an Azure AD application with RBAC access to talk to Azure Resource Manager APIs. This is used for [client certificate authentication](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-service-to-service). | -| cloudProviderBackoff | bool | | Enable exponential backoff to manage resource request retries. | -| cloudProviderBackoffRetries | int | | Backoff retry limit. | -| cloudProviderBackoffExponent | int | | Backoff exponent. | -| cloudProviderBackoffDuration | int | | Backoff duration. | -| cloudProviderBackoffJitter | int | | Backoff jitter. 
| -| cloudProviderRateLimit | bool | | Enable rate limiting. | -| cloudProviderRateLimitQPS | int | | Rate limit QPS. | -| cloudProviderRateLimitBucket | int | | Rate limit bucket Size. | -| useInstanceMetadata | bool | | Use instance metadata service where possible. | -| useManagedIdentityExtension | bool | | Use managed service identity for the virtual machine to access Azure Resource Manager APIs. This is used for [managed identity authentication](https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview). For user-assigned managed identity, `UserAssignedIdentityID` needs to be set. | -| UserAssignedIdentityID | string | | The client ID of the user assigned Managed Service Identity (MSI) which is assigned to the underlying VMs. This is used for [managed identity authentication](https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview). | -| maximumLoadBalancerRuleCount | int | | The limit enforced by Azure Load balancer. The default is `0` and maximum is `148`. | -| LoadBalancerSku | string | | SKU of the load balancer and public IP. Valid values are `basic` or `standard`. Default(blank) to `basic`. | -| ExcludeMasterFromStandardLB | bool | | Excludes master nodes (labeled with `node-role.kubernetes.io/master`) from the backend pool of Azure standard loadbalancer. Defaults to `nil`. | diff --git a/content/rke/latest/en/config-options/cloud-providers/custom/_index.md b/content/rke/latest/en/config-options/cloud-providers/custom/_index.md deleted file mode 100644 index 1cd581f75c..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/custom/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Custom Cloud Provider -weight: 255 ---- - -If you want to enable a different cloud provider, RKE allows for custom cloud provider options. A name must be provided and the custom Cloud Provider options can be passed in as a multiline string in `customCloudProvider`. - -For example, in order to use the oVirt cloud provider with Kubernetes, here's the following cloud provider information: - -``` -[connection] -uri = https://localhost:8443/ovirt-engine/api -username = admin@internal -password = admin -``` - -To add this cloud config file to RKE, the `cloud_provider` would be need to be set. - -```yaml -cloud_provider: - name: ovirt - # Note the pipe as this is what indicates a multiline string - customCloudProvider: |- - [connection] - uri = https://localhost:8443/ovirt-engine/api - username = admin@internal - password = admin -``` diff --git a/content/rke/latest/en/config-options/cloud-providers/openstack/_index.md b/content/rke/latest/en/config-options/cloud-providers/openstack/_index.md deleted file mode 100644 index 4675a77975..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/openstack/_index.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: OpenStack Cloud Provider -weight: 253 ---- - -To enable the OpenStack cloud provider, besides setting the name as `openstack`, there are specific configuration options that must be set. The OpenStack configuration options are grouped into different sections. 
- -```yaml -cloud_provider: - name: openstack - openstackCloudProvider: - global: - username: xxxxxxxxxxxxxx - password: xxxxxxxxxxxxxx - auth-url: https://1.2.3.4/identity/v3 - tenant-id: xxxxxxxxxxxxxx - domain-id: xxxxxxxxxxxxxx - load_balancer: - subnet-id: xxxxxxxxxxxxxx - block_storage: - ignore-volume-az: true - route: - router-id: xxxxxxxxxxxxxx - metadata: - search-order: xxxxxxxxxxxxxx -``` - -## Overriding the hostname - -The OpenStack cloud provider uses the instance name (as determined from OpenStack metadata) as the name of the Kubernetes Node object, you must override the Kubernetes name on the node by setting the `hostname_override` for each node. If you do not set the `hostname_override`, the Kubernetes node name will be set as the `address`, which will cause the OpenStack cloud provider to fail. - -## OpenStack Configuration Options - -The OpenStack configuration options are divided into 5 groups. - -* Global -* Load Balancer -* Block Storage -* Route -* Metadata - -### Global - -These are the options that are available under the `global` directive. - -| OpenStack's Global Configuration Options | Type | Required | -|:--------------------: |:------: |:---------:| -| auth_url | string | * | -| username | string | * | -| user-id | string | * | -| password | string | * | -| tenant-id | string | * | -| tenant-name | string | | -| trust-id | string | | -| domain-id | string | | -| domain-name | string | | -| region | string | | -| ca-file | string | | - -### Load Balancer - -These are the options that are available under the `load_balancer` directive. - -| OpenStack's Load Balancer Configuration Options | Type | Required | -|:----------------------: |:------: |:---------:| -| lb-version | string | | -| use-octavia | bool | | -| subnet-id | string | | -| floating-network-id | string | | -| lb-method | string | | -| lb-provider | string | | -| manage-security-groups | bool | | -| create-monitor | bool | | -| monitor-delay | int | * if `create-monitor` is true | -| monitor-timeout | int | * if `create-monitor` is true | -| monitor-max-retries | int | * if `create-monitor` is true | - - -### Block Storage - -These are the options that are available under the `block_storage` directive. - -| OpenStack's Block Storage Configuration Options | Type | Required | -|:--------------------: |:------: |:---------:| -| bs-version | string | | -| trust-device-path | bool | | -| ignore-volume-az | bool | | - -### Route - -This is the option that is available under the `route` directive. - -| OpenStack's Route Configuration Option | Type | Required | -|:--------------------: |:------: |:---------:| -| router-id | string | | - -### Metadata - -These are the options that are available under the `metadata` directive. - -| OpenStack's Metadata Configuration Options | Type | Required | -|:--------------------: |:------: |:---------:| -| search-order | string | | -| request-timeout | int | | - -For more information of OpenStack configurations options please refer to the official Kubernetes [documentation](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#openstack). 
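As with the other cloud providers, the practical way to satisfy the hostname requirement described above is to set `hostname_override` on each node. A minimal sketch, assuming an OpenStack instance named `openstack-node-1`:

```yaml
nodes:
  - address: 10.1.2.3
    # Must match the instance name reported by the OpenStack metadata service
    hostname_override: openstack-node-1
    user: ubuntu
    role:
      - controlplane
      - etcd
      - worker
```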
diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md deleted file mode 100644 index dea75f8121..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: vSphere Cloud Provider -weight: 254 ---- - -This section describes how to enable the vSphere cloud provider. You will need to use the `cloud_provider` directive in the cluster YAML file. - -The [vSphere Cloud Provider](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) interacts with VMware infrastructure (vCenter or standalone ESXi server) to provision and manage storage for persistent volumes in a Kubernetes cluster. - -When provisioning Kubernetes using RKE CLI or using [RKE clusters]({{< baseurl >}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/) in Rancher, the vSphere Cloud Provider can be enabled by configuring the `cloud_provider` directive in the cluster YAML file. - -### Related Links - -- **Configuration:** For details on vSphere configuration in RKE, refer to the [configuration reference.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference) -- **Troubleshooting:** For guidance on troubleshooting a cluster with the vSphere cloud provider enabled, refer to the [troubleshooting section.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting) -- **Storage:** If you are setting up storage, see the [official vSphere documentation on storage for Kubernetes,](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) or the [official Kubernetes documentation on persistent volumes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) If you are using Rancher, refer to the [Rancher documentation on provisioning storage in vSphere.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) -- **For Rancher users:** Refer to the Rancher documentation on [creating vSphere Kubernetes clusters]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere) and [provisioning storage.]({{}}/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere) - -# Prerequisites - -- **Credentials:** You'll need to have credentials of a vCenter/ESXi user account with privileges allowing the cloud provider to interact with the vSphere infrastructure to provision storage. Refer to [this document](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/vcp-roles.html) to create and assign a role with the required permissions in vCenter. -- **VMware Tools** must be running in the Guest OS for all nodes in the cluster. -- **Disk UUIDs:** All nodes must be configured with disk UUIDs. This is required so that attached VMDKs present a consistent UUID to the VM, allowing the disk to be mounted properly. See the section on [enabling disk UUIDs.]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid) - -# Enabling the vSphere Provider with the RKE CLI - -To enable the vSphere Cloud Provider in the cluster, you must add the top-level `cloud_provider` directive to the cluster configuration file, set the `name` property to `vsphere` and add the `vsphereCloudProvider` directive containing the configuration matching your infrastructure. 
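A minimal sketch of that directive, using placeholder values for the vCenter, credentials, datacenter, and datastore:

```yaml
cloud_provider:
  name: vsphere
  vsphereCloudProvider:
    virtual_center:
      vc.example.com:                 # placeholder vCenter FQDN
        user: provisioner
        password: secret
        datacenters: /eu-west-1
    workspace:
      server: vc.example.com
      datacenter: /eu-west-1
      folder: vm/kubernetes
      default-datastore: ds-1
```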
See the [configuration reference]({{}}/rke/latest/en/config-options/cloud-providers/vsphere/config-reference) for the gory details. \ No newline at end of file diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md deleted file mode 100644 index b079ae7dd3..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: vSphere Configuration Reference -weight: 3 ---- - -This section shows an example of how to configure the vSphere cloud provider. - -The vSphere cloud provider must be enabled to allow dynamic provisioning of volumes. - -For more details on deploying a Kubernetes cluster on vSphere, refer to the [official cloud provider documentation.](https://cloud-provider-vsphere.sigs.k8s.io/tutorials/kubernetes-on-vsphere-with-kubeadm.html) - -> **Note:** This documentation reflects the new vSphere Cloud Provider configuration schema introduced in Kubernetes v1.9 which differs from previous versions. - -# vSphere Configuration Example - -Given the following: - -- VMs in the cluster are running in the same datacenter `eu-west-1` managed by the vCenter `vc.example.com`. -- The vCenter has a user `provisioner` with password `secret` with the required roles assigned, see [Prerequisites](#prerequisites). -- The vCenter has a datastore named `ds-1` which should be used to store the VMDKs for volumes. -- A `vm/kubernetes` folder exists in vCenter. - -The corresponding configuration for the provider would then be as follows: - -```yaml -rancher_kubernetes_engine_config: - (...) - cloud_provider: - name: vsphere - vsphereCloudProvider: - virtual_center: - vc.example.com: - user: provisioner - password: secret - port: 443 - datacenters: /eu-west-1 - workspace: - server: vc.example.com - folder: myvmfolder - default-datastore: ds-1 - datacenter: /eu-west-1 - resourcepool-path: /eu-west-1/host/hn1/resources/myresourcepool - -``` -# Configuration Options - -The vSphere configuration options are divided into 5 groups: - -* [global](#global) -* [virtual_center](#virtual_center) -* [workspace](#workspace) -* [disk](#disk) -* [network](#network) - -### global - -The main purpose of global options is to be able to define a common set of configuration parameters that will be inherited by all vCenters defined under the `virtual_center` directive unless explicitly defined there. - -Accordingly, the `global` directive accepts the same configuration options that are available under the `virtual_center` directive. Additionally it accepts a single parameter that can only be specified here: - -| global Options | Type | Required | Description | -|:---------------:|:-------:|:---------:|:---------| -| insecure-flag | boolean | | Set to **true** if the vCenter/ESXi uses a self-signed certificate. | - -**Example:** - -```yaml -(...) - global: - insecure-flag: true -``` - -### virtual_center - -This configuration directive specifies the vCenters that are managing the nodes in the cluster. You must define at least one vCenter/ESXi server. If the nodes span multiple vCenters then all must be defined. - -Each vCenter is defined by adding a new entry under the `virtual_center` directive with the vCenter IP or FQDN as the name. All required parameters must be provided for each vCenter unless they are already defined under the `global` directive. 
- -| virtual_center Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:-----------| -| user | string | * | vCenter/ESXi user used to authenticate with this server. | -| password | string | * | User's password. | -| port | string | | Port to use to connect to this server. Defaults to 443. | -| datacenters | string | * | Comma-separated list of all datacenters in which cluster nodes are running in. | -| soap-roundtrip-count | uint | | Round tripper count for API requests to the vCenter (num retries = value - 1). | - -> The following additional options (introduced in Kubernetes v1.11) are not yet supported in RKE. - -| virtual_center Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:-------| -| secret-name | string | | Name of secret resource containing credential key/value pairs. Can be specified in lieu of user/password parameters.| -| secret-namespace | string | | Namespace in which the secret resource was created in. | -| ca-file | string | | Path to CA cert file used to verify the vCenter certificate. | - -**Example:** - -```yaml -(...) - virtual_center: - 172.158.111.1: {} # This vCenter inherits all it's properties from global options - 172.158.110.2: # All required options are set explicitly - user: vc-user - password: othersecret - datacenters: eu-west-2 -``` - -### workspace - -This configuration group specifies how storage for volumes is created in vSphere. -The following configuration options are available: - -| workspace Options | Type | Required | Description | -|:----------------------:|:--------:|:---------:|:---------| -| server | string | * | IP or FQDN of the vCenter/ESXi that should be used for creating the volumes. Must match one of the vCenters defined under the `virtual_center` directive.| -| datacenter | string | * | Name of the datacenter that should be used for creating volumes. For ESXi enter *ha-datacenter*.| -| folder | string | * | Path of folder in which to create dummy VMs used for volume provisioning (relative from the root folder in vCenter), e.g. "vm/kubernetes".| -| default-datastore | string | | Name of default datastore to place VMDKs if neither datastore or storage policy are specified in the volume options of a PVC. If datastore is located in a storage folder or is a member of a datastore cluster, specify the full path. | -| resourcepool-path | string | | Absolute or relative path to the resource pool where the dummy VMs for [Storage policy based provisioning](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/policy-based-mgmt.html) should be created. If a relative path is specified, it is resolved with respect to the datacenter's *host* folder. Examples: `//host//Resources/`, `Resources/`. For standalone ESXi specify `Resources`. | - -**Example:** - -```yaml -(...) - workspace: - server: 172.158.111.1 # matches IP of vCenter defined in the virtual_center block - datacenter: eu-west-1 - folder: vm/kubernetes - default-datastore: ds-1 -``` - -### disk - -The following configuration options are available under the disk directive: - -| disk Options | Type | Required | Description | -|:--------------------:|:--------:|:---------:|:----------------| -| scsicontrollertype | string | | SCSI controller type to use when attaching block storage to VMs. Must be one of: *lsilogic-sas* or *pvscsi*. Default: *pvscsi*. 
| - -### network - -The following configuration options are available under the network directive: - -| network Options | Type | Required | Description | -|:-------------------:|:--------:|:---------:|:-----------------------------------------------------------------------------| -| public-network | string | | Name of public **VM Network** to which the VMs in the cluster are connected. Used to determine public IP addresses of VMs.| diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md deleted file mode 100644 index 6afccab7d7..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/enabling-uuid/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Enabling Disk UUIDs for vSphere VMs -weight: 2 ---- - -In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. This is required so that attached VMDKs present a consistent UUID to the VM, allowing the disk to be mounted properly. - -Depending on whether you are provisioning the VMs using the [vSphere node driver]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere) in Rancher or using your own scripts or third-party tools, there are different methods available to enable disk UUIDs for VMs: - -- [Using the vSphere console](#using-the-vsphere-console) -- [Using the GOVC CLI tool](#using-the-govc-cli-tool) -- [Using a Rancher node template](#using-a-rancher-node-template) - -### Using the vSphere Console - -The required property can be set while creating or modifying VMs in the vSphere Console: - -1. For each VM navigate to the tab **VM Options** and click on **Edit Configuration**. -2. Add the parameter `disk.EnableUUID` with a value of **TRUE**. - - {{< img "/img/rke/vsphere-advanced-parameters.png" "vsphere-advanced-parameters" >}} - -### Using the GOVC CLI tool - -You can also modify properties of VMs with the [govc](https://github.com/vmware/govmomi/tree/master/govc) command-line tool to enable disk UUIDs: - -```sh -$ govc vm.change -vm -e disk.enableUUID=TRUE -``` - -### Using a Rancher Node Template - -In Rancher v2.0.4+, disk UUIDs are enabled in vSphere node templates by default. - -If you are using Rancher before v2.0.4, refer to the [vSphere node template documentation.]({{}}/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4//) for details on how to enable a UUID with a Rancher node template. 
diff --git a/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md b/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md deleted file mode 100644 index a63f81c36b..0000000000 --- a/content/rke/latest/en/config-options/cloud-providers/vsphere/troubleshooting/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Troubleshooting vSphere Clusters -weight: 4 ---- - -If you are experiencing issues while provisioning a cluster with enabled vSphere Cloud Provider or while creating vSphere volumes for your workloads, you should inspect the logs of the following K8s services: - -- controller-manager (Manages volumes in vCenter) -- kubelet: (Mounts vSphere volumes to pods) - -If your cluster is not configured with external [Cluster Logging]({{}}/rancher/v2.x//en/cluster-admin/tools//logging/), you will need to SSH into nodes to get the logs of the `kube-controller-manager` (running on one of the control plane nodes) and the `kubelet` (pertaining to the node where the stateful pod has been scheduled). - -The easiest way to create a SSH session with a node is the Rancher CLI tool. - -1. [Configure the Rancher CLI]({{}}/rancher/v2.x/en/cli/) for your cluster. -2. Run the following command to get a shell to the corresponding nodes: - - ```sh -$ rancher ssh - ``` - -3. Inspect the logs of the controller-manager and kubelet containers looking for errors related to the vSphere cloud provider: - - ```sh - $ docker logs --since 15m kube-controller-manager - $ docker logs --since 15m kubelet - ``` diff --git a/content/rke/latest/en/config-options/dual-stack/_index.md b/content/rke/latest/en/config-options/dual-stack/_index.md deleted file mode 100644 index 1bec3578e7..0000000000 --- a/content/rke/latest/en/config-options/dual-stack/_index.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Dual-stack -weight: 255 ---- - -As of RKE `v1.3.0`, [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) networking support has been added, which allows allocating both IPv4 and IPv6 addresses to pods and services. - -### Requirements - -In order to use the dual-stack feature, RKE and the infrastructure it's deploy to must be configured as follows: - -- Kubernetes 1.21 or newer is used. -- RKE is configured to use Calico as the Container Network Interface (CNI) provider. Other providers are not supported. -- RKE is deployed on Amazon EC2 instances with the following prerequisites: - - Enable IPv6 support: set the network range at VPC and its subnetworks. - - Add a IPv6 default gateway to VPC routes. - - Add inbound/outbound rules for IPv6 traffic to your cluster's security group(s). - - Ensure instances have `Auto-assign IPv6 IP` enabled. See the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html) for instructions. - - Disable source/destination checks on all instances in the cluster. See the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) for instructions. - -For more information on configuring your AWS enivronment for IPv6, refer to the AWS [Getting started with IPv6](https://docs.aws.amazon.com/vpc/latest/userguide/get-started-IPv6.html) documentation. 
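Once a cluster is provisioned with the dual-stack settings shown in the example configuration below, workloads opt in to both address families through the standard Kubernetes Service fields `ipFamilyPolicy` and `ipFamilies`. A minimal sketch, where the Service name and selector are placeholders:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-dual-stack-service
spec:
  ipFamilyPolicy: PreferDualStack   # request both IPv4 and IPv6 cluster IPs
  ipFamilies:
    - IPv4
    - IPv6
  selector:
    app: my-app
  ports:
    - port: 80
```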
- -### Example RKE Configuration - -The following is an example RKE configuration that can be used to deploy RKE with dual-stack support configured: - - -``` -kubernetes_version: "v1.21.1-rancher2-1" -services: - kube-api: - service_cluster_ip_range: 10.43.0.0/16,fd98::/108 - kube-controller: - service_cluster_ip_range: 10.43.0.0/16,fd98::/108 - cluster_cidr: 10.42.0.0/16,fd01::/64 - -network: - plugin: calico -``` diff --git a/content/rke/latest/en/config-options/nodes/_index.md b/content/rke/latest/en/config-options/nodes/_index.md deleted file mode 100644 index fad9ee1409..0000000000 --- a/content/rke/latest/en/config-options/nodes/_index.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -title: Nodes -weight: 210 ---- - -The `nodes` directive is the only required section in the `cluster.yml` file. It's used by RKE to specify cluster node(s), ssh credentials used to access the node(s) and which roles these nodes will be in the Kubernetes cluster. - -This section covers the following topics: - -- [Node configuration example](#node-configuration-example) -- [Kubernetes roles](#kubernetes-roles) - - [etcd](#etcd) - - [Controlplane](#controlplane) - - [Worker](#worker) -- [Node options](#node-options) - - [Address](#address) - - [Internal address](#internal-address) - - [Overriding the hostname](#overriding-the-hostname) - - [SSH port](#ssh-port) - - [SSH users](#ssh-users) - - [SSH key path](#ssh-key-path) - - [SSH key](#ssh-key) - - [SSH certificate path](#ssh-certificate-path) - - [SSH certificate](#ssh-certificate) - - [Docker socket](#docker-socket) - - [Labels](#labels) - - [Taints](#taints) - -# Node Configuration Example - -The following example shows node configuration in an example `cluster.yml`: - -```yaml -nodes: - - address: 1.1.1.1 - user: ubuntu - role: - - controlplane - - etcd - ssh_key_path: /home/user/.ssh/id_rsa - port: 2222 - - address: 2.2.2.2 - user: ubuntu - role: - - worker - ssh_key: |- - -----BEGIN RSA PRIVATE KEY----- - - -----END RSA PRIVATE KEY----- - - address: 3.3.3.3 - user: ubuntu - role: - - worker - ssh_key_path: /home/user/.ssh/id_rsa - ssh_cert_path: /home/user/.ssh/id_rsa-cert.pub - - address: 4.4.4.4 - user: ubuntu - role: - - worker - ssh_key_path: /home/user/.ssh/id_rsa - ssh_cert: |- - ssh-rsa-cert-v01@openssh.com AAAAHHNza... - taints: # Available as of v0.3.0 - - key: test-key - value: test-value - effect: NoSchedule - - address: example.com - user: ubuntu - role: - - worker - hostname_override: node3 - internal_address: 192.168.1.6 - labels: - app: ingress -``` - -# Kubernetes Roles - -You can specify the list of roles that you want the node to be as part of the Kubernetes cluster. Three roles are supported: `controlplane`, `etcd` and `worker`. Node roles are not mutually exclusive. It's possible to assign any combination of roles to any node. It's also possible to change a node's role using the upgrade process. - -> **Note:** Before v0.1.8, workloads/pods might have run on any nodes with `worker` or `controlplane` roles, but as of v0.1.8, they will only be deployed to any `worker` nodes. - -### etcd - -With this role, the `etcd` container will be run on these nodes. Etcd keeps the state of your cluster and is the most important component in your cluster, single source of truth of your cluster. Although you can run etcd on just one node, it typically takes 3, 5 or more nodes to create an HA configuration. Etcd is a distributed reliable key-value store which stores all Kubernetes state. 
[Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **etcd** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/etcd` | `true` | `NoExecute` - -### Controlplane - -With this role, the stateless components that are used to deploy Kubernetes will run on these nodes. These components are used to run the API server, scheduler, and controllers. [Taint set on nodes](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) with the **controlplane** role is shown below: - -Taint Key | Taint Value | Taint Effect ----------------------------------------|--------------|-------------- -`node-role.kubernetes.io/controlplane` | `true` | `NoSchedule` - -### Worker - -With this role, any workloads or pods that are deployed will land on these nodes. - -# Node Options - -Within each node, there are multiple directives that can be used. - -### Address - -The `address` directive will be used to set the hostname or IP address of the node. RKE must be able to connect to this address. - -### Internal Address - -The `internal_address` provides the ability to have nodes with multiple addresses set a specific address to use for inter-host communication on a private network. If the `internal_address` is not set, the `address` is used for inter-host communication. The `internal_address` directive will set the address used for inter-host communication of the Kubernetes components, e.g. kube-apiserver and etcd. To change the interface used for the vxlan traffic of the Canal or Flannel network plug-ins please refer to the [Network Plug-ins Documentation]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). - -### Overriding the Hostname - -The `hostname_override` is used to be able to provide a friendly name for RKE to use when registering the node in Kubernetes. This hostname doesn't need to be a routable address, but it must be a valid [Kubernetes resource name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). If the `hostname_override` isn't set, then the `address` directive is used when registering the node in Kubernetes. - -> **Note:** When [cloud providers]({{}}/rke/latest/en/config-options/cloud-providers/) are configured, you may need to override the hostname in order to use the cloud provider correctly. There is an exception for the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws), where the `hostname_override` field will be explicitly ignored. - -### SSH Port - -In each node, you specify which `port` to be used when connecting to this node. The default port is `22`. - -### SSH Users - -For each node, you specify the `user` to be used when connecting to this node. This user must be a member of the Docker group or allowed to write to the node's Docker socket. - -### SSH Key Path - -For each node, you specify the path, i.e. `ssh_key_path`, for the SSH private key to be used when connecting to this node. The default key path for each node is `~/.ssh/id_rsa`. - -> **Note:** If you have a private key that can be used across all nodes, you can set the [SSH key path at the cluster level]({{}}/rke/latest/en/config-options/#cluster-level-ssh-key-path). 
The SSH key path set in each node will always take precedence. - -### SSH Key - -Instead of setting the path to the SSH key, you can alternatively specify the actual key, i.e. `ssh_key`, to be used to connect to the node. - -### SSH Certificate Path - -For each node, you can specify the path, i.e. `ssh_cert_path`, for the signed SSH certificate to be used when connecting to this node. - -### SSH Certificate - -Instead of setting the path to the signed SSH certificate, you can alternatively specify the actual certificate, i.e. `ssh_cert`, to be used to connect to the node. - -### Docker Socket - -If the Docker socket is different than the default, you can set the `docker_socket`. The default is `/var/run/docker.sock` - -### Labels - -You have the ability to add an arbitrary map of labels for each node. It can be used when using the [ingress controller's]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) `node_selector` option. - -### Taints - -_Available as of v0.3.0_ - -You have the ability to add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) for each node. diff --git a/content/rke/latest/en/config-options/private-registries/_index.md b/content/rke/latest/en/config-options/private-registries/_index.md deleted file mode 100644 index 89453fe951..0000000000 --- a/content/rke/latest/en/config-options/private-registries/_index.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: Private Registries -weight: 215 ---- - -RKE supports the ability to configure multiple private Docker registries in the `cluster.yml`. By passing in your registry and credentials, it allows the nodes to pull images from these private registries. - -```yaml -private_registries: - - url: registry.com - user: Username - password: password - - url: myregistry.com - user: myuser - password: mypassword -``` - -If you are using a Docker Hub registry, you can omit the `url` or set it to `docker.io`. - -> **Note:** Although the directive is named `url`, there is no need to prefix the host or IP address with `https://`. - -Valid `url` examples include: - -```yaml -url: registry.com -url: registry.com:5555 -url: 1.1.1.1 -url: 1.1.1.1:5555/artifactory -``` - -### Default Registry - -As of v0.1.10, RKE supports specifying a default registry from the list of private registries to be used with all [system images]({{}}/rke/latest/en/config-options/system-images/) . In this example .RKE will use `registry.com` as the default registry for all system images, e.g. `rancher/rke-tools:v0.1.14` will become `registry.com/rancher/rke-tools:v0.1.14`. - -```yaml -private_registries: - - url: registry.com - user: Username - password: password - is_default: true # All system images will be pulled using this registry. -``` - -### Air-gapped Setups - -By default, all system images are being pulled from DockerHub. If you are on a system that does not have access to DockerHub, you will need to create a private registry that is populated with all the required [system images]({{}}/rke/latest/en/config-options/system-images/). - -As of v0.1.10, you have to configure your private registry credentials, but you can specify this registry as a default registry so that all [system images]({{}}/rke/latest/en/config-options/system-images/) are pulled from the designated private registry. You can use the command `rke config --system-images` to get the list of default system images to populate your private registry. 
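Putting those pieces together, a minimal sketch of the registry section of an air-gapped `cluster.yml`; the registry URL and credentials are placeholders:

```yaml
private_registries:
  - url: registry.internal.example.com:5000
    user: Username
    password: password
    is_default: true   # all system images are pulled through this registry
```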
- -Before v0.1.10, you had to configure your private registry credentials **and** update the names of all the [system images]({{}}/rke/latest/en/config-options/system-images/) in the `cluster.yml` so that the image names would have the private registry URL appended before each image name. - - -### Amazon Elastic Container Registry (ECR) Private Registry Setup - -[Amazon ECR](https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.html) is an AWS managed container image registry service that is secure, scalable, and reliable. There are two ways in which to provide ECR credentials to set up your ECR private registry: using an instance profile or adding a configuration snippet, which are hard-coded credentials in environment variables for the `kubelet` and credentials under the `ecrCredentialPlugin`. - - - **Instance Profile**: An instance profile is the preferred and more secure approach to provide ECR credentials (when running in EC2, etc.). The instance profile will be autodetected and used by default. For more information on configuring an instance profile with ECR permissions, go [here](https://docs.aws.amazon.com/AmazonECR/latest/userguide/security-iam.html). - - - **Configuration Snippet**: You will use the configuration snippet below rather than an instance profile only if the following conditions exist in your node: - - - Node is not an EC2 instance - - Node is an EC2 instance but does not have an instance profile configured - - Node is an EC2 instance and has an instance profile configured but has no permissions for ECR - -> **Note:** The ECR credentials are only used in the `kubelet` and `ecrCredentialPlugin` areas. This is important to remember if you have issues while creating a new cluster or when pulling images during reconcile/upgrades. -> -> - Kubelet: For add-ons, custom workloads, etc., the instance profile or credentials are used by the -> downstream cluster nodes -> - Pulling system images (directly via Docker): For bootstrap, upgrades, reconcile, etc., the instance profile -> or credentials are used by nodes running RKE or running the Rancher pods. - -``` - # Configuration snippet to be used when the instance profile is unavailable. - services: - kubelet: - extra_env: - - "AWS_ACCESS_KEY_ID=ACCESSKEY" - - "AWS_SECRET_ACCESS_KEY=SECRETKEY" - private_registries: - - url: ACCOUNTID.dkr.ecr.REGION.amazonaws.com - is_default: true - ecrCredentialPlugin: - aws_access_key_id: "ACCESSKEY" - aws_secret_access_key: "SECRETKEY" -``` - \ No newline at end of file diff --git a/content/rke/latest/en/config-options/rate-limiting/_index.md b/content/rke/latest/en/config-options/rate-limiting/_index.md deleted file mode 100644 index 2e942415a7..0000000000 --- a/content/rke/latest/en/config-options/rate-limiting/_index.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: Rate Limiting -weight: 241 ---- - -Using the `EventRateLimit` admission control enforces a limit on the number of events that the API Server will accept in a given time period. In a large multi-tenant cluster, there might be a small percentage of tenants that flood the server with event requests, which could have a significant impact on the performance of the cluster overall. Therefore, it is recommended to limit the rate of events that the API server will accept. - -You might want to configure event rate limit as part of compliance with the CIS (Center for Internet Security) Kubernetes Benchmark. 
Event rate limiting corresponds to the CIS Kubernetes Benchmark 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored). - -Rate limits can be configured for the server, a namespace, a user, or a combination of a source and an object. - -For configuration details, refer to the [official Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#eventratelimit) - -### Example Configurations - -The following configuration in the `cluster.yml` can be used to enable the event rate limit by default: - -```yaml -services: - kube-api: - event_rate_limit: - enabled: true -``` - -When the event rate limit is enabled, you should be able to see the default values at `/etc/kubernetes/admission.yaml`: - -```yaml -... -plugins: -- configuration: - apiVersion: eventratelimit.admission.k8s.io/v1alpha1 - kind: Configuration - limits: - - burst: 20000 - qps: 5000 - type: Server -... -``` - -To customize the event rate limit, the entire Kubernetes resource for the configuration must be provided in the `configuration` directive: - -```yaml -services: - kube-api: - event_rate_limit: - enabled: true - configuration: - apiVersion: eventratelimit.admission.k8s.io/v1alpha1 - kind: Configuration - limits: - - type: Server - qps: 6000 - burst: 30000 -``` \ No newline at end of file diff --git a/content/rke/latest/en/config-options/secrets-encryption/_index.md b/content/rke/latest/en/config-options/secrets-encryption/_index.md deleted file mode 100644 index f2539991d7..0000000000 --- a/content/rke/latest/en/config-options/secrets-encryption/_index.md +++ /dev/null @@ -1,205 +0,0 @@ ---- -title: Encrypting Secret Data at Rest -weight: 230 ---- - -As of version `v0.3.1` RKE adds the support for managing secret data encryption at rest, which is [supported by Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#before-you-begin) since version `v1.13`. - -At-rest data encryption is required for: - -- Compliance requirements -- Additional layer of security -- Reduce security impact of etcd node compromise -- Reduce security impact of etcd backups compromise -- Ability to use external Key Management Systems - -RKE provides users with two paths of configuration to enable at-rest data encryption: - -- Managed at-rest data encryption -- Custom configuration for at-rest data encryption - -Both configuration options can be added during initial cluster provisioning or by updating an existing cluster. - -To utilize this feature, a new field `secrets_encryption_config` is added to the [Kubernetes API service configuration]({{}}//rke/latest/en/config-options/services/#kubernetes-api-server). A full custom configuration looks like this: - -```yaml -services: - kube-api: - secrets_encryption_config: - enabled: true - custom_config: - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: k-fw5hn - secret: RTczRjFDODMwQzAyMDVBREU4NDJBMUZFNDhCNzM5N0I= - - identity: {} - -``` -# Managed At-Rest Data Encryption - -Enabling and disabling at-rest data encryption in Kubernetes is a relatively complex process that requires several steps to be performed by the Kubernetes cluster administrator. The managed configuration aims to reduce this overhead and provides a simple abstraction layer to manage the process. 
- -### Enable Encryption -Managed at-rest data encryption is disabled by default and can be enabled by using the following configuration: - -```yaml -services: - kube-api: - secrets_encryption_config: - enabled: true -``` -Once enabled, RKE will perform the following [actions](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#encrypting-your-data) to enable at-rest data encryption: - -- Generate a new random 32-byte encryption key -- Generate an encryption provider configuration file using the new key The default [provider](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#providers) used is `aescbc` -- Deploy the provider configuration file to all nodes with `controlplane` role -- Update the `kube-apiserver` container arguments to point to the provider configuration file. -- Restart the `kube-apiserver` container. - -After the `kube-api server` is restarted, data encryption is enabled. However, all existing secrets are still stored in plain text. RKE will [rewrite](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted) all secrets to ensure encryption is fully in effect. - -### Disable Encryption -To disable encryption, you can either set the `enabled` flag to `false`, or simply remove the `secrets_encryption_config` block entirely from cluster.yml. - -```yaml -services: - kube-api: - secrets_encryption_config: - enabled: false -``` - -Once encryption is disabled in `cluster.yml`, RKE will perform the following [actions](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#encrypting-your-data) to disable encryption in your cluster: - -- Generate a new provider configuration file with the no-encryption `identity{}` provider as the first provider, and the previous `aescbc` set in the second place. This will allow Kubernetes to use the first entry to write the secrets, and the second one to decrypt them. -- Deploy the new provider configuration and restart `kube-apiserver`. -- Rewrite all secrets. This is required because, at this point, new data will be written to disk in plain text, but the existing data is still encrypted using the old provider. By rewriting all secrets, RKE ensures that all stored data is decrypted. -- Update `kube-apiserver` arguments to remove the encryption provider configuration and restart the `kube-apiserver`. -- Remove the provider configuration file. - - -# Key Rotation -Sometimes there is a need to rotate encryption config in your cluster. For example, the key is compromised. There are two ways to rotate the keys: with an RKE CLI command, or by disabling and re-enabling encryption in `cluster.yml`. - -### Rotating Keys with the RKE CLI - -With managed configuration, RKE CLI has the ability to perform the key rotation process documented [here](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#rotating-a-decryption-key) with one command. To perform this operation, the following subcommand is used: -```bash -$ ./rke encrypt rotate-key --help -NAME: - rke encrypt rotate-key - Rotate cluster encryption provider key - -USAGE: - rke encrypt rotate-key [command options] [arguments...] 
- -OPTIONS: - --config value Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] - --ssh-agent-auth Use SSH Agent Auth defined by SSH_AUTH_SOCK - --ignore-docker-version Disable Docker version check - -``` -This command will perform the following actions: - -- Generate a new random 32-byte encryption key -- Generate a new provider configuration with the new key as the first provider and the old key as the second provider. When the secrets are rewritten, the first key will be used to encrypt the data on the write operation, while the second key (the old key) will be used to decrypt the stored data during the the read operation -- Deploy the new provider configuration to all `controlplane` nodes and restart the `kube-apiserver` -- Rewrite all secrets. This process will re-encrypt all the secrets with the new key. -- Update the configuration to remove the old key and restart the `kube-apiserver` - -### Rotating Keys by Disabling and Re-enabling Encryption in cluster.yml - -For a cluster with encryption enabled, you can rotate the encryption keys by updating `cluster.yml`. If you disable and re-enable the data encryption in the `cluster.yml`, RKE will not reuse old keys. Instead, it will generate new keys every time, yielding the same result as a key rotation with the RKE CLI. - -# Custom At-Rest Data Encryption Configuration -With managed configuration, RKE provides the user with a very simple way to enable and disable encryption with minimal interaction and configuration. However, it doesn't allow for any customization to the configuration. - -With custom encryption configuration, RKE allows the user to provide their own configuration. Although RKE will help the user to deploy the configuration and rewrite the secrets if needed, it doesn't provide a configuration validation on user's behalf. It's the user responsibility to make sure their configuration is valid. - ->**Warning:** Using invalid Encryption Provider Configuration could cause several issues with your cluster, ranging from crashing the Kubernetes API service, `kube-api`, to completely losing access to encrypted data. - -### Example: Using Custom Encryption Configuration with User Provided 32-byte Random Key - -The following describes the steps required to configure custom encryption with a user provided 32-byte random key. - -Step 1: Generate a 32-byte random key and base64 encode it. If you're on Linux or macOS, run the following command: - -``` -head -c 32 /dev/urandom | base64 -``` - -Place that value in the secret field. - -```yaml -kube-api: - secrets_encryption_config: - enabled: true - custom_config: - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - Providers: - - AESCBC: - Keys: - - Name: key1 - Secret: - resources: - - secrets - - identity: {} -``` - - -### Example: Using Custom Encryption Configuration with Amazon KMS - -An example for custom configuration would be enabling an external key management system like [Amazon KMS](https://aws.amazon.com/kms/). 
-
-```yaml
-services:
-  kube-api:
-    extra_binds:
-      - "/var/run/kmsplugin/:/var/run/kmsplugin/"
-    secrets_encryption_config:
-      enabled: true
-      custom_config:
-        apiVersion: apiserver.config.k8s.io/v1
-        kind: EncryptionConfiguration
-        resources:
-          - resources:
-              - secrets
-            providers:
-              - kms:
-                  name: aws-encryption-provider
-                  endpoint: unix:///var/run/kmsplugin/socket.sock
-                  cachesize: 1000
-                  timeout: 3s
-              - identity: {}
-```
-
-Documentation for AWS KMS can be found [here](https://github.com/kubernetes-sigs/aws-encryption-provider). When a custom configuration is set to enable the AWS KMS provider, you should consider the following points:
-
-- Since RKE runs the `kube-api` service in a container, you must use the `extra_binds` feature to bind-mount the KMS provider socket location into the `kube-api` container.
-- The AWS KMS provider runs as a pod in the cluster. Therefore, the proper way to enable it is to:
-  1. Deploy your cluster with at-rest encryption disabled.
-  2. Deploy the KMS pod and make sure it's working correctly.
-  3. Update your cluster with the custom encryption configuration to utilize the KMS provider.
-- The Kubernetes API server connects to the KMS provider using a Unix socket. You should configure your KMS deployment to run pods on all `controlplane` nodes in the cluster.
-- Your `controlplane` nodes should be configured with an IAM instance profile that has access to the KMS key used in your configuration.
-
-### How to Prevent Restore Failures after Rotating Keys
-It's important to understand that enabling encryption for your cluster means that you can no longer access encrypted data in your etcd database or etcd database backups without your encryption keys.
-
-The encryption configuration is stored in the cluster state file `cluster.rkestate`, which is decoupled from the etcd backups. In any of the following cases, the restore process will fail:
-
-- The snapshot is taken while encryption is enabled and restored after it has been disabled. In this case, the encryption keys are no longer stored in the cluster state.
-- The snapshot is taken before the keys are rotated and the restore is attempted afterwards. In this case, the old keys used for encryption at the time of the snapshot no longer exist in the cluster state file.
-
-Therefore, we recommend that whenever you enable or disable encryption, or rotate keys, you [create a snapshot]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots/), so that you always have a backup that matches the keys you currently hold.
-
-This also means you should not rotate the keys during the restore process, because you would lose the encryption keys in `cluster.rkestate`.
-
-The same applies to the custom configuration use case; however, in that case the behavior depends on the user-provided encryption configuration.
diff --git a/content/rke/latest/en/config-options/services/_index.md b/content/rke/latest/en/config-options/services/_index.md
deleted file mode 100644
index 77a3a96919..0000000000
--- a/content/rke/latest/en/config-options/services/_index.md
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: Default Kubernetes Services
-description: To deploy Kubernetes, RKE deploys several default Kubernetes services. Read about etcd, kube-api server, kubelet, kube-proxy and more
-weight: 230
----
-
-To deploy Kubernetes, RKE deploys several core components or services in Docker containers on the nodes.
Based on the roles of the node, the containers deployed may be different. - ->**Note:** All services support additional custom arguments, Docker mount binds, and extra environment variables. -> ->To configure advanced options for Kubernetes services such as `kubelet`, `kube-controller`, and `kube-apiserver` that are not documented below, see the [`extra_args` documentation]({{}}/rke/latest/en/config-options/services/services-extras/) for more details. - -| Component | Services key name in cluster.yml | -|-------------------------|----------------------------------| -| etcd | `etcd` | -| kube-apiserver | `kube-api` | -| kube-controller-manager | `kube-controller` | -| kubelet | `kubelet` | -| kube-scheduler | `scheduler` | -| kube-proxy | `kubeproxy` | - -## etcd - -Kubernetes uses [etcd](https://etcd.io/) as a store for cluster state and data. Etcd is a reliable, consistent and distributed key-value store. - -RKE supports running etcd in a single node mode or in HA cluster mode. It also supports adding and removing etcd nodes to the cluster. - -You can enable etcd to [take recurring snapshots]({{}}/rke/latest/en/etcd-snapshots/#recurring-snapshots). These snapshots can be used to [restore etcd]({{}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). - -By default, RKE will deploy a new etcd service, but you can also run Kubernetes with an [external etcd service]({{}}/rke/latest/en/config-options/services/external-etcd/). - -## Kubernetes API Server - -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api`. This only applies to Rancher v2.0.5 and v2.0.6. - -The [Kubernetes API](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) REST service, which handles requests and data for all Kubernetes objects and provide shared state for all the other Kubernetes components. - -```yaml -services: - kube-api: - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-controller - service_cluster_ip_range: 10.43.0.0/16 - # Expose a different port range for NodePort services - service_node_port_range: 30000-32767 - pod_security_policy: false - # Enable AlwaysPullImages Admission controller plugin - # Available as of v0.2.0 - always_pull_images: false - secrets_encryption_config: - enabled: true -``` - -### Kubernetes API Server Options - -RKE supports the following options for the `kube-api` service : - -- **Service Cluster IP Range** (`service_cluster_ip_range`) - This is the virtual IP address that will be assigned to services created on Kubernetes. By default, the service cluster IP range is `10.43.0.0/16`. If you change this value, then it must also be set with the same value on the Kubernetes Controller Manager (`kube-controller`). -- **Node Port Range** (`service_node_port_range`) - The port range to be used for Kubernetes services created with the [type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) `NodePort`. By default, the port range is `30000-32767`. 
-- **Pod Security Policy** (`pod_security_policy`) - An option to enable the [Kubernetes Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). By default, we do not enable pod security policies as it is set to `false`. - > **Note:** If you set `pod_security_policy` value to `true`, RKE will configure an open policy to allow any pods to work on the cluster. You will need to configure your own policies to fully utilize PSP. -- **Always Pull Images** (`always_pull_images`) - Enable `AlwaysPullImages` Admission controller plugin. Enabling `AlwaysPullImages` is a security best practice. It forces Kubernetes to validate the image and pull credentials with the remote image registry. Local image layer cache will still be used, but it does add a small bit of overhead when launching containers to pull and compare image hashes. _Note: Available as of v0.2.0_ -- **Secrets Encryption Config** (`secrets_encryption_config`) - Manage Kubernetes at-rest data encryption. Documented [here]({{}}//rke/latest/en/config-options/secrets-encryption) -## Kubernetes Controller Manager - -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. - -The [Kubernetes Controller Manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) service is the component responsible for running Kubernetes main control loops. The controller manager monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. - -```yaml -services: - kube-controller: - # CIDR pool used to assign IP addresses to pods in the cluster - cluster_cidr: 10.42.0.0/16 - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-api - service_cluster_ip_range: 10.43.0.0/16 -``` - -### Kubernetes Controller Manager Options - -RKE supports the following options for the `kube-controller` service: - -- **Cluster CIDR** (`cluster_cidr`) - The CIDR pool used to assign IP addresses to pods in the cluster. By default, each node in the cluster is assigned a `/24` network from this pool for pod IP assignments. The default value for this option is `10.42.0.0/16`. -- **Service Cluster IP Range** (`service_cluster_ip_range`) - This is the virtual IP address that will be assigned to services created on Kubernetes. By default, the service cluster IP range is `10.43.0.0/16`. If you change this value, then it must also be set with the same value on the Kubernetes API server (`kube-api`). - -## Kubelet - -The [kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) services acts as a "node agent" for Kubernetes. It runs on all nodes deployed by RKE, and gives Kubernetes the ability to manage the container runtime on the node. 
-
-```yaml
-services:
-  kubelet:
-    # Base domain for the cluster
-    cluster_domain: cluster.local
-    # IP address for the DNS service endpoint
-    cluster_dns_server: 10.43.0.10
-    # Fail if swap is on
-    fail_swap_on: false
-    # Generate per node serving certificate
-    generate_serving_certificate: false
-```
-
-### Kubelet Options
-
-RKE supports the following options for the `kubelet` service:
-
-- **Cluster Domain** (`cluster_domain`) - The [base domain](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) for the cluster, used by all services and DNS records created on the cluster. By default, the domain is set to `cluster.local`.
-- **Cluster DNS Server** (`cluster_dns_server`) - The IP address assigned to the DNS service endpoint within the cluster. DNS queries will be sent to this IP address, which is used by KubeDNS. The default value for this option is `10.43.0.10`.
-- **Fail if Swap is On** (`fail_swap_on`) - In Kubernetes, the default behavior for the kubelet is to **fail** if swap is enabled on the node. RKE does **not** follow this default and allows deployments on nodes with swap enabled. By default, the value is `false`. If you'd like to revert to the default kubelet behavior, set this option to `true`.
-- **Generate Serving Certificate** (`generate_serving_certificate`) - Generate a certificate signed by the `kube-ca` Certificate Authority for the kubelet to use as a server certificate. The default value for this option is `false`. Before enabling this option, please read [the requirements](#kubelet-serving-certificate-requirements).
-
-### Kubelet Serving Certificate Requirements
-
-If `hostname_override` is configured for one or more nodes in `cluster.yml`, please make sure the correct IP address is configured in `address` (and the internal address in `internal_address`) so that the generated certificate contains the correct IP address(es).
-
-For example, on an EC2 instance where the public IP address is configured in `address` and `hostname_override` is used, the connection between `kube-apiserver` and `kubelet` will fail because the `kubelet` is contacted on the private IP address and the generated certificate will not be valid (the error `x509: certificate is valid for value_in_address, not private_ip` will be seen). The resolution is to provide the internal IP address in `internal_address`.
-
-For more information on host overrides, refer to the [node configuration page]({{}}/rke/latest/en/config-options/nodes/#overriding-the-hostname).
-
-## Kubernetes Scheduler
-
-The [Kubernetes Scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is responsible for scheduling cluster workloads based on various configurations, metrics, resource requirements and workload-specific requirements.
-
-Currently, RKE doesn't support any specific options for the `scheduler` service.
-
-## Kubernetes Network Proxy
-The [Kubernetes network proxy](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/) service runs on all nodes and manages endpoints created by Kubernetes for TCP/UDP ports.
-
-Currently, RKE doesn't support any specific options for the `kubeproxy` service.
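-
-Even though RKE exposes no dedicated options for the `scheduler` and `kubeproxy` services, they still accept the generic `extra_args`, `extra_binds`, and `extra_env` settings mentioned in the note at the top of this page. A minimal sketch (the `v: 4` debug verbosity argument is only an illustration):
-
-```yaml
-services:
-  scheduler:
-    extra_args:
-      # Raise the scheduler log verbosity to debug level
-      v: 4
-  kubeproxy:
-    extra_args:
-      # Raise the kube-proxy log verbosity to debug level
-      v: 4
-```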
diff --git a/content/rke/latest/en/config-options/services/external-etcd/_index.md b/content/rke/latest/en/config-options/services/external-etcd/_index.md deleted file mode 100644 index 8ee04bb779..0000000000 --- a/content/rke/latest/en/config-options/services/external-etcd/_index.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: External etcd -weight: 232 ---- - -By default, RKE will launch etcd servers, but RKE also supports being able to use an external etcd. RKE only supports connecting to a TLS enabled etcd setup. - -> **Note:** RKE will not accept having external etcd servers in conjunction with [nodes]({{}}/rke/latest/en/config-options/nodes/) with the `etcd` role. - -```yaml -services: - etcd: - path: /etcdcluster - external_urls: - - https://etcd-example.com:2379 - ca_cert: |- - -----BEGIN CERTIFICATE----- - xxxxxxxxxx - -----END CERTIFICATE----- - cert: |- - -----BEGIN CERTIFICATE----- - xxxxxxxxxx - -----END CERTIFICATE----- - key: |- - -----BEGIN PRIVATE KEY----- - xxxxxxxxxx - -----END PRIVATE KEY----- -``` - -## External etcd Options - -### Path - -The `path` defines the location of where the etcd cluster is on the endpoints. - -### External URLs - -The `external_urls` are the endpoints of where the etcd cluster is hosted. There can be multiple endpoints for the etcd cluster. - -### CA Cert/Cert/KEY - -The certificates and private keys used to authenticate and access the etcd service. diff --git a/content/rke/latest/en/config-options/services/services-extras/_index.md b/content/rke/latest/en/config-options/services/services-extras/_index.md deleted file mode 100644 index 9a6d579854..0000000000 --- a/content/rke/latest/en/config-options/services/services-extras/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Extra Args, Extra Binds, and Extra Environment Variables -weight: 231 ---- - -RKE supports additional service arguments, volume binds and environment variables. - -### Extra Args - -For any of the Kubernetes services, you can update the `extra_args` to change the existing defaults. - -As of `v0.1.3`, using `extra_args` will add new arguments and **override** any existing defaults. For example, if you need to modify the default admission plugins list, you need to include the default list and edit it with your changes so all changes are included. - -Before `v0.1.3`, using `extra_args` would only add new arguments to the list and there was no ability to change the default list. - -All service defaults and parameters are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version): - -- For RKE v0.3.0+, the service defaults and parameters are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version). The service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). The default list of admissions plugins is the same for all Kubernetes versions and is located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go#L11). - -- For RKE before v0.3.0, the service defaults and admission plugins are defined per [`kubernetes_version`]({{}}/rke/latest/en/config-options/#kubernetes-version) and located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). 
- -```yaml -services: - kube-controller: - extra_args: - cluster-name: "mycluster" -``` - -### Extra Binds - -Additional volume binds can be added to services using the `extra_binds` arguments. - -```yaml -services: - kubelet: - extra_binds: - - "/dev:/host/dev" - - "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:z" -``` - -### Extra Environment Variables - -Additional environment variables can be added to services by using the `extra_env` arguments. - -```yaml -services: - kubelet: - extra_env: - - "HTTP_PROXY=http://your_proxy" -``` diff --git a/content/rke/latest/en/config-options/system-images/_index.md b/content/rke/latest/en/config-options/system-images/_index.md deleted file mode 100644 index 148168a582..0000000000 --- a/content/rke/latest/en/config-options/system-images/_index.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: System Images -weight: 225 ---- -When RKE is deploying Kubernetes, there are several images that are pulled. These images are used as Kubernetes system components as well as helping to deploy these system components. - -As of `v0.1.6`, the functionality of a couple of the system images were consolidated into a single `rancher/rke-tools` image to simplify and speed the deployment process. - -You can configure the [network plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/), [ingress controller]({{}}/rke/latest/en/config-options/add-ons/ingress-controllers/) and [dns provider]({{}}/rke/latest/en/config-options/add-ons/dns/) as well as the options for these add-ons separately in the `cluster.yml`. - -Below is an example of the list of system images used to deploy Kubernetes through RKE. The default versions of Kubernetes are tied to specific versions of system images. - -- For RKE v0.2.x and below, the map of versions and the system image versions is located here: https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go - -- For RKE v0.3.0 and above, the map of versions and the system image versions is located here: https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go - -> **Note:** As versions of RKE are released, the tags on these images will no longer be up to date. This list is specific for `v1.10.3-rancher2`. 
- -```yaml -system_images: - etcd: rancher/coreos-etcd:v3.2.24 - alpine: rancher/rke-tools:v0.1.24 - nginx_proxy: rancher/rke-tools:v0.1.24 - cert_downloader: rancher/rke-tools:v0.1.24 - kubernetes: rancher/hyperkube:v1.13.1-rancher1 - kubernetes_services_sidecar: rancher/rke-tools:v0.1.24 - pod_infra_container: rancher/pause-amd64:3.1 - - # kube-dns images - kubedns: rancher/k8s-dns-kube-dns-amd64:1.15.0 - dnsmasq: rancher/k8s-dns-dnsmasq-nanny-amd64:1.15.0 - kubedns_sidecar: rancher/k8s-dns-sidecar-amd64:1.15.0 - kubedns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0 - - # CoreDNS images - coredns: coredns/coredns:1.2.6 - coredns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0 - - # Flannel images - flannel: rancher/coreos-flannel:v0.10.0 - flannel_cni: rancher/coreos-flannel-cni:v0.3.0 - - # Calico images - calico_node: rancher/calico-node:v3.4.0 - calico_cni: rancher/calico-cni:v3.4.0 - calico_controllers: "" - calico_ctl: rancher/calico-ctl:v2.0.0 - - # Canal images - canal_node: rancher/calico-node:v3.4.0 - canal_cni: rancher/calico-cni:v3.4.0 - canal_flannel: rancher/coreos-flannel:v0.10.0 - - # Weave images - weave_node: weaveworks/weave-kube:2.5.0 - weave_cni: weaveworks/weave-npc:2.5.0 - - # Ingress controller images - ingress: rancher/nginx-ingress-controller:0.21.0-rancher1 - ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4 - - # Metrics server image - metrics_server: rancher/metrics-server-amd64:v0.3.1 -``` - -Before `v0.1.6`, instead of using the `rancher/rke-tools` image, we used the following images: - -```yaml -system_images: - alpine: alpine:latest - nginx_proxy: rancher/rke-nginx-proxy:v0.1.1 - cert_downloader: rancher/rke-cert-deployer:v0.1.1 - kubernetes_services_sidecar: rancher/rke-service-sidekick:v0.1.0 -``` - -### Air-gapped Setups - -If you have an air-gapped setup and cannot access `docker.io`, you will need to set up your [private registry]({{}}/rke/latest/en/config-options/private-registries/) in your cluster configuration file. After you set up private registry, you will need to update these images to pull from your private registry. diff --git a/content/rke/latest/en/etcd-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/_index.md deleted file mode 100644 index 5af3516d95..0000000000 --- a/content/rke/latest/en/etcd-snapshots/_index.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Backups and Disaster Recovery -weight: 150 -aliases: - - /rke/latest/en/installation/etcd-snapshots/ ---- - -_Available as of v0.1.7_ - -RKE clusters can be configured to automatically take snapshots of etcd. In a disaster scenario, you can restore these snapshots, which are stored on other nodes in the cluster. Snapshots are always saved locally in `/opt/rke/etcd-snapshots`. - -_Available as of v0.2.0_ - -RKE can upload your snapshots to a S3 compatible backend. - -**Note:** As of RKE v0.2.0, the `pki.bundle.tar.gz` file is no longer required because of a change in how the [Kubernetes cluster state is stored]({{}}/rke/latest/en/installation/#kubernetes-cluster-state). - -# Backing Up a Cluster - -You can create [one-time snapshots]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots) to back up your cluster, and you can also configure [recurring snapshots]({{}}/rke/latest/en/etcd-snapshots/recurring-snapshots). - -# Restoring a Cluster from Backup - -You can use RKE to [restore your cluster from backup]({{}}/rke/latest/en/etcd-snapshots/restoring-from-backup). 
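-
-At a high level, both workflows come down to a pair of RKE commands that are covered in detail on the linked pages. A minimal sketch (the snapshot name `mysnapshot` is only an illustrative placeholder):
-
-```
-# Take a one-time snapshot, stored under /opt/rke/etcd-snapshots on each etcd node
-$ rke etcd snapshot-save --config cluster.yml --name mysnapshot
-
-# Later, roll the cluster back to that snapshot
-$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot
-```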
- -# Example Scenarios - -These [example scenarios]({{}}/rke/latest/en/etcd-snapshots/example-scenarios) for backup and restore are different based on your version of RKE. - -# How Snapshots Work - -For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. - -The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. - -In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. - -_Available as of v1.1.4_ - -Each snapshot will include the cluster state file in addition to the etcd snapshot file. - -### Snapshot Naming - -The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. - -An example one-time snapshot name is `rke_etcd_snapshot_2020-10-15T16:47:24+02:00`. An example recurring snapshot name is `2020-10-15T14:53:26Z_etcd`. - -### How Restoring from a Snapshot Works - -On restore, the following process is used: - -1. The snapshot is retrieved from S3, if S3 is configured. -1. The snapshot is unzipped (if zipped). -1. It is checked if the cluster state file is included in the snapshot, if it is included, it will be used instead of the local cluster state file (_Available as of v1.1.4_) -1. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. -1. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. -1. The cluster is restored and post-restore actions will be done in the cluster. - -## Troubleshooting - -If you have trouble restoring your cluster, you can refer to the [troubleshooting]({{}}/rke/latest/en/etcd-snapshots/troubleshooting) page. diff --git a/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md b/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md deleted file mode 100644 index 3cae808ab7..0000000000 --- a/content/rke/latest/en/etcd-snapshots/example-scenarios/_index.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: Example Scenarios -weight: 4 ---- - -These example scenarios for backup and restore are different based on your version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+" %}} - -This walkthrough will demonstrate how to restore an etcd cluster from a local snapshot with the following steps: - -1. [Back up the cluster](#1-back-up-the-cluster) -1. [Simulate a node failure](#2-simulate-a-node-failure) -1. [Add a new etcd node to the cluster](#3-add-a-new-etcd-node-to-the-kubernetes-cluster) -1. [Restore etcd on the new node from the backup](#4-restore-etcd-on-the-new-node-from-the-backup) -1. [Confirm that cluster operations are restored](#5-confirm-that-cluster-operations-are-restored) - -In this example, the Kubernetes cluster was deployed on two AWS nodes. - -| Name | IP | Role | -|:-----:|:--------:|:----------------------:| -| node1 | 10.0.0.1 | [controlplane, worker] | -| node2 | 10.0.0.2 | [etcd] | - - -### 1. Back Up the Cluster - -Take a local snapshot of the Kubernetes cluster. 
- -You can upload this snapshot directly to an S3 backend with the [S3 options]({{}}/rke/latest/en/etcd-snapshots/one-time-snapshots/#options-for-rke-etcd-snapshot-save). - -``` -$ rke etcd snapshot-save --name snapshot.db --config cluster.yml -``` - -### 2. Simulate a Node Failure - -To simulate the failure, let's power down `node2`. - -``` -root@node2:~# poweroff -``` - -| Name | IP | Role | -|:-----:|:--------:|:----------------------:| -| node1 | 10.0.0.1 | [controlplane, worker] | -| ~~node2~~ | ~~10.0.0.2~~ | ~~[etcd]~~ | - -### 3. Add a New etcd Node to the Kubernetes Cluster - -Before updating and restoring etcd, you will need to add the new node into the Kubernetes cluster with the `etcd` role. In the `cluster.yml`, comment out the old node and add in the new node. - -```yaml -nodes: - - address: 10.0.0.1 - hostname_override: node1 - user: ubuntu - role: - - controlplane - - worker -# - address: 10.0.0.2 -# hostname_override: node2 -# user: ubuntu -# role: -# - etcd - - address: 10.0.0.3 - hostname_override: node3 - user: ubuntu - role: - - etcd -``` - -### 4. Restore etcd on the New Node from the Backup - -> **Prerequisite:** If the snapshot was created using RKE v1.1.4 or higher, the cluster state file should be included in the snapshot. The cluster state file will be automatically extracted and used for the restore. If the snapshot was created using RKE v1.1.3 or lower, please ensure your `cluster.rkestate` is present before starting the restore, because this contains your certificate data for the cluster. - -After the new node is added to the `cluster.yml`, run the `rke etcd snapshot-restore` to launch `etcd` from the backup: - -``` -$ rke etcd snapshot-restore --name snapshot.db --config cluster.yml -``` - -The snapshot is expected to be saved at `/opt/rke/etcd-snapshots`. - -If you want to directly retrieve the snapshot from S3, add in the [S3 options](#options-for-rke-etcd-snapshot-restore). - -> **Note:** As of v0.2.0, the file `pki.bundle.tar.gz` is no longer required for the restore process because the certificates required to restore are preserved within the `cluster.rkestate`. - -### 5. Confirm that Cluster Operations are Restored - -The `rke etcd snapshot-restore` command triggers `rke up` using the new `cluster.yml`. Confirm that your Kubernetes cluster is functional by checking the pods on your cluster. - -``` -> kubectl get pods -NAME READY STATUS RESTARTS AGE -nginx-65899c769f-kcdpr 1/1 Running 0 17s -nginx-65899c769f-pc45c 1/1 Running 0 17s -nginx-65899c769f-qkhml 1/1 Running 0 17s -``` - -{{% /tab %}} -{{% tab "RKE before v0.2.0" %}} - -This walkthrough will demonstrate how to restore an etcd cluster from a local snapshot with the following steps: - -1. [Take a local snapshot of the cluster](#take-a-local-snapshot-of-the-cluster-rke-before-v0.2.0) -1. [Store the snapshot externally](#store-the-snapshot-externally-rke-before-v0.2.0) -1. [Simulate a node failure](#simulate-a-node-failure-rke-before-v0.2.0) -1. [Remove the Kubernetes cluster and clean the nodes](#remove-the-kubernetes-cluster-and-clean-the-nodes-rke-before-v0.2.0) -1. [Retrieve the backup and place it on a new node](#retrieve-the-backup-and-place-it-on-a-new-node-rke-before-v0.2.0) -1. [Add a new etcd node to the Kubernetes cluster](#add-a-new-etcd-node-to-the-kubernetes-cluster-rke-before-v0.2.0) -1. [Restore etcd on the new node from the backup](#restore-etcd-on-the-new-node-from-the-backup-rke-before-v0.2.0) -1. 
[Restore Operations on the Cluster](#restore-operations-on-the-cluster-rke-before-v0.2.0)
-
-### Example Scenario of restoring from a Local Snapshot
-
-In this example, the Kubernetes cluster was deployed on two AWS nodes.
-
-| Name  | IP       | Role                   |
-|:-----:|:--------:|:----------------------:|
-| node1 | 10.0.0.1 | [controlplane, worker] |
-| node2 | 10.0.0.2 | [etcd]                 |
-
-
-### 1. Take a Local Snapshot of the Cluster
-
-Back up the Kubernetes cluster by taking a local snapshot:
-
-```
-$ rke etcd snapshot-save --name snapshot.db --config cluster.yml
-```
-
-
-### 2. Store the Snapshot Externally
-
-After taking the etcd snapshot on `node2`, we recommend saving this backup in a persistent place. One option is to save the backup and the `pki.bundle.tar.gz` file in an S3 bucket or on tape backup.
-
-```
-# If you're using an AWS host and have the ability to connect to S3
-root@node2:~# s3cmd mb s3://rke-etcd-backup
-root@node2:~# s3cmd put \
-    /opt/rke/etcd-snapshots/snapshot.db \
-    /opt/rke/etcd-snapshots/pki.bundle.tar.gz \
-    s3://rke-etcd-backup/
-```
-
-
-### 3. Simulate a Node Failure
-
-To simulate the failure, let's power down `node2`.
-
-```
-root@node2:~# poweroff
-```
-
-| Name      | IP           | Role                   |
-|:---------:|:------------:|:----------------------:|
-| node1     | 10.0.0.1     | [controlplane, worker] |
-| ~~node2~~ | ~~10.0.0.2~~ | ~~[etcd]~~             |
-
-
-### 4. Remove the Kubernetes Cluster and Clean the Nodes
-
-The following command removes your cluster and cleans the nodes so that the cluster can be restored without any conflicts:
-
-```
-rke remove --config cluster.yml
-```
-
-
-### 5. Retrieve the Backup and Place it On a New Node
-
-Before restoring etcd and running `rke up`, we need to retrieve the backup saved on S3 to a new node, e.g. `node3`.
-
-```
-# Make a Directory
-root@node3:~# mkdir -p /opt/rke/etcd-snapshots
-
-# Get the Backup from S3
-root@node3:~# s3cmd get \
-    s3://rke-etcd-backup/snapshot.db \
-    /opt/rke/etcd-snapshots/snapshot.db
-
-# Get the pki bundle from S3
-root@node3:~# s3cmd get \
-    s3://rke-etcd-backup/pki.bundle.tar.gz \
-    /opt/rke/etcd-snapshots/pki.bundle.tar.gz
-```
-
-> **Note:** If you had multiple etcd nodes, you would have to manually sync the snapshot and `pki.bundle.tar.gz` across all of the etcd nodes in the cluster.
-
-
-### 6. Add a New etcd Node to the Kubernetes Cluster
-
-Before updating and restoring etcd, you will need to add the new node into the Kubernetes cluster with the `etcd` role. In the `cluster.yml`, comment out the old node and add in the new node.
-
-```yaml
-nodes:
-  - address: 10.0.0.1
-    hostname_override: node1
-    user: ubuntu
-    role:
-      - controlplane
-      - worker
-#  - address: 10.0.0.2
-#    hostname_override: node2
-#    user: ubuntu
-#    role:
-#      - etcd
-  - address: 10.0.0.3
-    hostname_override: node3
-    user: ubuntu
-    role:
-      - etcd
-```
-
-
-### 7. Restore etcd on the New Node from the Backup
-
-After the new node is added to the `cluster.yml`, run the `rke etcd snapshot-restore` command to launch `etcd` from the backup:
-
-```
-$ rke etcd snapshot-restore --name snapshot.db --config cluster.yml
-```
-
-The snapshot and `pki.bundle.tar.gz` file are expected to be saved at `/opt/rke/etcd-snapshots` on each etcd node.
-
-
-### 8. Restore Operations on the Cluster
-
-Finally, we need to restore the operations on the cluster. We will make the Kubernetes API point to the new `etcd` by running `rke up` again using the new `cluster.yml`.
- -``` -$ rke up --config cluster.yml -``` - -Confirm that your Kubernetes cluster is functional by checking the pods on your cluster. - -``` -> kubectl get pods -NAME READY STATUS RESTARTS AGE -nginx-65899c769f-kcdpr 1/1 Running 0 17s -nginx-65899c769f-pc45c 1/1 Running 0 17s -nginx-65899c769f-qkhml 1/1 Running 0 17s -``` - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md deleted file mode 100644 index ea37b69f44..0000000000 --- a/content/rke/latest/en/etcd-snapshots/one-time-snapshots/_index.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: One-time Snapshots -weight: 1 ---- - -One-time snapshots are handled differently depending on your version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+" %}} - -To save a snapshot of etcd from each etcd node in the cluster config file, run the `rke etcd snapshot-save` command. - -The snapshot is saved in `/opt/rke/etcd-snapshots`. - -When running the command, an additional container is created to take the snapshot. When the snapshot is completed, the container is automatically removed. - -The one-time snapshot can be uploaded to a S3 compatible backend by using the additional options to specify the S3 backend. - -To create a local one-time snapshot, run: - -``` -$ rke etcd snapshot-save --config cluster.yml --name snapshot-name -``` - -**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots`. - -To save a one-time snapshot to S3, run: - -``` -$ rke etcd snapshot-save \ ---config cluster.yml \ ---name snapshot-name \ ---s3 \ ---access-key S3_ACCESS_KEY \ ---secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name \ ---folder s3-folder-name \ # Optional - Available as of v0.3.0 ---s3-endpoint s3.amazonaws.com -``` - -**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots` as well as uploaded to the S3 backend. - -### Options for `rke etcd snapshot-save` - -| Option | Description | S3 Specific | -| --- | --- | --- | -| `--name` value | Specify snapshot name | | -| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | | -| `--s3` | Enabled backup to s3 | * | -| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | -| `--s3-endpoint-ca` value | Specify a path to a CA cert file to connect to a custom s3 endpoint (optional) _Available as of v0.2.5_ | * | -| `--access-key` value | Specify s3 accessKey | * | -| `--secret-key` value | Specify s3 secretKey | * | -| `--bucket-name` value | Specify s3 bucket name | * | -| `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. _Available as of v0.3.0_ | * | -| `--region` value | Specify the s3 bucket location (optional) | * | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | | -| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | - -The `--access-key` and `--secret-key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. - -##### Using a custom CA certificate for S3 - -_Available as of v0.2.0_ - -The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). 
If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the `--s3-endpoint-ca` to connect to the S3 backend. - -### IAM Support for Storing Snapshots in S3 - -In addition to API access keys, RKE supports using IAM roles for S3 authentication. The cluster etcd nodes must be assigned an IAM role that has read/write access to the designated backup bucket on S3. Also, the nodes must have network access to the S3 endpoint specified. - -Below is an [example IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) that would allow nodes to store and retrieve backups from S3: - -``` -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "ListObjectsInBucket", - "Effect": "Allow", - "Action": ["s3:ListBucket"], - "Resource": ["arn:aws:s3:::bucket-name"] - }, - { - "Sid": "AllObjectActions", - "Effect": "Allow", - "Action": "s3:*Object", - "Resource": ["arn:aws:s3:::bucket-name/*"] - } - ] -} -``` - -For details on giving an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -{{% /tab %}} -{{% tab "RKE before v0.2.0" %}} - -To save a snapshot of etcd from each etcd node in the cluster config file, run the `rke etcd snapshot-save` command. - -When running the command, an additional container is created to take the snapshot. When the snapshot is completed, the container is automatically removed. - -RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process. - -To create a local one-time snapshot, run: - -``` -$ rke etcd snapshot-save --config cluster.yml --name snapshot-name -``` - -**Result:** The snapshot is saved in `/opt/rke/etcd-snapshots`. - -### Options for `rke etcd snapshot-save` - -| Option | Description | -| --- | --- | -| `--name` value | Specify snapshot name | -| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | -| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | -| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md b/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md deleted file mode 100644 index f2df1fb870..0000000000 --- a/content/rke/latest/en/etcd-snapshots/recurring-snapshots/_index.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Recurring Snapshots -weight: 2 ---- - -Recurring snapshots are handled differently based on your version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+"%}} - -To schedule automatic recurring etcd snapshots, you can enable the `etcd-snapshot` service with [extra configuration options](#options-for-the-etcd-snapshot-service). `etcd-snapshot` runs in a service container alongside the `etcd` container. By default, the `etcd-snapshot` service takes a snapshot for every node that has the `etcd` role and stores them to local disk in `/opt/rke/etcd-snapshots`. - -If you set up the [options for S3](#options-for-the-etcd-snapshot-service), the snapshot will also be uploaded to the S3 backend. 
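-
-For example, a minimal sketch that keeps recurring snapshots on local disk only (no S3), using the defaults listed in the options table below:
-
-```yaml
-services:
-  etcd:
-    backup_config:
-      # Take a snapshot every 12 hours
-      interval_hours: 12
-      # Keep the 6 most recent snapshots before rotating
-      retention: 6
-```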
- -### Snapshot Service Logging - -When a cluster is launched with the `etcd-snapshot` service enabled, you can view the `etcd-rolling-snapshots` logs to confirm backups are being created automatically. - -``` -$ docker logs etcd-rolling-snapshots - -time="2018-05-04T18:39:16Z" level=info msg="Initializing Rolling Backups" creation=1m0s retention=24h0m0s -time="2018-05-04T18:40:16Z" level=info msg="Created backup" name="2018-05-04T18:40:16Z_etcd" runtime=108.332814ms -time="2018-05-04T18:41:16Z" level=info msg="Created backup" name="2018-05-04T18:41:16Z_etcd" runtime=92.880112ms -time="2018-05-04T18:42:16Z" level=info msg="Created backup" name="2018-05-04T18:42:16Z_etcd" runtime=83.67642ms -time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18:43:16Z_etcd" runtime=86.298499ms -``` - -### Options for the `Etcd-Snapshot` Service - -|Option|Description| S3 Specific | -|---|---| --- | -|**interval_hours**| The duration in hours between recurring backups. This supercedes the `creation` option (which was used in RKE before v0.2.0) and will override it if both are specified. (Default: 12)| | -|**retention**| The number of snapshots to retain before rotation. If the retention is configured in both `etcd.retention` (time period to keep snapshots in hours), which was required in RKE before v0.2.0, and at `etcd.backup_config.retention` (number of snapshots), the latter will be used. (Default: 6) | | -|**bucket_name**| S3 bucket name where backups will be stored| * | -|**folder**| Folder inside S3 bucket where backups will be stored. This is optional. _Available as of v0.3.0_ | * | -|**access_key**| S3 access key with permission to access the backup bucket.| * | -|**secret_key** |S3 secret key with permission to access the backup bucket.| * | -|**region** |S3 region for the backup bucket. This is optional.| * | -|**endpoint** |S3 regions endpoint for the backup bucket.| * | -|**custom_ca** |Custom certificate authority to use when connecting to the endpoint. Only required for private S3 compatible storage solutions. Available for RKE v0.2.5+.| * | - -The `--access-key` and `--secret-key` options are not required if the `etcd` nodes are AWS EC2 instances that have been configured with a suitable IAM instance profile. - -##### Using a custom CA certificate for S3 - -The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 backend uses a self-signed or custom certificate, provide a custom certificate using the option `custom_ca` to connect to the S3 backend. - -### IAM Support for Storing Snapshots in S3 - -In addition to API access keys, RKE supports using IAM roles for S3 authentication. The cluster etcd nodes must be assigned an IAM role that has read/write access to the designated backup bucket on S3. Also, the nodes must have network access to the S3 endpoint specified. 
- -Below is an [example IAM policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_examples_s3_rw-bucket.html) that would allow nodes to store and retrieve backups from S3: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "ListObjectsInBucket", - "Effect": "Allow", - "Action": ["s3:ListBucket"], - "Resource": ["arn:aws:s3:::bucket-name"] - }, - { - "Sid": "AllObjectActions", - "Effect": "Allow", - "Action": "s3:*Object", - "Resource": ["arn:aws:s3:::bucket-name/*"] - } - ] -} -``` - -For details on giving an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) - -### Configuring the Snapshot Service in YAML - -```yaml -services: - etcd: - backup_config: - interval_hours: 12 - retention: 6 - s3backupconfig: - access_key: S3_ACCESS_KEY - secret_key: S3_SECRET_KEY - bucket_name: s3-bucket-name - region: "" - folder: "" # Optional - Available as of v0.3.0 - endpoint: s3.amazonaws.com - custom_ca: |- - -----BEGIN CERTIFICATE----- - $CERTIFICATE - -----END CERTIFICATE----- -``` - -{{% /tab %}} -{{% tab "RKE before v0.2.0"%}} - -To schedule automatic recurring etcd snapshots, you can enable the `etcd-snapshot` service with [extra configuration options](#options-for-the-local-etcd-snapshot-service). `etcd-snapshot` runs in a service container alongside the `etcd` container. By default, the `etcd-snapshot` service takes a snapshot for every node that has the `etcd` role and stores them to local disk in `/opt/rke/etcd-snapshots`. - -RKE saves a backup of the certificates, i.e. a file named `pki.bundle.tar.gz`, in the same location. The snapshot and pki bundle file are required for the restore process in versions before v0.2.0. - -### Snapshot Service Logging - -When a cluster is launched with the `etcd-snapshot` service enabled, you can view the `etcd-rolling-snapshots` logs to confirm backups are being created automatically. - -``` -$ docker logs etcd-rolling-snapshots - -time="2018-05-04T18:39:16Z" level=info msg="Initializing Rolling Backups" creation=1m0s retention=24h0m0s -time="2018-05-04T18:40:16Z" level=info msg="Created backup" name="2018-05-04T18:40:16Z_etcd" runtime=108.332814ms -time="2018-05-04T18:41:16Z" level=info msg="Created backup" name="2018-05-04T18:41:16Z_etcd" runtime=92.880112ms -time="2018-05-04T18:42:16Z" level=info msg="Created backup" name="2018-05-04T18:42:16Z_etcd" runtime=83.67642ms -time="2018-05-04T18:43:16Z" level=info msg="Created backup" name="2018-05-04T18:43:16Z_etcd" runtime=86.298499ms -``` - -### Options for the Local `Etcd-Snapshot` Service - -|Option|Description| -|---|---| -|**Snapshot**|By default, the recurring snapshot service is disabled. To enable the service, you need to define it as part of `etcd` and set it to `true`.| -|**Creation**|By default, the snapshot service will take snapshots every 5 minutes (`5m0s`). You can change the time between snapshots as part of the `creation` directive for the `etcd` service.| -|**Retention**|By default, all snapshots are saved for 24 hours (`24h`) before being deleted and purged. 
You can change how long to store a snapshot as part of the `retention` directive for the `etcd` service.| - -### Configuring the Snapshot Service in YAML - -```yaml -services: - etcd: - snapshot: true - creation: 5m0s - retention: 24h -``` - -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md b/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md deleted file mode 100644 index 50f22e8692..0000000000 --- a/content/rke/latest/en/etcd-snapshots/restoring-from-backup/_index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: Restoring from Backup -weight: 3 ---- - -The details of restoring your cluster from backup are different depending on your version of RKE. - -{{% tabs %}} -{{% tab "RKE v0.2.0+"%}} - -If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the the specific cluster that has suffered the disaster. - -The following actions will be performed when you run the command: - -- Syncs the snapshot or downloads the snapshot from S3, if necessary. -- Checks snapshot checksum across etcd nodes to make sure they are identical. -- Deletes your current cluster and cleans old data by running `rke remove`. This removes the entire Kubernetes cluster, not just the etcd cluster. -- Rebuilds the etcd cluster from the chosen snapshot. -- Creates a new cluster by running `rke up`. -- Restarts cluster system pods. - ->**Warning:** You should back up any important data in your cluster before running `rke etcd snapshot-restore` because the command deletes your current Kubernetes cluster and replaces it with a new one. - -The snapshot used to restore your etcd cluster can either be stored locally in `/opt/rke/etcd-snapshots` or from a S3 compatible backend. - -_Available as of v1.1.4_ - -If the snapshot contains the cluster state file, it will automatically be extracted and used for the restore. If you want to force the use of the local state file, you can add `--use-local-state` to the command. If the snapshot was created using an RKE version before v1.1.4, or if the snapshot does not contain a state file, make sure the cluster state file (by default available as `cluster.rkestate`) is present before executing the command. - -### Example of Restoring from a Local Snapshot - -To restore etcd from a local snapshot, run: - -``` -$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot -``` - -The snapshot is assumed to be located in `/opt/rke/etcd-snapshots`. - -**Note:** The `pki.bundle.tar.gz` file is not needed because RKE v0.2.0 changed how the [Kubernetes cluster state is stored]({{}}/rke/latest/en/installation/#kubernetes-cluster-state). - -### Example of Restoring from a Snapshot in S3 - -When restoring etcd from a snapshot located in S3, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. - -```shell -$ rke etcd snapshot-restore \ ---config cluster.yml \ ---name snapshot-name \ ---s3 \ ---access-key S3_ACCESS_KEY \ ---secret-key S3_SECRET_KEY \ ---bucket-name s3-bucket-name \ ---folder s3-folder-name \ # Optional - Available as of v0.3.0 ---s3-endpoint s3.amazonaws.com -``` -**Note:** if you were restoring a cluster that had Rancher installed, the Rancher UI should start up after a few minutes; you don't need to re-run Helm. 
-
-### Options for `rke etcd snapshot-restore`
-
-| Option | Description | S3 Specific |
-| --- | --- | --- |
-| `--name` value | Specify snapshot name | |
-| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] | |
-| `--use-local-state` | Force the use of the local state file instead of looking for a state file in the snapshot _Available as of v1.1.4_ | |
-| `--s3` | Enable backup to S3 | * |
-| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * |
-| `--access-key` value | Specify s3 accessKey | * |
-| `--secret-key` value | Specify s3 secretKey | * |
-| `--bucket-name` value | Specify s3 bucket name | * |
-| `--folder` value | Specify folder inside bucket where backup will be stored. This is optional. _Available as of v0.3.0_ | * |
-| `--region` value | Specify the s3 bucket location (optional) | * |
-| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) | |
-| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) | |
-
-{{% /tab %}}
-{{% tab "RKE before v0.2.0"%}}
-
-If there is a disaster with your Kubernetes cluster, you can use `rke etcd snapshot-restore` to recover your etcd. This command reverts etcd to a specific snapshot and should be run on an etcd node of the specific cluster that has suffered the disaster.
-
-The following actions will be performed when you run the command:
-
-- Removes the old etcd cluster
-- Rebuilds the etcd cluster using the local snapshot
-
-Before you run this command, you must:
-
-- Run `rke remove` to remove your Kubernetes cluster and clean the nodes
-- Download your etcd snapshot from S3, if applicable. Place the etcd snapshot and the `pki.bundle.tar.gz` file in `/opt/rke/etcd-snapshots`. Manually sync the snapshot across all `etcd` nodes.
-
-After the restore, you must rebuild your Kubernetes cluster with `rke up`.
-
->**Warning:** You should back up any important data in your cluster before running `rke etcd snapshot-restore` because the command deletes your current etcd cluster and replaces it with a new one.
-
-### Example of Restoring from a Local Snapshot
-
-To restore etcd from a local snapshot, run:
-
-```
-$ rke etcd snapshot-restore --config cluster.yml --name mysnapshot
-```
-
-The snapshot is assumed to be located in `/opt/rke/etcd-snapshots`.
-
-The snapshot must be manually synced across all `etcd` nodes.
-
-The `pki.bundle.tar.gz` file is also expected to be in the same location.
-
-### Options for `rke etcd snapshot-restore`
-
-| Option | Description |
-| --- | --- |
-| `--name` value | Specify snapshot name |
-| `--config` value | Specify an alternate cluster YAML file (default: `cluster.yml`) [$RKE_CONFIG] |
-| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK]({{}}/rke/latest/en/config-options/#ssh-agent) |
-| `--ignore-docker-version` | [Disable Docker version check]({{}}/rke/latest/en/config-options/#supported-docker-versions) |
-
-{{% /tab %}}
-{{% /tabs %}}
diff --git a/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md b/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md
deleted file mode 100644
index 372142f649..0000000000
--- a/content/rke/latest/en/etcd-snapshots/troubleshooting/_index.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Troubleshooting
-weight: 5
----
-
-As of **v0.1.9**, the **rke-bundle-cert** container is removed on both success and failure of a restore.
To debug any issues, you will need to look at the **logs** generated from rke. - -As of **v0.1.8** and below, the **rke-bundle-cert** container is left over from a failed etcd restore. If you are having an issue with restoring an **etcd snapshot** then you can do the following on each etcd nodes before attempting to do another restore: - -``` -docker container rm --force rke-bundle-cert -``` - -The rke-bundle-cert container is usually removed when a backup or restore of **etcd** succeeds. Whenever something goes wrong, the **rke-bundle-cert** container will be left over. You can look -at the logs or inspect the container to see what the issue is. - -``` -docker container logs --follow rke-bundle-cert -docker container inspect rke-bundle-cert -``` - -The important thing to note is the mounts of the container and location of the `pki.bundle.tar.gz`. diff --git a/content/rke/latest/en/example-yamls/_index.md b/content/rke/latest/en/example-yamls/_index.md deleted file mode 100644 index d7b6009742..0000000000 --- a/content/rke/latest/en/example-yamls/_index.md +++ /dev/null @@ -1,406 +0,0 @@ ---- -title: Example Cluster.ymls -weight: 300 -aliases: - - /rke/latest/en/config-options/example-yamls/ ---- - -There are lots of different [configuration options]({{}}/rke/latest/en/config-options/) that can be set in the cluster configuration file for RKE. Here are some examples of files: - -> **Note for Rancher 2 users** If you are configuring Cluster Options using a [Config File]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) when creating [Rancher Launched Kubernetes]({{}}/rancher/v2.x/en/cluster-provisioning/rke-clusters/), the names of services should contain underscores only: `kube_api` and `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. - -## Minimal `cluster.yml` example - -```yaml -nodes: - - address: 1.2.3.4 - user: ubuntu - role: - - controlplane - - etcd - - worker -``` - -## Full `cluster.yml` example - -```yaml -nodes: - - address: 1.1.1.1 - user: ubuntu - role: - - controlplane - - etcd - port: 2222 - docker_socket: /var/run/docker.sock - - address: 2.2.2.2 - user: ubuntu - role: - - worker - ssh_key_path: /home/user/.ssh/id_rsa - ssh_key: |- - -----BEGIN RSA PRIVATE KEY----- - - -----END RSA PRIVATE KEY----- - ssh_cert_path: /home/user/.ssh/test-key-cert.pub - ssh_cert: |- - ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3Bl.... - - address: example.com - user: ubuntu - role: - - worker - hostname_override: node3 - internal_address: 192.168.1.6 - labels: - app: ingress - taints: - - key: test-key - value: test-value - effect: NoSchedule - -# If set to true, RKE will not fail when unsupported Docker version -# are found -ignore_docker_version: false - -# Enable running cri-dockerd -# Up to Kubernetes 1.23, kubelet contained code called dockershim -# to support Docker runtime. 
The replacement is called cri-dockerd -# and should be enabled if you want to keep using Docker as your -# container runtime -# Only available to enable in Kubernetes 1.21 and higher -enable_cri_dockerd: true - -# Cluster level SSH private key -# Used if no ssh information is set for the node -ssh_key_path: ~/.ssh/test - -# Enable use of SSH agent to use SSH private keys with passphrase -# This requires the environment `SSH_AUTH_SOCK` configured pointing -#to your SSH agent which has the private key added -ssh_agent_auth: true - -# List of registry credentials -# If you are using a Docker Hub registry, you can omit the `url` -# or set it to `docker.io` -# is_default set to `true` will override the system default -# registry set in the global settings -private_registries: - - url: registry.com - user: Username - password: password - is_default: true - -# Bastion/Jump host configuration -bastion_host: - address: x.x.x.x - user: ubuntu - port: 22 - ssh_key_path: /home/user/.ssh/bastion_rsa -# or -# ssh_key: |- -# -----BEGIN RSA PRIVATE KEY----- -# -# -----END RSA PRIVATE KEY----- - -# Set the name of the Kubernetes cluster -cluster_name: mycluster - - -# The Kubernetes version used. The default versions of Kubernetes -# are tied to specific versions of the system images. -# -# For RKE v0.2.x and below, the map of Kubernetes versions and their system images is -# located here: -# https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go -# -# For RKE v0.3.0 and above, the map of Kubernetes versions and their system images is -# located here: -# https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go -# -# In case the kubernetes_version and kubernetes image in -# system_images are defined, the system_images configuration -# will take precedence over kubernetes_version. -kubernetes_version: v1.10.3-rancher2 - -# System Images are defaulted to a tag that is mapped to a specific -# Kubernetes Version and not required in a cluster.yml. -# Each individual system image can be specified if you want to use a different tag. 
-# -# For RKE v0.2.x and below, the map of Kubernetes versions and their system images is -# located here: -# https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go -# -# For RKE v0.3.0 and above, the map of Kubernetes versions and their system images is -# located here: -# https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_rke_system_images.go -# -system_images: - kubernetes: rancher/hyperkube:v1.10.3-rancher2 - etcd: rancher/coreos-etcd:v3.1.12 - alpine: rancher/rke-tools:v0.1.9 - nginx_proxy: rancher/rke-tools:v0.1.9 - cert_downloader: rancher/rke-tools:v0.1.9 - kubernetes_services_sidecar: rancher/rke-tools:v0.1.9 - kubedns: rancher/k8s-dns-kube-dns-amd64:1.14.8 - dnsmasq: rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.8 - kubedns_sidecar: rancher/k8s-dns-sidecar-amd64:1.14.8 - kubedns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0 - pod_infra_container: rancher/pause-amd64:3.1 - -services: - etcd: - # Custom uid/guid for etcd directory and files - uid: 52034 - gid: 52034 - # if external etcd is used - # path: /etcdcluster - # external_urls: - # - https://etcd-example.com:2379 - # ca_cert: |- - # -----BEGIN CERTIFICATE----- - # xxxxxxxxxx - # -----END CERTIFICATE----- - # cert: |- - # -----BEGIN CERTIFICATE----- - # xxxxxxxxxx - # -----END CERTIFICATE----- - # key: |- - # -----BEGIN PRIVATE KEY----- - # xxxxxxxxxx - # -----END PRIVATE KEY----- - # Note for Rancher v2.0.5 and v2.0.6 users: If you are configuring - # Cluster Options using a Config File when creating Rancher Launched - # Kubernetes, the names of services should contain underscores - # only: `kube_api`. - kube-api: - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-controller - service_cluster_ip_range: 10.43.0.0/16 - # Expose a different port range for NodePort services - service_node_port_range: 30000-32767 - pod_security_policy: false - # Encrypt secret data at Rest - # Available as of v0.3.1 - secrets_encryption_config: - enabled: true - custom_config: - apiVersion: apiserver.config.k8s.io/v1 - kind: EncryptionConfiguration - resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: k-fw5hn - secret: RTczRjFDODMwQzAyMDVBREU4NDJBMUZFNDhCNzM5N0I= - - identity: {} - # Enable audit logging - # Available as of v1.0.0 - audit_log: - enabled: true - configuration: - max_age: 6 - max_backup: 6 - max_size: 110 - path: /var/log/kube-audit/audit-log.json - format: json - policy: - apiVersion: audit.k8s.io/v1 # This is required. - kind: Policy - omitStages: - - "RequestReceived" - rules: - # Log pod changes at RequestResponse level - - level: RequestResponse - resources: - - group: "" - # Resource "pods" doesn't match requests to any subresource of pods, - # which is consistent with the RBAC policy. 
- resources: ["pods"] - # Using the EventRateLimit admission control enforces a limit on the number of events - # that the API Server will accept in a given time period - # Available as of v1.0.0 - event_rate_limit: - enabled: true - configuration: - apiVersion: eventratelimit.admission.k8s.io/v1alpha1 - kind: Configuration - limits: - - type: Server - qps: 6000 - burst: 30000 - # Enable AlwaysPullImages Admission controller plugin - # Available as of v0.2.0 - always_pull_images: false - # Add additional arguments to the kubernetes API server - # This WILL OVERRIDE any existing defaults - extra_args: - # Enable audit log to stdout - audit-log-path: "-" - # Increase number of delete workers - delete-collection-workers: 3 - # Set the level of log output to debug-level - v: 4 - # Note for Rancher 2 users: If you are configuring Cluster Options - # using a Config File when creating Rancher Launched Kubernetes, - # the names of services should contain underscores only: - # `kube_controller`. This only applies to Rancher v2.0.5 and v2.0.6. - kube-controller: - # CIDR pool used to assign IP addresses to pods in the cluster - cluster_cidr: 10.42.0.0/16 - # IP range for any services created on Kubernetes - # This must match the service_cluster_ip_range in kube-api - service_cluster_ip_range: 10.43.0.0/16 - # Add additional arguments to the kubernetes API server - # This WILL OVERRIDE any existing defaults - extra_args: - # Set the level of log output to debug-level - v: 4 - # Enable RotateKubeletServerCertificate feature gate - feature-gates: RotateKubeletServerCertificate=true - # Enable TLS Certificates management - # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ - cluster-signing-cert-file: "/etc/kubernetes/ssl/kube-ca.pem" - cluster-signing-key-file: "/etc/kubernetes/ssl/kube-ca-key.pem" - kubelet: - # Base domain for the cluster - cluster_domain: cluster.local - # IP address for the DNS service endpoint - cluster_dns_server: 10.43.0.10 - # Fail if swap is on - fail_swap_on: false - # Configure pod-infra-container-image argument - pod-infra-container-image: "k8s.gcr.io/pause:3.2" - # Generate a certificate signed by the kube-ca Certificate Authority - # for the kubelet to use as a server certificate - # Available as of v1.0.0 - generate_serving_certificate: true - extra_args: - # Set max pods to 250 instead of default 110 - max-pods: 250 - # Enable RotateKubeletServerCertificate feature gate - feature-gates: RotateKubeletServerCertificate=true - # Optionally define additional volume binds to a service - extra_binds: - - "/usr/libexec/kubernetes/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins" - scheduler: - extra_args: - # Set the level of log output to debug-level - v: 4 - kubeproxy: - extra_args: - # Set the level of log output to debug-level - v: 4 - -# Currently, only authentication strategy supported is x509. -# You can optionally create additional SANs (hostnames or IPs) to -# add to the API server PKI certificate. -# This is useful if you want to use a load balancer for the -# control plane servers. 
-authentication: - strategy: x509 - sans: - - "10.18.160.10" - - "my-loadbalancer-1234567890.us-west-2.elb.amazonaws.com" - -# Kubernetes Authorization mode -# Use `mode: rbac` to enable RBAC -# Use `mode: none` to disable authorization -authorization: - mode: rbac - -# If you want to set a Kubernetes cloud provider, you specify -# the name and configuration -cloud_provider: - name: aws - -# Add-ons are deployed using kubernetes jobs. RKE will give -# up on trying to get the job status after this timeout in seconds.. -addon_job_timeout: 30 - -# Specify network plugin-in (canal, calico, flannel, weave, or none) -network: - plugin: canal - # Specify MTU - mtu: 1400 - options: - # Configure interface to use for Canal - canal_iface: eth1 - canal_flannel_backend_type: vxlan - # Available as of v1.2.6 - canal_autoscaler_priority_class_name: system-cluster-critical - canal_priority_class_name: system-cluster-critical - # Available as of v1.2.4 - tolerations: - - key: "node.kubernetes.io/unreachable" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - - key: "node.kubernetes.io/not-ready" - operator: "Exists" - effect: "NoExecute" - tolerationseconds: 300 - # Available as of v1.1.0 - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 6 - -# Specify DNS provider (coredns or kube-dns) -dns: - provider: coredns - # Available as of v1.1.0 - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 20% - maxSurge: 15% - linear_autoscaler_params: - cores_per_replica: 0.34 - nodes_per_replica: 4 - prevent_single_point_failure: true - min: 2 - max: 3 - -# Specify monitoring provider (metrics-server) -monitoring: - provider: metrics-server - # Available as of v1.1.0 - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 8 - -# Currently only nginx ingress provider is supported. -# To disable ingress controller, set `provider: none` -# `node_selector` controls ingress placement and is optional -ingress: - provider: nginx - node_selector: - app: ingress - # Available as of v1.1.0 - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 5 - -# All add-on manifests MUST specify a namespace -addons: |- - --- - apiVersion: v1 - kind: Pod - metadata: - name: my-nginx - namespace: default - spec: - containers: - - name: my-nginx - image: nginx - ports: - - containerPort: 80 - -addons_include: - - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-operator.yaml - - https://raw.githubusercontent.com/rook/rook/master/cluster/examples/kubernetes/rook-cluster.yaml - - /path/to/manifest -``` diff --git a/content/rke/latest/en/installation/_index.md b/content/rke/latest/en/installation/_index.md deleted file mode 100644 index 215ea8e025..0000000000 --- a/content/rke/latest/en/installation/_index.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: RKE Kubernetes Installation -description: RKE is a fast, versatile Kubernetes installer you can use to install Kubernetes on your Linux hosts. Learn the simple steps for an RKE Kubernetes installation -weight: 50 ---- - -RKE is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. You can get started in a couple of quick and easy steps: - -1. [Download the RKE Binary](#download-the-rke-binary) - 1. [Alternative RKE macOS Install - Homebrew](#alternative-rke-macos-x-install-homebrew) - 1. 
[Alternative RKE macOS Install - MacPorts](#alternative-rke-macos-install-macports) -1. [Prepare the Nodes for the Kubernetes Cluster](#prepare-the-nodes-for-the-kubernetes-cluster) -1. [Creating the Cluster Configuration File](#creating-the-cluster-configuration-file) -1. [Deploying Kubernetes with RKE](#deploying-kubernetes-with-rke) -1. [Save your Files](#save-your-files) -1. [Interacting with your Kubernetes Cluster](#interacting-with-your-kubernetes-cluster) - -## Download the RKE binary - -1. From your workstation, open a web browser and look up the [latest available RKE release](https://github.com/rancher/rke/#latest-release). You can click on the release notes link to go straight to that release or manually navigate to our [RKE Releases](https://github.com/rancher/rke/releases) page and download the latest available RKE installer applicable to your operating system and architecture: - - >**Note:** - > Be aware that the release that is marked as `Latest release` on GitHub release page might not be the actual latest available release of RKE. - - - **macOS**: `rke_darwin-amd64` - - **Linux (Intel/AMD)**: `rke_linux-amd64` - - **Linux (ARM 32-bit)**: `rke_linux-arm` - - **Linux (ARM 64-bit)**: `rke_linux-arm64` - - **Windows (32-bit)**: `rke_windows-386.exe` - - **Windows (64-bit)**: `rke_windows-amd64.exe` - -2. Copy the RKE binary to a folder in your `$PATH` and rename it `rke` (or `rke.exe` for Windows) - - ``` - # macOS - $ mv rke_darwin-amd64 rke - # Linux - $ mv rke_linux-amd64 rke - # Windows PowerShell - > mv rke_windows-amd64.exe rke.exe - ``` - -3. Make the RKE binary that you just downloaded executable. Open Terminal, change directory to the location of the RKE binary, and then run one of the commands below. - - >**Using Windows?** - >The file is already an executable. Skip to [Prepare the Nodes for the Kubernetes Cluster](#prepare-the-nodes-for-the-kubernetes-cluster). - - ``` - $ chmod +x rke - ``` - -4. Confirm that RKE is now executable by running the following command: - - ``` - $ rke --version - ``` - - -### Alternative RKE macOS Install - Homebrew - -RKE can also be installed and updated using Homebrew, a package manager for macOS. - -1. Install Homebrew. See https://brew.sh/ for instructions. - -2. Using `brew`, install RKE by running the following command in a Terminal window: - - ``` - $ brew install rke - ``` - -If you have already installed RKE using `brew`, you can upgrade RKE by running: - -``` -$ brew upgrade rke -``` - -### Alternative RKE macOS Install - MacPorts - -RKE can also be installed and updated using MacPorts, a package manager for macOS. - -1. Install MacPorts. See https://www.macports.org/ for instructions. - -2. Using `port`, install RKE by running the following command in a Terminal window: - - ``` - $ port install rke - ``` - -If you have already installed RKE using `port`, you can upgrade RKE by running: - -``` -$ port upgrade rke -``` - -## Prepare the Nodes for the Kubernetes cluster - -The Kubernetes cluster components are launched using Docker on a Linux distro. You can use any Linux you want, as long as you can install Docker on it. - -> For information on which Docker versions were tested with your version of RKE, refer to the [terms of service](https://rancher.com/support-maintenance-terms) for installing Rancher on RKE. 
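
Before moving on, it can save time to confirm that the SSH user you plan to use can actually reach Docker on each node. A minimal check from your workstation might look like the sketch below; the user and address are placeholders and should match the entries you intend to put in `cluster.yml`:

```
# Confirm the node's Docker daemon responds for the SSH user
# (replace the user and address with your own values)
ssh ubuntu@1.2.3.4 "docker version --format '{{.Server.Version}}'"
```

If this prints a Docker version without permission errors, the node is ready for the steps below.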
- -Review the [OS requirements]({{}}/rke/latest/en/installation/os/) and configure each node appropriately. - -## Creating the Cluster Configuration File - -RKE uses a cluster configuration file, referred to as `cluster.yml` to determine what nodes will be in the cluster and how to deploy Kubernetes. There are [many configuration options]({{}}/rke/latest/en/config-options/) that can be set in the `cluster.yml`. In our example, we will be assuming the minimum of one [node]({{}}/rke/latest/en/config-options/nodes) for your Kubernetes cluster. - -There are two easy ways to create a `cluster.yml`: - -- Using our [minimal `cluster.yml`]({{}}/rke/latest/en/example-yamls/#minimal-cluster-yml-example) and updating it based on the node that you will be using. -- Using `rke config` to query for all the information needed. - -### Using `rke config` - -Run `rke config` to create a new `cluster.yml` in the current directory. This command will prompt you for all the information needed to build a cluster. See [cluster configuration options]({{}}/rke/latest/en/config-options/) for details on the various options. - -``` -rke config --name cluster.yml -``` - -#### Other RKE Configuration Options - -You can create an empty template `cluster.yml` file by specifying the `--empty` flag. - -``` -rke config --empty --name cluster.yml -``` - -Instead of creating a file, you can print the generated configuration to stdout using the `--print` flag. - -``` -rke config --print -``` - -### High Availability - -RKE is HA ready, you can specify more than one `controlplane` node in the `cluster.yml` file. RKE will deploy master components on all of these nodes and the kubelets are configured to connect to `127.0.0.1:6443` by default which is the address of `nginx-proxy` service that proxy requests to all master nodes. - -To create an HA cluster, specify more than one host with role `controlplane`. - -### Certificates - -_Available as of v0.2.0_ - -By default, Kubernetes clusters require certificates and RKE auto-generates the certificates for all cluster components. You can also use [custom certificates]({{}}/rke/latest/en/installation/certs/). After the Kubernetes cluster is deployed, you can [manage these auto-generated certificates]({{}}/rke/latest/en/cert-mgmt/#certificate-rotation). - -## Deploying Kubernetes with RKE - -After you've created your `cluster.yml`, you can deploy your cluster with a simple command. This command assumes the `cluster.yml` file is in the same directory as where you are running the command. - -``` -rke up - -INFO[0000] Building Kubernetes cluster -INFO[0000] [dialer] Setup tunnel for host [10.0.0.1] -INFO[0000] [network] Deploying port listener containers -INFO[0000] [network] Pulling image [alpine:latest] on host [10.0.0.1] -... -INFO[0101] Finished building Kubernetes cluster successfully -``` - -The last line should read `Finished building Kubernetes cluster successfully` to indicate that your cluster is ready to use. As part of the Kubernetes creation process, a `kubeconfig` file has been created and written at `kube_config_cluster.yml`, which can be used to start interacting with your Kubernetes cluster. - -> **Note:** If you have used a different file name from `cluster.yml`, then the kube config file will be named `kube_config_.yml`. - -## Save Your Files - -> **Important** -> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. - -Save a copy of the following files in a secure location: - -- `cluster.yml`: The RKE cluster configuration file. 
-- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster. This file contains credentials for full access to the cluster. -- `cluster.rkestate`: The [Kubernetes Cluster State file](#kubernetes-cluster-state). This file also contains credentials for full access to the cluster.
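
One possible way to keep a dated copy of these files is a small script such as the sketch below; the backup directory is only an example, and any secure location (encrypted storage, a secrets manager, etc.) works just as well:

```
# Copy the cluster files into a dated backup directory (example path)
# Note: cluster.rkestate only exists for clusters managed with RKE v0.2.0+
BACKUP_DIR=~/rke-backups/$(date +%F)
mkdir -p "$BACKUP_DIR"
cp cluster.yml cluster.rkestate kube_config_cluster.yml "$BACKUP_DIR"
```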

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ - -> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. - -### Kubernetes Cluster State - -The Kubernetes cluster state, which consists of the cluster configuration file `cluster.yml` and components certificates in Kubernetes cluster, is saved by RKE, but depending on your RKE version, the cluster state is saved differently. - -As of v0.2.0, RKE creates a `.rkestate` file in the same directory that has the cluster configuration file `cluster.yml`. The `.rkestate` file contains the current state of the cluster including the RKE configuration and the certificates. It is required to keep this file in order to update the cluster or perform any operation on it through RKE. - -Before v0.2.0, RKE saved the Kubernetes cluster state as a secret. When updating the state, RKE pulls the secret, updates/changes the state and saves a new secret. - -## Interacting with your Kubernetes cluster - -After your cluster is up and running, you can start using the [generated kubeconfig file]({{}}/rke/latest/en/kubeconfig) to start interacting with your Kubernetes cluster using `kubectl`. - -After installation, there are several maintenance items that might arise: - -* [Certificate Management]({{}}/rke/latest/en/cert-mgmt/) -* [Adding and Removing Nodes in the cluster]({{}}/rke/latest/en/managing-clusters) diff --git a/content/rke/latest/en/installation/certs/_index.md b/content/rke/latest/en/installation/certs/_index.md deleted file mode 100644 index 7e28460254..0000000000 --- a/content/rke/latest/en/installation/certs/_index.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Custom Certificates -weight: 150 ---- - -_Available as of v0.2.0_ - -By default, Kubernetes clusters require certificates and RKE auto-generates the certificates for all the Kubernetes services. RKE can also use custom certificates for these Kubernetes services. - -When [deploying Kubernetes with RKE]({{}}/rke/latest/en/installation/#deploying-kubernetes-with-rke), there are two additional options that can be used with `rke up` so that RKE uses custom certificates. - -| Option | Description | -| --- | --- | -| `--custom-certs` | Use custom certificates from a cert dir. The default directory is `/cluster_certs`. | -| `--cert-dir` value | Specify a certificate dir path | - -## Using Custom Certificates - -``` -# Use certificates located in the default directory `/cluster_certs` -$ rke up --custom-certs - -# Use certificates located in your own directory -$ rke up --custom-certs --cert-dir ~/my/own/certs -``` - -## Certificates - -The following certificates must exist in the certificate directory. 
- -| Name | Certificate | Key | -|---|---|---| -| Master CA | kube-ca.pem | - | -| Kube API | kube-apiserver.pem | kube-apiserver-key.pem | -| Kube Controller Manager | kube-controller-manager.pem | kube-controller-manager-key.pem | -| Kube Scheduler | kube-scheduler.pem | kube-scheduler-key.pem | -| Kube Proxy | kube-proxy.pem | kube-proxy-key.pem | -| Kube Admin | kube-admin.pem | kube-admin-key.pem | -| Kube Node | kube-node.pem | kube-node-key.pem | -| Apiserver Proxy Client | kube-apiserver-proxy-client.pem | kube-apiserver-proxy-client-key.pem | -| Etcd Nodes | kube-etcd-x-x-x-x.pem | kube-etcd-x-x-x-x-key.pem | -| Kube Api Request Header CA | kube-apiserver-requestheader-ca.pem* | kube-apiserver-requestheader-ca-key.pem | -| Service Account Token | - | kube-service-account-token-key.pem | - -\* Is the same as kube-ca.pem - -## Generating Certificate Signing Requests (CSRs) and Keys - -If you want to create and sign the certificates by a real Certificate Authority (CA), you can use RKE to generate a set of Certificate Signing Requests (CSRs) and keys. Using the `rke cert generate-csr` command, you can generate the CSRs and keys. - -1. Set up your `cluster.yml` with the [node information]({{}}/rke/latest/en/config-options/nodes/). - -2. Run `rke cert generate-csr` to generate certificates for the node(s) in the `cluster.yml`. By default, the CSRs and keys will be saved in `./cluster_certs`. To have them saved in a different directory, use `--cert-dir` to define what directory to have them saved in. - - ``` - $ rke cert generate-csr - INFO[0000] Generating Kubernetes cluster CSR certificates - INFO[0000] [certificates] Generating Kubernetes API server csr - INFO[0000] [certificates] Generating Kube Controller csr - INFO[0000] [certificates] Generating Kube Scheduler csr - INFO[0000] [certificates] Generating Kube Proxy csr - INFO[0001] [certificates] Generating Node csr and key - INFO[0001] [certificates] Generating admin csr and kubeconfig - INFO[0001] [certificates] Generating Kubernetes API server proxy client csr - INFO[0001] [certificates] Generating etcd-x.x.x.x csr and key - INFO[0001] Successfully Deployed certificates at [./cluster_certs] - ``` - -3. In addition to the CSRs, you also need to generate the kube-service-account-token-key.pem key. To do this, run the following: - ``` - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./cluster_certs/kube-service-account-token-key.pem -out ./cluster_certs/kube-service-account-token.pem - ``` - -**Result:** The CSRs and keys will be deployed in `./cluster_certs` directory, assuming you didn't specify a `--cert-dir`. The CSR files will contain the right Alternative DNS and IP Names for the certificates. You can use them to sign the certificates by a real CA. After the certificates are signed, those certificates can be used by RKE as custom certificates. 
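
If you sign the CSRs yourself with an internal CA, a minimal `openssl` sketch is shown below. The `ca.pem` and `ca-key.pem` file names are placeholders for your CA material, and only one CSR is shown; the same command applies to the other CSR files. The directory listing that follows shows the contents of `./cluster_certs` after generation.

```
# Sign one of the generated CSRs with your own CA (ca.pem / ca-key.pem are placeholders)
# On OpenSSL 3.0+, -copy_extensions copy carries the SANs from the CSR into the certificate;
# on older versions, supply the extensions through an extfile instead.
openssl x509 -req -in cluster_certs/kube-apiserver-csr.pem \
  -CA ca.pem -CAkey ca-key.pem -CAcreateserial \
  -copy_extensions copy -days 365 -sha256 \
  -out cluster_certs/kube-apiserver.pem
```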
- -``` -$ tree cluster_certs - -cluster_certs -├── kube-admin-csr.pem -├── kube-admin-key.pem -├── kube-apiserver-csr.pem -├── kube-apiserver-key.pem -├── kube-apiserver-proxy-client-csr.pem -├── kube-apiserver-proxy-client-key.pem -├── kube-controller-manager-csr.pem -├── kube-controller-manager-key.pem -├── kube-etcd-x-x-x-x-csr.pem -├── kube-etcd-x-x-x-x-key.pem -├── kube-node-csr.pem -├── kube-node-key.pem -├── kube-proxy-csr.pem -├── kube-proxy-key.pem -├── kube-scheduler-csr.pem -├── kube-service-account-token-key.pem -├── kube-service-account-token.pem -└── kube-scheduler-key.pem - -0 directories, 18 files - -``` diff --git a/content/rke/latest/en/kubeconfig/_index.md b/content/rke/latest/en/kubeconfig/_index.md deleted file mode 100644 index 27c596ba37..0000000000 --- a/content/rke/latest/en/kubeconfig/_index.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: Kubeconfig File -weight: 145 ---- - -In order to start interacting with your Kubernetes cluster, you will use a different binary called `kubectl`. You will need to [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local machine. - -A _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl commandline tool (or other clients). - -For more details on how kubeconfig and kubectl work together, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). - -When you deployed Kubernetes, a kubeconfig is automatically generated for your RKE cluster. This file is created and saved as `kube_config_cluster.yml`. - ->**Note:** By default, kubectl checks `~/.kube/config` for a kubeconfig file, but you can use any directory you want using the `--kubeconfig` flag. For example: -> ->``` -kubectl --kubeconfig /custom/path/kube.config get pods -``` - -Confirm that kubectl is working by checking the version of your Kubernetes cluster - -``` -kubectl --kubeconfig kube_config_cluster.yml version - -Client Version: version.Info{Major:"1", Minor:"10", GitVersion:"v1.10.0", GitCommit:"fc32d2f3698e36b93322a3465f63a14e9f0eaead", GitTreeState:"clean", BuildDate:"2018-03-27T00:13:02Z", GoVersion:"go1.9.4", Compiler:"gc", Platform:"darwin/amd64"} -Server Version: version.Info{Major:"1", Minor:"8+", GitVersion:"v1.8.9-rancher1", GitCommit:"68595e18f25e24125244e9966b1e5468a98c1cd4", GitTreeState:"clean", BuildDate:"2018-03-13T04:37:53Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"} -``` - -The client and server version are reported, indicating that you have a local `kubectl` client and are able to request the server version from the newly built cluster. Now, you can issue [any kubectl command](https://kubernetes.io/docs/reference/kubectl/kubectl/) to your cluster, like requesting the nodes that are in the cluster. - -``` -kubectl --kubeconfig kube_config_cluster.yml get nodes -NAME STATUS ROLES AGE VERSION -10.0.0.1 Ready controlplane,etcd,worker 35m v1.10.3-rancher1 -``` diff --git a/content/rke/latest/en/managing-clusters/_index.md b/content/rke/latest/en/managing-clusters/_index.md deleted file mode 100644 index 3c7de40b2f..0000000000 --- a/content/rke/latest/en/managing-clusters/_index.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Adding and Removing Nodes -description: RKE supports adding/removing nodes for worker and controlplane hosts. 
Learn about the changes you need to make to the cluster.yml in order to add/remove nodes -weight: 175 -aliases: - - /rke/latest/en/installation/managing-clusters/ ---- - -### Adding/Removing Nodes - -RKE supports adding/removing [nodes]({{}}/rke/latest/en/config-options/nodes/) for worker and controlplane hosts. - -In order to add additional nodes, you update the original `cluster.yml` file with any additional nodes and specify their role in the Kubernetes cluster. - -In order to remove nodes, remove the node information from the nodes list in the original `cluster.yml`. - -After you've made changes to add/remove nodes, run `rke up` with the updated `cluster.yml`. - -### Adding/Removing Worker Nodes - -You can add/remove only worker nodes, by running `rke up --update-only`. This will ignore everything else in the `cluster.yml` except for any worker nodes. - -> **Note:** When using `--update-only`, other actions that do not specifically relate to nodes may be deployed or updated, for example [addons]({{< baseurl >}}/rke/latest/en/config-options/add-ons). - -### Removing Kubernetes Components from Nodes - -In order to remove the Kubernetes components from nodes, you use the `rke remove` command. - -> **Warning:** This command is irreversible and will destroy the Kubernetes cluster, including etcd snapshots on S3. If there is a disaster and your cluster is inaccessible, refer to the process for [restoring your cluster from a snapshot]({{}}/rke/latest/en/etcd-snapshots/#etcd-disaster-recovery). - -The `rke remove` command does the following to each node in the `cluster.yml`: - -- Remove the Kubernetes component deployed on it - - `etcd` - - `kube-apiserver` - - `kube-controller-manager` - - `kubelet` - - `kube-proxy` - - `nginx-proxy` - -The cluster's etcd snapshots are removed, including both local snapshots and snapshots that are stored on S3. - -> **Note:** Pods are not removed from the nodes. If the node is re-used, the pods will automatically be removed when the new Kubernetes cluster is created. 
- -- Clean each host from the directories left by the services: - - /etc/kubernetes/ssl - - /var/lib/etcd - - /etc/cni - - /opt/cni - - /var/run/calico diff --git a/content/rke/latest/en/os/_index.md b/content/rke/latest/en/os/_index.md deleted file mode 100644 index 16d05b3b9c..0000000000 --- a/content/rke/latest/en/os/_index.md +++ /dev/null @@ -1,350 +0,0 @@ ---- -title: Requirements -weight: 5 ---- -**In this section:** - - -- [Operating System](#operating-system) - - [General Linux Requirements](#general-linux-requirements) - - [SUSE Linux Enterprise Server (SLES) / openSUSE](#suse-linux-enterprise-server-sles-opensuse) - - [Using Upstream Docker](#using-upstream-docker) - - [Using SUSE/openSUSE packaged Docker](#using-suse-opensuse-packaged-docker) - - [Adding the Software Repository for Docker](#adding-the-software-repository-for-docker) - - [openSUSE MicroOS/Kubic (Atomic)](#opensuse-microos-kubic-atomic) - - [openSUSE MicroOS](#opensuse-microos) - - [openSUSE Kubic](#opensuse-kubic) - - [Red Hat Enterprise Linux (RHEL) / Oracle Linux (OL) / CentOS](#red-hat-enterprise-linux-rhel-oracle-linux-ol-centos) - - [Using upstream Docker](#using-upstream-docker-1) - - [Using RHEL/CentOS packaged Docker](#using-rhel-centos-packaged-docker) - - [Red Hat Atomic](#red-hat-atomic) - - [OpenSSH version](#openssh-version) - - [Creating a Docker Group](#creating-a-docker-group) - - [Flatcar Container Linux](#flatcar-container-linux) -- [Software](#software) - - [OpenSSH](#openssh) - - [Kubernetes](#kubernetes) - - [Docker](#docker) - - [Installing Docker](#installing-docker) - - [Checking the Installed Docker Version](#checking-the-installed-docker-version) -- [Hardware](#hardware) - - [Worker Role](#worker-role) - - [Large Kubernetes Clusters](#large-kubernetes-clusters) - - [Etcd clusters](#etcd-clusters) -- [Ports](#ports) - - [Opening port TCP/6443 using `iptables`](#opening-port-tcp-6443-using-iptables) - - [Opening port TCP/6443 using `firewalld`](#opening-port-tcp-6443-using-firewalld) -- [SSH Server Configuration](#ssh-server-configuration) - - - -## Operating System - -### General Linux Requirements - -RKE runs on almost any Linux OS with Docker installed. For details on which OS and Docker versions were tested with each version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/). - -- [SSH user]({{}}/rke/latest/en/config-options/nodes/#ssh-user) - The SSH user used for node access must be a member of the `docker` group on the node: - - ``` - usermod -aG docker - ``` - -> **Note:** Users added to the `docker` group are granted effective root permissions on the host by means of the Docker API. Only choose a user that is intended for this purpose and has its credentials and access properly secured. - - See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) to see how you can configure access to Docker without using the `root` user. 
- -- Swap should be disabled on any worker nodes - -- Please check the network plugin documentation for any additional requirements (for example, kernel modules) - - [Calico](https://docs.projectcalico.org/getting-started/kubernetes/requirements#kernel-dependencies) - - [Flannel](https://github.com/flannel-io/flannel/tree/master/Documentation) - - Canal (Combination Calico and Flannel) - - [Weave](https://www.weave.works/docs/net/latest/install/installing-weave/) - -> **Note:** If you or your cloud provider are using a custom minimal kernel, some required (network) kernel modules might not be present. - -- Following sysctl settings must be applied - -``` -net.bridge.bridge-nf-call-iptables=1 -``` - -### SUSE Linux Enterprise Server (SLES) / openSUSE - -If you are using SUSE Linux Enterprise Server or openSUSE follow the instructions below. - -#### Using upstream Docker -If you are using upstream Docker, the package name is `docker-ce` or `docker-ee`. You can check the installed package by executing: - -``` -rpm -q docker-ce -``` - -When using the upstream Docker packages, please follow [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user). - -#### Using SUSE/openSUSE packaged docker -If you are using the Docker package supplied by SUSE/openSUSE, the package name is `docker`. You can check the installed package by executing: - -``` -rpm -q docker -``` - -#### Adding the Software repository for docker -In SUSE Linux Enterprise Server 15 SP2 docker is found in the Containers module. -This module will need to be added before istalling docker. - -To list available modules you can run SUSEConnect to list the extensions and the activation command -``` -node:~ # SUSEConnect --list-extensions -AVAILABLE EXTENSIONS AND MODULES - - Basesystem Module 15 SP2 x86_64 (Activated) - Deactivate with: SUSEConnect -d -p sle-module-basesystem/15.2/x86_64 - - Containers Module 15 SP2 x86_64 - Activate with: SUSEConnect -p sle-module-containers/15.2/x86_64 -``` -Run this SUSEConnect command to activate the Containers module. -``` -node:~ # SUSEConnect -p sle-module-containers/15.2/x86_64 -Registering system to registration proxy https://rmt.seader.us - -Updating system details on https://rmt.seader.us ... - -Activating sle-module-containers 15.2 x86_64 ... --> Adding service to system ... --> Installing release package ... - -Successfully registered system -``` -In order to run docker cli commands with your user then you need to add this user to the `docker` group. -It is preferred not to use the root user for this. - -``` -usermod -aG docker -``` - -To verify that the user is correctly configured, log out of the node and login using SSH or your preferred method, and execute `docker ps`: - -``` -ssh user@node -user@node:~> docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -user@node:~> -``` -### openSUSE MicroOS/Kubic (Atomic) -Consult the project pages for openSUSE MicroOS and Kubic for installation -#### openSUSE MicroOS -Designed to host container workloads with automated administration & patching. Installing openSUSE MicroOS you get a quick, small environment for deploying Containers, or any other workload that benefits from Transactional Updates. 
As rolling release distribution the software is always up-to-date. -https://microos.opensuse.org -#### openSUSE Kubic -Based on openSUSE MicroOS, designed with the same things in mind but is focused on being a Certified Kubernetes Distribution. -https://kubic.opensuse.org -Installation instructions: -https://kubic.opensuse.org/blog/2021-02-08-MicroOS-Kubic-Rancher-RKE/ - -### Red Hat Enterprise Linux (RHEL) / Oracle Linux (OL) / CentOS - -If using Red Hat Enterprise Linux, Oracle Linux or CentOS, you cannot use the `root` user as [SSH user]({{}}/rke/latest/en/config-options/nodes/#ssh-user) due to [Bugzilla 1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). Please follow the instructions below how to setup Docker correctly, based on the way you installed Docker on the node. - ->**Note:** In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: -> -> ``` - systemctl disable nm-cloud-setup.service nm-cloud-setup.timer - reboot - ``` -> -> In addition, the default firewall settings of RHEL 8.4 prevent RKE1 pods from reaching out to Rancher to connect to the cluster agent. To allow Docker containers to reach out to the internet and connect to Rancher, make the following updates to the firewall settings: -> ``` - firewall-cmd --zone=public --add-masquerade --permanent - firewall-cmd --reload - ``` - -#### Using upstream Docker -If you are using upstream Docker, the package name is `docker-ce` or `docker-ee`. You can check the installed package by executing: - -``` -rpm -q docker-ce -``` - -When using the upstream Docker packages, please follow [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user). - -#### Using RHEL/CentOS packaged Docker -If you are using the Docker package supplied by Red Hat / CentOS, the package name is `docker`. You can check the installed package by executing: - -``` -rpm -q docker -``` - -If you are using the Docker package supplied by Red Hat / CentOS, the `dockerroot` group is automatically added to the system. You will need to edit (or create) `/etc/docker/daemon.json` to include the following: - -``` -{ - "group": "dockerroot" -} -``` - -Restart Docker after editing or creating the file. After restarting Docker, you can check the group permission of the Docker socket (`/var/run/docker.sock`), which should show `dockerroot` as group: - -``` -srw-rw----. 1 root dockerroot 0 Jul 4 09:57 /var/run/docker.sock -``` - -Add the SSH user you want to use to this group, this can't be the `root` user. - -``` -usermod -aG dockerroot -``` - -To verify that the user is correctly configured, log out of the node and login with your SSH user, and execute `docker ps`: - -``` -ssh @node -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -### Red Hat Atomic - -Before trying to use RKE with Red Hat Atomic nodes, there are a couple of updates to the OS that need to occur in order to get RKE working. 
- -#### OpenSSH version - -By default, Atomic hosts ship with OpenSSH 6.4, which doesn't support SSH tunneling, which is a core RKE requirement. If you upgrade to the latest version of OpenSSH supported by Atomic, it will correct the SSH issue. - -#### Creating a Docker Group - -By default, Atomic hosts do not come with a Docker group. You can update the ownership of the Docker socket by enabling the specific user in order to launch RKE. - -``` -# chown /var/run/docker.sock -``` - -### Flatcar Container Linux - -When using Flatcar Container Linux nodes, it is required to use the following configuration in the cluster configuration file: - -{{% tabs %}} -{{% tab "Canal"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: canal - options: - canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} - -{{% tab "Calico"%}} - -```yaml -rancher_kubernetes_engine_config: - network: - plugin: calico - options: - calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds - flannel_backend_type: vxlan - - services: - kube-controller: - extra_args: - flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ -``` -{{% /tab %}} -{{% /tabs %}} - -It is also required to enable the Docker service, you can enable the Docker service using the following command: - -``` -systemctl enable docker.service -``` - -## Software - -This section describes the requirements for Docker, Kubernetes, and SSH. - -### OpenSSH - -In order to SSH into each node, OpenSSH 7.0+ must be installed on each node. - -### Kubernetes - -Refer to the [RKE release notes](https://github.com/rancher/rke/releases) for the supported versions of Kubernetes. - -### Docker - -Each Kubernetes version supports different Docker versions. The Kubernetes release notes contain the [current list](https://kubernetes.io/docs/setup/release/notes/#dependencies) of validated Docker versions. - -### Installing Docker - -Refer to [Installing Docker]({{}}/rancher/v2.5/en/installation/requirements/installing-docker/) - -### Checking the Installed Docker Version - -Confirm that a Kubernetes supported version of Docker is installed on your machine, by running `docker version --format '{{.Server.Version}}'`. - -## Hardware - -This section describes the hardware requirements for the worker role, large Kubernetes clusters, and etcd clusters. - -### Worker Role - -The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. - -Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. - -### Large Kubernetes Clusters - -For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters](https://kubernetes.io/docs/setup/best-practices/cluster-large/). - -### Etcd Clusters - -For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation](https://etcd.io/docs/v3.5/op-guide/hardware/). 
- -## Ports -{{< ports-rke-nodes >}} -{{< requirements_ports_rke >}} - -If you are using an external firewall, make sure you have this port opened between the machine you are using to run `rke` and the nodes that you are going to use in the cluster. - - -### Opening port TCP/6443 using `iptables` - -``` -# Open TCP/6443 for all -iptables -A INPUT -p tcp --dport 6443 -j ACCEPT - -# Open TCP/6443 for one specific IP -iptables -A INPUT -p tcp -s your_ip_here --dport 6443 -j ACCEPT -``` - -### Opening port TCP/6443 using `firewalld` - -``` -# Open TCP/6443 for all -firewall-cmd --zone=public --add-port=6443/tcp --permanent -firewall-cmd --reload - -# Open TCP/6443 for one specific IP -firewall-cmd --permanent --zone=public --add-rich-rule=' - rule family="ipv4" - source address="your_ip_here/32" - port protocol="tcp" port="6443" accept' -firewall-cmd --reload -``` - -## SSH Server Configuration - -Your SSH server system-wide configuration file, located at `/etc/ssh/sshd_config`, must include this line that allows TCP forwarding: - -``` -AllowTcpForwarding yes -``` diff --git a/content/rke/latest/en/troubleshooting/_index.md b/content/rke/latest/en/troubleshooting/_index.md deleted file mode 100644 index c05e95884d..0000000000 --- a/content/rke/latest/en/troubleshooting/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Troubleshooting -weight: 400 ---- - -* [SSH Connectivity Errors]({{}}/rke/latest/en/troubleshooting/ssh-connectivity-errors/) -* [Provisioning Errors]({{}}/rke/latest/en/troubleshooting/provisioning-errors/) diff --git a/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md b/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md deleted file mode 100644 index a9867b3271..0000000000 --- a/content/rke/latest/en/troubleshooting/provisioning-errors/_index.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Provisioning Errors -weight: 200 ---- - -### Failed to get job complete status - -Most common reason for this error is that a node is having issues that block the deploy job from completing successfully. See [Get node conditions]({{}}/rancher/v2.x/en/troubleshooting/kubernetes-resources/#get-node-conditions) how to check node conditions. - -You can also retrieve the log from the job to see if it has an indication of the error, make sure you replace `rke-network-plugin-deploy-job` with the job name from the error: - -Example command to get logs for error `Failed to get job complete status for job rke-network-plugin-deploy-job`: -``` -kubectl -n kube-system get pods -l job-name=rke-network-plugin-deploy-job --no-headers -o custom-columns=NAME:.metadata.name | xargs -L1 kubectl -n kube-system logs -``` - -### Failed to apply the ServiceAccount needed for job execution - -Because this action requires connectivity from the host running `rke up` to the controlplane nodes, this is usually caused by incorrect proxy configuration on the host running `rke up`. The message printed after this error usually is the response from the proxy that is blocking the request. Please verify the `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables are correctly configured, especially `NO_PROXY` if the host cannot reach the controlplane nodes via the configured proxy. 
(this IP range then needs to be added to `NO_PROXY` to make it work) diff --git a/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md b/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md deleted file mode 100644 index 81240247b6..0000000000 --- a/content/rke/latest/en/troubleshooting/ssh-connectivity-errors/_index.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: SSH Connectivity Errors -weight: 100 -aliases: -- /rancher/v2.x/en/installation/ha/rke-add-on/troubleshooting/ssh-tunneling/ - ---- - -### Failed to set up SSH tunneling for host [xxx.xxx.xxx.xxx]: Can't retrieve Docker Info - -#### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed) - -* User specified to connect with does not have permission to access the Docker socket. This can be checked by logging into the host and running the command `docker ps`: - -``` -$ ssh -i ssh_privatekey_file user@server -user@server$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -``` - -See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. - -* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. See [RKE OS Requirements](https://rancher.com/docs/rke/latest/en/os/#red-hat-enterprise-linux-rhel-oracle-enterprise-linux-oel-centos) for more on how to set this up. - -* SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: -``` -$ nc xxx.xxx.xxx.xxx 22 -SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 -``` - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found - -* The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file. -* The key file specified as `ssh_key_path` is malformed. Check if the key is valid by running `ssh-keygen -y -e -f private_key_file`. This will print the public key of the private key, which will fail if the private key file is not valid. - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain - -* The key file specified as `ssh_key_path` is not correct for accessing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. - -#### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys - -* If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. You can configure RKE to use that agent by specifying `--ssh-agent-auth` on the command-line, it will use the `SSH_AUTH_SOCK` environment variable in the environment where the `rke` command is run. 
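
A minimal sequence for this, assuming the encrypted key lives at `~/.ssh/id_rsa` (adjust the path to your key), might look like:

```
# Start an agent, load the passphrase-protected key, then run RKE through the agent
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_rsa
rke up --ssh-agent-auth --config cluster.yml
```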
- -#### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? - -* The node is not reachable on the configured `address` and `port`. diff --git a/content/rke/latest/en/upgrades/_index.md b/content/rke/latest/en/upgrades/_index.md deleted file mode 100644 index f64c128afd..0000000000 --- a/content/rke/latest/en/upgrades/_index.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: Upgrades -weight: 100 ---- - -After RKE has deployed Kubernetes, you can upgrade the versions of the components in your Kubernetes cluster, the [definition of the Kubernetes services]({{}}/rke/latest/en/config-options/services/) or the [add-ons]({{}}/rke/latest/en/config-options/add-ons/). - -The default Kubernetes version for each RKE version can be found in the release notes accompanying [the RKE download](https://github.com/rancher/rke/releases/). RKE v1.x should be used. - -You can also select a newer version of Kubernetes to install for your cluster. - -Each version of RKE has a specific [list of supported Kubernetes versions.](#listing-supported-kubernetes-versions) - -In case the Kubernetes version is defined in the `kubernetes_version` directive and under the `system-images` directive, the `system-images` configuration will take precedence over the `kubernetes_version`. - -This page covers the following topics: - -- [How upgrades work](#how-upgrades-work) -- [Prerequisites](#prerequisites) -- [Upgrading Kubernetes](#upgrading-kubernetes) -- [Configuring the upgrade strategy](#configuring-the-upgrade-strategy) -- [Maintaining availability for applications during upgrades](#maintaining-availability-for-applications-during-upgrades) -- [Listing supported Kubernetes versions](#listing-supported-kubernetes-versions) -- [Kubernetes version precedence](#kubernetes-version-precedence) -- [Using an unsupported Kubernetes version](#using-an-unsupported-kubernetes-version) -- [Mapping the Kubernetes version to services](#mapping-the-kubernetes-version-to-services) -- [Service upgrades](#service-upgrades) -- [Upgrading Nodes Manually](#upgrading-nodes-manually) -- [Rolling Back the Kubernetes Version](#rolling-back-the-kubernetes-version) -- [Troubleshooting](#troubleshooting) - -### How Upgrades Work - -In [this section,]({{}}/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. - -### Prerequisites - -- Ensure that any `system_images` configuration is absent from the `cluster.yml`. The Kubernetes version should only be listed under the `system_images` directive if an [unsupported version](#using-an-unsupported-kubernetes-version) is being used. Refer to [Kubernetes version precedence](#kubernetes-version-precedence) for more information. -- Ensure that the correct files to manage [Kubernetes cluster state]({{}}/rke/latest/en/installation/#kubernetes-cluster-state) are present in the working directory. Refer to the tabs below for the required files, which differ based on the RKE version. - -{{% tabs %}} -{{% tab "RKE v0.2.0+" %}} -The `cluster.rkestate` file contains the current state of the cluster including the RKE configuration and the certificates. - -This file is created in the same directory that has the cluster configuration file `cluster.yml`. - -It is required to keep the `cluster.rkestate` file to perform any operation on the cluster through RKE, or when upgrading a cluster last managed via RKE v0.2.0 or later. 
-{{% /tab %}} -{{% tab "RKE before v0.2.0" %}} -Ensure that the `kube_config_cluster.yml` file is present in the working directory. - -RKE saves the Kubernetes cluster state as a secret. When updating the state, RKE pulls the secret, updates or changes the state, and saves a new secret. The `kube_config_cluster.yml` file is required for upgrading a cluster last managed via RKE v0.1.x. -{{% /tab %}} -{{% /tabs %}} - -### Upgrading Kubernetes - -To upgrade the Kubernetes version of an RKE-provisioned cluster, set the `kubernetes_version` string in the `cluster.yml` to the desired version from the [list of supported Kubernetes versions](#listing-supported-kubernetes-versions) for the specific version of RKE: - -```yaml -kubernetes_version: "v1.15.5-rancher1-1" -``` - -Then invoke `rke up`: - -``` -$ rke up --config cluster.yml -``` - -### Configuring the Upgrade Strategy - -As of v0.1.8, upgrades to add-ons are supported. [Add-ons]({{}}/rke/latest/en/config-options/add-ons/) can also be upgraded by changing any of the add-ons and running `rke up` again with the updated configuration file. - -As of v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade. - -For details on upgrade configuration options, refer to [Configuring the Upgrade Strategy.]({{}}/rke/latest/en/upgrades/configuring-strategy) - -### Maintaining Availability for Applications During Upgrades - -In [this section,]({{}}/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when you upgrade the cluster using `rke up`. - -### Listing Supported Kubernetes Versions - -Please refer to the [release notes](https://github.com/rancher/rke/releases) of the RKE version that you are running, to find the list of supported Kubernetes versions as well as the default Kubernetes version. Note: RKE v1.x should be used. - -You can also list the supported versions and system images of specific version of RKE release with a quick command. - -``` -$ rke config --list-version --all -v1.15.3-rancher2-1 -v1.13.10-rancher1-2 -v1.14.6-rancher2-1 -v1.16.0-beta.1-rancher1-1 -``` - -### Kubernetes Version Precedence - -In case both `kubernetes_version` and `system_images` are defined, the `system_images` configuration will take precedence over `kubernetes_version`. - -In addition, if neither `kubernetes_version` nor `system_images` are configured in the `cluster.yml`, RKE will apply the default Kubernetes version for the specific version of RKE used to invoke `rke up`. - -### Using an Unsupported Kubernetes Version - -As of v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, then RKE will error out. - -Before v0.2.0, if a version is defined in `kubernetes_version` and is not found in the specific list of supported Kubernetes versions, the default version from the supported list is used. - -If you want to use a different version from the supported list, please use the [system images]({{}}/rke/latest/en/config-options/system-images/) option. - -### Mapping the Kubernetes Version to Services - -In RKE, `kubernetes_version` is used to map the version of Kubernetes to the default services, parameters, and options. 
- -For RKE v0.3.0+, the service defaults are located [here](https://github.com/rancher/kontainer-driver-metadata/blob/master/rke/k8s_service_options.go). - -For RKE before v0.3.0, the service defaults are located [here](https://github.com/rancher/types/blob/release/v2.2/apis/management.cattle.io/v3/k8s_defaults.go). Note: The version in the path of the service defaults file corresponds to a Rancher version. Therefore, for Rancher v2.1.x, [this file](https://github.com/rancher/types/blob/release/v2.1/apis/management.cattle.io/v3/k8s_defaults.go) should be used. - -### Service Upgrades - -[Services]({{}}/rke/latest/en/config-options/services/) can be upgraded by changing any of the services arguments or `extra_args` and running `rke up` again with the updated configuration file. - -> **Note:** The following arguments, `service_cluster_ip_range` or `cluster_cidr`, cannot be changed as any changes to these arguments will result in a broken cluster. Currently, network pods are not automatically upgraded. - -### Upgrading Nodes Manually - -_Available as of v1.1.0_ - -You can manually update each type of node separately. As a best practice, upgrade the etcd nodes first, followed by controlplane and then worker nodes. - -### Rolling Back the Kubernetes Version - -_Available as of v1.1.0_ - -A cluster can be restored back to a snapshot that uses a previous Kubernetes version. - -### Troubleshooting - -_Applies to v1.1.0+_ - -If a node doesn't come up after an upgrade, the `rke up` command errors out. - -No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. - -If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. - -A failed node could be in many different states: - -- Powered off -- Unavailable -- User drains a node while upgrade is in process, so there are no kubelets on the node -- The upgrade itself failed - -Some expected failure scenarios include the following: - -- If the maximum unavailable number of nodes is reached during an upgrade, the RKE CLI will error out and exit the CLI with a failure code. -- If some nodes fail to upgrade, but the number of failed nodes doesn't reach the maximum unavailable number of nodes, the RKE CLI logs the nodes that were unable to upgrade and continues to upgrade the add-ons. After the add-ons are upgraded, RKE will error out and exit the CLI with a failure code regardless of add-on upgrade status. \ No newline at end of file diff --git a/content/rke/latest/en/upgrades/configuring-strategy/_index.md b/content/rke/latest/en/upgrades/configuring-strategy/_index.md deleted file mode 100644 index e9e8ce188c..0000000000 --- a/content/rke/latest/en/upgrades/configuring-strategy/_index.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Configuring the Upgrade Strategy -weight: 2 ---- - -In this section, you'll learn how to configure the maximum number of unavailable controlplane and worker nodes, how to drain nodes before upgrading them, and how to configure the replicas for addons such as Ingress. 
- -- [Maximum Unavailable Nodes](#maximum-unavailable-nodes) -- [Draining Nodes](#draining-nodes) -- [Replicas for Ingress and Networking Addons](#replicas-for-ingress-and-networking-addons) -- [Replicas for DNS and Monitoring Addons](#replicas-for-dns-and-monitoring-addons) -- [Example cluster.yml](#example-cluster-yml) - -### Maximum Unavailable Nodes - -The maximum number of unavailable controlplane and worker nodes can be configured in the `cluster.yml` before upgrading the cluster: - -- **max_unavailable_controlplane:** The maximum number of controlplane nodes that can fail without causing the cluster upgrade to fail. By default, `max_unavailable_controlplane` is defined as one node. -- **max_unavailable_worker:** The maximum number of worker nodes that can fail without causing the cluster upgrade to fail. By default, `max_unavailable_worker` is defined as 10 percent of all worker nodes.* - -/* This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node per batch. - -An example configuration of the cluster upgrade strategy is shown below: - -```yaml -upgrade_strategy: - max_unavailable_worker: 10% - max_unavailable_controlplane: 1 -``` - -### Draining Nodes - -By default, nodes are cordoned first before upgrading. Each node should always be cordoned before starting its upgrade so that new pods will not be scheduled to it, and traffic will not reach the node. In addition to cordoning each node, RKE can also be configured to drain each node before starting its upgrade. Draining a node will evict all the pods running on the computing resource. - -For information on draining and how to safely drain a node, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) - -If the `drain` directive is set to `true` in the `cluster.yml`, worker nodes will be drained before they are upgraded. The default value is false: - -```yaml -upgrade_strategy: - max_unavailable_worker: 10% - max_unavailable_controlplane: 1 - drain: false - node_drain_input: - force: false - ignore_daemonsets: true - delete_local_data: false - grace_period: -1 // grace period specified for each pod spec will be used - timeout: 60 -``` - -### Replicas for Ingress and Networking Addons - -The Ingress and network addons are launched as Kubernetes [daemonsets.](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) If no value is given for the [update strategy,](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) Kubernetes sets the update strategy to `rollingUpdate` by default, with `maxUnavailable` set to 1. - -An example configuration of the Ingress and network addons is shown below: - -```yaml -ingress: - provider: nginx - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 5 -network: - plugin: canal - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 6 -``` - -### Replicas for DNS and Monitoring Addons - -The DNS and monitoring addons are launched as Kubernetes [deployments.](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) These addons include `coredns`, `kubedns`, and `metrics-server`, the monitoring deployment. 
- -If no value is configured for their [update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) in the `cluster.yml`, Kubernetes sets the update strategy to `rollingUpdate` by default, with `maxUnavailable` set to 25% and `maxSurge` set to 25%. - -The DNS addons use `cluster-proportional-autoscaler`, which is an [open-source container image](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler) that watches over the number of schedulable nodes and cores of the cluster and resizes the number of replicas for the required resource. This functionality is useful for applications that need to be autoscaled with the number of nodes in the cluster. For the DNS addon, the fields needed for the `cluster-proportional-autoscaler` are made configurable. - -The following table shows the default values for these fields: - -Field Name | Default Value ------------|-------------- -coresPerReplica | 128 -nodesPerReplica | 4 -min | 1 -preventSinglePointFailure | true - -The `cluster-proportional-autoscaler` uses this formula to calculate the number of replicas: - -```plain -replicas = max( ceil( cores * 1/coresPerReplica ) , ceil( nodes * 1/nodesPerReplica ) ) -replicas = min(replicas, max) -replicas = max(replicas, min) -``` - -An example configuration of the DNS and monitoring addons is shown below: - -```yaml -dns: - provider: coredns - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 20% - maxSurge: 15% - linear_autoscaler_params: - cores_per_replica: 0.34 - nodes_per_replica: 4 - prevent_single_point_failure: true - min: 2 - max: 3 -monitoring: - provider: metrics-server - update_strategy: - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 8 -``` - -### Example cluster.yml - -```yaml -# If you intened to deploy Kubernetes in an air-gapped environment, -# please consult the documentation on how to configure custom RKE images. -nodes: -# At least three etcd nodes, two controlplane nodes, and two worker nodes, -# nodes skipped for brevity -upgrade_strategy: - max_unavailable_worker: 10% - max_unavailable_controlplane: 1 - drain: false - node_drain_input: - force: false - ignore_daemonsets: true - delete_local_data: false - grace_period: -1 // grace period specified for each pod spec will be used - timeout: 60 -ingress: - provider: nginx - update_strategy: # Available in v2.4 - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 5 -network: - plugin: canal - update_strategy: # Available in v2.4 - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 6 -dns: - provider: coredns - update_strategy: # Available in v2.4 - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 20% - maxSurge: 15% - linear_autoscaler_params: - cores_per_replica: 0.34 - nodes_per_replica: 4 - prevent_single_point_failure: true - min: 2 - max: 3 -monitoring: - provider: metrics-server - update_strategy: # Available in v2.4 - strategy: RollingUpdate - rollingUpdate: - maxUnavailable: 8 -``` diff --git a/content/rke/latest/en/upgrades/how-upgrades-work/_index.md b/content/rke/latest/en/upgrades/how-upgrades-work/_index.md deleted file mode 100644 index c7eb6fa739..0000000000 --- a/content/rke/latest/en/upgrades/how-upgrades-work/_index.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: How Upgrades Work -weight: 1 ---- - -In this section, you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. 
The below sections describe how each type of node is upgraded by default when a cluster is upgraded using `rke up`. - -{{% tabs %}} -{{% tab "RKE v1.1.0+" %}} - -The following features are new in RKE v1.1.0: - -- The ability to upgrade or edit a cluster without downtime for your applications. -- The ability to manually upgrade nodes of a certain role without upgrading others. -- The ability to restore a Kubernetes cluster to an older Kubernetes version by restoring it to a snapshot that includes the older Kubernetes version. This capability allows you to safely upgrade one type of node at a time, because if an upgrade cannot be completed by all nodes in the cluster, you can downgrade the Kubernetes version of the nodes that were already upgraded. - -When a cluster is upgraded with `rke up`, using the default options, the following process is used: - -1. The etcd plane gets get updated, one node at a time. -1. Controlplane nodes get updated, one node at a time. This includes the controlplane components and worker plane components of the controlplane nodes. -1. Worker plane components of etcd nodes get updated, one node at a time. -1. Worker nodes get updated in batches of a configurable size. The default configuration for the maximum number of unavailable nodes is ten percent, rounded down to the nearest node, with a minimum batch size of one node. -1. [Addons]({{}}/rke/latest/en/config-options/add-ons/) get upgraded one by one. - -The following sections break down in more detail what happens when etcd nodes, controlplane nodes, worker nodes, and addons are upgraded. This information is intended to be used to help you understand the update strategy for the cluster, and may be useful when troubleshooting problems with upgrading the cluster. - -### Upgrades of etcd Nodes - -A cluster upgrade begins by upgrading the etcd nodes one at a time. - -If an etcd node fails at any time, the upgrade will fail and no more nodes will be upgraded. The cluster will be stuck in an updating state and not move forward to upgrading controlplane or worker nodes. - -### Upgrades of Controlplane Nodes - -Controlplane nodes are upgraded one at a time by default. The maximum number of unavailable controlplane nodes can also be configured, so that they can be upgraded in batches. - -As long as the maximum unavailable number or percentage of controlplane nodes has not been reached, Rancher will continue to upgrade other controlplane nodes, then the worker nodes. - -If any controlplane nodes were unable to be upgraded, the upgrade will not proceed to the worker nodes. - -### Upgrades of Worker Nodes - -By default, worker nodes are upgraded in batches. The size of the batch is determined by the maximum number of unavailable worker nodes, configured as the `max_unavailable_worker` directive in the `cluster.yml`. - -By default, the `max_unavailable_worker` nodes is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. - -For example, if you have 11 worker nodes and `max_unavailable_worker` is 25%, two nodes will be upgraded at once because 25% of 11 is 2.75. If you have two worker nodes and `max_unavailable_worker` is 1%, the worker nodes will be upgraded one at a time because the minimum batch size is one. - -When each node in a batch returns to a Ready state, the next batch of nodes begins to upgrade. 
If `kubelet` and `kube-proxy` have started, the node is Ready. As long as the `max_unavailable_worker` number of nodes have not failed, Rancher will continue to upgrade other worker nodes. - -RKE scans the cluster before starting the upgrade to find the powered down or unreachable hosts. The upgrade will stop if that number matches or exceeds the maximum number of unavailable nodes. - -RKE will cordon each node before upgrading it, and uncordon the node afterward. RKE can also be configured to [drain](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) nodes before upgrading them. - -RKE will handle all worker node upgrades before upgrading any add-ons. As long as the maximum number of unavailable worker nodes is not reached, RKE will attempt to upgrade the [addons.](#upgrades-of-addons) For example, if a cluster has two worker nodes and one worker node fails, but the maximum unavailable worker nodes is greater than one, the addons will still be upgraded. - -### Upgrades of Addons - -The availability of your applications partly depends on the availability of [RKE addons.]({{}}/rke/latest/en/config-options/add-ons/) Addons are used to deploy several cluster components, including network plug-ins, the Ingress controller, DNS provider, and metrics server. - -Because RKE addons are necessary for allowing traffic into the cluster, they will need to be updated in batches to maintain availability. You will need to configure the maximum number of unavailable replicas for each addon in the `cluster.yml` to ensure that your cluster will retain enough available replicas during an upgrade. - -For more information on configuring the number of replicas for each addon, refer to [this section.]({{}}/rke/latest/en/upgrades/configuring-strategy) - -For an example showing how to configure the addons, refer to the [example cluster.yml.]({{}}/rke/latest/en/upgrades/configuring-strategy/#example-cluster-yml) - -{{% /tab %}} -{{% tab "RKE before v1.1.0" %}} - -When a cluster is upgraded with `rke up`, using the default options, the following process is used: - -- etcd nodes get updated first, one at a time. -- Controlplane nodes get updated second, in batches of 50 or the total number of worker nodes, whichever is lower. -- Worker nodes and addons get updated third, in batches of 50 or the total number of worker nodes, whichever is lower. -- Addons get upgraded one by one. - -### Upgrades of Controlplane and etcd Nodes - -Controlplane and etcd nodes would be upgraded in batches of 50 nodes or the total number of controlplane nodes, whichever is lower. - -If a node fails at any time, the upgrade will stop upgrading any other nodes and fail. - -### Upgrades of Worker Nodes - -Worker nodes are upgraded simultaneously, in batches of either 50 or the total number of worker nodes, whichever is lower. If a worker node fails at any time, the upgrade stops. - -When a worker node is upgraded, it restarts several Docker processes, including the `kubelet` and `kube-proxy`. When `kube-proxy` comes up, it flushes `iptables`. When this happens, pods on this node can’t be accessed, resulting in downtime for the applications. 
- -{{% /tab %}} -{{% /tabs %}} diff --git a/content/rke/latest/en/upgrades/maintaining-availability/_index.md b/content/rke/latest/en/upgrades/maintaining-availability/_index.md deleted file mode 100644 index 03cc98b751..0000000000 --- a/content/rke/latest/en/upgrades/maintaining-availability/_index.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Maintaining Availability for Applications During Upgrades -weight: 1 ---- -_Available as of v1.1.0_ - -In this section, you'll learn the requirements to prevent downtime for your applications when you upgrade the cluster using `rke up`. - -An upgrade without downtime is one in which your workloads are available on at least a single node, and all critical addon services, such as Ingress and DNS, are available during the upgrade. - -The way that clusters are upgraded changed in RKE v1.1.0. For details, refer to [How Upgrades Work.]({{}}/rke/latest/en/upgrades/how-upgrades-work) - -This availability is achieved by upgrading worker nodes in batches of a configurable size, and ensuring that your workloads run on a number of nodes that exceeds that maximum number of unavailable worker nodes. - -To avoid downtime for your applications during an upgrade, you will need to configure your workloads to continue running despite the rolling upgrade of worker nodes. There are also requirements for the cluster architecture and Kubernetes target version. - -1. [Kubernetes Version Requirement](#1-kubernetes-version-requirement) -2. [Cluster Requirements](#2-cluster-requirements) -3. [Workload Requirements](#3-workload-requirements) - -### 1. Kubernetes Version Requirement - -When upgrading to a newer Kubernetes version, the upgrade must be from a minor release to the next minor version, or to within the same patch release series. - -### 2. Cluster Requirements - -The following must be true of the cluster that will be upgraded: - -1. The cluster has three or more etcd nodes. -1. The cluster has two or more controlplane nodes. -1. The cluster has two or more worker nodes. -1. The Ingress, DNS, and other addons are schedulable to a number of nodes that exceeds the maximum number of unavailable worker nodes, also called the batch size. By default, the minimum number of unavailable worker nodes is 10 percent of worker nodes, rounded down to the nearest node, with a minimum batch size of one node. - -### 3. Workload Requirements - -The following must be true of the cluster's applications: - -1. The application and Ingress are deployed across a number of nodes exceeding the maximum number of unavailable worker nodes, also called the batch size. By default, the minimum number of unavailable worker nodes is 10 percent of worker nodes, rounded down to the nearest node, with a minimum batch size of one node. -1. The applications must make use of liveness and readiness probes. 
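As an illustration only (not taken from the original requirements), a Deployment along the following lines satisfies both points: replicas are spread across worker nodes with pod anti-affinity, and liveness and readiness probes are defined. The name, image, and probe paths are placeholders.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                  # hypothetical application name
spec:
  replicas: 3                # keep replicas above the maximum number of unavailable worker nodes
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app: web
                topologyKey: kubernetes.io/hostname   # spread replicas across nodes
      containers:
        - name: web
          image: nginx:1.21  # placeholder image
          ports:
            - containerPort: 80
          readinessProbe:    # required so upgraded nodes only receive traffic when ready
            httpGet:
              path: /
              port: 80
          livenessProbe:
            httpGet:
              path: /
              port: 80
```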
- -For information on how to use node selectors to assign pods to nodes, refer to the [official Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) - -For information on configuring the number of replicas for each addon, refer to [this section.]({{}}/rke/latest/en/upgrades/configuring-strategy/) \ No newline at end of file diff --git a/data/.gitkeep b/data/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/data/cta/intro-k8s-rancher-online-training.toml b/data/cta/intro-k8s-rancher-online-training.toml deleted file mode 100644 index 689b834bcf..0000000000 --- a/data/cta/intro-k8s-rancher-online-training.toml +++ /dev/null @@ -1,7 +0,0 @@ -header = "Get free weekly training on Kubernetes and Rancher" -copy = "" -button = "Join here" -link = "https://info.rancher.com/rancher-kubernetes-online-training" - - -form-id = "" diff --git a/docs/backups/docker-installs/docker-installs.md b/docs/backups/docker-installs/docker-installs.md new file mode 100644 index 0000000000..162dec39a6 --- /dev/null +++ b/docs/backups/docker-installs/docker-installs.md @@ -0,0 +1,8 @@ +--- +title: Backup and Restore for Rancher Installed with Docker +shortTitle: Docker Installs +weight: 10 +--- + +- [Backups](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) +- [Restores](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) \ No newline at end of file diff --git a/docs/cluster-provisioning/rke-clusters/options/options.md b/docs/cluster-provisioning/rke-clusters/options/options.md new file mode 100644 index 0000000000..aeaf493d3b --- /dev/null +++ b/docs/cluster-provisioning/rke-clusters/options/options.md @@ -0,0 +1,6 @@ +--- +title: RKE Cluster Configuration +weight: 2300 +--- + +This page has moved [here.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) diff --git a/docs/contribute-to-rancher.md b/docs/contribute-to-rancher.md new file mode 100644 index 0000000000..ee2664adae --- /dev/null +++ b/docs/contribute-to-rancher.md @@ -0,0 +1,137 @@ +--- +title: Contributing to Rancher +weight: 700 +--- + +This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. + +For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: + +- How to set up the Rancher development environment and run tests +- The typical flow of an issue through the development lifecycle +- Coding guidelines and development best practices +- Debugging and troubleshooting +- Developing the Rancher API + +On the Rancher Users Slack, the channel for developers is **#developer**. + +# Repositories + +All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. + +Repository | URL | Description +-----------|-----|------------- +Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. 
+Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. +API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. +User Interface | https://github.com/rancher/dashboard/ | This repository is the source of the Dashboard UI. +(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. +machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. +kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. +RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. +CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. +(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. +Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. +loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. + +To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. + +![Rancher diagram](/img/ranchercomponentsdiagram-2.6.svg)
+Rancher components used for provisioning/managing Kubernetes clusters. + +# Building + +Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. + +The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. + +# Bugs, Issues or Questions + +If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. + +If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). + +### Checklist for Filing Issues + +Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. + +:::note + +For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. + +::: + +:::note Important: + +Please remove any sensitive data as it will be publicly viewable. + +::: + +- **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: + - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce + - **Operating System:** What operating system are you using? Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used + - **Docker:** What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info` + - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer + - **Rancher:** What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host + - **Clusters:** What kind of cluster did you create, how did you create it, what did you specify when you were creating it +- **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. 
+ - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. +- **Logs:** Provide data/logs from the used resources. + - Rancher + - Docker install + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') + ``` + - Kubernetes install using `kubectl` + + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ::: + + ``` + kubectl -n cattle-system \ + logs \ + -l app=rancher \ + --timestamps=true + ``` + - Docker install using `docker` on each of the nodes in the RKE cluster + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') + ``` + - Kubernetes Install with RKE Add-On + + :::note + + Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ::: + + ``` + kubectl -n cattle-system \ + logs \ + --timestamps=true \ + -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') + ``` + - System logging (these might not all exist, depending on operating system) + - `/var/log/messages` + - `/var/log/syslog` + - `/var/log/kern.log` + - Docker daemon logging (these might not all exist, depending on operating system) + - `/var/log/docker.log` +- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. + +# Docs + +If you have any updates to our documentation, please make any pull request to our docs repo. + +- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. + +- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. diff --git a/docs/explanations.md b/docs/explanations.md new file mode 100644 index 0000000000..a8ce4af1e5 --- /dev/null +++ b/docs/explanations.md @@ -0,0 +1,6 @@ +--- +title: Explanations +weight: 400 +--- + +**Explanatory docs** are concerned primarily with providing theoretical knowledge for the "why" behind a task or a topic. Explanations are "understanding-oriented" in nature and will clarify a topic in order to broaden the user's knowledge. In this section, users can find additional context and background, alternatives or even opinions on topics, and often historical reasons, constraints, and insights into why a process works the way that it does. 
\ No newline at end of file diff --git a/docs/explanations/integrations-in-rancher/cis-scans/configuration-reference.md b/docs/explanations/integrations-in-rancher/cis-scans/configuration-reference.md new file mode 100644 index 0000000000..ee6c737d8e --- /dev/null +++ b/docs/explanations/integrations-in-rancher/cis-scans/configuration-reference.md @@ -0,0 +1,106 @@ +--- +title: Configuration +weight: 3 +--- + +This configuration reference is intended to help you manage the custom resources created by the `rancher-cis-benchmark` application. These resources are used for performing CIS scans on a cluster, skipping tests, setting the test profile that will be used during a scan, and other customization. + +To configure the custom resources, go to the **Cluster Dashboard** To configure the CIS scans, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to configure CIS scans and click **Explore**. +1. In the left navigation bar, click **CIS Benchmark**. + +### Scans + +A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. + +When configuring a scan, you need to define the name of the scan profile that will be used with the `scanProfileName` directive. + +An example ClusterScan custom resource is below: + +```yaml +apiVersion: cis.cattle.io/v1 +kind: ClusterScan +metadata: + name: rke-cis +spec: + scanProfileName: rke-profile-hardened +``` + +### Profiles + +A profile contains the configuration for the CIS scan, which includes the benchmark version to use and any specific tests to skip in that benchmark. + +:::caution + +By default, a few ClusterScanProfiles are installed as part of the `rancher-cis-benchmark` chart. If a user edits these default benchmarks or profiles, the next chart update will reset them back. So it is advisable for users to not edit the default ClusterScanProfiles. + +::: + +Users can clone the ClusterScanProfiles to create custom profiles. + +Skipped tests are listed under the `skipTests` directive. + +When you create a new profile, you will also need to give it a name. + +An example `ClusterScanProfile` is below: + +```yaml +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + annotations: + meta.helm.sh/release-name: clusterscan-operator + meta.helm.sh/release-namespace: cis-operator-system + labels: + app.kubernetes.io/managed-by: Helm + name: "" +spec: + benchmarkVersion: cis-1.5 + skipTests: + - "1.1.20" + - "1.1.21" +``` + +### Benchmark Versions + +A benchmark version is the name of benchmark to run using `kube-bench`, as well as the valid configuration parameters for that benchmark. + +A `ClusterScanBenchmark` defines the CIS `BenchmarkVersion` name and test configurations. The `BenchmarkVersion` name is a parameter provided to the `kube-bench` tool. + +By default, a few `BenchmarkVersion` names and test configurations are packaged as part of the CIS scan application. When this feature is enabled, these default BenchmarkVersions will be automatically installed and available for users to create a ClusterScanProfile. + +:::caution + +If the default BenchmarkVersions are edited, the next chart update will reset them back. Therefore we don't recommend editing the default ClusterScanBenchmarks. + +::: + +A ClusterScanBenchmark consists of the fields: + +- `ClusterProvider`: This is the cluster provider name for which this benchmark is applicable. For example: RKE, EKS, GKE, etc. 
Leave it empty if this benchmark can be run on any cluster type. +- `MinKubernetesVersion`: Specifies the cluster's minimum kubernetes version necessary to run this benchmark. Leave it empty if there is no dependency on a particular Kubernetes version. +- `MaxKubernetesVersion`: Specifies the cluster's maximum Kubernetes version necessary to run this benchmark. Leave it empty if there is no dependency on a particular k8s version. + +An example `ClusterScanBenchmark` is below: + +```yaml +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + annotations: + meta.helm.sh/release-name: clusterscan-operator + meta.helm.sh/release-namespace: cis-operator-system + creationTimestamp: "2020-08-28T18:18:07Z" + generation: 1 + labels: + app.kubernetes.io/managed-by: Helm + name: cis-1.5 + resourceVersion: "203878" + selfLink: /apis/cis.cattle.io/v1/clusterscanbenchmarks/cis-1.5 + uid: 309e543e-9102-4091-be91-08d7af7fb7a7 +spec: + clusterProvider: "" + minKubernetesVersion: 1.15.0 +``` \ No newline at end of file diff --git a/content/rancher/v2.6/en/cis-scans/custom-benchmark/_index.md b/docs/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md similarity index 100% rename from content/rancher/v2.6/en/cis-scans/custom-benchmark/_index.md rename to docs/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md diff --git a/content/rancher/v2.6/en/cis-scans/rbac/_index.md b/docs/explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md similarity index 100% rename from content/rancher/v2.6/en/cis-scans/rbac/_index.md rename to docs/explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md diff --git a/content/rancher/v2.6/en/cis-scans/skipped-tests/_index.md b/docs/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md similarity index 100% rename from content/rancher/v2.6/en/cis-scans/skipped-tests/_index.md rename to docs/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md diff --git a/docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md b/docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md new file mode 100644 index 0000000000..f7725ddddd --- /dev/null +++ b/docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md @@ -0,0 +1,9 @@ +--- +title: Architecture +weight: 1 +--- + +Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy everything in the cluster. This gives you a high degree of control, consistency, and auditability. Fleet focuses not only on the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster. 
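For context, deployments are typically registered with Fleet through a `GitRepo` custom resource that points at a repository containing the raw YAML, Helm chart, or Kustomize content. The sketch below is illustrative only: the repository URL, path, and names are placeholders, and the `fleet-default` namespace is assumed as the workspace for downstream clusters.

```yaml
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: example-apps            # hypothetical name
  namespace: fleet-default      # assumed workspace for downstream clusters
spec:
  repo: https://github.com/example/fleet-examples   # placeholder repository
  branch: main
  paths:
    - manifests                 # directory of raw YAML, a Helm chart, or Kustomize content
  targets:
    - clusterSelector: {}       # match every registered downstream cluster
```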
+ +![Architecture](/img/fleet-architecture.svg) + diff --git a/content/rancher/v2.6/en/deploy-across-clusters/fleet/proxy/_index.md b/docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md similarity index 100% rename from content/rancher/v2.6/en/deploy-across-clusters/fleet/proxy/_index.md rename to docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md diff --git a/content/rancher/v2.5/en/deploy-across-clusters/fleet/windows/_index.md b/docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md similarity index 100% rename from content/rancher/v2.5/en/deploy-across-clusters/fleet/windows/_index.md rename to docs/explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md diff --git a/docs/explanations/integrations-in-rancher/harvester.md b/docs/explanations/integrations-in-rancher/harvester.md new file mode 100644 index 0000000000..5bee9547a7 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/harvester.md @@ -0,0 +1,44 @@ +--- +title: Harvester Integration +weight: 10 +--- + +Introduced in Rancher v2.6.1, [Harvester](https://docs.harvesterhci.io/) is an open-source hyper-converged infrastructure (HCI) software built on Kubernetes. Harvester installs on bare metal servers and provides integrated virtualization and distributed storage capabilities. Although Harvester operates using Kubernetes, it does not require users to know Kubernetes concepts, making it a more user-friendly application. + +--- +**_New in v2.6.3_** + +Harvester is GA. Please refer to the [Harvester release notes](https://github.com/harvester/harvester/releases) for all updates. + +--- +### Feature Flag + +The Harvester feature flag is used to manage access to the Virtualization Management (VM) page in Rancher where users can navigate directly to Harvester clusters and access the Harvester UI. The Harvester feature flag is enabled by default. Click [here](../../pages-for-subheaders/enable-experimental-features.md) for more information on feature flags in Rancher. + +To navigate to the Harvester cluster, click **☰ > Virtualization Management**. From Harvester Clusters page, click one of the clusters listed to go to the single Harvester cluster view. + +* If the Harvester feature flag is enabled, Harvester clusters will be filtered out from any pages or apps (such as Fleet and the multi-cluster app) that list Kubernetes clusters. + +* If the Harvester feature flag is disabled, and a Harvester cluster is imported, the Harvester cluster will be shown in the Rancher cluster list in the Cluster Management page. Harvester clusters will only be shown on the cluster list when the feature flag is off. + +* With the Harvester integration, Harvester clusters can now be imported into Rancher as a cluster type `Harvester`. + +* Users may import a Harvester cluster only on the Virtualization Management page. Importing a cluster on the Cluster Management page is not supported, and a warning will advise you to return to the VM page to do so. + +### Harvester Node Driver + +The [Harvester node driver](https://docs.harvesterhci.io/v0.3/rancher/node-driver/) is marked as `tech preview` on RKE and RKE2 options in Rancher. This will be the case both on the Create page and once the driver is already enabled. The node driver is available whether or not the Harvester feature flag is enabled. 
Note that the node driver is off by default. Users may create RKE or RKE2 clusters on Harvester only from the Cluster Management page. + +Harvester allows `.ISO` images to be uploaded and displayed through the Harvester UI, but this is not supported in the Rancher UI. This is because `.ISO` images usually require additional setup that interferes with a clean deployment (without requiring user intervention), and they are not typically used in cloud environments. + +Click [here](../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers) for more information on node drivers in Rancher. + +### Limitations + +--- +**Applicable to Rancher v2.6.1 and v2.6.2 only:** + +- Harvester v0.3.0 doesn’t support air-gapped environment installation. +- Harvester v0.3.0 doesn’t support upgrade from v0.2.0 nor upgrade to v1.0.0. + +--- \ No newline at end of file diff --git a/content/rancher/v2.6/en/istio/configuration-reference/rke2/_index.md b/docs/explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md similarity index 100% rename from content/rancher/v2.6/en/istio/configuration-reference/rke2/_index.md rename to docs/explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md diff --git a/docs/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md b/docs/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md new file mode 100644 index 0000000000..08d2db6883 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md @@ -0,0 +1,54 @@ +--- +title: Enable Istio with Pod Security Policies +weight: 1 +--- + +If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. + +The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). + +:::note Prerequisites: + +- The cluster must be an RKE Kubernetes cluster. +- The cluster must have been created with a default PodSecurityPolicy. + +To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. + +::: + +1. [Set the PodSecurityPolicy to unrestricted](#1-set-the-podsecuritypolicy-to-unrestricted) +2. [Enable the CNI](#2-enable-the-cni) +3. [Verify that the CNI is working.](#3-verify-that-the-cni-is-working) + +### 1. Set the PodSecurityPolicy to unrestricted + +An unrestricted PSP allows Istio to be installed. + +Set the PSP to `unrestricted` in the project where is Istio is installed, or the project where you plan to install Istio. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Find the **Project: System** and select the **⋮ > Edit Config**. +1. 
Change the Pod Security Policy option to be unrestricted, then click **Save**. + +### 2. Enable the CNI + +When installing or upgrading Istio through **Apps & Marketplace,** + +1. Click **Components**. +2. Check the box next to **Enabled CNI**. +3. Finish installing or upgrading Istio. + +The CNI can also be enabled by editing the `values.yaml`: + +``` +istio_cni.enabled: true +``` + +Istio should install successfully with the CNI enabled in the cluster. + +### 3. Verify that the CNI is working + +Verify that the CNI is working by deploying a [sample application](https://istio.io/latest/docs/examples/bookinfo/) or deploying one of your own applications. + diff --git a/content/rancher/v2.6/en/istio/configuration-reference/canal-and-project-network/_index.md b/docs/explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md similarity index 100% rename from content/rancher/v2.6/en/istio/configuration-reference/canal-and-project-network/_index.md rename to docs/explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md diff --git a/docs/explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md b/docs/explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md new file mode 100644 index 0000000000..da12c344c6 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md @@ -0,0 +1,125 @@ +--- +title: Selectors and Scrape Configs +weight: 2 +--- + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false`, which enables monitoring across all namespaces by default. + +This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. + +If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. + +- [Limiting Monitoring to Specific Namespaces by Setting ignoreNamespaceSelectors to True](#limiting-monitoring-to-specific-namespaces-by-setting-ignorenamespaceselectors-to-true) +- [Enabling Prometheus to Detect Resources in Other Namespaces](#enabling-prometheus-to-detect-resources-in-other-namespaces) +- [Monitoring Specific Namespaces: Create a Service Monitor or Pod Monitor](#monitoring-specific-namespaces-create-a-service-monitor-or-pod-monitor) +- [Monitoring Across Namespaces: Set ignoreNamespaceSelectors to False](#monitoring-across-namespaces-set-ignorenamespaceselectors-to-false) + +### Limiting Monitoring to Specific Namespaces by Setting ignoreNamespaceSelectors to True + +To limit monitoring to specific namespaces, you will edit the `ignoreNamespaceSelectors` Helm chart option. You will configure this option when installing or upgrading the Monitoring Helm chart: + +1. When installing or upgrading the Monitoring Helm chart, edit the values.yml and set`prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. +1. Complete the install or upgrade. 
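For reference, a minimal sketch of the corresponding values.yml fragment is shown below; it sets only the field named above and assumes all other chart values are left at their defaults.

```yaml
prometheus:
  prometheusSpec:
    # Ignore namespaceSelector fields so each ServiceMonitor/PodMonitor
    # only discovers targets in its own namespace
    ignoreNamespaceSelectors: true
```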
+ +**Result:** Prometheus will be limited to specific namespaces which means one of the following configurations will need to be set up to continue to view data in various dashboards + +### Enabling Prometheus to Detect Resources in Other Namespaces + +There are two different ways to enable Prometheus to detect resources in other namespaces when `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`: + +- **Monitoring specific namespaces:** Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +- **Monitoring across namespaces:** Add an `additionalScrapeConfig` to your rancher-monitoring instance to scrape all targets in all namespaces. + +### Monitoring Specific Namespaces: Create a Service Monitor or Pod Monitor + +This option allows you to define which specific services or pods you would like monitored in a specific namespace. + +The usability tradeoff is that you have to create the service monitor or pod monitor per namespace since you cannot monitor across namespaces. + +:::note Prerequisite: + +Define a ServiceMonitor or PodMonitor for ``. An example ServiceMonitor is provided below. + +::: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the top navigation bar, open the kubectl shell. +1. If the ServiceMonitor or PodMonitor file is stored locally in your cluster, in `kubectl create -f .yaml`. +1. If the ServiceMonitor or PodMonitor is not stored locally, run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. +1. Run `kubectl label namespace istio-injection=enabled` to enable the envoy sidecar injection. + +**Result:** `` can be scraped by prometheus. + +
Example Service Monitor for Istio Proxies
+ +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: envoy-stats-monitor + namespace: istio-system + labels: + monitoring: istio-proxies +spec: + selector: + matchExpressions: + - {key: istio-prometheus-ignore, operator: DoesNotExist} + namespaceSelector: + any: true + jobLabel: envoy-stats + endpoints: + - path: /stats/prometheus + targetPort: 15090 + interval: 15s + relabelings: + - sourceLabels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - action: labeldrop + regex: "__meta_kubernetes_pod_label_(.+)" + - sourceLabels: [__meta_kubernetes_namespace] + action: replace + targetLabel: namespace + - sourceLabels: [__meta_kubernetes_pod_name] + action: replace + targetLabel: pod_name +``` + +### Monitoring across namespaces: Set ignoreNamespaceSelectors to False + +This enables monitoring across namespaces by giving Prometheus additional scrape configurations. + +The usability tradeoff is that all of Prometheus' `additionalScrapeConfigs` are maintained in a single Secret. This could make upgrading difficult if monitoring is already deployed with additionalScrapeConfigs before installing Istio. + +1. When installing or upgrading the Monitoring Helm chart, edit the values.yml and set the `prometheus.prometheusSpec.additionalScrapeConfigs` array to the **Additional Scrape Config** provided below. +1. Complete the install or upgrade. + +**Result:** All namespaces with the `istio-injection=enabled` label will be scraped by prometheus. + +
Additional Scrape Config
+ +``` yaml +- job_name: 'istio/envoy-stats' + scrape_interval: 15s + metrics_path: /stats/prometheus + kubernetes_sd_configs: + - role: pod + relabel_configs: + - source_labels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:15090 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod_name +``` diff --git a/docs/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/docs/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md new file mode 100644 index 0000000000..469dc529f6 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md @@ -0,0 +1,64 @@ +--- +title: CPU and Memory Allocations +weight: 1 +--- + +This section describes the minimum recommended computing resources for the Istio components in a cluster. + +The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) + +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. + +:::tip + +In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. + +::: + +The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each core Istio component. + +In Kubernetes, the resource request indicates that the workload will not deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) + +| Workload | CPU - Request | Memory - Request | CPU - Limit | Memory - Limit | +|----------------------|---------------|------------|-----------------|-------------------| +| ingress gateway | 100m | 128mi | 2000m | 1024mi | +| egress gateway | 100m | 128mi | 2000m | 1024mi | +| istiod | 500m | 2048mi | No limit | No limit | +| proxy | 10m | 10mi | 2000m | 1024mi | +| **Totals:** | **710m** | **2314Mi** | **6000m** | **3072Mi** | + +# Configuring Resource Allocations + +You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. + +To make it easier to schedule the workloads to a node, a cluster-admin can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. + +You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/). + +To configure the resources allocated to an Istio component, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace**. +1. 
Click **Installed Apps**. +1. Go to the `istio-system` namespace. In one of the Istio workloads, such as `rancher-istio`, click **⋮ > Edit/Upgrade**. +1. Click **Upgrade** to edit the base components via changes to the values.yaml or add an [overlay file](../../../pages-for-subheaders/configuration-options.md#overlay-file). For more information about editing the overlay file, see [this section.](cpu-and-memory-allocations.md#editing-the-overlay-file) +1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. +1. Click **Upgrade**. to rollout changes + +**Result:** The resource allocations for the Istio components are updated. + +### Editing the Overlay File + +The overlay file can contain any of the values in the [Istio Operator spec.](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec) The overlay file included with the Istio application is just one example of a potential configuration of the overlay file. + +As long as the file contains `kind: IstioOperator` and the YAML options are valid, the file can be used as an overlay. + +In the example overlay file provided with the Istio application, the following section allows you to change Kubernetes resources: + +``` +# k8s: +# resources: +# requests: +# cpu: 200m +``` diff --git a/docs/explanations/integrations-in-rancher/istio/disable-istio.md b/docs/explanations/integrations-in-rancher/istio/disable-istio.md new file mode 100644 index 0000000000..c7956f35c7 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/istio/disable-istio.md @@ -0,0 +1,43 @@ +--- +title: Disabling Istio +weight: 4 +--- + +This section describes how to uninstall Istio in a cluster or disable a namespace, or workload. + +# Uninstall Istio in a Cluster + +To uninstall Istio, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace > Installed Apps**. +1. In the `istio-system` namespace, go to `rancher-istio` and click **⋮ > Delete**. +1. After `rancher-istio` is deleted, you can then select all the remaining apps in the `istio-system` namespace and click **Delete**. + +**Result:** The `rancher-istio` app in the cluster gets removed. The Istio sidecar cannot be deployed on any workloads in the cluster. + +:::note + +You can no longer disable and re-enable your Istio installation. If you would like to save your settings for a future install, view and save individual YAMLs to refer back to / reuse for future installations. + +::: + +**Troubleshooting Uninstall:** If you didn't follow the uninstall steps, you may encounter a warning during uninstall: + +`Error: uninstallation completed with 1 error(s): unable to build kubernetes objects for delete: unable to recognize "": no matches for kind "MonitoringDashboard" in version "monitoring.kiali.io/v1alpha1"` + +This could mean a few things. You either selected all the apps in the `istio-system` namespace and deleted them at the same time, or you deleted `rancher-istio` chart dependencies prior to deleting the `rancher-istio` chart. Since the uninstall did not complete properly, you will have resources remaining in the `istio-system` namespace that you will need to manually clean up. Another option to avoid manual clean up is to install `rancher-istio` again, then uninstall it in the correct order. + +# Disable Istio in a Namespace + +1. Click **☰ > Cluster Management**. +1. 
Go to the cluster that you created and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Go to the namespace where you want to disable Istio and click **⋮ > Disable Istio Auto Injection**. Alternatively, click the namespace, and then on the namespace detail page, click **⋮ > Disable Istio Auto Injection**. + +**Result:** When workloads are deployed in this namespace, they will not have the Istio sidecar. + +# Remove the Istio Sidecar from a Workload + +Disable Istio in the namespace, then redeploy the workloads within it. They will be deployed without the Istio sidecar. diff --git a/docs/explanations/integrations-in-rancher/istio/rbac-for-istio.md b/docs/explanations/integrations-in-rancher/istio/rbac-for-istio.md new file mode 100644 index 0000000000..f2d6a91ac1 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/istio/rbac-for-istio.md @@ -0,0 +1,44 @@ +--- +title: Role-based Access Control +weight: 3 +--- + +This section describes the permissions required to access Istio features. + +The `rancher-istio` chart installs three `ClusterRoles`. + +## Cluster-Admin Access + +By default, only those with the `cluster-admin` `ClusterRole` can: + +- Install the Istio app in a cluster +- Configure resource allocations for Istio + + +## Admin and Edit Access + +By default, only Admin and Edit roles can: + +- Enable and disable Istio sidecar auto-injection for namespaces +- Add the Istio sidecar to workloads +- View the traffic metrics and traffic graph for the cluster +- Configure Istio's resources (such as the gateway, destination rules, or virtual services) + +## Summary of Default Permissions for Kubernetes Default Roles + +Istio creates three `ClusterRoles` and adds Istio CRD access to the following default K8s `ClusterRoles`: + +ClusterRole created by chart | Default K8s ClusterRole | Rancher Role | + ------------------------------:| ---------------------------:|---------:| + `istio-admin` | admin | Project Owner | + `istio-edit` | edit | Project Member | + `istio-view` | view | Read-only | + +Rancher will continue to use cluster-owner, cluster-member, project-owner, project-member, etc., as role names, but will use the default roles to determine access. For each default K8s `ClusterRole`, there are different Istio CRD permissions and K8s actions (Create ( C ), Get ( G ), List ( L ), Watch ( W ), Update ( U ), Patch ( P ), Delete ( D ), All ( * )) that can be performed. + + +|CRDs | Admin | Edit | View +|----------------------------| ------| -----| ----- +|
| `config.istio.io`: `adapters`, `attributemanifests`, `handlers`, `httpapispecbindings`, `httpapispecs`, `instances`, `quotaspecbindings`, `quotaspecs`, `rules`, `templates` | GLW | GLW | GLW |
| `networking.istio.io`: `destinationrules`, `envoyfilters`, `gateways`, `serviceentries`, `sidecars`, `virtualservices`, `workloadentries` | * | * | GLW |
| `security.istio.io`: `authorizationpolicies`, `peerauthentications`, `requestauthentications`
| * | * | GLW \ No newline at end of file diff --git a/content/rancher/v2.6/en/logging/custom-resource-config/flows/_index.md b/docs/explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md similarity index 100% rename from content/rancher/v2.6/en/logging/custom-resource-config/flows/_index.md rename to docs/explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md diff --git a/docs/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md b/docs/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md new file mode 100644 index 0000000000..93cf01c067 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md @@ -0,0 +1,306 @@ +--- +title: Outputs and ClusterOutputs +weight: 2 +--- + +For the full details on configuring `Outputs` and `ClusterOutputs`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/output/) + +- [Configuration](#configuration) +- [YAML Examples](#yaml-examples) + - [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) + - [Output to Splunk](#output-to-splunk) + - [Output to Syslog](#output-to-syslog) + - [Unsupported Outputs](#unsupported-outputs) + +# Configuration + +- [Outputs](#outputs) +- [ClusterOutputs](#clusteroutputs) + +# Outputs + +The `Output` resource defines where your `Flows` can send the log messages. `Outputs` are the final stage for a logging `Flow`. + +The `Output` is a namespaced resource, which means only a `Flow` within the same namespace can access it. + +You can use secrets in these definitions, but they must also be in the same namespace. + +`Outputs` can be configured by filling out forms in the Rancher UI. + +For the details of `Output` custom resource, see [OutputSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/output_types/) + +The Rancher UI provides forms for configuring the following `Output` types: + +- Amazon ElasticSearch +- Azure Storage +- Cloudwatch +- Datadog +- Elasticsearch +- File +- Fluentd +- GCS +- Kafka +- Kinesis Stream +- LogDNA +- LogZ +- Loki +- New Relic +- Splunk +- SumoLogic +- Syslog + +The Rancher UI provides forms for configuring the `Output` type, target, and access credentials if applicable. + +For example configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) + +# ClusterOutputs + +`ClusterOutput` defines an `Output` without namespace restrictions. It is only effective when deployed in the same namespace as the logging operator. + +`ClusterOutputs` can be configured by filling out forms in the Rancher UI. + +For the details of the `ClusterOutput` custom resource, see [ClusterOutput.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/clusteroutput_types/) + +# YAML Examples + +Once logging is installed, you can use these examples to help craft your own logging pipeline. 
+ +- [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) +- [Output to Splunk](#output-to-splunk) +- [Output to Syslog](#output-to-syslog) +- [Unsupported Outputs](#unsupported-outputs) + +### Cluster Output to ElasticSearch + +Let's say you wanted to send all logs in your cluster to an `elasticsearch` cluster. First, we create a cluster `Output`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: "example-es" + namespace: "cattle-logging-system" +spec: + elasticsearch: + host: elasticsearch.example.com + port: 9200 + scheme: http +``` + +We have created this `ClusterOutput`, without elasticsearch configuration, in the same namespace as our operator: `cattle-logging-system.`. Any time we create a `ClusterFlow` or `ClusterOutput`, we have to put it in the `cattle-logging-system` namespace. + +Now that we have configured where we want the logs to go, let's configure all logs to go to that `ClusterOutput`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: "all-logs" + namespace: "cattle-logging-system" +spec: + globalOutputRefs: + - "example-es" +``` + +We should now see our configured index with logs in it. + + +### Output to Splunk + +What if we have an application team who only wants logs from a specific namespaces sent to a `splunk` server? For this case, we can use namespaced `Outputs` and `Flows`. + +Before we start, let's set up that team's application: `coolapp`. + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: devteam +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coolapp + namespace: devteam + labels: + app: coolapp +spec: + replicas: 2 + selector: + matchLabels: + app: coolapp + template: + metadata: + labels: + app: coolapp + spec: + containers: + - name: generator + image: paynejacob/loggenerator:latest +``` + +With `coolapp` running, we will follow a similar path as when we created a `ClusterOutput`. However, unlike `ClusterOutputs`, we create our `Output` in our application's namespace. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: "devteam-splunk" + namespace: "devteam" +spec: + splunkHec: + hec_host: splunk.example.com + hec_port: 8088 + protocol: http +``` + +Once again, let's feed our `Output` some logs: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: "devteam-logs" + namespace: "devteam" +spec: + localOutputRefs: + - "devteam-splunk" +``` + + +### Output to Syslog + +Let's say you wanted to send all logs in your cluster to an `syslog` server. First, we create a `ClusterOutput`: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: "example-syslog" + namespace: "cattle-logging-system" +spec: + syslog: + buffer: + timekey: 30s + timekey_use_utc: true + timekey_wait: 10s + flush_interval: 5s + format: + type: json + app_name_field: test + host: syslog.example.com + insecure: true + port: 514 + transport: tcp +``` + +Now that we have configured where we want the logs to go, let's configure all logs to go to that `Output`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: "all-logs" + namespace: cattle-logging-system +spec: + globalOutputRefs: + - "example-syslog" +``` + +### Unsupported Outputs + +For the final example, we create an `Output` to write logs to a destination that is not supported out of the box: + +:::note Note on syslog: + +`syslog` is a supported `Output`. 
However, this example still provides an overview on using unsupported plugins. + +::: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: syslog-config + namespace: cattle-logging-system +type: Opaque +stringData: + fluent-bit.conf: | + [INPUT] + Name forward + Port 24224 + + [OUTPUT] + Name syslog + InstanceName syslog-output + Match * + Addr syslog.example.com + Port 514 + Cluster ranchers + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fluentbit-syslog-forwarder + namespace: cattle-logging-system + labels: + output: syslog +spec: + selector: + matchLabels: + output: syslog + template: + metadata: + labels: + output: syslog + spec: + containers: + - name: fluentbit + image: paynejacob/fluent-bit-out-syslog:latest + ports: + - containerPort: 24224 + volumeMounts: + - mountPath: "/fluent-bit/etc/" + name: configuration + volumes: + - name: configuration + secret: + secretName: syslog-config +--- +apiVersion: v1 +kind: Service +metadata: + name: syslog-forwarder + namespace: cattle-logging-system +spec: + selector: + output: syslog + ports: + - protocol: TCP + port: 24224 + targetPort: 24224 +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: all-logs + namespace: cattle-logging-system +spec: + globalOutputRefs: + - syslog +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: syslog + namespace: cattle-logging-system +spec: + forward: + servers: + - host: "syslog-forwarder.cattle-logging-system" + require_ack_response: false + ignore_network_errors_at_startup: false +``` + +Let's break down what is happening here. First, we create a deployment of a container that has the additional `syslog` plugin and accepts logs forwarded from another `fluentd`. Next we create an `Output` configured as a forwarder to our deployment. The deployment `fluentd` will then forward all logs to the configured `syslog` destination. diff --git a/docs/explanations/integrations-in-rancher/logging/logging-architecture.md b/docs/explanations/integrations-in-rancher/logging/logging-architecture.md new file mode 100644 index 0000000000..d977b27981 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/logging/logging-architecture.md @@ -0,0 +1,29 @@ +--- +title: Architecture +weight: 1 +--- + +This section summarizes the architecture of the Rancher logging application. + +For more details about how the Banzai Cloud Logging operator works, see the [official documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) + +### How the Banzai Cloud Logging Operator Works + +The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. + +Fluent Bit queries the Kubernetes API and enriches the logs with metadata about the pods, and transfers both the logs and the metadata to Fluentd. Fluentd receives, filters, and transfers logs to multiple `Outputs`. + +The following custom resources are used to define how logs are filtered and sent to their `Outputs`: + +- A `Flow` is a namespaced custom resource that uses filters and selectors to route log messages to the appropriate `Outputs`. +- A `ClusterFlow` is used to route cluster-level log messages. +- An `Output` is a namespaced resource that defines where the log messages are sent. 
+- A `ClusterOutput` defines an `Output` that is available from all `Flows` and `ClusterFlows`. + +Each `Flow` must reference an `Output`, and each `ClusterFlow` must reference a `ClusterOutput`. + +The following figure from the [Banzai documentation](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) shows the new logging architecture: + +
How the Banzai Cloud Logging Operator Works with Fluentd and Fluent Bit
+ +![How the Banzai Cloud Logging Operator Works with Fluentd](/img/banzai-cloud-logging-operator.png) diff --git a/docs/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md b/docs/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md new file mode 100644 index 0000000000..ae4edcb652 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -0,0 +1,103 @@ +--- +title: rancher-logging Helm Chart Options +shortTitle: Helm Chart Options +weight: 4 +--- + +- [Enable/Disable Windows Node Logging](#enable-disable-windows-node-logging) +- [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) +- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) +- [Enabling the Logging Application to Work with SELinux](#enabling-the-logging-application-to-work-with-selinux) +- [Additional Logging Sources](#additional-logging-sources) +- [Systemd Configuration](#systemd-configuration) + +### Enable/Disable Windows Node Logging + +You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. + +By default, Windows node logging will be enabled if the Cluster Dashboard UI is used to install the logging application on a Windows cluster. + +In this scenario, setting `global.cattle.windows.enabled` to `false` will disable Windows node logging on the cluster. +When disabled, logs will still be collected from Linux nodes within the Windows cluster. + +:::note + +Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists where Windows nodeAgents are not deleted when performing a `helm upgrade` after disabling Windows logging in a Windows cluster. In this scenario, users may need to manually remove the Windows nodeAgents if they are already installed. + +::: + +### Working with a Custom Docker Root Directory + +If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. + +This will ensure that the Logging CRs created will use your specified path rather than the default Docker `data-root` location. + +Note that this only affects Linux nodes. + +If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. + +### Adding NodeSelector Settings and Tolerations for Custom Taints + +You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) + +### Enabling the Logging Application to Work with SELinux + +:::note Requirements: + +Logging v2 was tested with SELinux on RHEL/CentOS 7 and 8. + +::: + +[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. + +To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.](../../../pages-for-subheaders/selinux-rpm.md#installing-the-rancher-selinux-rpm) + +Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. 
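
Taken together, the options above can be combined in a single `values.yaml` for the `rancher-logging` chart. The following is only a sketch: it assumes the dotted key paths named in this section map directly onto nested YAML keys, and the custom Docker root path is a placeholder.

```yaml
# Hypothetical values.yaml snippet for the rancher-logging chart,
# combining the options discussed in this section.
global:
  cattle:
    windows:
      enabled: false                  # disable Windows node logging
  dockerRootDirectory: /mnt/docker    # placeholder: custom Docker data-root
  seLinux:
    enabled: true                     # make the chart SELinux aware
```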
+ +### Additional Logging Sources + +By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. + +In some cases, Rancher may be able to collect additional logs. + +The following table summarizes the sources where additional logs may be collected for each node types: + +| Logging Source | Linux Nodes (including in Windows cluster) | Windows Nodes | +| --- | --- | ---| +| RKE | ✓ | ✓ | +| RKE2 | ✓ | | +| K3s | ✓ | | +| AKS | ✓ | | +| EKS | ✓ | | +| GKE | ✓ | | + +To enable hosted Kubernetes providers as additional logging sources, enable **Enable enhanced cloud provider logging** option when installing or upgrading the Logging Helm chart. + +When enabled, Rancher collects all additional node and control plane logs the provider has made available, which may vary between providers + +If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. + +### Systemd Configuration + +In Rancher logging, `SystemdLogPath` must be configured for K3s and RKE2 Kubernetes distributions. + +K3s and RKE2 Kubernetes distributions log to journald, which is the subsystem of systemd that is used for logging. In order to collect these logs, the `systemdLogPath` needs to be defined. While the `run/log/journal` directory is used by default, some Linux distributions do not default to this path. For example, Ubuntu defaults to `var/log/journal`. To determine your `systemdLogPath` configuration, see steps below. + +**Steps for Systemd Configuration:** + +* Run `cat /etc/systemd/journald.conf | grep -E ^\#?Storage | cut -d"=" -f2` on one of your nodes. +* If `persistent` is returned, your `systemdLogPath` should be `/var/log/journal`. +* If `volatile` is returned, your `systemdLogPath` should be `/run/log/journal`. +* If `auto` is returned, check if `/var/log/journal` exists. + * If `/var/log/journal` exists, then use `/var/log/journal`. + * If `/var/log/journal` does not exist, then use `/run/log/journal`. + +:::note Notes: + +If any value not described above is returned, Rancher Logging will not be able to collect control plane logs. To address this issue, you will need to perform the following actions on every control plane node: + +* Set `Storage=volatile` in journald.conf. +* Reboot your machine. +* Set `systemdLogPath` to `/run/log/journal`. + +::: \ No newline at end of file diff --git a/docs/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md b/docs/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md new file mode 100644 index 0000000000..498569ea50 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md @@ -0,0 +1,202 @@ +--- +title: Migrating to Rancher v2.5 Logging +weight: 2 +--- +Starting in v2.5, the logging feature available within Rancher has been completely overhauled. The [logging operator](https://github.com/banzaicloud/logging-operator) from Banzai Cloud has been adopted; Rancher configures this tooling for use when deploying logging. 
+ +Among the many features and changes in the new logging functionality is the removal of project-specific logging configurations. Instead, one now configures logging at the namespace level. Cluster-level logging remains available, but configuration options differ. + +- [Installation](#installation) + - [Terminology](#terminology) +- [Cluster Logging](#cluster-logging) +- [Project Logging](#project-logging) +- [Output Configuration](#output-configuration) + - [Elasticsearch](#elasticsearch) + - [Splunk](#splunk) + - [Kafka](#kafka) + - [Fluentd](#fluentd) + - [Syslog](#syslog) +- [Custom Log Fields](#custom-log-fields) +- [System Logging](#system-logging) + +# Installation + +To install logging in Rancher v2.5+, refer to the [installation instructions](../../../pages-for-subheaders/logging.md#enabling-logging). + +### Terminology + +In v2.5+, logging configuration in the **Cluster Dashboard**. To configure logging custom resources after the Logging application is installed, go to the left navigation bar and click **Logging**. It is from this menu option that logging for both cluster and namespace is configured. + +:::note + +Logging is installed on a per-cluster basis. You will need to navigate between clusters to configure logging for each cluster. + +::: + +There are four key concepts to understand for v2.5+ logging: + +1. Outputs + + `Outputs` are a configuration resource that determine a destination for collected logs. This is where settings for aggregators such as ElasticSearch, Kafka, etc. are stored. `Outputs` are namespaced resources. + +2. Flows + + `Flows` are a configuration resource that determine collection, filtering, and destination rules for logs. It is within a flow that one will configure what logs to collect, how to mutate or filter them, and which `Outputs` to send the logs to. `Flows` are namespaced resources, and can connect either to an `Output` in the same namespace, or a `ClusterOutput`. + +3. ClusterOutputs + + `ClusterOutputs` serve the same functionality as `Outputs`, except they are a cluster-scoped resource. `ClusterOutputs` are necessary when collecting logs cluster-wide, or if you wish to provide an `Output` to all namespaces in your cluster. + +4. ClusterFlows + + `ClusterFlows` serve the same function as `Flows`, but at the cluster level. They are used to configure log collection for an entire cluster, instead of on a per-namespace level. `ClusterFlows` are also where mutations and filters are defined, same as `Flows` (in functionality). + +# Cluster Logging + +To configure cluster-wide logging for v2.5+ logging, one needs to set up a `ClusterFlow`. This object defines the source of logs, any transformations or filters to be applied, and finally the `Output` (or `Outputs`) for the logs. + +:::note Important: + +`ClusterFlows` must be defined within the `cattle-logging-system` namespace. `ClusterFlows` will not work if defined in any other namespace. + +::: + +In legacy logging, in order to collect logs from across the entire cluster, one only needed to enable cluster-level logging and define the desired `Output`. This basic approach remains in v2.5+ logging. To replicate legacy cluster-level logging, follow these steps: + +1. Define a `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) +2. Create a `ClusterFlow`, ensuring that it is set to be created in the `cattle-logging-system` namespace + 1. Remove all _Include_ and _Exclude_ rules from the `Flow` definition. This ensures that all logs are gathered. 
+ 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation + 3. Define your cluster `Output` or `Outputs` + +This will result in logs from all sources in the cluster (all pods, and all system components) being collected and sent to the `Output` or `Outputs` you defined in the `ClusterFlow`. + +# Project Logging + +Logging in v2.5+ is not project-aware. This means that in order to collect logs from pods running in project namespaces, you will need to define `Flows` for those namespaces. + +To collect logs from a specific namespace, follow these steps: + +1. Define an `Output` or `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) +2. Create a `Flow`, ensuring that it is set to be created in the namespace in which you want to gather logs. + 1. If you wish to define _Include_ or _Exclude_ rules, you may do so. Otherwise, removal of all rules will result in all pods in the target namespace having their logs collected. + 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation + 3. Define your outputs - these can be either `ClusterOutput` or `Output` objects. + +This will result in logs from all sources in the namespace (pods) being collected and sent to the `Output` (or `Outputs`) you defined in your `Flow`. + +:::note + +To collect logs from a project, repeat the above steps for every namespace within the project. Alternatively, you can label your project workloads with a common label (e.g. `project=my-project`) and use a `ClusterFlow` to collect logs from all pods matching this label. + +::: + +# Output Configuration +In legacy logging, there are five logging destinations to choose from: Elasticsearch, Splunk, Kafka, Fluentd, and Syslog. With the exception of Syslog, all of these destinations are available in logging v2.5+. + + +### Elasticsearch + +| Legacy Logging | v2.5+ Logging | Notes | +|-----------------------------------------------|-----------------------------------|-----------------------------------------------------------| +| Endpoint | Target -> Host | Make sure to specify Scheme (https/http), as well as Port | +| X-Pack Security -> Username | Access -> User | | +| X-Pack Security -> Password | Access -> Password | Password must now be stored in a secret | +| SSL Configuration -> Client Private Key | SSL -> Client Key | Key must now be stored in a secret | +| SSL Configuration -> Client Certificate | SSL -> Client Cert | Certificate must now be stored in a secret | +| SSL Configuration -> Client Key Password | SSL -> Client Key Pass | Password must now be stored in a secret | +| SSL Configuration -> Enabled SSL Verification | SSL -> Certificate Authority File | Certificate must now be stored in a secret | + + +In legacy logging, indices were automatically created according to the format in the "Index Patterns" section. In v2.5 logging, default behavior has been changed to logging to a single index. You can still configure index pattern functionality on the `Output` object by editing as YAML and inputting the following values: + +``` +... +spec: + elasticsearch: + ... + logstash_format: true + logstash_prefix: + logstash_dateformat: "%Y-%m-%d" +``` + +Replace `` with the prefix for the indices that will be created. In legacy logging, this defaulted to the name of the cluster. 
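
For context, the `logstash_*` keys above sit under the `elasticsearch` block of an `Output` or `ClusterOutput`. A minimal `ClusterOutput` sketch, using placeholder host, scheme, and prefix values, might look like this:

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: ClusterOutput
metadata:
  name: example-es-index-pattern
  namespace: cattle-logging-system
spec:
  elasticsearch:
    host: elasticsearch.example.com   # placeholder endpoint
    port: 9200
    scheme: https
    logstash_format: true             # write to date-stamped indices instead of a single index
    logstash_prefix: my-cluster       # placeholder prefix; legacy logging used the cluster name
    logstash_dateformat: "%Y-%m-%d"
```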
+ +### Splunk + +| Legacy Logging | v2.5+ Logging | Notes | +|------------------------------------------|----------------------------------------|----------------------------------------------------------------------------------------| +| HEC Configuration -> Endpoint | Target -> Host | Protocol (https/http) and port must be defined separately from the host | +| HEC Configuration -> Token | Access -> Token | Token must now be stored as a secret | +| HEC Configuration -> Index | Edit as YAML -> `index` | `index` field must be added as YAML key under `spec.splunkHec` | +| HEC Configuration -> Source | Edit as YAML -> `source` | `source` field must be added as YAML key under `spec.splunkHec` | +| SSL Configuration -> Client Private Key | Edit as YAML -> `client_key` | `client_key` field must be added as YAML key under `spec.splunkHec`. See (1) | +| SSL Configuration -> Client Certificate | Edit as YAML -> `client_cert` | `client_cert` field must be added as YAML key under `spec.splunkHec`. See (1) | +| SSL Configuration -> Client Key Password | _Not Supported_ | Specifying a password for the client private key is not currently supported. | +| SSL Configuration -> SSL Verify | Edit as YAML -> `ca_file` or `ca_path` | `ca_file` or `ca_path` field must be added as YAML key under `spec.splunkHec`. See (2) | + +_(1) `client_key` and `client_cert` values must be paths to the key and cert files, respectively. These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +_(2) Users can configure either `ca_file` (a path to a PEM-encoded CA certificate) or `ca_path` (a path to a directory containing CA certificates in PEM format). These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +### Kafka + +| Legacy Logging | v2.5+ Logging | Notes | +|-----------------------------------------|----------------------------|------------------------------------------------------| +| Kafka Configuration -> Endpoint Type | - | Zookeeper is no longer supported as an endpoint type | +| Kafka Configuration -> Endpoint | Target -> Brokers | Comma-separated list of brokers (host:port) | +| Kafka Configuration -> Topic | Target -> Default Topic | | +| SSL Configuration -> Client Private Key | SSL -> SSL Client Cert | Certificate must be stored as a secret | +| SSL Configuration -> Client Certificate | SSL -> SSL Client Cert Key | Key must be stored as a secret | +| SSL Configuration -> CA Certificate PEM | SSL -> SSL CA Cert | Certificate must be stored as a secret | +| SASL Configuration -> Username | Access -> Username | Username must be stored in a secret | +| SASL Configuration -> Password | Access -> Password | Password must be stored in a secret | +| SASL Configuration -> Scram Mechanism | Access -> Scram Mechanism | Input mechanism as string, e.g. "sha256" or "sha512" | + +### Fluentd + +As of v2.5.2, it is only possible to add a single Fluentd server using the "Edit as Form" option. To add multiple servers, edit the `Output` as YAML and input multiple servers. 
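
As a sketch of that approach, an `Output` edited as YAML could list two upstream Fluentd servers under `spec.forward.servers` (the hostnames, ports, and weights below are placeholders):

```yaml
apiVersion: logging.banzaicloud.io/v1beta1
kind: Output
metadata:
  name: example-fluentd
  namespace: devteam                  # placeholder namespace
spec:
  forward:
    servers:
      - host: fluentd-a.example.com   # placeholder servers
        port: 24224
        weight: 60
      - host: fluentd-b.example.com
        port: 24224
        weight: 40
```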
+ +| Legacy Logging | v2.5+ Logging | Notes | +|------------------------------------------|-----------------------------------------------------|----------------------------------------------------------------------| +| Fluentd Configuration -> Endpoint | Target -> Host, Port | Input the host and port separately | +| Fluentd Configuration -> Shared Key | Access -> Shared Key | Shared key must be stored as a secret | +| Fluentd Configuration -> Username | Access -> Username | Username must be stored as a secret | +| Fluentd Configuration -> Password | Access -> Password | Password must be stored as a secret | +| Fluentd Configuration -> Hostname | Edit as YAML -> `host` | `host` field set as YAML key under `spec.forward.servers[n]` | +| Fluentd Configuration -> Weight | Edit as YAML -> `weight` | `weight` field set as YAML key under `spec.forward.servers[n]` | +| SSL Configuration -> Use TLS | - | Do not need to explicitly enable. Define client cert fields instead. | +| SSL Configuration -> Client Private Key | Edit as YAML -> `tls_private_key_path` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> Client Certificate | Edit as YAML -> `tls_client_cert_path` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> Client Key Password | Edit as YAML -> `tls_client_private_key_passphrase` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> SSL Verify | Edit as YAML -> `tls_insecure_mode` | Field set as YAML key under `spec.forward`. Default: `false` | +| SSL Configuration -> CA Certificate PEM | Edit as YAML -> `tls_cert_path` | Field set as YAML key under `spec.forward`. See (1) | +| Enable Gzip Compression | - | No longer supported in v2.5+ logging | + +_(1) These values are to be specified as paths to files. Those files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +### Syslog + +As of v2.5.2, syslog is not currently supported for `Outputs` using v2.5+ logging. + +# Custom Log Fields + +In order to add custom log fields, you will need to add the following YAML to your `Flow` configuration: + +``` +... +spec: + filters: + - record_modifier: + records: + - foo: "bar" +``` + +(replace `foo: "bar"` with custom log fields you wish to add) + +# System Logging + +In legacy logging, collecting logs from system components was accomplished by checking a box labeled "Include System Log" when setting up cluster logging. In v2.5+ logging, system logs are gathered in one of two ways: + +1. Gather all cluster logs, not specifying any match or exclusion rules. This results in all container logs from the cluster being collected, which includes system logs. +2. Specifically target system logs by adding match rules for system components. Specific match rules depend on the component being collected. \ No newline at end of file diff --git a/docs/explanations/integrations-in-rancher/logging/rbac-for-logging.md b/docs/explanations/integrations-in-rancher/logging/rbac-for-logging.md new file mode 100644 index 0000000000..8065e16d96 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/logging/rbac-for-logging.md @@ -0,0 +1,25 @@ +--- +shortTitle: Role-based Access Control +title: Role-based Access Control for Logging +weight: 3 +--- + +Rancher logging has two roles, `logging-admin` and `logging-view`. 
+ +- `logging-admin` gives users full access to namespaced `Flows` and `Outputs` +- `logging-view` allows users to *view* namespaced `Flows` and `Outputs`, and `ClusterFlows` and `ClusterOutputs` + +:::note Why choose one role over the other? + +Edit access to `ClusterFlow` and `ClusterOutput` resources is powerful. Any user with it has edit access for all logs in the cluster. + +::: + +In Rancher, the cluster administrator role is the only role with full access to all `rancher-logging` resources. Cluster members are not able to edit or read any logging resources. Project owners and members have the following privileges: + +Project Owners | Project Members +--- | --- +able to create namespaced `Flows` and `Outputs` in their projects' namespaces | only able to view the `Flows` and `Outputs` in projects' namespaces +can collect logs from anything in their projects' namespaces | cannot collect any logs in their projects' namespaces + +Both project owners and project members require at least *one* namespace in their project to use logging. If they do not, then they may not see the logging button in the top nav dropdown. \ No newline at end of file diff --git a/content/rancher/v2.6/en/logging/taints-tolerations/_index.md b/docs/explanations/integrations-in-rancher/logging/taints-and-tolerations.md similarity index 100% rename from content/rancher/v2.6/en/logging/taints-tolerations/_index.md rename to docs/explanations/integrations-in-rancher/logging/taints-and-tolerations.md diff --git a/docs/explanations/integrations-in-rancher/longhorn.md b/docs/explanations/integrations-in-rancher/longhorn.md new file mode 100644 index 0000000000..ce7f6f77f4 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/longhorn.md @@ -0,0 +1,70 @@ +--- +title: Longhorn - Cloud native distributed block storage for Kubernetes +shortTitle: Longhorn Storage +weight: 19 +--- + +[Longhorn](https://longhorn.io/) is a lightweight, reliable, and easy-to-use distributed block storage system for Kubernetes. + +Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. You can learn more about its architecture [here.](https://longhorn.io/docs/latest/concepts/) + +With Longhorn, you can: + +- Use Longhorn volumes as persistent storage for the distributed stateful applications in your Kubernetes cluster +- Partition your block storage into Longhorn volumes so that you can use Kubernetes volumes with or without a cloud provider +- Replicate block storage across multiple nodes and data centers to increase availability +- Store backup data in external storage such as NFS or AWS S3 +- Create cross-cluster disaster recovery volumes so that data from a primary Kubernetes cluster can be quickly recovered from backup in a second Kubernetes cluster +- Schedule recurring snapshots of a volume, and schedule recurring backups to NFS or S3-compatible secondary storage +- Restore volumes from backup +- Upgrade Longhorn without disrupting persistent volumes + +
Longhorn Dashboard
+![Longhorn Dashboard](/img/longhorn-screenshot.png) + +### Installing Longhorn with Rancher + +1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/latest/deploy/install/#installation-requirements) +1. Go to the cluster where you want to install Longhorn. +1. Click **Apps & Marketplace**. +1. Click **Charts**. +1. Click **Longhorn**. +1. Optional: To customize the initial settings, click **Longhorn Default Settings** and edit the configuration. For help customizing the settings, refer to the [Longhorn documentation.](https://longhorn.io/docs/latest/references/settings/) +1. Click **Install**. + +**Result:** Longhorn is deployed in the Kubernetes cluster. + +### Accessing Longhorn from the Rancher UI + +1. Go to the cluster where Longhorn is installed. In the left navigation menu, click **Longhorn**. +1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. + +**Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. + +### Uninstalling Longhorn from the Rancher UI + +1. Go to the cluster where Longhorn is installed and click **Apps & Marketplace**. +1. Click **Installed Apps**. +1. Go to the `longhorn-system` namespace and check the boxes next to the `longhorn` and `longhorn-crd` apps. +1. Click **Delete,** and confirm **Delete**. + +**Result:** Longhorn is uninstalled. + +### GitHub Repository + +The Longhorn project is available [here.](https://github.com/longhorn/longhorn) + +### Documentation + +The Longhorn documentation is [here.](https://longhorn.io/docs/) + +### Architecture + +Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. + +The storage controller and replicas are themselves orchestrated using Kubernetes. + +You can learn more about its architecture [here.](https://longhorn.io/docs/latest/concepts/) + +
Longhorn Architecture
+![Longhorn Architecture](/img/longhorn-architecture.svg) diff --git a/docs/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md new file mode 100644 index 0000000000..544fe72956 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -0,0 +1,114 @@ +--- +title: Built-in Dashboards +weight: 3 +--- + +- [Grafana UI](#grafana-ui) +- [Alertmanager UI](#alertmanager-ui) +- [Prometheus UI](#prometheus-ui) + +# Grafana UI + +[Grafana](https://grafana.com/grafana/) allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. + +To see the default dashboards for time series data visualization, go to the Grafana UI. + +### Customizing Grafana + +To view and customize the PromQL queries powering the Grafana dashboard, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) + +### Persistent Grafana Dashboards + +To create a persistent Grafana dashboard, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) + +### Access to Grafana + +For information about role-based access control for Grafana, see [this section.](rbac-for-monitoring.md#role-based-access-control-for-grafana) + + +# Alertmanager UI + +When `rancher-monitoring` is installed, the Prometheus Alertmanager UI is deployed, allowing you to view your alerts and the current Alertmanager configuration. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](how-monitoring-works.md#how-alertmanager-works) + +::: + +### Accessing the Alertmanager UI + +The Alertmanager UI lets you see the most recently fired alerts. + +:::note Prerequisite: + +The `rancher-monitoring` application must be installed. + +::: + +To see the Alertmanager UI, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the Alertmanager UI, click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Alertmanager**. + +**Result:** The Alertmanager UI opens in a new tab. For help with configuration, refer to the [official Alertmanager documentation.](https://prometheus.io/docs/alerting/latest/alertmanager/) + +For more information on configuring Alertmanager in Rancher, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +
The Alertmanager UI
+![Alertmanager UI](/img/alertmanager-ui.png) + + +### Viewing Default Alerts + +To see alerts that are fired by default, go to the Alertmanager UI and click **Expand all groups**. + + +# Prometheus UI + +By default, the [kube-state-metrics service](https://github.com/kubernetes/kube-state-metrics) provides a wealth of information about CPU and memory utilization to the monitoring application. These metrics cover Kubernetes resources across namespaces. This means that in order to see resource metrics for a service, you don't need to create a new ServiceMonitor for it. Because the data is already in the time series database, you can go to the Prometheus UI and run a PromQL query to get the information. The same query can be used to configure a Grafana dashboard to show a graph of those metrics over time. + +To see the Prometheus UI, install `rancher-monitoring`. Then: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the Prometheus UI and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Prometheus Graph**. + +
Prometheus Graph UI
+![Prometheus Graph UI](/img/prometheus-graph-ui.png) + +### Viewing the Prometheus Targets + +To see what services you are monitoring, you will need to see your targets. Targets are set up by ServiceMonitors and PodMonitors as sources to scrape metrics from. You won't need to directly edit targets, but the Prometheus UI can be useful for giving you an overview of all of the sources of metrics that are being scraped. + +To see the Prometheus Targets, install `rancher-monitoring`. Then: + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the Prometheus targets and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Prometheus Targets**. + +
Targets in the Prometheus UI
+![Prometheus Targets UI](/img/prometheus-targets-ui.png) + +### Viewing the PrometheusRules + +When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive a certain Alert. + +To see the PrometheusRules, install `rancher-monitoring`. Then: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Prometheus Rules**. + +You can also see the rules in the Prometheus UI: + +
Rules in the Prometheus UI
+![PrometheusRules UI](/img/prometheus-rules-ui.png) + +For more information on configuring PrometheusRules in Rancher, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) \ No newline at end of file diff --git a/docs/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md new file mode 100644 index 0000000000..d2937abac7 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md @@ -0,0 +1,256 @@ +--- +title: How Monitoring Works +weight: 1 +--- + +1. [Architecture Overview](#1-architecture-overview) +2. [How Prometheus Works](#2-how-prometheus-works) +3. [How Alertmanager Works](#3-how-alertmanager-works) +4. [Monitoring V2 Specific Components](#4-monitoring-v2-specific-components) +5. [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics) + +# 1. Architecture Overview + +_**The following sections describe how data flows through the Monitoring V2 application:**_ + +### Prometheus Operator + +Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created. When the Prometheus configuration resources are created, Prometheus Operator calls the Prometheus API to sync the new configuration. As the diagram at the end of this section shows, the Prometheus Operator acts as the intermediary between Prometheus and Kubernetes, calling the Prometheus API to synchronize Prometheus with the monitoring-related resources in Kubernetes. + +### ServiceMonitors and PodMonitors + +ServiceMonitors and PodMonitors declaratively specify targets, such as Services and Pods, that need to be monitored. + +- Targets are scraped on a recurring schedule based on the configured Prometheus scrape interval, and the metrics that are scraped are stored into the Prometheus Time Series Database (TSDB). + +- In order to perform the scrape, ServiceMonitors and PodMonitors are defined with label selectors that determine which Services or Pods should be scraped and endpoints that determine how the scrape should happen on the given target, e.g., scrape/metrics in TCP 10252, proxying through IP addr x.x.x.x. + +- Out of the box, Monitoring V2 comes with certain pre-configured exporters that are deployed based on the type of Kubernetes cluster that it is deployed on. For more information, see [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics). + +### How PushProx Works + +- Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called **PushProx**. The Kubernetes components that expose metrics to Prometheus through PushProx are the following: +`kube-controller-manager`, `kube-scheduler`, `etcd`, and `kube-proxy`. + +- For each PushProx exporter, we deploy one PushProx client onto all target nodes. For example, a PushProx client is deployed onto all controlplane nodes for kube-controller-manager, all etcd nodes for kube-etcd, and all nodes for kubelet. + +- We deploy exactly one PushProx proxy per exporter. The process for exporting metrics is as follows: + +1. The PushProx Client establishes an outbound connection with the PushProx Proxy. +1. The client then polls the proxy for scrape requests that have come into the proxy. +1. When the proxy receives a scrape request from Prometheus, the client sees it as a result of the poll. +1. The client scrapes the internal component. +1. 
The internal component responds by pushing metrics back to the proxy. + + +
Process for Exporting Metrics with PushProx:
+ +![Process for Exporting Metrics with PushProx](/img/pushprox-process.svg) + +### PrometheusRules + +PrometheusRules allow users to define rules for what metrics or time series database queries should result in alerts being fired. Rules are evaluated on an interval. + +- **Recording rules** create a new time series based on existing series that have been collected. They are frequently used to precompute complex queries. +- **Alerting rules** run a particular query and fire an alert from Prometheus if the query evaluates to a non-zero value. + +### Alert Routing + +Once Prometheus determines that an alert needs to be fired, alerts are forwarded to **Alertmanager**. + +- Alerts contain labels that come from the PromQL query itself and additional labels and annotations that can be provided as part of specifying the initial PrometheusRule. + +- Before receiving any alerts, Alertmanager will use the **routes** and **receivers** specified in its configuration to form a routing tree on which all incoming alerts are evaluated. Each node of the routing tree can specify additional grouping, labeling, and filtering that needs to happen based on the labels attached to the Prometheus alert. A node on the routing tree (usually a leaf node) can also specify that an alert that reaches it needs to be sent out to a configured Receiver, e.g., Slack, PagerDuty, SMS, etc. Note that Alertmanager will send an alert first to **alertingDriver**, then alertingDriver will send or forward alert to the proper destination. + +- Routes and receivers are also stored in the Kubernetes API via the Alertmanager Secret. When the Secret is updated, Alertmanager is also updated automatically. Note that routing occurs via labels only (not via annotations, etc.). + +
How data flows through the monitoring application:
+ + +# 2. How Prometheus Works + +### Storing Time Series Data + +After collecting metrics from exporters, Prometheus stores the time series in a local on-disk time series database. Prometheus optionally integrates with remote systems, but `rancher-monitoring` uses local storage for the time series database. + +Once stored, users can query this TSDB using PromQL, the query language for Prometheus. + +PromQL queries can be visualized in one of two ways: + +1. By supplying the query in Prometheus's Graph UI, which will show a simple graphical view of the data. +1. By creating a Grafana Dashboard that contains the PromQL query and additional formatting directives that label axes, add units, change colors, use alternative visualizations, etc. + +### Defining Rules for Prometheus + +Rules define queries that Prometheus needs to execute on a regular `evaluationInterval` to perform certain actions, such as firing an alert (alerting rules) or precomputing a query based on others existing in its TSDB (recording rules). These rules are encoded in PrometheusRules custom resources. When PrometheusRule custom resources are created or updated, the Prometheus Operator observes the change and calls the Prometheus API to synchronize the set of rules that Prometheus is currently evaluating on a regular interval. + +A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: + +- The name of the new alert or record +- A PromQL expression for the new alert or record +- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) +- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. + +On evaluating a [rule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#rule), Prometheus will execute the provided PromQL query, add additional provided labels (or annotations - only for alerting rules), and execute the appropriate action for the rule. For example, an Alerting Rule that adds `team: front-end` as a label to the provided PromQL query will append that label to the fired alert, which will allow Alertmanager to forward the alert to the correct Receiver. + +### Alerting and Recording Rules + +Prometheus doesn't maintain the state of whether alerts are active. It fires alerts repetitively at every evaluation interval, relying on Alertmanager to group and filter the alerts into meaningful notifications. + +The `evaluation_interval` constant defines how often Prometheus evaluates its alerting rules against the time series database. Similar to the `scrape_interval`, the `evaluation_interval` also defaults to one minute. + +The rules are contained in a set of rule files. Rule files include both alerting rules and recording rules, but only alerting rules result in alerts being fired after their evaluation. + +For recording rules, Prometheus runs a query, then stores it as a time series. This synthetic time series is useful for storing the results of an expensive or time-consuming query so that it can be queried more quickly in the future. + +Alerting rules are more commonly used. Whenever an alerting rule evaluates to a positive number, Prometheus fires an alert. 
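
To make the two rule types concrete, a minimal `PrometheusRule` sketch with one alerting rule and one recording rule might look like the following (the rule names, PromQL expressions, threshold, and namespace are examples only):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules
  namespace: cattle-monitoring-system    # placeholder namespace
spec:
  groups:
    - name: example.rules
      rules:
        # Alerting rule: fires whenever the expression returns a non-zero result.
        - alert: ContainerRestartingOften
          expr: increase(kube_pod_container_status_restarts_total[10m]) > 2
          labels:
            severity: warning            # labels are what Alertmanager routes on
            team: front-end
          annotations:
            summary: "A container has restarted more than twice in the last 10 minutes."
        # Recording rule: precomputes a query and stores it as a new time series.
        - record: cluster:cpu_usage:ratio
          expr: 1 - avg(rate(node_cpu_seconds_total{mode="idle"}[5m]))
```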
+ +The Rule file adds labels and annotations to alerts before firing them, depending on the use case: + +- Labels indicate information that identifies the alert and could affect the routing of the alert. For example, if when sending an alert about a certain container, the container ID could be used as a label. + +- Annotations denote information that doesn't affect where an alert is routed, for example, a runbook or an error message. + +# 3. How Alertmanager Works + +The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of the following tasks: + +- Deduplicating, grouping, and routing alerts to the correct receiver integration such as email, PagerDuty, or OpsGenie + +- Silencing and inhibition of alerts + +- Tracking alerts that fire over time + +- Sending out the status of whether an alert is currently firing, or if it is resolved + +### Alerts Forwarded by alertingDrivers + +When alertingDrivers are installed, this creates a `Service` that can be used as the receiver's URL for Teams or SMS, based on the alertingDriver's configuration. The URL in the Receiver points to the alertingDrivers; so the Alertmanager sends alert first to alertingDriver, then alertingDriver forwards or sends alert to the proper destination. + +### Routing Alerts to Receivers + +Alertmanager coordinates where alerts are sent. It allows you to group alerts based on labels and fire them based on whether certain labels are matched. One top-level route accepts all alerts. From there, Alertmanager continues routing alerts to receivers based on whether they match the conditions of the next route. + +While the Rancher UI forms only allow editing a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager Secret. + +### Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +By editing custom YAML in the Alertmanager or Receiver configuration, you can also send alerts to multiple notification systems. For more information, see the section on configuring [Receivers.](../../../reference-guides/monitoring-v2-configuration/receivers.md#configuring-multiple-receivers) + +# 4. Monitoring V2 Specific Components + +Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. + +Prometheus Operator will automatically update your Prometheus configuration based on the live state of the resources and configuration options that are edited in the Rancher UI. + +### Resources Deployed by Default + +By default, a set of resources curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring/Alerting stack. 
+ +The resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). + +### Default Exporters + +Monitoring V2 deploys three default exporters that provide additional metrics for Prometheus to store: + +1. `node-exporter`: exposes hardware and OS metrics for Linux hosts. For more information on `node-exporter`, refer to the [upstream documentation](https://prometheus.io/docs/guides/node-exporter/). + +1. `windows-exporter`: exposes hardware and OS metrics for Windows hosts (only deployed on Windows clusters). For more information on `windows-exporter`, refer to the [upstream documentation](https://github.com/prometheus-community/windows_exporter). + +1. `kube-state-metrics`: expose additional metrics that track the state of resources contained in the Kubernetes API (e.g., pods, workloads, etc.). For more information on `kube-state-metrics`, refer to the [upstream documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs). + +ServiceMonitors and PodMonitors will scrape these exporters, as defined [here](#defining-what-metrics-are-scraped). Prometheus stores these metrics, and you can query the results via either Prometheus's UI or Grafana. + +See the [architecture](#1-architecture-overview) section for more information on recording rules, alerting rules, and Alertmanager. + +### Components Exposed in the Rancher UI + +When the monitoring application is installed, you will be able to edit the following components in the Rancher UI: + +| Component | Type of Component | Purpose and Common Use Cases for Editing | +|--------------|------------------------|---------------------------| +| ServiceMonitor | Custom resource | Sets up Kubernetes Services to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| PodMonitor | Custom resource | Sets up Kubernetes Pods to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| Receiver | Configuration block (part of Alertmanager) | Modifies information on where to send an alert (e.g., Slack, PagerDuty, etc.) and any necessary information to send the alert (e.g., TLS certs, proxy URLs, etc.). Automatically updates the Alertmanager custom resource. | +| Route | Configuration block (part of Alertmanager) | Modifies the routing tree that is used to filter, label, and group alerts based on labels and send them to the appropriate Receiver. Automatically updates the Alertmanager custom resource. | +| PrometheusRule | Custom resource | Defines additional queries that need to trigger alerts or define materialized views of existing series that are within Prometheus's TSDB. Automatically updates the Prometheus custom resource. 
| + +### PushProx + +PushProx allows Prometheus to scrape metrics across a network boundary, which prevents users from having to expose metrics ports for internal Kubernetes components on each node in a Kubernetes cluster. + +Since the metrics for Kubernetes components are generally exposed on the host network of nodes in the cluster, PushProx deploys a DaemonSet of clients that sit on the hostNetwork of each node and make an outbound connection to a single proxy that is sitting on the Kubernetes API. Prometheus can then be configured to proxy scrape requests through the proxy to each client, which allows it to scrape metrics from the internal Kubernetes components without requiring any inbound node ports to be open. + +Refer to [Scraping Metrics with PushProx](#scraping-metrics-with-pushprox) for more. + +# 5. Scraping and Exposing Metrics + +### Defining what Metrics are Scraped + +ServiceMonitors and PodMonitors define targets that are intended for Prometheus to scrape. The [Prometheus custom resource](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md#prometheus) tells Prometheus which ServiceMonitors or PodMonitors it should use to find out where to scrape metrics from. + +The Prometheus Operator observes the ServiceMonitors and PodMonitors. When it observes that they are created or updated, it calls the Prometheus API to update the scrape configuration in the Prometheus custom resource and keep it in sync with the scrape configuration in the ServiceMonitors or PodMonitors. This scrape configuration tells Prometheus which endpoints to scrape metrics from and how it will label the metrics from those endpoints. + +Prometheus scrapes all of the metrics defined in its scrape configuration at every `scrape_interval`, which is one minute by default. + +The scrape configuration can be viewed as part of the Prometheus custom resource that is exposed in the Rancher UI. + +### How the Prometheus Operator Sets up Metrics Scraping + +The Prometheus Deployment or StatefulSet scrapes metrics, and the configuration of Prometheus is controlled by the Prometheus custom resources. The Prometheus Operator watches for Prometheus and Alertmanager resources, and when they are created, the Prometheus Operator creates a Deployment or StatefulSet for Prometheus or Alertmanager with the user-defined configuration. + +When the Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created, it knows that the scrape configuration needs to be updated in Prometheus. It updates Prometheus by first updating the configuration and rules files in the volumes of Prometheus's Deployment or StatefulSet. Then it calls the Prometheus API to sync the new configuration, resulting in the Prometheus Deployment or StatefulSet to be modified in place. + +### How Kubernetes Component Metrics are Exposed + +Prometheus scrapes metrics from deployments known as [exporters,](https://prometheus.io/docs/instrumenting/exporters/) which export the time series data in a format that Prometheus can ingest. In Prometheus, time series consist of streams of timestamped values belonging to the same metric and the same set of labeled dimensions. + +### Scraping Metrics with PushProx + +Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called PushProx. 
For detailed information on PushProx, refer [here](#how-pushprox-works) and to the above [architecture](#1-architecture-overview) section. + +### Scraping Metrics + +The following Kubernetes components are directly scraped by Prometheus: + +- kubelet* +- ingress-nginx** +- coreDns/kubeDns +- kube-api-server + +\* You can optionally use `hardenedKubelet.enabled` to use a PushProx, but that is not the default. + +** For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. + + +### Scraping Metrics Based on Kubernetes Distribution + +Metrics are scraped differently based on the Kubernetes distribution. For help with terminology, refer [here](#terminology). For details, see the table below: + +
**How Metrics are Exposed to Prometheus**
+ +| Kubernetes Component | RKE | RKE2 | KubeADM | K3s | +|-----|-----|-----|-----|-----| +| kube-controller-manager | rkeControllerManager.enabled |rke2ControllerManager.enabled | kubeAdmControllerManager.enabled | k3sServer.enabled | +| kube-scheduler | rkeScheduler.enabled | rke2Scheduler.enabled |kubeAdmScheduler.enabled | k3sServer.enabled | +| etcd | rkeEtcd.enabled | rke2Etcd.enabled | kubeAdmEtcd.enabled | Not available | +| kube-proxy | rkeProxy.enabled | rke2Proxy.enabled | kubeAdmProxy.enabled | k3sServer.enabled | +| kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | +| ingress-nginx* | Collects metrics directly exposed by kubelet, exposed by rkeIngressNginx.enabled | Collects metrics directly exposed by kubelet, Exposed by rke2IngressNginx.enabled | Not available | Not available | +| coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | +| kube-api-server | Collects metrics directly exposed by kube-api-server |Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-appi-server | Collects metrics directly exposed by kube-api-server | + +\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. + +### Terminology + +- **kube-scheduler:** The internal Kubernetes component that uses information in the pod spec to decide on which node to run a pod. +- **kube-controller-manager:** The internal Kubernetes component that is responsible for node management (detecting if a node fails), pod replication and endpoint creation. +- **etcd:** The internal Kubernetes component that is the distributed key/value store which Kubernetes uses for persistent storage of all cluster information. +- **kube-proxy:** The internal Kubernetes component that watches the API server for pods/services changes in order to maintain the network up to date. +- **kubelet:** The internal Kubernetes component that watches the API server for pods on a node and makes sure they are running. +- **ingress-nginx:** An Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer. +- **coreDns/kubeDns:** The internal Kubernetes component responsible for DNS. +- **kube-api-server:** The main internal Kubernetes component that is responsible for exposing APIs for the other master components. diff --git a/docs/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md new file mode 100644 index 0000000000..d0201d1f93 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md @@ -0,0 +1,428 @@ +--- +title: PromQL Expression Reference +weight: 6 +--- + +The PromQL expressions in this doc can be used to configure alerts. 
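As a sketch of how one of these expressions can be wired into an alert, the minimal PrometheusRule below wraps the cluster CPU utilization summary expression from the first table. The resource name, namespace, threshold, and severity label are illustrative assumptions rather than values shipped with the chart:

```yaml
# Hypothetical PrometheusRule that fires when average cluster CPU utilization
# stays above 90% for five minutes. Adjust names and thresholds to your cluster.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-cluster-cpu-alert
  namespace: cattle-monitoring-system
spec:
  groups:
    - name: example-cluster-cpu
      rules:
        - alert: ClusterCPUUtilizationHigh
          # Cluster CPU Utilization (Summary) expression from the table below
          expr: '1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m]))) > 0.9'
          for: 5m
          labels:
            severity: warning
          annotations:
            summary: Cluster CPU utilization has been above 90% for 5 minutes.
```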
+ +For more information about querying the Prometheus time series database, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) + + + +- [Cluster Metrics](#cluster-metrics) + - [Cluster CPU Utilization](#cluster-cpu-utilization) + - [Cluster Load Average](#cluster-load-average) + - [Cluster Memory Utilization](#cluster-memory-utilization) + - [Cluster Disk Utilization](#cluster-disk-utilization) + - [Cluster Disk I/O](#cluster-disk-i-o) + - [Cluster Network Packets](#cluster-network-packets) + - [Cluster Network I/O](#cluster-network-i-o) +- [Node Metrics](#node-metrics) + - [Node CPU Utilization](#node-cpu-utilization) + - [Node Load Average](#node-load-average) + - [Node Memory Utilization](#node-memory-utilization) + - [Node Disk Utilization](#node-disk-utilization) + - [Node Disk I/O](#node-disk-i-o) + - [Node Network Packets](#node-network-packets) + - [Node Network I/O](#node-network-i-o) +- [Etcd Metrics](#etcd-metrics) + - [Etcd Has a Leader](#etcd-has-a-leader) + - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) + - [Number of Failed Proposals](#number-of-failed-proposals) + - [GRPC Client Traffic](#grpc-client-traffic) + - [Peer Traffic](#peer-traffic) + - [DB Size](#db-size) + - [Active Streams](#active-streams) + - [Raft Proposals](#raft-proposals) + - [RPC Rate](#rpc-rate) + - [Disk Operations](#disk-operations) + - [Disk Sync Duration](#disk-sync-duration) +- [Kubernetes Components Metrics](#kubernetes-components-metrics) + - [API Server Request Latency](#api-server-request-latency) + - [API Server Request Rate](#api-server-request-rate) + - [Scheduling Failed Pods](#scheduling-failed-pods) + - [Controller Manager Queue Depth](#controller-manager-queue-depth) + - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) + - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) + - [Ingress Controller Connections](#ingress-controller-connections) + - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) +- [Rancher Logging Metrics](#rancher-logging-metrics) + - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) + - [Fluentd Input Rate](#fluentd-input-rate) + - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) + - [Fluentd Output Rate](#fluentd-output-rate) +- [Workload Metrics](#workload-metrics) + - [Workload CPU Utilization](#workload-cpu-utilization) + - [Workload Memory Utilization](#workload-memory-utilization) + - [Workload Network Packets](#workload-network-packets) + - [Workload Network I/O](#workload-network-i-o) + - [Workload Disk I/O](#workload-disk-i-o) +- [Pod Metrics](#pod-metrics) + - [Pod CPU Utilization](#pod-cpu-utilization) + - [Pod Memory Utilization](#pod-memory-utilization) + - [Pod Network Packets](#pod-network-packets) + - [Pod Network I/O](#pod-network-i-o) + - [Pod Disk I/O](#pod-disk-i-o) +- [Container Metrics](#container-metrics) + - [Container CPU Utilization](#container-cpu-utilization) + - [Container Memory Utilization](#container-memory-utilization) + - [Container Disk I/O](#container-disk-i-o) + + + +# Cluster Metrics + +### Cluster CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | +| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | + +### Cluster Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
load1: `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`<br/>load5: `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`<br/>load15: `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)` |
+| Summary | load1: `sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`<br/>load5: `sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`<br/>load15: `sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
| + +### Cluster Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | + +### Cluster Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | + +### Cluster Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
read: `sum(rate(node_disk_read_bytes_total[5m])) by (instance)`<br/>written: `sum(rate(node_disk_written_bytes_total[5m])) by (instance)` |
+| Summary | read: `sum(rate(node_disk_read_bytes_total[5m]))`<br/>written: `sum(rate(node_disk_written_bytes_total[5m]))`
| + +### Cluster Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
+| Summary | receive-dropped: `sum(rate(node_network_receive_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>receive-errs: `sum(rate(node_network_receive_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>receive-packets: `sum(rate(node_network_receive_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>transmit-dropped: `sum(rate(node_network_transmit_drop_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>transmit-errs: `sum(rate(node_network_transmit_errs_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>transmit-packets: `sum(rate(node_network_transmit_packets_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| + +### Cluster Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)`<br/>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m])) by (instance)` |
+| Summary | receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`<br/>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*"}[5m]))`
| + +# Node Metrics + +### Node CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | +| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | + +### Node Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
load1: `sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br/>load5: `sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br/>load15: `sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})` |
+| Summary | load1: `sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br/>load5: `sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`<br/>load15: `sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
| + +### Node Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | + +### Node Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | + +### Node Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
read: `sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`<br/>written: `sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))` |
+| Summary | read: `sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`<br/>written: `sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
| + +### Node Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
| +| Summary |
receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
| + +### Node Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)`<br/>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m])) by (device)` |
+| Summary | receive: `sum(rate(node_network_receive_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`<br/>transmit: `sum(rate(node_network_transmit_bytes_total{device!~"lo\|veth.*\|docker.*\|flannel.*\|cali.*\|cbr.*",instance=~"$instance"}[5m]))`
| + +# Etcd Metrics + +### Etcd Has a Leader + +`max(etcd_server_has_leader)` + +### Number of Times the Leader Changes + +`max(etcd_server_leader_changes_seen_total)` + +### Number of Failed Proposals + +`sum(etcd_server_proposals_failed_total)` + +### GRPC Client Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
| +| Summary |
in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
| + +### Peer Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
| +| Summary |
in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
| + +### DB Size + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | +| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | + +### Active Streams + +| Catalog | Expression | +| --- | --- | +| Detail |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
| +| Summary |
lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
| + +### Raft Proposals + +| Catalog | Expression | +| --- | --- | +| Detail |
applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
| +| Summary |
applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
pending`sum(increase(etcd_server_proposals_pending[5m]))`
failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
| + +### RPC Rate + +| Catalog | Expression | +| --- | --- | +| Detail |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
| +| Summary |
total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
| + +### Disk Operations + +| Catalog | Expression | +| --- | --- | +| Detail |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
| +| Summary |
commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
| + +### Disk Sync Duration + +| Catalog | Expression | +| --- | --- | +| Detail |
wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
| +| Summary |
wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
| + +# Kubernetes Components Metrics + +### API Server Request Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | +| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | + +### API Server Request Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | +| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | + +### Scheduling Failed Pods + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | +| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | + +### Controller Manager Queue Depth + +| Catalog | Expression | +| --- | --- | +| Detail |
volumes: `sum(volumes_depth) by (instance)`<br/>deployment: `sum(deployment_depth) by (instance)`<br/>replicaset: `sum(replicaset_depth) by (instance)`<br/>service: `sum(service_depth) by (instance)`<br/>serviceaccount: `sum(serviceaccount_depth) by (instance)`<br/>endpoint: `sum(endpoint_depth) by (instance)`<br/>daemonset: `sum(daemonset_depth) by (instance)`<br/>statefulset: `sum(statefulset_depth) by (instance)`<br/>replicationmanager: `sum(replicationmanager_depth) by (instance)` |
+| Summary | volumes: `sum(volumes_depth)`<br/>deployment: `sum(deployment_depth)`<br/>replicaset: `sum(replicaset_depth)`<br/>service: `sum(service_depth)`<br/>serviceaccount: `sum(serviceaccount_depth)`<br/>endpoint: `sum(endpoint_depth)`<br/>daemonset: `sum(daemonset_depth)`<br/>statefulset: `sum(statefulset_depth)`<br/>replicationmanager: `sum(replicationmanager_depth)`
| + +### Scheduler E2E Scheduling Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | +| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | + +### Scheduler Preemption Attempts + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | +| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | + +### Ingress Controller Connections + +| Catalog | Expression | +| --- | --- | +| Detail |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
| +| Summary |
reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
| + +### Ingress Controller Request Process Time + +| Catalog | Expression | +| --- | --- | +| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | +| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | + +# Rancher Logging Metrics + + +### Fluentd Buffer Queue Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | + +### Fluentd Input Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | + +### Fluentd Output Errors Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | +| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | + +### Fluentd Output Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | + +# Workload Metrics + +### Workload CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| +| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| + +### Workload Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | +| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | + +### Workload Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| +| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| + +### Workload Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| +| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| + +### Workload Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
| +| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
| + +# Pod Metrics + +### Pod CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
| +| Summary |
cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
| + +### Pod Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | +| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | + +### Pod Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| +| Summary |
receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| + +### Pod Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| +| Summary |
receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| + +### Pod Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
| +| Summary |
read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
| + +# Container Metrics + +### Container CPU Utilization + +| Catalog | Expression | +| --- | --- | +| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | + +### Container Memory Utilization + +`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` + +### Container Disk I/O + +| Catalog | Expression | +| --- | --- | +| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/docs/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md new file mode 100644 index 0000000000..418d42d95f --- /dev/null +++ b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md @@ -0,0 +1,181 @@ +--- +title: Role-based Access Control +shortTitle: RBAC +weight: 2 +--- +This section describes the expectations for RBAC for Rancher Monitoring. + +- [Cluster Admins](#cluster-admins) +- [Users with Kubernetes ClusterRole-based Permissions](#users-with-kubernetes-clusterrole-based-permissions) + - [Users with Kubernetes Admin/Edit Permissions](#users-with-kubernetes-admin-edit-permissions) + - [Users with Kubernetes View Permissions](#users-with-kubernetes-view-permissions) + - [Additional Monitoring Roles](#additional-monitoring-roles) + - [Additional Monitoring ClusterRoles](#additional-monitoring-clusterroles) +- [Users with Rancher Based Permissions](#users-with-rancher-based-permissions) + - [Differences in 2.5.x](#differences-in-2-5-x) + - [Assigning Additional Access](#assigning-additional-access) +- [Role-based Access Control for Grafana](#role-based-access-control-for-grafana) + +# Cluster Admins + +By default, only those with the cluster-admin `ClusterRole` should be able to: + +- Install the `rancher-monitoring` App onto a cluster and all other relevant configuration performed on the chart deploy + - e.g. whether default dashboards are created, what exporters are deployed onto the cluster to collect metrics, etc. +- Create / modify / delete Prometheus deployments in the cluster via Prometheus CRs +- Create / modify / delete Alertmanager deployments in the cluster via Alertmanager CRs +- Persist new Grafana dashboards or datasources via creating ConfigMaps in the appropriate namespace +- Expose certain Prometheus metrics to the k8s Custom Metrics API for HPA via a Secret in the `cattle-monitoring-system` namespace + +# Users with Kubernetes ClusterRole-based Permissions + +The `rancher-monitoring` chart installs the following three `ClusterRoles`. 
By default, they aggregate into the corresponding k8s `ClusterRoles`: + +| ClusterRole | Aggregates To Default K8s ClusterRole | +| ------------------------------| ---------------------------| +| `monitoring-admin` | `admin`| +| `monitoring-edit` | `edit` | +| `monitoring-view` | `view ` | + +These `ClusterRoles` provide different levels of access to the Monitoring CRDs based on the actions that can be performed: + +| CRDs (monitoring.coreos.com) | Admin | Edit | View | +| ------------------------------| ---------------------------| ---------------------------| ---------------------------| +|
• `prometheuses`<br/>• `alertmanagers`
| Get, List, Watch | Get, List, Watch | Get, List, Watch | +|
• `servicemonitors`<br/>• `podmonitors`<br/>• `prometheusrules`
| * | * | Get, List, Watch | + +On a high level, the following permissions are assigned by default as a result. + +### Users with Kubernetes Admin/Edit Permissions + +Only those with the the cluster-admin, admin or edit `ClusterRole` should be able to: + +- Modify the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs +- Modify the alerting / recording rules of a Prometheus deployment via PrometheusRules CRs + +### Users with Kubernetes View Permissions + +Only those with who have some Kubernetes `ClusterRole` should be able to: + +- View the configuration of Prometheuses that are deployed within the cluster +- View the configuration of Alertmanagers that are deployed within the cluster +- View the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs +- View the alerting/recording rules of a Prometheus deployment via PrometheusRules CRs + +### Additional Monitoring Roles + +Monitoring also creates additional `Roles` that are not assigned to users by default but are created within the cluster. They can be bound to a namespace by deploying a `RoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). + +Admins should use these roles to provide more fine-grained access to users: + +| Role | Purpose | +| ------------------------------| ---------------------------| +| monitoring-config-admin | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-config-edit | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-config-view | Allow admins to assign roles to users to be able to view Secrets and ConfigMaps within the cattle-monitoring-system namespace. Viewing Secrets / ConfigMaps in this namespace could allow users to observe the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-dashboard-admin | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | +| monitoring-dashboard-edit | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | +| monitoring-dashboard-view | Allow admins to assign roles to users to be able to view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | + +### Additional Monitoring ClusterRoles + +Monitoring also creates additional `ClusterRoles` that are not assigned to users by default but are created within the cluster. 
They are not aggregated by default but can be bound to a namespace by deploying a `RoleBinding` or `ClusterRoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). + +| Role | Purpose | +| ------------------------------| ---------------------------| +| monitoring-ui-view | _Available as of Monitoring v2 14.5.100+_ Provides read-only access to external Monitoring UIs by giving a user permission to list the Prometheus, Alertmanager, and Grafana endpoints and make GET requests to Prometheus, Grafana, and Alertmanager UIs through the Rancher proxy. | + +### Assigning Roles and ClusterRoles with kubectl + +An alternative method to using Rancher to attach a `Role` or `ClusterRole` to a user or group is by defining bindings in YAML files that you create. You must first configure the `RoleBinding` with the YAML file, then you apply the config changes by running the `kubectl apply` command. + + +* **Roles**: Below is an example of a YAML file to help you configure `RoleBindings` in Kubernetes. You will need to fill in the name below, and name is case-sensitive. + +``` +# monitoring-config-view-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: monitoring-config-view + namespace: cattle-monitoring-system +roleRef: + kind: Role + name: monitoring-config-view + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: User + name: u-b4qkhsnliz # this can be found via `kubectl get users -A` + apiGroup: rbac.authorization.k8s.io +``` + +* **kubectl**: Below is an example of a `kubectl` command used to apply the binding you've created in the YAML file. As noted, you will need to fill in your YAML filename accordingly. + + * **`kubectl apply -f monitoring-config-view-role-binding.yaml` + + +# Users with Rancher Based Permissions + +The relationship between the default roles deployed by Rancher (i.e. cluster-owner, cluster-member, project-owner, project-member), the default Kubernetes roles, and the roles deployed by the rancher-monitoring chart are detailed in the table below: + +
**Default Rancher Permissions and Corresponding Kubernetes ClusterRoles**
+ +| Rancher Role | Kubernetes Role | Monitoring ClusterRole / Role | ClusterRoleBinding or RoleBinding? | +| --------- | --------- | --------- | --------- | +| cluster-owner | cluster-admin | N/A | ClusterRoleBinding | +| cluster-member | admin | monitoring-admin | ClusterRoleBinding | +| project-owner | admin | monitoring-admin | RoleBinding within Project namespace | +| project-member | edit | monitoring-edit | RoleBinding within Project namespace | + +In addition to these default Roles, the following additional Rancher project roles can be applied to members of your Cluster to provide additional access to Monitoring. These Rancher Roles will be tied to ClusterRoles deployed by the Monitoring chart: + +
**Non-default Rancher Permissions and Corresponding Kubernetes ClusterRoles**
+ +| Rancher Role | Kubernetes ClusterRole | Available In Rancher From | Available in Monitoring v2 From | +|--------------------------|-------------------------------|-------|------| +| View Monitoring* | [monitoring-ui-view](#monitoring-ui-view) | 2.4.8+ | 9.4.204+ | + +\* A User bound to the **View Monitoring** Rancher Role only has permissions to access external Monitoring UIs if provided links to those UIs. In order to access the Monitoring Pane to get those links, the User must be a Project Member of at least one Project. + +### Differences in 2.5.x + +Users with the project-member or project-owners roles assigned will not be given access to either Prometheus or Grafana in Rancher 2.5.x since we only create Grafana or Prometheus on a cluster-level. + +In addition, while project owners will still be only able to add ServiceMonitors / PodMonitors that scrape resources within their project's namespace by default, PrometheusRules are not scoped to a single namespace / project. Therefore, any alert rules or recording rules created by project-owners within their project namespace will be applied across the entire cluster, although they will be unable to view / edit / delete any rules that were created outside the project's namespace. + +### Assigning Additional Access + +If cluster-admins would like to provide additional admin/edit access to users outside of the roles offered by the rancher-monitoring chart, the following table identifies the potential impact: + +|CRDs (monitoring.coreos.com) | Can it cause impact outside of a namespace / project? | Impact | +|----------------------------| ------| ----------------------------| +| `prometheuses`| Yes, this resource can scrape metrics from any targets across the entire cluster (unless the Operator itself is otherwise configured). | User will be able to define the configuration of new cluster-level Prometheus deployments that should be created in the cluster. | +| `alertmanagers`| No | User will be able to define the configuration of new cluster-level Alertmanager deployments that should be created in the cluster. Note: if you just want to allow users to configure settings like Routes and Receivers, you should just provide access to the Alertmanager Config Secret instead. | +|
• `servicemonitors`<br/>• `podmonitors`
| No, not by default; this is configurable via `ignoreNamespaceSelectors` on the Prometheus CR. | User will be able to set up scrapes by Prometheus on endpoints exposed by Services / Pods within the namespace they are given this permission in. | +| `prometheusrules`| Yes, PrometheusRules are cluster-scoped. | User will be able to define alert or recording rules on Prometheus based on any series collected across the entire cluster. | + +| k8s Resources | Namespace | Can it cause impact outside of a namespace / project? | Impact | +|----------------------------| ------| ------| ----------------------------| +|
• `secrets`<br/>• `configmaps`
| `cattle-monitoring-system` | Yes, Configs and Secrets in this namespace can impact the entire monitoring / alerting pipeline. | User will be able to create or edit Secrets / ConfigMaps such as the Alertmanager Config, Prometheus Adapter Config, TLS secrets, additional Grafana datasources, etc. This can have broad impact on all cluster monitoring / alerting. | +|
• `secrets`<br/>• `configmaps`
| `cattle-dashboards` | Yes, Configs and Secrets in this namespace can create dashboards that make queries on all metrics collected at a cluster-level. | User will be able to create Secrets / ConfigMaps that persist new Grafana Dashboards only. | + + + +# Role-based Access Control for Grafana + +Rancher allows any users who are authenticated by Kubernetes and have access to the Grafana service deployed by the Rancher Monitoring chart to access Grafana via the Rancher Dashboard UI. By default, all users who are able to access Grafana are given the [Viewer](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#viewer-role) role, which allows them to view any of the default dashboards deployed by Rancher. + +However, users can choose to log in to Grafana as an [Admin](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#admin-role) if necessary. The default Admin username and password for the Grafana instance will be `admin`/`prom-operator`, but alternative credentials can also be supplied when deploying or upgrading the chart (a values sketch is shown after the steps below). + +To see the Grafana UI, install `rancher-monitoring`. Then: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Grafana**. + +
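The following is a minimal values sketch for overriding the default Grafana admin credentials at install or upgrade time. It assumes the chart passes `grafana.adminUser` and `grafana.adminPassword` through to the bundled Grafana, as the upstream kube-prometheus-stack chart does; verify the keys against your chart version before relying on them:

```yaml
# Hypothetical override for the rancher-monitoring values.
# Keys assume the upstream Grafana subchart is exposed under `grafana`.
grafana:
  adminUser: admin
  adminPassword: use-a-strong-password   # replaces the default prom-operator
```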
**Cluster Compute Resources Dashboard in Grafana**
+![Cluster Compute Resources Dashboard in Grafana](/img/cluster-compute-resources-dashboard.png) + +
**Default Dashboards in Grafana**
+![Default Dashboards in Grafana](/img/grafana-default-dashboard.png) \ No newline at end of file diff --git a/docs/explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md new file mode 100644 index 0000000000..593dc3e74b --- /dev/null +++ b/docs/explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md @@ -0,0 +1,62 @@ +--- +title: Windows Cluster Support for Monitoring V2 +shortTitle: Windows Support +weight: 5 +--- + +_Available as of v2.5.8_ + +Starting at Monitoring V2 14.5.100 (used by default in Rancher 2.5.8), Monitoring V2 can now be deployed on a Windows cluster and will scrape metrics from Windows nodes using [prometheus-community/windows_exporter](https://github.com/prometheus-community/windows_exporter) (previously named `wmi_exporter`). + +- [Comparison to Monitoring V1](#comparison-to-monitoring-v1) +- [Cluster Requirements](#cluster-requirements) + - [Upgrading Existing Clusters to wins v0.1.0](#upgrading-existing-clusters-to-wins-v0-1-0) + +# Comparison to Monitoring V1 + +Unlike Monitoring V1 for Windows, metrics collected by `windows_exporter` will be labeled as `windows_` instead of `wmi_` in accordance to a naming change from upstream from `wmi_exporter` to `windows_exporter`. + +In addition, Monitoring V2 for Windows will no longer require users to keep port 9796 open on Windows hosts since the host metrics will published directly onto a port exposed on the windows-exporter Pod. This feature was powered by recent changes made by `wins` v0.1.0 to support publishing ports exposed on the hostNetwork on Pods that use wins to run a privileged Windows binary as a host process. + +# Cluster Requirements + +Monitoring V2 for Windows can only scrape metrics from Windows hosts that have a minimum `wins` version of v0.1.0. To be able to fully deploy Monitoring V2 for Windows, all of your hosts must meet this requirement. + +If you provision a fresh RKE1 cluster in Rancher 2.5.8, your cluster should already meet this requirement. + +### Upgrading Existing Clusters to wins v0.1.0 + +If the cluster was provisioned before Rancher 2.5.8 (even if the current Rancher version is 2.5.8), you will not be able to successfully deploy Monitoring V2 for Windows until you upgrade the wins version on each host to at least v0.1.0. + +To facilitate this upgrade, Rancher 2.5.8 has released a brand new Helm chart called `rancher-wins-upgrader`. + +> **Prerequisite:** Make sure Monitoring V1 for Windows is uninstalled. + +1. Deploy `rancher-wins-upgrader` with the following override: + ```yaml + # Masquerading bootstraps the wins-upgrader installation via + # a previously whitelisted process path since the normal install path, + # c:\etc\rancher\wins\wins-upgrade.exe is not normally whitelisted. + # In this case, we are using the previously whitelisted process + # path used by Monitoring V1. + masquerade: + enabled: true + as: c:\\etc\wmi-exporter\wmi-exporter.exe + ``` + :::note Note for Non-Default Windows Prefix Path: + + - If you set up the RKE cluster with a `cluster.yml` that has a non-default `win_prefix_path`, you will need to update the `masquerade.as` field with your prefix path in place of `c:\\`. + + - For example, if you have `win_prefix_path: 'c:\host\opt\'`, then you will need to set `as: c:\host\opt\etc\wmi-exporter\wmi-exporter.exe`. + + ::: + +2. 
Once all your hosts have been successfully upgraded, please ensure that you deploy the Helm chart once again with default values to avoid conflicts with the following settings: + ```yaml + masquerade: + enabled: false + ``` + +**Result:** The hosts are ready for Monitoring V2 to be installed. You may choose to uninstall the `rancher-wins-upgrader` chart or keep it in your cluster to facilitate future upgrades. + +For more information on how it can be used, please see the [README.md](https://github.com/rancher/wins/blob/master/charts/rancher-wins-upgrader/README.md) of the chart. diff --git a/docs/explanations/integrations-in-rancher/neuvector.md b/docs/explanations/integrations-in-rancher/neuvector.md new file mode 100644 index 0000000000..e363537926 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/neuvector.md @@ -0,0 +1,124 @@ +--- +title: NeuVector Integration +weight: 22 +--- + +### NeuVector Integration in Rancher + +New in Rancher v2.6.5, [NeuVector 5.x](https://open-docs.neuvector.com/) is an open-source container-centric security platform that is now integrated into Rancher. NeuVector offers real-time compliance, visibility, and protection for critical applications and data during runtime. NeuVector provides a firewall, container process/file system monitoring, security auditing with CIS benchmarks, and vulnerability scanning. For more information on Rancher security, please see the [security documentation](../../pages-for-subheaders/rancher-security.md). + +NeuVector can be enabled through a Helm chart that may be installed either through **Apps & Marketplace** or through the **Cluster Tools** button in the Rancher UI. Once the Helm chart is installed, users can easily [deploy and manage NeuVector clusters within Rancher](https://open-docs.neuvector.com/deploying/rancher#deploy-and-manage-neuvector-through-rancher-apps-marketplace). + +### Installing NeuVector with Rancher + +The Harvester Helm Chart is used to manage access to the NeuVector UI in Rancher where users can navigate directly to deploy and manage their NeuVector clusters. + +**To navigate to and install the NeuVector chart through Apps & Marketplace:** + +1. Click **☰ > Cluster Management**. +1. On the Clusters page, go to the cluster where you want to deploy NeuVector, and click **Explore**. +1. Go to **Apps & Marketplace > Charts**, and install **NeuVector** from the chart repo. +1. Different cluster types require different container runtimes. When configuring Helm chart values, go to the **Container Runtime** section, and select your runtime in accordance with the cluster type. Finally, click **Install** again. + +Some examples are as follows: + + - RKE1: `docker` + - K3s and RKE2: `k3scontainerd` + - AKS: `containerd` for v1.19 and up + - EKS: `docker` for v1.22 and below; `containerd` for v1.23 and up + - GKE: `containerd` (see the [Google docs](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd) for more) + + :::note + + Only one container runtime engine may be selected at a time during installation. + + ::: + +**To navigate to and install the NeuVector chart through Cluster Tools:** + +1. Click **☰ > Cluster Management**. +1. On the Clusters page, go to the cluster where you want to deploy NeuVector, and click **Explore**. +1. Click on **Cluster Tools** at the bottom of the left navigation bar. +1. 
Repeat step 4 above to select your container runtime accordingly, then click **Install** again. + +### Accessing NeuVector from the Rancher UI + +1. Navigate to the cluster explorer of the cluster where NeuVector is installed. In the left navigation bar, click **NeuVector**. +1. Click the external link to go to the NeuVector UI. Once the link is selected, users must accept the `END USER LICENSE AGREEMENT` to access the NeuVector UI. + +### Uninstalling NeuVector from the Rancher UI + +**To uninstall from Apps & Marketplace:** + +1. Click **☰ > Cluster Management**. +1. Under **Apps & Marketplace**, click **Installed Apps**. +1. Under `cattle-neuvector-system`, select both the NeuVector app (and the associated CRD if desired), then click **Delete**. + +**To uninstall from Cluster Tools:** + +1. Click **☰ > Cluster Management**. +1. Click on **Cluster Tools** at the bottom-left of the screen, then click on the trash can icon under the NeuVector chart. Select `Delete the CRD associated with this app` if desired, then click **Delete**. + +### GitHub Repository + +The NeuVector project is available [here](https://github.com/neuvector/neuvector). + +### Documentation + +The NeuVector documentation is [here](https://open-docs.neuvector.com/). + +### Architecture + +The NeuVector security solution contains four types of security containers: Controllers, Enforcers, Managers, and Scanners. A special container called an All-in-One is also provided to combine the Controller, Enforcer, and Manager functions all in one container, primarily for Docker-native deployments. There is also an Updater which, when run, will update the CVE database. + +- **Controller:** Manages the NeuVector Enforcer container; provides REST APIs for the management console. +- **Enforcer:** Enforces security policies. +- **Manager:** Provides a web-UI and CLI console to manage the NeuVector platform. +- **All-in-One:** Includes the Controller, Enforcer, and Manager. +- **Scanner:** Performs the vulnerability and compliance scanning for images, containers, and nodes. +- **Updater:** Updates the CVE database for Neuvector (when run); redeploys scanner pods. + +
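A quick way to confirm which of these security containers are running after the chart is installed through Rancher is to list the pods in the chart's namespace (the Rancher chart deploys into `cattle-neuvector-system` by default; adjust the namespace if yours differs):

```
kubectl get pods -n cattle-neuvector-system
```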
**NeuVector Security Containers:**
+![NeuVector Security Containers](/img/neuvector-security-containers.png) + +
**NeuVector Architecture:**
+![NeuVector Architecture](/img/neuvector-architecture.png) + +To learn more about NeuVector's architecture, please refer [here](https://open-docs.neuvector.com/basics/overview#architecture). + +### CPU and Memory Allocations + +Below are the minimum recommended computing resources for the NeuVector chart installation in a default deployment. Note that the resource limit is not set. + +| Container | CPU - Request | Memory - Request | +|------------|--------|---------| +| Controller | 3 (1GB 1vCPU needed per controller) | * +| Enforcer | On all nodes (500MB .5vCPU) | 1GB +| Manager | 1 (500MB .5vCPU) | * +| Scanner | 3 (100MB .5vCPU) | * + +\* Minimum 1GB of memory total required for Controller, Manager, and Scanner containers combined. + + +### Support Limitations + +* Only admins and cluster owners are currently supported. + +* Fleet multi-cluster deployment is not supported. + +* NeuVector is not supported on a Windows cluster. + +* NeuVector installation is not supported on hardened clusters. + +* NeuVector installation is not supported on SELinux clusters. + +* NeuVector installation is not supported on clusters in an air-gapped environment. + +### Other Limitations + +* Currently, NeuVector feature chart installation fails when a NeuVector partner chart already exists. To work around this issue, uninstall the NeuVector partner chart and reinstall the NeuVector feature chart. + +* Sometimes when the controllers are not ready, the NeuVector UI is not accessible from the Rancher UI. During this time, controllers will try to restart, and it takes a few minutes for the controllers to be active. + +* Container runtime is not auto-detected for different cluster types when installing the NeuVector chart. To work around this, you can specify the runtime manually. + diff --git a/docs/explanations/integrations-in-rancher/opa-gatekeeper.md b/docs/explanations/integrations-in-rancher/opa-gatekeeper.md new file mode 100644 index 0000000000..d8a2335432 --- /dev/null +++ b/docs/explanations/integrations-in-rancher/opa-gatekeeper.md @@ -0,0 +1,112 @@ +--- +title: OPA Gatekeeper +weight: 16 +--- + +To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. [OPA (Open Policy Agent)](https://www.openpolicyagent.org/) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. + +OPA provides a high-level declarative language that lets you specify policy as code and ability to extend simple APIs to offload policy decision-making. + +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a project that provides integration between OPA and Kubernetes. OPA Gatekeeper provides: + +- An extensible, parameterized policy library. +- Native Kubernetes CRDs for instantiating the policy library, also called “constraints." +- Native Kubernetes CRDs for extending the policy library, also called "constraint templates." +- Audit functionality. 
+ +To read more about OPA, please refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/) + +# How the OPA Gatekeeper Integration Works + +Kubernetes provides the ability to extend API server functionality via admission controller webhooks, which are invoked whenever a resource is created, updated or deleted. Gatekeeper is installed as a validating webhook and enforces policies defined by Kubernetes custom resource definitions. In addition to the admission control usage, Gatekeeper provides the capability to audit existing resources in Kubernetes clusters and mark current violations of enabled policies. + +OPA Gatekeeper is made available via Rancher's Helm system chart, and it is installed in a namespace named `gatekeeper-system.` + +# Enabling OPA Gatekeeper in a Cluster + +:::note + +In Rancher v2.5, the OPA Gatekeeper application was improved. The Rancher v2.4 feature can't be upgraded to the new version in Rancher v2.5. If you installed OPA Gatekeeper in Rancher v2.4, you will need to uninstall OPA Gatekeeper and its CRDs from the old UI, then reinstall it in Rancher v2.5. To uninstall the CRDs run the following command in the kubectl console `kubectl delete crd configs.config.gatekeeper.sh constrainttemplates.templates.gatekeeper.sh`. + +::: + +:::note Prerequisite: + +Only administrators and cluster owners can enable OPA Gatekeeper. + +::: + +The OPA Gatekeeper Helm chart can be installed from **Apps & Marketplace**. + +### Enabling OPA Gatekeeper + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the **Clusters** page, go to the cluster where you want to enable OPA Gatekeeper and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace**. +1. Click **Charts** and click **OPA Gatekeeper**. +1. Click **Install**. + +**Result:** OPA Gatekeeper is deployed in your Kubernetes cluster. + +# Constraint Templates + +[Constraint templates](https://github.com/open-policy-agent/gatekeeper#constraint-templates) are Kubernetes custom resources that define the schema and Rego logic of the OPA policy to be applied by Gatekeeper. For more information on the Rego policy language, refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/policy-language/) + +When OPA Gatekeeper is enabled, Rancher installs some templates by default. + +To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates**. + +Rancher also provides the ability to create your own constraint templates by importing YAML definitions. + +# Creating and Configuring Constraints + +[Constraints](https://github.com/open-policy-agent/gatekeeper#constraints) are Kubernetes custom resources that define the scope of objects to which a specific constraint template applies to. The complete policy is defined by constraint templates and constraints together. + +:::note Prerequisite: + +OPA Gatekeeper must be enabled in the cluster. + +::: + +To list the constraints installed, go to the left side menu under OPA Gatekeeper, and click on **Constraints**. + +New constraints can be created from a constraint template. + +Rancher provides the ability to create a constraint by using a convenient form that lets you input the various constraint fields. 
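For illustration, a minimal constraint built from the commonly used `K8sRequiredLabels` template of the Gatekeeper policy library (assuming that template is present among your installed templates) might look like the following. It requires every namespace to carry an `owner` label and, because the enforcement action is `dryrun`, it only records violations:

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner
spec:
  enforcementAction: dryrun      # switch to "deny" to reject violating requests
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    labels: ["owner"]
```

The exact `parameters` schema depends on how the template you start from defines it, so review the template before applying a constraint like this.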
+ +The **Edit as yaml** option is also available to configure the the constraint's yaml definition. + +### Exempting Rancher's System Namespaces from Constraints + +When a constraint is created, ensure that it does not apply to any Rancher or Kubernetes system namespaces. If the system namespaces are not excluded, then it is possible to see many resources under them marked as violations of the constraint. + +To limit the scope of the constraint only to user namespaces, always specify these namespaces under the **Match** field of the constraint. + +Also, the constraint may interfere with other Rancher functionality and deny system workloads from being deployed. To avoid this, exclude all Rancher-specific namespaces from your constraints. + +# Enforcing Constraints in your Cluster + +When the **Enforcement Action** is **Deny,** the constraint is immediately enabled and will deny any requests that violate the policy defined. By default, the enforcement value is **Deny**. + +When the **Enforcement Action** is **Dryrun,** then any resources that violate the policy are only recorded under the constraint's status field. + +To enforce constraints, create a constraint using the form. In the **Enforcement Action** field, choose **Deny**. + +# Audit and Violations in your Cluster + +OPA Gatekeeper runs a periodic audit to check if any existing resource violates any enforced constraint. The audit-interval (default 300s) can be configured while installing Gatekeeper. + +On the Gatekeeper page, any violations of the defined constraints are listed. + +Also under **Constraints,** the number of violations of the constraint can be found. + +The detail view of each constraint lists information about the resource that violated the constraint. + +# Disabling Gatekeeper + +1. Navigate to the cluster's Dashboard view +1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper**. +1. Click the **⋮ > Disable**. + +**Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. + diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000000..50a2901350 --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,70 @@ +--- +title: FAQ +weight: 500 +--- + +This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. + +See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions. + +
+ +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** + +When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. + +
+ +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + +Yes. + +
+ +**Does Rancher support Windows?** + +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.](pages-for-subheaders/use-windows-clusters.md) + +
+ +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.](pages-for-subheaders/istio.md) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). + +
+ +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** + +Secrets management is on our roadmap but we haven't assigned it to a specific release yet. + +
+ +**Does Rancher v2.x support RKT containers as well?** + +At this time, we only support Docker. + +
+ +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and registered Kubernetes?** + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
+ +**Are you planning on supporting Traefik for existing setups?** + +We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. + +
+ +**Can I import OpenShift Kubernetes clusters into v2.x?** + +Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. + +
+ +**Are you going to integrate Longhorn?** + +Yes. Longhorn was integrated into Rancher v2.5+. diff --git a/docs/faq/container-network-interface-providers.md b/docs/faq/container-network-interface-providers.md new file mode 100644 index 0000000000..f2065c8be8 --- /dev/null +++ b/docs/faq/container-network-interface-providers.md @@ -0,0 +1,205 @@ +--- +title: Container Network Interface (CNI) Providers +description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you +weight: 2300 +--- + +## What is CNI? + +CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. + +Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. + +![CNI Logo](/img/cni-logo.png) + +For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). + +## What Network Models are Used in CNI? + +CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). + +### What is an Encapsulated Network? + +This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. + +In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. + +This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. + +CNI network providers using this network model include Flannel, Canal, Weave, and Cilium. By default, Calico is not using this model, but it can be configured to do so. + +![Encapsulated Network](/img/encapsulated-network.png) + +### What is an Unencapsulated Network? + +This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. 
Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). + +In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. + +This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. + +CNI network providers using this network model include Calico and Cilium. Cilium may be configured with this model although it is not the default mode. + +![Unencapsulated Network](/img/unencapsulated-network.png) + +## What CNI Providers are Provided by Rancher? + +### RKE Kubernetes clusters + +Out-of-the-box, Rancher provides the following CNI network providers for RKE Kubernetes clusters: Canal, Flannel, and Weave. + +You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Canal + +![Canal Logo](/img/canal-logo.png) + +Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. + +In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (health checks). If using Wireguard, you should open UDP ports `51820` and `51821`. For more details, refer to [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md). + +![](/img/canal-diagram.png) + +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) + +#### Flannel + +![Flannel Logo](/img/flannel-logo.png) + +Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan). + +Encapsulated traffic is unencrypted by default. Flannel provides two solutions for encryption: + +* [IPSec](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. It is an experimental backend for encryption. +* [WireGuard](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard), which is a more faster-performing alternative to strongSwan. 
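If you maintain the cluster configuration yourself, the Flannel backend is chosen in the `network` section of the RKE `cluster.yml`. A minimal sketch is shown below; the option names follow the RKE network plug-in documentation linked at the end of this page, so double-check them against your RKE version:

```yaml
# Illustrative cluster.yml excerpt: Flannel with the default VXLAN backend
network:
  plugin: flannel
  options:
    flannel_backend_type: vxlan
```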
+ +Kubernetes workers should open UDP port `8472` (VXLAN). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +![Flannel Diagram](/img/flannel-diagram.png) + +For more information, see the [Flannel GitHub Page](https://github.com/flannel-io/flannel). + +#### Weave + +![Weave Logo](/img/weave-logo.png) + +Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. + +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +For more information, see the following pages: + +- [Weave Net Official Site](https://www.weave.works/) + +### RKE2 Kubernetes clusters + +Out-of-the-box, Rancher provides the following CNI network providers for RKE2 Kubernetes clusters: [Canal](#canal) (see above section), Calico, and Cilium. + +You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Calico + +![Calico Logo](/img/calico-logo.png) + +Calico enables networking and network policy in Kubernetes clusters across the cloud. By default, Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. + +Calico also provides a stateless IP-in-IP or VXLAN encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. + +Kubernetes workers should open TCP port `179` if using BGP or UDP port `4789` if using VXLAN encapsulation. In addition, TCP port `5473` is needed when using Typha. See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +:::note Important: + +In Rancher v2.6.3, Calico probes fail on Windows nodes upon RKE2 installation. Note that this issue is resolved in v2.6.4. + +- To work around this issue, first navigate to `https:///v3/settings/windows-rke2-install-script`. + +- There, change the current setting: `https://raw.githubusercontent.com/rancher/wins/v0.1.3/install.ps1` to this new setting: `https://raw.githubusercontent.com/rancher/rke2/master/windows/rke2-install.ps1`. + +::: + +![Calico Diagram](/img/calico-diagram.svg) + +For more information, see the following pages: + +- [Project Calico Official Site](https://www.projectcalico.org/) +- [Project Calico GitHub Page](https://github.com/projectcalico/calico) + +#### Cilium + +![Cilium Logo](/img/cilium-logo.png) + +Cilium enables networking and network policies (L3, L4, and L7) in Kubernetes. By default, Cilium uses eBPF technologies to route packets inside the node and VXLAN to send packets to other nodes. 
Unencapsulated techniques can also be configured. + +Cilium recommends a Linux kernel version of 5.2 or later to leverage the full potential of eBPF. Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `4240` (health checks). In addition, ICMP 8/0 must be enabled for health checks. For more information, see the [Cilium System Requirements](https://docs.cilium.io/en/latest/operations/system_requirements/#firewall-requirements). + +##### Ingress Routing Across Nodes in Cilium +
+By default, Cilium does not allow pods to contact pods on other nodes. To work around this, enable the ingress controller to route requests across nodes with a `CiliumNetworkPolicy`. + +After selecting the Cilium CNI and enabling Project Network Isolation for your new cluster, configure as follows: + +``` +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: hn-nodes + namespace: default +spec: + endpointSelector: {} + ingress: + - fromEntities: + - remote-node +``` + +## CNI Features by Provider + +The following table summarizes the different features available for each CNI network provider provided by Rancher. + +| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | +| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | +| Canal | Encapsulated (VXLAN) | No | Yes | No | K8s API | Yes | Yes | +| Flannel | Encapsulated (VXLAN) | No | No | No | K8s API | Yes | No | +| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | +| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | +| Cilium | Encapsulated (VXLAN) | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | + +- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) + +- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. + +- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. + +- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. + +- External Datastore: CNI network providers with this feature need an external datastore for its data. + +- Encryption: This feature allows cyphered and secure network control and data planes. + +- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. + + +## CNI Community Popularity + +The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2022. + +| Provider | Project | Stars | Forks | Contributors | +| ---- | ---- | ---- | ---- | ---- | +| Canal | https://github.com/projectcalico/canal | 679 | 100 | 21 | +| Flannel | https://github.com/flannel-io/flannel | 7k | 2.5k | 185 | +| Calico | https://github.com/projectcalico/calico | 3.1k | 741 | 224 | +| Weave | https://github.com/weaveworks/weave/ | 6.2k | 635 | 84 | +| Cilium | https://github.com/cilium/cilium | 10.6k | 1.3k | 352 | + +
+ +## Which CNI Provider Should I Use? + +It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. + +Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. + +## How can I configure a CNI network provider? + +Please see [Cluster Options](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) and the options for [Network Plug-ins](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/docs/faq/deprecated-features-in-v2.5.md b/docs/faq/deprecated-features-in-v2.5.md new file mode 100644 index 0000000000..0815fd2904 --- /dev/null +++ b/docs/faq/deprecated-features-in-v2.5.md @@ -0,0 +1,27 @@ +--- +title: Deprecated Features in Rancher +weight: 100 +--- + +### What is Rancher's Deprecation policy? + +We have published our official deprecation policy in the support [terms of service](https://rancher.com/support-maintenance-terms). + +### Where can I find out which features have been deprecated in Rancher? + +Rancher will publish deprecated features as part of the [release notes](https://github.com/rancher/rancher/releases) for Rancher found on GitHub. Please consult the following patch releases for deprecated features: + +| Patch Version | Release Date | +|---------------|---------------| +| [2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) | Aug 31, 2021 | +| [2.6.1](https://github.com/rancher/rancher/releases/tag/v2.6.1) | Oct 11, 2021 | +| [2.6.2](https://github.com/rancher/rancher/releases/tag/v2.6.2) | Oct 19, 2021 | +| [2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) | Dec 21, 2021 | +| [2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | Mar 31, 2022 | +| [2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) | May 12, 2022 | +| [2.6.6](https://github.com/rancher/rancher/releases/tag/v2.6.6) | Jun 30, 2022 | + + +### What can I expect when a feature is marked for deprecation? + +In the release where functionality is marked as "Deprecated", it will still be available and supported allowing upgrades to follow the usual procedure. Once upgraded, users/admins should start planning to move away from the deprecated functionality before upgrading to the release it marked as removed. The recommendation for new deployments is to not use the deprecated feature. 
\ No newline at end of file diff --git a/docs/faq/dockershim.md b/docs/faq/dockershim.md new file mode 100644 index 0000000000..d9ce2fe643 --- /dev/null +++ b/docs/faq/dockershim.md @@ -0,0 +1,46 @@ +--- +title: Dockershim +weight: 300 +--- + +The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. + +To enable the external Dockershim, configure the following option. + +``` +enable_cri_dockerd: true +``` + +For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. + +### FAQ + +
+ +Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? + +A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on Rancher 2.6 or above to have support for RKE with Kubernetes 1.21. See our [support matrix](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/) for details. + +
+ +Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? + +A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and is not scheduled for removal upstream until Kubernetes 1.24. It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to Kubernetes 1.21 as you would normally, but should consider enabling the external Dockershim by Kubernetes 1.22. The external Dockershim will need to be enabled before upgrading to Kubernetes 1.24, at which point the existing implementation will be removed. + +For more information on the deprecation and its timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +
+ +Q: What are my other options if I don’t want to depend on the Dockershim? + +A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 or K3s are two options for doing this. + +
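If you are unsure which runtime your existing nodes use, `kubectl` reports it directly; the `CONTAINER-RUNTIME` column shows values such as `docker://...` or `containerd://...`:

```
kubectl get nodes -o wide
```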
+ +Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? + +A: Rancher is exploring the possibility of an in-place upgrade path. Alternatively, you can always migrate workloads from one cluster to another using kubectl, as sketched below. + +
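A rough sketch of that manual approach is shown below. The namespace, resource names, and kubeconfig paths are purely illustrative, and exported manifests usually need cluster-specific fields (such as `status`, `uid`, and `resourceVersion`) stripped before they can be applied; volumes, secrets, and cluster-scoped resources need separate consideration.

```
# Export a workload's manifests from the RKE1 cluster
kubectl --kubeconfig rke1-kubeconfig.yaml -n my-namespace get deployment,service,configmap -o yaml > my-app.yaml

# Clean up cluster-specific fields, then apply the manifests to the RKE2 cluster
kubectl --kubeconfig rke2-kubeconfig.yaml -n my-namespace apply -f my-app.yaml
```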
diff --git a/content/rancher/v2.6/en/faq/kubectl/_index.md b/docs/faq/install-and-configure-kubectl.md similarity index 100% rename from content/rancher/v2.6/en/faq/kubectl/_index.md rename to docs/faq/install-and-configure-kubectl.md diff --git a/docs/faq/networking/networking.md b/docs/faq/networking/networking.md new file mode 100644 index 0000000000..149ff4853a --- /dev/null +++ b/docs/faq/networking/networking.md @@ -0,0 +1,9 @@ +--- +title: Networking +weight: 8005 +--- + +Networking FAQ's + +- [CNI Providers](../container-network-interface-providers.md) + diff --git a/docs/faq/rancher-is-no-longer-needed.md b/docs/faq/rancher-is-no-longer-needed.md new file mode 100644 index 0000000000..ac895b26f3 --- /dev/null +++ b/docs/faq/rancher-is-no-longer-needed.md @@ -0,0 +1,67 @@ +--- +title: Rancher is No Longer Needed +weight: 8010 +--- + +This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. + +- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) +- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) +- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) +- [What if I don't want my registered cluster managed by Rancher?](#what-if-i-don-t-want-my-registered-cluster-managed-by-rancher) +- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) + +### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? + +If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. + +### If the Rancher server is deleted, how do I access my downstream clusters? + +The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: + +- **Registered clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. +- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. +- **RKE clusters:** To access an [RKE cluster,](../pages-for-subheaders/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) 
With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../pages-for-subheaders/rancher-manager-architecture.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. + +### What if I don't want Rancher anymore? + +:::note + +The previously recommended [System Tools](../reference-guides/system-tools.md) has been deprecated since June 2022. + +::: + +If you [installed Rancher on a Kubernetes cluster,](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) tool. + +As of Rancher v2.5.8, uninstalling Rancher in high-availability (HA) mode will also remove all `helm-operation-*` pods and the following apps: + +- fleet +- fleet-agent +- rancher-operator +- rancher-webhook + +Custom resources (CRDs) and custom namespaces will still need to be manually removed. + +If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. + +Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) + +### What if I don't want my registered cluster managed by Rancher? + +If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. + +To detach the cluster, + +1. In the upper left corner, click **☰ > Cluster Management**. +2. Go to the registered cluster that should be detached from Rancher and click **⋮ > Delete**. +3. Click **Delete**. + +**Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. + +### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? + +At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. + +The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) + +For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/docs/faq/security.md b/docs/faq/security.md new file mode 100644 index 0000000000..aee42e0fb9 --- /dev/null +++ b/docs/faq/security.md @@ -0,0 +1,15 @@ +--- +title: Security +weight: 8007 + +--- + +**Is there a Hardening Guide?** + +The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. + +
+ +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** + +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/docs/faq/technical-items.md b/docs/faq/technical-items.md new file mode 100644 index 0000000000..e29731d486 --- /dev/null +++ b/docs/faq/technical-items.md @@ -0,0 +1,177 @@ +--- +title: Technical +weight: 8006 +--- + +### How can I reset the administrator password? + +Docker Install: +``` +$ docker exec -ti reset-password +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher --no-headers | head -1 | awk '{ print $1 }') -c rancher -- reset-password +New password for default administrator (user-xxxxx): + +``` + + + +### I deleted/deactivated the last admin, how can I fix it? +Docker Install: +``` +$ docker exec -ti ensure-default-admin +New default administrator (user-xxxxx) +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin +New password for default administrator (user-xxxxx): + +``` +### How can I enable debug logging? + +See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) + +### My ClusterIP does not respond to ping + +ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. + +### Where can I manage Node Templates? + +Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. + +### Why is my Layer-4 Load Balancer in `Pending` state? + +The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../pages-for-subheaders/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) + +### Where is the state of Rancher stored? + +- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. +- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. + +### How are the supported Docker versions determined? + +We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. + +### How can I access nodes created by Rancher? + +SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. 
Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. + +![Download Keys](/img/downloadsshkeys.png) + +Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) + +``` +$ ssh -i id_rsa user@ip_of_node +``` + +### How can I automate task X in Rancher? + +The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: + +* Visit `https://your_rancher_ip/v3` and browse the API options. +* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) + +### The IP address of a node changed, how can I recover? + +A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. + +When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) to clean the node. + +When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. + +### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? + +You can add additional arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). + +### How do I check if my certificate chain is valid? + +Use the `openssl verify` command to validate your certificate chain: + +:::tip + +Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS-installed certificates are not used when verifying manually. + +::: + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. 
If you already have this certificate, you can use it in the verification of the certificate like shown below: + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). + +``` +-----BEGIN CERTIFICATE----- +%YOUR_CERTIFICATE% +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +%YOUR_INTERMEDIATE_CERTIFICATE% +-----END CERTIFICATE----- +``` + +If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: + +``` +openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem +subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com +issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA +``` + +### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? + +Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. + +Check `Common Name`: + +``` +openssl x509 -noout -subject -in cert.pem +subject= /CN=rancher.my.org +``` + +Check `Subject Alternative Names`: + +``` +openssl x509 -noout -in cert.pem -text | grep DNS + DNS:rancher.my.org +``` + +### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? + +This is due to a combination of the following default Kubernetes settings: + +* kubelet + * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) +* kube-controller-manager + * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) + * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) + * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) + +See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. + +In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. + +* kube-apiserver (Kubernetes v1.13 and up) + * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. 
+ * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. + +### Can I use keyboard shortcuts in the UI? + +Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/content/rancher/v2.0-v2.4/en/faq/telemetry/_index.md b/docs/faq/telemetry.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/faq/telemetry/_index.md rename to docs/faq/telemetry.md diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 0000000000..72ea1bf1c8 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,10 @@ +--- +title: Getting Started +--- + +To get up and running with Rancher quickly, we have included a **Getting Started** section. + +The goal of this section is to be able to assist users in deploying Rancher and workloads and to install or upgrade Rancher quickly and effectively. + +Please see the [introduction](../docs/pages-for-subheaders/introduction.md), [quick start guides](../docs/pages-for-subheaders/quick-start-guides.md), and the [installation and upgrade](../docs/pages-for-subheaders/installation-and-upgrade.md) sections for more. + diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md new file mode 100644 index 0000000000..43f2f6144c --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md @@ -0,0 +1,268 @@ +--- +title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer +weight: 252 +--- + +For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. + +A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. + +This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. + +## Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.](../../../../pages-for-subheaders/installation-requirements.md) + +## Installation Outline + + + +- [1. Provision Linux Host](#1-provision-linux-host) +- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) +- [3. Configure Load Balancer](#3-configure-load-balancer) + + + +## 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements](../../../../pages-for-subheaders/installation-requirements.md) to launch your Rancher Server. + +## 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +:::note Do you want to.. + +- Complete an Air Gap Installation? 
+- Record all transactions with the Rancher API? + +See [Advanced Options](#advanced-options) below before continuing. + +::: + +Choose from the following options: + +
+ Option A-Bring Your Own Certificate: Self-Signed + +If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. + +:::note Prerequisites: + +Create a self-signed certificate. + +- The certificate files must be in PEM format. + +::: + +**To Install Rancher Using a Self-Signed Cert:** + +1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest + ``` + +
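+If you still need to create the self-signed certificate, the sketch below shows one way to do it with `openssl` (version 1.1.1 or later for `-addext`); the file names, the ten-year validity, and the `rancher.yourdomain.com` hostname are placeholders only, not values required by Rancher. For a self-signed certificate, the certificate itself is typically what you mount as `cacerts.pem` and install on the load balancer.
+
+```
+# Hypothetical example: create a self-signed certificate and key in PEM format.
+# Replace rancher.yourdomain.com with the hostname you will use for Rancher.
+openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes \
+  -keyout key.pem -out cacerts.pem \
+  -subj "/CN=rancher.yourdomain.com" \
+  -addext "subjectAltName=DNS:rancher.yourdomain.com"
+```
+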
+
+ Option B-Bring Your Own Certificate: Signed by Recognized CA + +If your cluster is public-facing, it's best to use a certificate signed by a recognized CA. + +:::note Prerequisites: + +- The certificate files must be in PEM format. + +::: + +**To Install Rancher Using a Cert Signed by a Recognized CA:** + +If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. However, you do need to make sure that no default CA certificate is generated and stored; you can do this by passing the `--no-cacerts` parameter to the container. + +1. Enter the following command. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest --no-cacerts + ``` + +
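+Whichever option you chose, it can help to confirm that the container started cleanly before moving on to the load balancer. The commands below are a minimal sketch; `<container-id>` is a placeholder for the ID reported by `docker ps`.
+
+```
+# Hypothetical check: confirm the Rancher container is running, then follow its startup logs.
+docker ps --filter "ancestor=rancher/rancher:latest"
+docker logs -f <container-id>
+```
+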
+ +## 3. Configure Load Balancer + +When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. + +The load balancer or proxy has to be configured to support the following: + +- **WebSocket** connections +- **SPDY** / **HTTP/2** protocols +- Passing / setting the following headers: + + | Header | Value | Description | + |--------|-------|-------------| + | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. + | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

**Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. + | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. + | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. +### Example NGINX configuration + +This NGINX configuration is tested on NGINX 1.14. + +:::note + +This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +::: + +- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server rancher-server:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` + +
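+If you want to try this configuration out before putting it in front of real traffic, one approach is to run it in the stock `nginx` image and let `nginx -t` check the syntax first; the file locations and the `rancher-lb` container name below are assumptions, not requirements.
+
+```
+# Hypothetical syntax check of the configuration shown above (paths are examples).
+docker run --rm \
+  -v $(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro \
+  -v $(pwd)/certs:/certs:ro \
+  nginx:1.14-alpine nginx -t
+
+# If the check passes, run the load balancer itself.
+docker run -d --name rancher-lb \
+  -p 80:80 -p 443:443 \
+  -v $(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro \
+  -v $(pwd)/certs:/certs:ro \
+  nginx:1.14-alpine
+```
+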
+ +## What's Next? + +- **Recommended:** Review Single Node [Backup](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) and [Restore](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
+ +## FAQ and Troubleshooting + +For help troubleshooting certificates, see [this section.](../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +## Advanced Options + +### API Auditing + +If you want to record all transactions with the Rancher API, enable the [API Auditing](enable-api-audit-log.md) feature by adding the flags below into your install command. + + -e AUDIT_LEVEL=1 \ + -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ + -e AUDIT_LOG_MAXAGE=20 \ + -e AUDIT_LOG_MAXBACKUP=20 \ + -e AUDIT_LOG_MAXSIZE=100 \ + +### Air Gap + +If you are visiting this page to complete an [Air Gap Installation](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). + +:::note + +This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +::: + +``` +upstream rancher { + server rancher-server:80; +} + +map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; +} + +server { + listen 443 ssl http2; + server_name rancher.yourdomain.com; + ssl_certificate /etc/your_certificate_directory/fullchain.pem; + ssl_certificate_key /etc/your_certificate_directory/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } +} + +server { + listen 80; + server_name rancher.yourdomain.com; + return 301 https://$server_name$request_uri; +} +``` + +
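+As a single, combined illustration of the advanced options above, an install command that keeps persistent data on the host and enables API auditing might look like the sketch below; the host paths `/opt/rancher` and `/var/log/rancher/auditlog` are assumptions you can change to any directories you prefer.
+
+```
+# Hypothetical combined example: persistent data plus API audit logging.
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v /opt/rancher:/var/lib/rancher \
+  -v /var/log/rancher/auditlog:/var/log/auditlog \
+  -e AUDIT_LEVEL=1 \
+  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
+  -e AUDIT_LOG_MAXAGE=20 \
+  -e AUDIT_LOG_MAXBACKUP=20 \
+  -e AUDIT_LOG_MAXSIZE=100 \
+  --privileged \
+  rancher/rancher:latest
+```
+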
+ diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md new file mode 100644 index 0000000000..d42f6bb902 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md @@ -0,0 +1,559 @@ +--- +title: Enabling the API Audit Log to Record System Events +weight: 4 +--- + +You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. + +You can enable API Auditing during Rancher installation or upgrade. + +## Enabling API Audit Log + +The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. + +- [Docker Install](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +- [Kubernetes Install](../../../../reference-guides/installation-references/helm-chart-options.md#api-audit-log) + +## API Audit Log Options + +The usage below defines rules about what the audit log should record and what data it should include: + +| Parameter | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.
`2` - Log event metadata and request body.
`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | +| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to the host.

Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| +| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. | +| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | +| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | + +
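+Because each audit entry is written as JSON (see the samples later on this page), the log is easy to slice with standard tooling once auditing is enabled. The following is a sketch using `jq`, assuming the default log path and the example user shown in the samples below:
+
+```
+# Hypothetical example: show the URI and timestamp of every PUT request made by one user.
+jq -c 'select(.user.name == "user-f4tt2" and .verb == "PUT") | {requestURI, stageTimestamp}' \
+  /var/log/auditlog/rancher-api-audit.log
+```
+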
+ +### Audit Log Levels + +The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. + +| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | +| --------------------- | ---------------- | ------------ | ----------------- | ------------- | +| `0` | | | | | +| `1` | ✓ | | | | +| `2` | ✓ | ✓ | | | +| `3` | ✓ | ✓ | ✓ | ✓ | + +## Viewing API Audit Logs + +### Docker Install + +Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. + +### Kubernetes Install + +Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. + +The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. + +#### CLI + +```bash +kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log +``` + +#### Shipping the Audit Log + +You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging](../../../../pages-for-subheaders/logging.md) for details. + +## Audit Log Samples + +After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. + +### Metadata Level + +If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. + +```json +{ + "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", + "requestURI": "/v3/schemas", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "GET", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:22:43 +0800" +} +``` + +### Metadata and Request Body Level + +If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. + +The code sample below depicts an API request, with both its metadata header and body. 
+ +```json +{ + "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:28:08 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + 
"field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my description", + "volumes": [] + } +} +``` + +### Metadata, Request Body, and Response Body Level + +If you set your `AUDIT_LEVEL` to `3`, Rancher logs: + +- The metadata header and body for every API request. +- The metadata header and body for every API response. + +#### Request + +The code sample below depicts an API request, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": 
"default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my decript", + "volumes": [] + } +} +``` + +#### Response + +The code sample below depicts an API response, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "responseStatus": "200", + "stage": "ResponseComplete", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "responseBody": { + "actionLinks": { + "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", + "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", + "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" + }, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements" + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container" + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": 
"ClusterFirst", + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "links": { + "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", + "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" + }, + "name": "nginx", + "namespaceId": "default", + "paused": false, + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + } + } +} +``` diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md new file mode 100644 index 0000000000..78b4313e14 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md @@ -0,0 +1,108 @@ +--- +title: Opening Ports with firewalld +weight: 1 +--- + +> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. + +Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. 
+ +For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: + +``` +Chain INPUT (policy ACCEPT) +target prot opt source destination +ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +ACCEPT icmp -- anywhere anywhere +ACCEPT all -- anywhere anywhere +ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain FORWARD (policy ACCEPT) +target prot opt source destination +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +``` + +You can check the default firewall rules with this command: + +``` +sudo iptables --list +``` + +This section describes how to use `firewalld` to apply the [firewall port rules](../../installation-requirements/port-requirements.md) for nodes in a high-availability Rancher server cluster. + +# Prerequisite + +Install v7.x or later ofv`firewalld`: + +``` +yum install firewalld +systemctl start firewalld +systemctl enable firewalld +``` + +# Applying Firewall Port Rules + +In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: + +``` +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` +If your Rancher server nodes have separate roles, use the following commands based on the role of the node: + +``` +# For etcd nodes, run the following commands: +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp + +# For control plane nodes, run the following commands: +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp + +# For worker nodes, run the following commands: +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` + +After the `firewall-cmd` commands have been run on 
a node, use the following command to enable the firewall rules: + +``` +firewall-cmd --reload +``` + +**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. diff --git a/content/rancher/v2.6/en/installation/resources/advanced/etcd/_index.md b/docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/advanced/etcd/_index.md rename to docs/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/continuous-delivery.md b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/continuous-delivery.md new file mode 100644 index 0000000000..2451705a99 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/continuous-delivery.md @@ -0,0 +1,14 @@ +--- +title: Continuous Delivery +weight: 3 +--- + +As of Rancher v2.5, [Fleet](../../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) comes preinstalled in Rancher, and as of Rancher v2.6, Fleet can no longer be fully disabled. However, the Fleet feature for GitOps continuous delivery may be disabled using the `continuous-delivery` feature flag. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../../pages-for-subheaders/enable-experimental-features.md) + +Environment Variable Key | Default Value | Description +---|---|--- + `continuous-delivery` | `true` | This flag disables the GitOps continuous delivery feature of Fleet. | + +If Fleet was disabled in Rancher v2.5.x, it will become enabled if Rancher is upgraded to v2.6.x. Only the continuous delivery part of Fleet can be disabled. When `continuous-delivery` is disabled, the `gitjob` deployment is no longer deployed into the Rancher server's local cluster, and `continuous-delivery` is not shown in the Rancher UI. diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md new file mode 100644 index 0000000000..19a38981b1 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md @@ -0,0 +1,33 @@ +--- +title: UI for Istio Virtual Services and Destination Rules +weight: 2 +--- + +This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. + +> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. 
+ +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../../pages-for-subheaders/enable-experimental-features.md) + +Environment Variable Key | Default Value | Status | Available as of +---|---|---|--- +`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 + +# About this Feature + +A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. + +When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. + +The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules**. + +- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) +- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) + +To see these tabs, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where Istio is installed and click **Explore**. +1. In the left navigation bar, click **Istio**. +1. You will see tabs for **Kiali** and **Jaeger**. From the left navigation bar, you can view and configure **Virtual Services** and **Destination Rules**. \ No newline at end of file diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md new file mode 100644 index 0000000000..04da623402 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md @@ -0,0 +1,45 @@ +--- +title: "Running on ARM64 (Experimental)" +weight: 3 +--- + +:::caution: + +Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. + +::: + +The following options are available when using an ARM64 platform: + +- Running Rancher on ARM64 based node(s) + - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) link: + + ``` + # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. 
+ docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:vX.Y.Z + ``` +:::note + +To check if your specific released version is compatible with the ARM64 architecture, you may navigate to your +version's release notes in the following two ways: + +- Manually find your version using https://github.com/rancher/rancher/releases. +- Go directly to your version using the tag and the specific version number. If you plan to use v2.5.8, for example, you may navigate to https://github.com/rancher/rancher/releases/tag/v2.5.8. + +::: + +- Create custom cluster and adding ARM64 based node(s) + - Kubernetes cluster version must be 1.12 or higher + - CNI Network Provider must be [Flannel](../../../../faq/container-network-interface-providers.md#flannel) +- Importing clusters that contain ARM64 based nodes + - Kubernetes cluster version must be 1.12 or higher + +Please see [Cluster Options](cluster-provisioning/rke-clusters/options/) how to configure the cluster options. + +The following features are not tested: + +- Monitoring, alerts, notifiers, pipelines and logging +- Launching apps from the catalog diff --git a/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md new file mode 100644 index 0000000000..3d8c86b8c5 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md @@ -0,0 +1,40 @@ +--- +title: Allow Unsupported Storage Drivers +weight: 1 +--- + +This feature allows you to use types for storage providers and provisioners that are not enabled by default. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../../pages-for-subheaders/enable-experimental-features.md) + +Environment Variable Key | Default Value | Description +---|---|--- + `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. + +### Types for Persistent Volume Plugins that are Enabled by Default +Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +### Types for StorageClass that are Enabled by Default +Below is a list of storage types for a StorageClass that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|-------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` \ No newline at end of file diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md new file mode 100644 index 0000000000..1a21094532 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -0,0 +1,91 @@ +--- +title: Rendering the Helm Template in an Air-Gapped Environment +shortTitle: Air Gap Upgrade +weight: 1 +--- + +:::note + +These instructions assume you have already followed the instructions for a Kubernetes upgrade on [this page,](upgrades.md) including the prerequisites, up until step 3. Upgrade Rancher. + +::: + +### Rancher Helm Template Options + +Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +Based on the choice you made during installation, complete one of the procedures below. + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + + +### Option A: Default Self-signed Certificate + +``` +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +### Option B: Certificates from Files using Kubernetes Secrets + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +### Apply the Rendered Templates + +Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. 
+ +Use `kubectl` to apply the rendered manifests. + +```plain +kubectl -n cattle-system apply -R -f ./rancher +``` + +# Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +:::tip + +Having network issues following upgrade? + +See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). + +::: + +# Known Upgrade Issues + +A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md new file mode 100644 index 0000000000..f6029f909b --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md @@ -0,0 +1,128 @@ +--- +title: Installing Rancher on Azure Kubernetes Service +shortTitle: AKS +weight: 3 +--- + +This page covers how to install Rancher on Microsoft's Azure Kubernetes Service (AKS). + +The guide uses command line tools to provision an AKS cluster with an ingress. If you prefer to provision your cluster using the Azure portal, refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal). + +If you already have an AKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +# Prerequisites + +:::caution + +Deploying to Microsoft Azure will incur charges. + +::: + +- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. +- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. +- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. +- Your subscription has sufficient quota for at least 2 vCPUs. For details on Rancher server resource requirements, refer to [this section](../../../pages-for-subheaders/installation-requirements.md#rke-and-hosted-kubernetes) +- When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +# 1. 
Prepare your Workstation + +Install the following command line tools on your workstation: + +- The Azure CLI, **az:** For help, refer to these [installation steps.](https://docs.microsoft.com/en-us/cli/azure/) +- **kubectl:** For help, refer to these [installation steps.](https://kubernetes.io/docs/tasks/tools/#kubectl) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +# 2. Create a Resource Group + +After installing the CLI, you will need to log in with your Azure account. + +``` +az login +``` + +Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) to hold all relevant resources for your cluster. Use a location that applies to your use case. + +``` +az group create --name rancher-rg --location eastus +``` + +# 3. Create the AKS Cluster + +To create an AKS cluster, run the following command. Use a VM size that applies to your use case. Refer to [this article](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for available sizes and options. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +:::note + +If the version of Kubernetes is updated to v1.22 or later, the version of ingress-nginx would also need to be [updated](https://kubernetes.github.io/ingress-nginx/#faq-migration-to-apiversion-networkingk8siov1). + +::: + +``` +az aks create \ + --resource-group rancher-rg \ + --name rancher-server \ + --kubernetes-version \ + --node-count 3 \ + --node-vm-size Standard_D2_v3 +``` + +The cluster will take some time to be deployed. + +# 4. Get Access Credentials + +After the cluster is deployed, get the access credentials. + +``` +az aks get-credentials --resource-group rancher-rg --name rancher-server +``` + +This command merges your cluster's credentials into the existing kubeconfig and allows `kubectl` to interact with the cluster. + +# 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. Installing an Ingress requires allocating a public IP address. Ensure you have sufficient quota, otherwise it will fail to assign the IP address. Limits for public IP addresses are applicable at a regional level per subscription. + +The following command installs an `nginx-ingress-controller` with a Kubernetes load balancer service. + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 6. Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP + 67s +``` + +Save the `EXTERNAL-IP`. + +# 7. 
Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the `EXTERNAL-IP` that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the [Azure DNS documentation](https://docs.microsoft.com/en-us/azure/dns/) + +# 8. Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md new file mode 100644 index 0000000000..b65a54d283 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md @@ -0,0 +1,166 @@ +--- +title: Installing Rancher on Amazon EKS +shortTitle: Amazon EKS +weight: 3 +--- + +This page covers two ways to install Rancher on EKS. + +The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. + +The second is a guide for installing an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) +- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) + +# Automated Quickstart using AWS Best Practices + +Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) + +The quick start guide provides three options for deploying Rancher on EKS: + +- **Deploy Rancher into a new VPC and new Amazon EKS cluster**. This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. +- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. +- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. 
+ +Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: + +- A highly available architecture that spans three Availability Zones.* +- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* +- In the public subnets: + - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* + - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* +- In the private subnets: + - Kubernetes nodes in an Auto Scaling group.* + - A Network Load Balancer (not shown) for accessing the Rancher console. +- Rancher deployment using AWS Systems Manager automation. +- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* +- An Amazon Route 53 DNS record for accessing the Rancher deployment. + +\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. + +# Creating an EKS Cluster for the Rancher Server + +In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +:::note Prerequisites: + +- You should already have an AWS account. +- It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. +- The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) + +::: + +### 1. Prepare your Workstation + +Install the following command line tools on your workstation: + +- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) +- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +### 2. Configure the AWS CLI + +To configure the AWS CLI, run the following command: + +``` +aws configure +``` + +Then enter the following values: + +| Value | Description | +|-------|-------------| +| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | +| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | +| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | +| Default output format | Enter `json`. | + +### 3. Create the EKS Cluster + +To create an EKS cluster, run the following command. Use the AWS region that applies to your use case. 
When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +eksctl create cluster \ + --name rancher-server \ + --version 1.20 \ + --region us-west-2 \ + --nodegroup-name ranchernodes \ + --nodes 3 \ + --nodes-min 1 \ + --nodes-max 4 \ + --managed +``` + +The cluster will take some time to be deployed with CloudFormation. + +### 4. Test the Cluster + +To test the cluster, run: + +``` +eksctl get cluster +``` + +The result should look like the following: + +``` +eksctl get cluster +2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 +2021-03-18 15:09:35 [ℹ] using region us-west-2 +NAME REGION EKSCTL CREATED +rancher-server-cluster us-west-2 True +``` + +### 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. + +The following command installs an `nginx-ingress-controller` with a LoadBalancer service. This will result in an ELB (Elastic Load Balancer) in front of NGINX: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +### 6. Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP + 27m +``` + +Save the `EXTERNAL-IP`. + +### 7. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) + +### 8. Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. 
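For reference, a minimal sketch of that step using the `rancher-latest` chart repository (the repository choice, cert-manager/TLS configuration, and other chart options are covered in the linked instructions; `rancher.my.org` is a placeholder for your own DNS name):

```
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update

kubectl create namespace cattle-system

helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org
```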
diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md new file mode 100644 index 0000000000..1122319b15 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md @@ -0,0 +1,186 @@ +--- +title: Installing Rancher on a Google Kubernetes Engine Cluster +shortTitle: GKE +weight: 3 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install Rancher using Google Kubernetes Engine. + +If you already have a GKE Kubernetes cluster, skip to the step about [installing an ingress.](#7-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +# Prerequisites + +- You will need a Google account. +- You will need a Google Cloud billing account. You can manage your Cloud Billing accounts using the Google Cloud Console. For more information about the Cloud Console, visit [General guide to the console.](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599) +- You will need a cloud quota for at least one in-use IP address and at least 2 CPUs. For more details about hardware requirements for the Rancher server, refer to [this section.](../../../pages-for-subheaders/installation-requirements.md#rke-and-hosted-kubernetes) + +# 1. Enable the Kubernetes Engine API + +Take the following steps to enable the Kubernetes Engine API: + +1. Visit the [Kubernetes Engine page](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886) in the Google Cloud Console. +1. Create or select a project. +1. Open the project and enable the Kubernetes Engine API for the project. Wait for the API and related services to be enabled. This can take several minutes. +1. Make sure that billing is enabled for your Cloud project. For information on how to enable billing for your project, refer to the [Google Cloud documentation.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project) + +# 2. Open the Cloud Shell + +Cloud Shell is a shell environment for managing resources hosted on Google Cloud. Cloud Shell comes preinstalled with the `gcloud` command-line tool and kubectl command-line tool. The `gcloud` tool provides the primary command-line interface for Google Cloud, and `kubectl` provides the primary command-line interface for running commands against Kubernetes clusters. + +The following sections describe how to launch the cloud shell from the Google Cloud Console or from your local workstation. + +### Cloud Shell + +To launch the shell from the [Google Cloud Console,](https://console.cloud.google.com) go to the upper-right corner of the console and click the terminal button. When hovering over the button, it is labeled **Activate Cloud Shell**. + +### Local Shell + +To install `gcloud` and `kubectl`, perform the following steps: + +1. Install the Cloud SDK by following [these steps.](https://cloud.google.com/sdk/docs/install) The Cloud SDK includes the `gcloud` command-line tool. 
The steps vary based on your OS. +1. After installing Cloud SDK, install the `kubectl` command-line tool by running the following command: + + ``` + gcloud components install kubectl + ``` + In a later step, `kubectl` will be configured to use the new GKE cluster. +1. [Install Helm 3](https://helm.sh/docs/intro/install/) if it is not already installed. +1. Enable Helm experimental [support for OCI images](https://github.com/helm/community/blob/master/hips/hip-0006.md) with the `HELM_EXPERIMENTAL_OCI` variable. Add the following line to `~/.bashrc` (or `~/.bash_profile` in macOS, or wherever your shell stores environment variables): + + ``` + export HELM_EXPERIMENTAL_OCI=1 + ``` +1. Run the following command to load your updated `.bashrc` file: + + ``` + source ~/.bashrc + ``` + If you are running macOS, use this command: + ``` + source ~/.bash_profile + ``` + + + +# 3. Configure the gcloud CLI + + Set up default gcloud settings using one of the following methods: + +- Using gcloud init, if you want to be walked through setting defaults. +- Using gcloud config, to individually set your project ID, zone, and region. + + + + +1. Run gcloud init and follow the directions: + + ``` + gcloud init + ``` + If you are using SSH on a remote server, use the --console-only flag to prevent the command from launching a browser: + + ``` + gcloud init --console-only + ``` +2. Follow the instructions to authorize gcloud to use your Google Cloud account and select the new project that you created. + + + + + + + +# 4. Confirm that gcloud is configured correctly + +Run: + +``` +gcloud config list +``` + +The output should resemble the following: + +``` +[compute] +region = us-west1 # Your chosen region +zone = us-west1-b # Your chosen zone +[core] +account = +disable_usage_reporting = True +project = + +Your active configuration is: [default] +``` + +# 5. Create a GKE Cluster + +The following command creates a three-node cluster. + +Replace `cluster-name` with the name of your new cluster. + +When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=1.20.8-gke.900 +``` + +# 6. Get Authentication Credentials + +After creating your cluster, you need to get authentication credentials to interact with the cluster: + +``` +gcloud container clusters get-credentials cluster-name +``` + +This command configures `kubectl` to use the cluster you created. + +# 7. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. + +The following command installs an `nginx-ingress-controller` with a LoadBalancer service: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 8. 
Get the Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s +``` + +Save the `EXTERNAL-IP`. + +# 9. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the Google Cloud documentation about [managing DNS records.](https://cloud.google.com/dns/docs/records) + +# 10. Install the Rancher Helm chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use the DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md new file mode 100644 index 0000000000..2f297a1894 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -0,0 +1,119 @@ +--- +title: Rollbacks +weight: 3 +--- + +- [Rolling Back to Rancher v2.5.0+](#rolling-back-to-rancher-v2-5-0) +- [Rolling Back to Rancher v2.2-v2.4+](#rolling-back-to-rancher-v2-2-v2-4) +- [Rolling Back to Rancher v2.0-v2.1](#rolling-back-to-rancher-v2-0-v2-1) + +# Rolling Back to Rancher v2.5.0+ + +To roll back to Rancher v2.5.0+, use the **Rancher Backups** application and restore Rancher from backup. + +Rancher has to be started with the lower/previous version after a rollback. + +A restore is performed by creating a Restore custom resource. + +:::note Important: + +* Follow the instructions from this page for restoring Rancher on the same cluster where it was backed up from. In order to migrate Rancher to a new cluster, follow the steps to [migrate Rancher.](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +* While restoring Rancher on the same setup, the Rancher deployment is manually scaled down before the restore starts, then the operator will scale it back up once the restore completes. As a result, Rancher and its UI will be unavailable until the restore is complete. While the UI is unavailable, use the original cluster kubeconfig with the restore YAML file: `kubectl create -f restore.yaml`. + +::: + +### Scale the Rancher Deployment to 0 + +1. In the upper left corner, click **☰ > local**. +1. From the menu on the left, click **Workload**. +1. Under **Workload**, click on **Deployments**. +1. At the top, click on the dropdown to adjust the filter. Select **cattle-system** as the filter. +1. Find the row for the `rancher` deployment and click **⋮ > Edit Config**. +1. Change **Replicas** to 0. +1. 
Scroll to the bottom and click **Save**. + +### Create the Restore Custom Resource + +1. Click **☰ > Cluster Management**. +1. Go to the local cluster and click **Explore**. +1. In the left navigation bar, click **Rancher Backups > Restore**. + :::note + + If the Rancher Backups app is not visible, you will need to install it from the Charts page in **Apps & Marketplace**. Refer [here](../../../pages-for-subheaders/helm-charts-in-rancher.md#charts) for more information. + + ::: + +1. Click **Create**. +1. Create the Restore with the form or with YAML. For help creating the Restore resource using the online form, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) +1. To use the YAML editor, you can click **Create > Create from YAML.** Enter the Restore YAML. The following is an example Restore custom resource: + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Restore + metadata: + name: restore-migration + spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + ``` + For help configuring the Restore, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) + +1. Click **Create**. + +**Result:** The backup file is created and updated to the target storage location. The resources are restored in this order: + +1. Custom Resource Definitions (CRDs) +2. Cluster-scoped resources +3. Namespaced resources + +To check how the restore is progressing, you can check the logs of the operator. Follow these steps to get the logs: + +```yaml +kubectl get pods -n cattle-resources-system +kubectl logs -n cattle-resources-system -f +``` + +### Roll back to a previous Rancher version + +Rancher can be rolled back using the Helm CLI. To roll back to the previous version: + +```yaml +helm rollback rancher -n cattle-system +``` + +If the previous revision is not the intended target, you can specify a revision to roll back to. To see the deployment history: + +```yaml +helm history rancher -n cattle-system +``` + +When the target revision is determined, perform the rollback. This example will roll back to revision `3`: + +```yaml +helm rollback rancher 3 -n cattle-system +``` + +# Rolling Back to Rancher v2.2-v2.4+ + +To roll back to Rancher before v2.5, follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/) Restoring a snapshot of the Rancher server cluster will revert Rancher to the version and state at the time of the snapshot. + +For information on how to roll back Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md) + +:::note + +Managed clusters are authoritative for their state. This means restoring the Rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. + +::: + +# Rolling Back to Rancher v2.0-v2.1 + +Rolling back to Rancher v2.0-v2.1 is no longer supported. 
The instructions for rolling back to these versions are preserved [here]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1) and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md new file mode 100644 index 0000000000..8a2b6a45e9 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md @@ -0,0 +1,189 @@ +--- +title: Troubleshooting the Rancher Server Kubernetes Cluster +weight: 276 +--- + +This section describes how to troubleshoot an installation of Rancher on a Kubernetes cluster. + +### Relevant Namespaces + +Most of the troubleshooting will be done on objects in these 3 namespaces. + +- `cattle-system` - `rancher` deployment and pods. +- `ingress-nginx` - Ingress controller pods and services. +- `cert-manager` - `cert-manager` pods. + +### "default backend - 404" + +A number of things can cause the ingress-controller not to forward traffic to your rancher instance. Most of the time its due to a bad ssl configuration. + +Things to check + +- [Is Rancher Running](#check-if-rancher-is-running) +- [Cert CN is "Kubernetes Ingress Controller Fake Certificate"](#cert-cn-is-kubernetes-ingress-controller-fake-certificate) + +### Check if Rancher is Running + +Use `kubectl` to check the `cattle-system` system namespace and see if the Rancher pods are in a Running state. + +``` +kubectl -n cattle-system get pods + +NAME READY STATUS RESTARTS AGE +pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m +``` + +If the state is not `Running`, run a `describe` on the pod and check the Events. + +``` +kubectl -n cattle-system describe pod + +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 11m default-scheduler Successfully assigned rancher-784d94f59b-vgqzh to localhost + Normal SuccessfulMountVolume 11m kubelet, localhost MountVolume.SetUp succeeded for volume "rancher-token-dj4mt" + Normal Pulling 11m kubelet, localhost pulling image "rancher/rancher:v2.0.4" + Normal Pulled 11m kubelet, localhost Successfully pulled image "rancher/rancher:v2.0.4" + Normal Created 11m kubelet, localhost Created container + Normal Started 11m kubelet, localhost Started container +``` + +### Check the Rancher Logs + +Use `kubectl` to list the pods. + +``` +kubectl -n cattle-system get pods + +NAME READY STATUS RESTARTS AGE +pod/rancher-784d94f59b-vgqzh 1/1 Running 0 10m +``` + +Use `kubectl` and the pod name to list the logs from the pod. + +``` +kubectl -n cattle-system logs -f rancher-784d94f59b-vgqzh +``` + +### Cert CN is "Kubernetes Ingress Controller Fake Certificate" + +Use your browser to check the certificate details. If it says the Common Name is "Kubernetes Ingress Controller Fake Certificate", something may have gone wrong with reading or issuing your SSL cert. + +:::note + +If you are using LetsEncrypt to issue certs, it can sometimes take a few minutes to issue the cert. + +::: + +### Checking for issues with cert-manager issued certs (Rancher Generated or LetsEncrypt) + +`cert-manager` has 3 parts. + +- `cert-manager` pod in the `cert-manager` namespace. +- `Issuer` object in the `cattle-system` namespace. +- `Certificate` object in the `cattle-system` namespace. 
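A quick way to list all three at once (a sketch; it assumes the cert-manager CRDs are installed so that the `issuer` and `certificate` resource types resolve):

```
kubectl -n cert-manager get pods
kubectl -n cattle-system get issuer,certificate
```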
+ +Work backwards and do a `kubectl describe` on each object and check the events. You can track down what might be missing. + +For example there is a problem with the Issuer: + +``` +kubectl -n cattle-system describe certificate +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning IssuerNotReady 18s (x23 over 19m) cert-manager Issuer rancher not ready +``` + +``` +kubectl -n cattle-system describe issuer +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning ErrInitIssuer 19m (x12 over 19m) cert-manager Error initializing issuer: secret "tls-rancher" not found + Warning ErrGetKeyPair 9m (x16 over 19m) cert-manager Error getting keypair for CA issuer: secret "tls-rancher" not found +``` + +### Checking for Issues with Your Own SSL Certs + +Your certs get applied directly to the Ingress object in the `cattle-system` namespace. + +Check the status of the Ingress object and see if its ready. + +``` +kubectl -n cattle-system describe ingress +``` + +If its ready and the SSL is still not working you may have a malformed cert or secret. + +Check the nginx-ingress-controller logs. Because the nginx-ingress-controller has multiple containers in its pod you will need to specify the name of the container. + +``` +kubectl -n ingress-nginx logs -f nginx-ingress-controller-rfjrq nginx-ingress-controller +... +W0705 23:04:58.240571 7 backend_ssl.go:49] error obtaining PEM from secret cattle-system/tls-rancher-ingress: error retrieving secret cattle-system/tls-rancher-ingress: secret cattle-system/tls-rancher-ingress was not found +``` + +### No matches for kind "Issuer" + +The SSL configuration option you have chosen requires cert-manager to be installed before installing Rancher or else the following error is shown: + +``` +Error: validation failed: unable to recognize "": no matches for kind "Issuer" in version "certmanager.k8s.io/v1alpha1" +``` + +Install cert-manager and try installing Rancher again. + + +### Canal Pods show READY 2/3 + +The most common cause of this issue is port 8472/UDP is not open between the nodes. Check your local firewall, network routing or security groups. + +Once the network issue is resolved, the `canal` pods should timeout and restart to establish their connections. + +### nginx-ingress-controller Pods show RESTARTS + +The most common cause of this issue is the `canal` pods have failed to establish the overlay network. See [canal Pods show READY `2/3`](#canal-pods-show-ready-2-3) for troubleshooting. + + +### Failed to dial to /var/run/docker.sock: ssh: rejected: administratively prohibited (open failed) + +Some causes of this error include: + +* User specified to connect with does not have permission to access the Docker socket. This can be checked by logging into the host and running the command `docker ps`: + +``` +$ ssh user@server +user@server$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +``` + +See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. + +* When using RedHat/CentOS as operating system, you cannot use the user `root` to connect to the nodes because of [Bugzilla #1527565](https://bugzilla.redhat.com/show_bug.cgi?id=1527565). You will need to add a separate user and configure it to access the Docker socket. 
See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user) how to set this up properly. + +* SSH server version is not version 6.7 or higher. This is needed for socket forwarding to work, which is used to connect to the Docker socket over SSH. This can be checked using `sshd -V` on the host you are connecting to, or using netcat: +``` +$ nc xxx.xxx.xxx.xxx 22 +SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2.10 +``` + +### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: no key found + +The key file specified as `ssh_key_path` cannot be accessed. Make sure that you specified the private key file (not the public key, `.pub`), and that the user that is running the `rke` command can access the private key file. + +### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain + +The key file specified as `ssh_key_path` is not correct for accessing the node. Double-check if you specified the correct `ssh_key_path` for the node and if you specified the correct user to connect with. + +### Failed to dial ssh using address [xxx.xxx.xxx.xxx:xx]: Error configuring SSH: ssh: cannot decode encrypted private keys + +If you want to use encrypted private keys, you should use `ssh-agent` to load your keys with your passphrase. If the `SSH_AUTH_SOCK` environment variable is found in the environment where the `rke` command is run, it will be used automatically to connect to the node. + +### Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running? + +The node is not reachable on the configured `address` and `port`. diff --git a/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md new file mode 100644 index 0000000000..31621a1bd8 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -0,0 +1,177 @@ +--- +title: Upgrades +weight: 2 +--- +The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air-gapped installs with Helm. + +For the instructions to upgrade Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md) + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +- [Prerequisites](#prerequisites) +- [Upgrade Outline](#upgrade-outline) +- [Known Upgrade Issues](#known-upgrade-issues) +- [RKE Add-on Installs](#rke-add-on-installs) + +# Prerequisites + +### Access to kubeconfig + +Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. 
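For example, a minimal sketch of pointing both tools at the same kubeconfig before upgrading (the path is an assumption; adjust it to wherever your kubeconfig lives):

```
export KUBECONFIG=/path/to/kube_config_cluster.yml

kubectl get nodes                   # confirm the cluster is reachable
helm ls --namespace cattle-system   # confirm the existing rancher release is visible
```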
+ +If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. + +The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) + +### Review Known Issues + +Review the list of known issues for each Rancher version, which can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository](../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren't supported. + +### Helm Version + +The upgrade instructions assume you are using Helm 3. + +For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +### For air-gapped installs: Populate private registry + +For [air-gapped installs only,](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version. Follow the guide to [populate your private registry](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +### For upgrades from a Rancher server with a hidden local cluster + +If you are upgrading to Rancher v2.5 from a Rancher server that was started with the Helm chart option `--add-local=false`, you will need to drop that flag when upgrading. Otherwise, the Rancher server will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. For more information, see [this section.](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#upgrading-from-rancher-with-a-hidden-local-cluster) + +### For upgrades with cert-manager older than 0.8.0 + +[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](../resources/upgrade-cert-manager.md) + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) +- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) +- [3. Upgrade Rancher](#3-upgrade-rancher) +- [4. Verify the Upgrade](#4-verify-the-upgrade) + +# 1. Back up Your Kubernetes Cluster that is Running Rancher Server + +Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. + +You'll use the backup as a restore point if something goes wrong during upgrade. + +# 2. Update the Helm chart repository + +1. Update your local helm repo cache. 
+ + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories](../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + + :::note + + If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + ::: + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: + + ```plain + helm fetch rancher-/rancher --version=v2.4.11 + ``` + +# 3. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air-gapped installations of Rancher with Helm. + +:::note Air Gap Instructions: + +If you are installing Rancher in an air-gapped environment, skip the rest of this page and render the Helm template by following the instructions on [this page.](air-gapped-upgrades.md) + +::: + + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. + +``` +helm get values rancher -n cattle-system + +hostname: rancher.my.org +``` + +:::note + +There will be more values that are listed with this command. This is just an example of one of the values. + +::: + + +If you are upgrading cert-manager to the latest version from v1.5 or below, follow the [cert-manager upgrade docs](../resources/upgrade-cert-manager.md#option-c-upgrade-cert-manager-from-versions-1-5-and-below) to learn how to upgrade cert-manager without needing to perform an uninstall or reinstall of Rancher. Otherwise, follow the [steps to upgrade Rancher](#steps-to-upgrade-rancher) below. + +### Steps to Upgrade Rancher + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`: + +``` +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +:::note + +The above is an example, there may be more values from the previous step that need to be appended. + +::: + +Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: + +``` +helm get values rancher -n cattle-system -o yaml > values.yaml + +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + -f values.yaml \ + --version=2.4.5 +``` + +# 4. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +:::tip + +Having network issues following upgrade? + +See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). 
+ +::: + +# Known Upgrade Issues + +A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md new file mode 100644 index 0000000000..610eb3233e --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/installation-requirements/dockershim.md @@ -0,0 +1,44 @@ +--- +title: Dockershim +weight: 300 +--- + +The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +RKE clusters now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community external Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. + +To enable the external Dockershim, configure the following option. + +``` +enable_cri_dockerd: true +``` + +For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. + +### FAQ + +
+ +Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? + +A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on a version of Rancher that supports RKE 1.21. See the [Rancher support matrix](https://rancher.com/support-matrix/) for details. + +
+ +Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? + +A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and it is not deprecated until a later release. For information on the timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to 1.21 as you would normally. + +
+ +Q: What are my other options if I don’t want to depend on the Dockershim? + +A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 or K3s are two options for doing this. + +
+ +Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? + +A: Today, you can stand up a new cluster and migrate workloads to a new RKE2 cluster that uses containerd. Rancher is exploring the possibility of an in-place upgrade path. + +
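For RKE clusters defined with a `cluster.yml`, `enable_cri_dockerd` is a top-level field. A minimal sketch (the node address, SSH user, and Kubernetes version below are placeholders):

```yaml
# cluster.yml (excerpt)
kubernetes_version: v1.21.x-rancher1-1   # placeholder; use a version validated for your Rancher release
enable_cri_dockerd: true
nodes:
  - address: 203.0.113.10
    user: ubuntu
    role: [controlplane, etcd, worker]
```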
diff --git a/content/rancher/v2.5/en/installation/requirements/installing-docker/_index.md b/docs/getting-started/installation-and-upgrade/installation-requirements/install-docker.md similarity index 100% rename from content/rancher/v2.5/en/installation/requirements/installing-docker/_index.md rename to docs/getting-started/installation-and-upgrade/installation-requirements/install-docker.md diff --git a/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md b/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md new file mode 100644 index 0000000000..6dcd2ce95f --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md @@ -0,0 +1,350 @@ +--- +title: Port Requirements +description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. + +- [Rancher Nodes](#rancher-nodes) + - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) + - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) + - [Ports for Rancher Server Nodes on RKE2](#ports-for-rancher-server-nodes-on-rke2) + - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) +- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) + - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) + - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) + - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) + - [Ports for Registered Clusters](#ports-for-registered-clusters) +- [Other Port Considerations](#other-port-considerations) + - [Commonly Used Ports](#commonly-used-ports) + - [Local Node Traffic](#local-node-traffic) + - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) + - [Opening SUSE Linux Ports](#opening-suse-linux-ports) + +# Rancher Nodes + +The following table lists the ports that need to be open to and from nodes that are running the Rancher server. + +The port requirements differ based on the Rancher server architecture. + +Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. + +:::note Notes: + +- Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). +- Kubernetes recommends TCP 30000-32767 for node port services. +- For firewalls, traffic may need to be enabled within the cluster and pod CIDR. +- Rancher nodes may also need outbound access to an external S3 location which is used for storing cluster backups (Minio for example). + +::: + +### Ports for Rancher Server Nodes on K3s + +
+ Click to expand + +The K3s server needs port 6443 to be accessible by the nodes. + +The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +:::note Important: + +The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +::: + +The following tables break down the port requirements for inbound and outbound traffic: + +
Inbound Rules for Rancher Server Nodes
+ +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| TCP | 443 |
  • server nodes
  • agent nodes
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl | +| TCP | 6443 | K3s server nodes | Kubernetes API +| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. +| TCP | 10250 | K3s server and agent nodes | kubelet + +
Outbound Rules for Rancher Nodes
+ +| Protocol | Port | Destination | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
+ +### Ports for Rancher Server Nodes on RKE + +
+ Click to expand + +Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. + +The following tables break down the port requirements for traffic between the Rancher nodes: + +
Rules for traffic between Rancher nodes
+ +| Protocol | Port | Description | +|-----|-----|----------------| +| TCP | 443 | Rancher agents | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| TCP | 6443 | Kubernetes apiserver | +| TCP | 8443 | Nginx Ingress's Validating Webhook | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 10250 | Metrics server communication with all nodes | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | + +The following tables break down the port requirements for inbound and outbound traffic: + +
Inbound Rules for Rancher Nodes
+ +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | +| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | +| TCP | 443 |
  • Load Balancer/Reverse Proxy
  • IPs of all cluster nodes and other API/UI clients
| HTTPS traffic to Rancher UI/API | +| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | + +
Outbound Rules for Rancher Nodes
+ +| Protocol | Port | Destination | Description | +|-----|-----|----------------|---| +| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | +| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | +| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | +| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | + +
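Before installing, you can spot-check a few of the node-to-node ports listed above from one Rancher node to another (a sketch using `nc`; it assumes netcat is installed and `203.0.113.11` stands in for another node's internal IP; note that a UDP probe cannot reliably confirm the VXLAN port):

```
nc -zv -w 3 203.0.113.11 6443    # Kubernetes apiserver
nc -zv -w 3 203.0.113.11 2379    # etcd client requests
nc -zv -w 3 203.0.113.11 10250   # kubelet/metrics
nc -zuv -w 3 203.0.113.11 8472   # Canal/Flannel VXLAN (UDP)
```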
+ +### Ports for Rancher Server Nodes on RKE2 + +
+ Click to expand + +The RKE2 server needs ports 6443 and 9345 to be accessible by other nodes in the cluster. + +All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +:::note Important: + +The VXLAN port on nodes should not be exposed to the world, as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +::: + +
Inbound Rules for RKE2 Server Nodes
+ +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 9345 | RKE2 agent nodes | Kubernetes API +| TCP | 6443 | RKE2 agent nodes | Kubernetes API +| UDP | 8472 | RKE2 server and agent nodes | Required only for Flannel VXLAN +| TCP | 10250 | RKE2 server and agent nodes | kubelet +| TCP | 2379 | RKE2 server nodes | etcd client port +| TCP | 2380 | RKE2 server nodes | etcd peer port +| TCP | 30000-32767 | RKE2 server and agent nodes | NodePort port range +| TCP | 5473 | Calico-node pod connecting to typha pod | Required when deploying with Calico +| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| HTTPS | 8443 |
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. | + +Typically all outbound traffic is allowed. +
+ +### Ports for Rancher Server in Docker + +
+ Click to expand + +The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: + +
Inbound Rules for Rancher Node
+ +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used +| TCP | 443 |
  • hosted/registered Kubernetes
  • any source that needs to be able to use the Rancher UI or API
| Rancher agent, Rancher UI/API, kubectl + +
Outbound Rules for Rancher Node
+ +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
+ +# Downstream Kubernetes Cluster Nodes + +Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. + +The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +The following diagram depicts the ports that are opened for each [cluster type](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
Port Requirements for the Rancher Management Plane
+ +![Basic Port Requirements](/img/port-communications.svg) + +:::tip + +If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. + +::: + +### Ports for Rancher Launched Kubernetes Clusters using Node Pools + +
+ Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with nodes created in an [Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +:::note + +The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +::: + +{{< ports-iaas-nodes >}} + +
+ +### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes + +
+ Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with [Custom Nodes](../../../pages-for-subheaders/use-existing-nodes.md). + +{{< ports-custom-nodes >}} + +
+ +### Ports for Hosted Kubernetes Clusters + +
+ Click to expand + +The following table depicts the port requirements for [hosted clusters](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). + +{{< ports-imported-hosted >}} + +
+ +### Ports for Registered Clusters + +:::note + +Registered clusters were called imported clusters before Rancher v2.5. + +::: + +
+ Click to expand + +The following table depicts the port requirements for [registered clusters](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md). + +{{< ports-imported-hosted >}} + +
+ + +# Other Port Considerations + +### Commonly Used Ports + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +import CommonPortsTable from '../../../shared-files/_common-ports-table.md'; + + + +---- + +### Local Node Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. + +### Rancher AWS EC2 Security Group + +When using the [AWS EC2 node driver](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Opening SUSE Linux Ports + +SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, + + + +1. SSH into the instance. +1. Start YaST in text mode: +``` +sudo yast2 +``` + +1. Navigate to **Security and Users** > **Firewall** > **Zones:public** > **Ports**. To navigate within the interface, follow the instructions [here](https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha-yast-text.html#sec-yast-cli-navigate). +1. To open the required ports, enter them into the **TCP Ports** and **UDP Ports** fields. In this example, ports 9796 and 10250 are also opened for monitoring. The resulting fields should look similar to the following: +```yaml +TCP Ports +22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767 +UDP Ports +8472, 30000-32767 +``` + +1. When all required ports are enter, select **Accept**. + + + +1. SSH into the instance. +1. 
Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: + ``` + FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" + FW_SERVICES_EXT_UDP="8472 30000:32767" + FW_ROUTE=yes + ``` +1. Restart the firewall with the new ports: + ``` + SuSEfirewall2 + ``` + + + + +**Result:** The node has the open ports required to be added to a custom cluster. diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md new file mode 100644 index 0000000000..72ea1a53a1 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -0,0 +1,150 @@ +--- +title: Docker Install Commands +weight: 1 +--- + +The Docker installation is for Rancher users who want to test out Rancher. + +Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +The backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +:::note Do you want to.. + +- Configure custom CA root certificate to access your services? See [Custom CA root certificate](../../resources/custom-ca-root-certificates.md). +- Record all transactions with the Rancher API? See [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). + +::: + +Choose from the following options: + +### Option A: Default Self-Signed Certificate + +
+ Click to expand + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to install. | + +Privileged access is [required.](#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
+ +### Option B: Bring Your Own Certificate: Self-Signed + +
+ Click to expand + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +:::note Prerequisites: + +From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. + +- The certificate files must be in PEM format. +- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +::: + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | + +Privileged access is [required.](#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
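+
+The prerequisites above mention creating a self-signed certificate with OpenSSL or another tool of your choice. As a minimal sketch (the hostname `rancher.example.com` and the 365-day validity are placeholders, and the `-addext` flag assumes OpenSSL 1.1.1 or newer):
+
+```
+# Generate a private key and a matching self-signed certificate in PEM format.
+# Replace rancher.example.com with the hostname you will point at Rancher.
+openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
+  -keyout key.pem -out cert.pem \
+  -subj "/CN=rancher.example.com" \
+  -addext "subjectAltName=DNS:rancher.example.com"
+```
+
+The resulting `cert.pem` and `key.pem` files can then be mounted into the container as shown in the command above.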
+ +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
+ Click to expand
+
+In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings.
+
+:::note Prerequisite:
+
+The certificate files must be in PEM format.
+
+:::
+
+After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary.
+
+| Placeholder | Description |
+| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `` | The path to the directory containing your certificate files. |
+| `` | The path to your full certificate chain. |
+| `` | The path to the private key for your certificate. |
+| `` | Your private registry URL and port. |
+| `` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to install. |
+
+:::note
+
+Use the `--no-cacerts` argument to the container to disable the default CA certificate generated by Rancher.
+
+:::
+
+Privileged access is [required.](#privileged-access-for-rancher)
+
+```
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  --no-cacerts \
+  -v //:/etc/rancher/ssl/cert.pem \
+  -v //:/etc/rancher/ssl/key.pem \
+  -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher
+  -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts
+  --privileged \
+  /rancher/rancher:
+```
+
+
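+
+Whichever option you chose, you can do a quick sanity check that the container came up before moving on (a sketch; the container ID reported by `docker ps` will differ on your host):
+
+```
+# Confirm the Rancher container is running
+docker ps
+
+# Follow the Rancher server logs while it bootstraps; startup can take a few minutes
+docker logs -f <container-id>
+```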
+ + + +:::note + +If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. + +::: + diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md new file mode 100644 index 0000000000..151ecb9822 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -0,0 +1,188 @@ +--- +title: '1. Set up Infrastructure and Private Registry' +weight: 100 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). + +An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. + +The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.](../../../../pages-for-subheaders/installation-and-upgrade.md) + +Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. + + + + +We recommend setting up the following infrastructure for a high-availability installation: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. 
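+
+When you install K3s in a later step, you will point it at this datastore through its `--datastore-endpoint` flag. As a rough sketch of what that looks like for a MySQL-backed setup (the hostname, credentials, and database name below are placeholders):
+
+```
+# Example only: start a K3s server against an external MySQL datastore
+k3s server \
+  --datastore-endpoint="mysql://username:password@tcp(db-hostname:3306)/k3s"
+```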
+ +For a high-availability K3s installation, you will need to set up one of the following external databases: + +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) + +When you install Kubernetes, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the database, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md) for setting up a MySQL database on Amazon's RDS service. + +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. 
For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +:::note Important: + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 5. Set up a Private Docker Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. 
If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. 
For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +:::caution + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 4. Set up a Private Docker Registry + +Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +:::note Notes: + +- The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. + +- The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +::: + +### 1. Set up a Linux Node + +This host will be disconnected from the Internet, but needs to be able to connect to your private registry. 
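+
+A quick way to sanity-check that connectivity from the node is to query the registry's API (a sketch; `registry.example.com:5000` is a placeholder for your registry address, and `-k` only skips certificate verification for this reachability test):
+
+```
+curl -k https://registry.example.com:5000/v2/
+# An empty JSON body ({}) or an authentication challenge means the registry API is reachable.
+```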
+ +Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up a Private Docker Registry + +Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) + + + + +### [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md new file mode 100644 index 0000000000..9100a3441a --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -0,0 +1,385 @@ +--- +title: '3. Install Kubernetes (Skip for Docker Installs)' +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note + +Skip this section if you are installing Rancher on a single node with Docker. + +::: + +This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. + +Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. + +The steps to set up an air-gapped Kubernetes cluster on RKE, RKE2, or K3s are shown below. + + + + +In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. + +### Installation Outline + +1. [Prepare Images Directory](#1-prepare-images-directory) +2. [Create Registry YAML](#2-create-registry-yaml) +3. [Install K3s](#3-install-k3s) +4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) + +### 1. Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### 2. Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. 
+ +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note, at this time only secure registries are supported with K3s (SSL with custom CA). + +For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) + +### 3. Install K3s + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +Obtain the K3s binary from the [releases](https://github.com/k3s-io/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, and name it `install.sh`. + +Install K3s on each server: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +Install K3s on each agent: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` + +:::note + +K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gapped networks. + +::: + +### 4. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. 
If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### Note on Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/k3s-io/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if not restarted automatically by installer). + + + + +In this guide, we are assuming you have created your nodes in your air-gapped environment and have a secure Docker private registry on your bastion server. + +### Installation Outline + +1. [Create RKE2 configuration](#1-create-rke2-configuration) +2. [Create Registry YAML](#2-create-registry-yaml) +3. [Install RKE2](#3-install-rke2) +4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) + +### 1. Create RKE2 configuration +Create the config.yaml file at `/etc/rancher/rke2/config.yaml`. This will contain all the configuration options necessary to create a highly available RKE2 cluster. + +On the first server the minimum config is: + +``` +token: my-shared-secret +tls-san: + - loadbalancer-dns-domain.com +``` + +On each other server the config file should contain the same token and tell RKE2 to connect to the existing first server: + +``` +server: https://ip-of-first-server:9345 +token: my-shared-secret +tls-san: + - loadbalancer-dns-domain.com +``` + +For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/ha/). + +:::note + +RKE2 additionally provides a `resolv-conf` option for kubelets, which may help with configuring DNS in air-gap networks. + +::: + +### 2. Create Registry YAML +Create the registries.yaml file at `/etc/rancher/rke2/registries.yaml`. This will tell RKE2 the necessary details to connect to your private registry. + +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +For more information on private registries configuration file for RKE2, refer to the [RKE2 documentation.](https://docs.rke2.io/install/containerd_registry_configuration/) + +### 3. 
Install RKE2 + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +Download the install script, rke2, rke2-images, and sha256sum archives from the release and upload them into a directory on each server: + +``` +mkdir /tmp/rke2-artifacts && cd /tmp/rke2-artifacts/ +wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2-images.linux-amd64.tar.zst +wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/rke2.linux-amd64.tar.gz +wget https://github.com/rancher/rke2/releases/download/v1.21.5%2Brke2r2/sha256sum-amd64.txt +curl -sfL https://get.rke2.io --output install.sh +``` + +Next, run install.sh using the directory on each server, as in the example below: + +``` +INSTALL_RKE2_ARTIFACT_PATH=/tmp/rke2-artifacts sh install.sh +``` + +Then enable and start the service on all servers: + +`` +systemctl enable rke2-server.service +systemctl start rke2-server.service +`` + +For more information, refer to the [RKE2 documentation](https://docs.rke2.io/install/airgap/). + +### 4. Save and Start Using the kubeconfig File + +When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl), a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `rke2.yaml`: + +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your RKE2 cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### Note on Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. 
Download the new air-gap artifacts and install script from the [releases](https://github.com/rancher/rke2/releases) page for the version of RKE2 you will be upgrading to. +2. Run the script again just as you had done in the past with the same environment variables. +3. Restart the RKE2 service. + + + +We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. + +### 1. Install RKE + +Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) + +### 2. Create an RKE Config File + +From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. + +This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. + +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the three nodes you created. + +:::tip + +For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +::: + +
RKE Options
+ +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | +| `user` | ✓ | A user that can run Docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### 3. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### 4. Save Your Files + +:::note Important: + +The files mentioned below are needed to maintain, troubleshoot, and upgrade your cluster. + +::: + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +
+
+ +:::note + +The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +::: + +### Issues or errors? + +See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + +### [Next: Install Rancher](install-rancher-ha.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md new file mode 100644 index 0000000000..e8c457aac9 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -0,0 +1,263 @@ +--- +title: 4. Install Rancher +weight: 400 +--- + +This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Privileged Access for Rancher + +When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. + +# Docker Instructions + +If you want to continue the air gapped installation using Docker commands, skip the rest of this page and follow the instructions on [this page.](docker-install-commands.md) + +# Kubernetes Instructions + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher: + +- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) +- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) +- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) +- [4. Install Rancher](#4-install-rancher) + +# 1. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements](../../resources/helm-version-requirements.md) to choose a version of Helm to install Rancher. + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. 
+ ```plain + helm fetch rancher-/rancher + ``` + + If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: + ```plain + helm fetch rancher-stable/rancher --version=v2.4.8 + ``` + +# 2. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +:::note + +If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +::: + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | + +# Helm Chart Options for Air Gap Installations + +When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. + +| Chart Option | Chart Value | Description | +| ----------------------- | -------------------------------- | ---- | +| `certmanager.version` | `` | Configure proper Rancher TLS issuer depending of running cert-manager version. | +| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +# 3. Render the Rancher Helm Template + +Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. + +# Option A: Default Self-Signed Certificate + + +By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. + +:::note + +Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](../../resources/upgrade-cert-manager.md). + +::: + +### 1. Add the cert-manager repo + +From a system connected to the internet, add the cert-manager repo to Helm: + +```plain +helm repo add jetstack https://charts.jetstack.io +helm repo update +``` + +### 2. Fetch the cert-manager chart + +Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager). + +:::note + +New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +::: + +```plain +helm fetch jetstack/cert-manager --version v1.7.1 +``` + +### 3. Render the cert-manager template + +Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + +```plain +helm template cert-manager ./cert-manager-v1.7.1.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller \ + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ + --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl +``` + +### 4. 
Download the cert-manager CRD + +Download the required CRD file for cert-manager: + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/cert-manager/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml + ``` + +### 5. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` + +# Option B: Certificates From Files using Kubernetes Secrets + + +### 1. Create secrets + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +### 2. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template rancher ./rancher-.tgz --output-dir . 
\
+  --no-hooks \ # prevent files for Helm hooks from being generated
+  --namespace cattle-system \
+  --set hostname= \
+  --set rancherImage=/rancher/rancher \
+  --set ingress.tls.source=secret \
+  --set privateCA=true \
+  --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher
+  --set useBundledSystemChart=true # Use the packaged Rancher system charts
+```
+
+**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6`
+
+Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them.
+
+# 4. Install Rancher
+
+Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation.
+
+Use `kubectl` to create namespaces and apply the rendered manifests.
+
+If you choose to use self-signed certificates in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), install cert-manager.
+
+### For Self-Signed Certificate Installs, Install Cert-manager
+
+
+ Click to expand + +If you are using self-signed certificates, install cert-manager: + +1. Create the namespace for cert-manager. +```plain +kubectl create namespace cert-manager +``` + +1. Create the cert-manager CustomResourceDefinitions (CRDs). +```plain +kubectl apply -f cert-manager/cert-manager-crd.yaml +``` + + :::note + + If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + + ::: + +1. Launch cert-manager. +```plain +kubectl apply -R -f ./cert-manager +``` + +
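+
+Before installing Rancher itself, it can help to confirm that cert-manager finished rolling out (a quick check, assuming the `cert-manager` namespace created above):
+
+```plain
+kubectl get pods --namespace cert-manager
+```
+
+The cert-manager, cainjector, and webhook pods should all reach a `Running` state before you continue.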
+ +### Install Rancher with kubectl + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` +The installation is complete. + +:::caution + +If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +::: + +# Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](../../../../reference-guides/installation-references/helm-chart-options.md) +- [Adding TLS secrets](../../resources/add-tls-secrets.md) +- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md new file mode 100644 index 0000000000..c79cddeece --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md @@ -0,0 +1,316 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +By default, all images used to [provision Kubernetes clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any tools in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. + +Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. + +The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes](../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed. + +:::note Prerequisites: + +You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. + +If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. + +::: + + + + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) +2. 
[Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) +3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) +4. [Populate the private registry](#4-populate-the-private-registry) + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### 1. Find the required assets for your Rancher version + +1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets**. Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### 2. Collect the cert-manager image + +:::note + +Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. + +::: + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://artifacthub.io/packages/helm/cert-manager/cert-manager) image to `rancher-images.txt` as well. + + +:::note + +New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +::: + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + :::note + + Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../resources/upgrade-cert-manager.md). + + ::: + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v1.7.1 + helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. 
When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### 4. Populate the private registry + +Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` + + + + +For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +# Windows Steps + +The Windows images need to be collected and pushed from a Windows server workstation. + +1. Find the required assets for your Rancher version +2. Save the images to your Windows Server workstation +3. Prepare the Docker daemon +4. Populate the private registry + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + +| Release File | Description | +|----------------------------|------------------| +| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | +| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | +| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + ```plain + ./rancher-save-images.ps1 + ``` + + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. 
When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + + + +### 3. Prepare the Docker daemon + +Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ``` + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + + + +### 4. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. + +1. Using `powershell`, log into your private registry if required: + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.ps1 --registry + ``` + +# Linux Steps + +The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. + +1. Find the required assets for your Rancher version +2. Collect all the required images +3. Save the images to your Linux workstation +4. Populate the private registry + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets**. + +2. From the release's **Assets** section, download the following files: + +| Release File | Description | +|----------------------------| -------------------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. 
| + + + +### 2. Collect all the required images + +**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://artifacthub.io/packages/helm/cert-manager/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + :::note + + Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../resources/upgrade-cert-manager.md). + + ::: + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.12.0 + helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + + + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + +**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + + + +### 4. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. + +The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + +```plain +./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry +``` + + + + +### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster](install-kubernetes.md) + +### [Next step for Docker Installs - Install Rancher](install-rancher-ha.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md new file mode 100644 index 0000000000..bf915741f8 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md @@ -0,0 +1,176 @@ +--- +title: '2. 
Install Kubernetes'
+weight: 200
+---
+
+Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in.
+
+### Installing Docker
+
+First, install Docker and set up the HTTP proxy on all three Linux nodes. To do so, perform the following steps on all three nodes.
+
+For convenience, export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell:
+
+```
+export proxy_host="10.0.0.5:8888"
+export HTTP_PROXY=http://${proxy_host}
+export HTTPS_PROXY=http://${proxy_host}
+export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16
+```
+
+Next, configure apt to use this proxy when installing packages (the here-document is left unquoted so that `${proxy_host}` is expanded when the file is written). If you are not using Ubuntu, adapt this step accordingly:
+
+```
+cat <<EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null
+Acquire::http::Proxy "http://${proxy_host}/";
+Acquire::https::Proxy "http://${proxy_host}/";
+EOF
+```
+
+Now you can install Docker:
+
+```
+curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh
+```
+
+Then ensure that your current user is able to access the Docker daemon without sudo:
+
+```
+sudo usermod -aG docker YOUR_USERNAME
+```
+
+And configure the Docker daemon to use the proxy to pull images (again, the here-document is unquoted so that `${proxy_host}` is expanded):
+
+```
+sudo mkdir -p /etc/systemd/system/docker.service.d
+cat <<EOF | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null
+[Service]
+Environment="HTTP_PROXY=http://${proxy_host}"
+Environment="HTTPS_PROXY=http://${proxy_host}"
+Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16"
+EOF
+```
+
+To apply the configuration, restart the Docker daemon:
+
+```
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```
+
+#### Air-gapped proxy
+
+_New in v2.6.4_
+
+You can now provision node driver clusters from an air-gapped cluster configured to use a proxy for outbound connections.
+
+In addition to setting the default rules for a proxy server, you will need to add additional rules, shown below, to provision node driver clusters from a proxied Rancher environment.
+ +You will configure your filepath according to your setup, e.g., `/etc/apt/apt.conf.d/proxy.conf`: + +``` +acl SSL_ports port 22 +acl SSL_ports port 2376 + +acl Safe_ports port 22 # ssh +acl Safe_ports port 2376 # docker port +``` + +### Creating the RKE Cluster + +You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: + +* [RKE CLI binary](https://rancher.com/docs/rke/latest/en/installation/#download-the-rke-binary) + +``` +sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 +sudo chmod +x /usr/local/bin/rke +``` + +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +``` +curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" +chmod +x ./kubectl +sudo mv ./kubectl /usr/local/bin/kubectl +``` + +* [helm](https://helm.sh/docs/intro/install/) + +``` +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 +chmod +x get_helm.sh +sudo ./get_helm.sh +``` + +Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). + +``` +nodes: + - address: 10.0.1.200 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 10.0.1.201 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 10.0.1.202 + user: ubuntu + role: [controlplane,worker,etcd] + +services: + etcd: + backup_config: + interval_hours: 12 + retention: 6 +``` + +After that, you can create the Kubernetes cluster by running: + +``` +rke up --config rancher-cluster.yaml +``` + +RKE creates a state file called `rancher-cluster.rkestate`, this is needed if you want to perform updates, modify your cluster configuration or restore it from a backup. It also creates a `kube_config_cluster.yaml` file, that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system. + +To have a look at your cluster run: + +``` +export KUBECONFIG=kube_config_cluster.yaml +kubectl cluster-info +kubectl get pods --all-namespaces +``` + +You can also verify that your external load balancer works, and the DNS entry is set up correctly. If you send a request to either, you should receive HTTP 404 response from the ingress controller: + +``` +$ curl 10.0.1.100 +default backend - 404 +$ curl rancher.example.com +default backend - 404 +``` + +### Save Your Files + +:::note Important: + +The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +::: + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. 
+- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. + +:::note + +The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +::: + +### Issues or errors? + +See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + +### [Next: Install Rancher](install-rancher.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md new file mode 100644 index 0000000000..9b9e365ae6 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md @@ -0,0 +1,101 @@ +--- +title: 3. Install Rancher +weight: 300 +--- + +Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use-case you will likely use Let's Encrypt or provide your own certificate. + +:::note + +These installation instructions assume you are using Helm 3. + +::: + +### Install cert-manager + +Add the cert-manager helm repository: + +``` +helm repo add jetstack https://charts.jetstack.io +``` + +Create a namespace for cert-manager: + +``` +kubectl create namespace cert-manager +``` + +Install the CustomResourceDefinitions of cert-manager: + +:::note + +New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +::: + +``` +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml +``` + +And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers: + +``` +helm upgrade --install cert-manager jetstack/cert-manager \ + --namespace cert-manager --version v1.7.1 \ + --set http_proxy=http://${proxy_host} \ + --set https_proxy=http://${proxy_host} \ + --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local +``` + +Now you should wait until cert-manager is finished starting up: + +``` +kubectl rollout status deployment -n cert-manager cert-manager +kubectl rollout status deployment -n cert-manager cert-manager-webhook +``` + +### Install Rancher + +Next you can install Rancher itself. First add the helm repository: + +``` +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +``` + +Create a namespace: + +``` +kubectl create namespace cattle-system +``` + +And install Rancher with Helm. 
Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata: + +``` +helm upgrade --install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.example.com \ + --set proxy=http://${proxy_host} \ + --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local +``` + +After waiting for the deployment to finish: + +``` +kubectl rollout status deployment -n cattle-system rancher +``` + +You can now navigate to `https://rancher.example.com` and start using Rancher. + +:::caution + +If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +::: + +### Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](../../../../reference-guides/installation-references/helm-chart-options.md) +- [Adding TLS secrets](../../resources/add-tls-secrets.md) +- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md new file mode 100644 index 0000000000..945dca55a6 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md @@ -0,0 +1,64 @@ +--- +title: '1. Set up Infrastructure' +weight: 100 +--- + +In this section, you will provision the underlying infrastructure for your Rancher management server with internete access through a HTTP proxy. + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will connect to the internet through an HTTP proxy. 
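+
+Optionally, once the nodes and proxy are in place, it can be worth confirming from each node that the proxy is reachable and can forward HTTPS traffic. This is only a sketch; it assumes the example proxy address `10.0.0.5:8888` used in the next section, so substitute your own:
+
+```
+# Ask the proxy to CONNECT to an external HTTPS endpoint and show the response headers
+curl -I -x http://10.0.0.5:8888 https://releases.rancher.com
+```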
+ +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +:::note Important: + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 3. 
Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + + +### [Next: Set up a Kubernetes cluster](install-kubernetes.md) diff --git a/content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md rename to docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md new file mode 100644 index 0000000000..4e9a3a1abd --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md @@ -0,0 +1,92 @@ +--- +title: Rolling Back Rancher Installed with Docker +weight: 1015 +--- + +If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade](upgrade-docker-installed-rancher.md). Rolling back restores: + +- Your previous version of Rancher. +- Your data backup created before upgrade. + +## Before You Start + +During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker pull rancher/rancher: +``` + +In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <PRIOR_RANCHER_VERSION> and <RANCHER_CONTAINER_NAME>![Placeholder Reference](/img/placeholder-ref-2.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | ------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that the backup is for. 
| +| `` | `9-27-18` | The date that the data container or backup was created. | +
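+
+A quick way to read these values off the host, rather than scanning the full `docker ps` output by eye, is a minimal sketch such as the following (the format string is standard `docker ps` templating):
+
+```
+# Show each running container's name and the image (including tag) it was started from
+docker ps --format "table {{.Names}}\t{{.Image}}"
+```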
+ +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Rolling Back Rancher + +If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. + +:::danger + +Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. + +::: + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. + + For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. + + ``` + docker pull rancher/rancher: + ``` + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + You can obtain the name for your Rancher container by entering `docker ps`. + +1. Move the backup tarball that you created during completion of [Docker Upgrade](upgrade-docker-installed-rancher.md) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Docker Upgrade](upgrade-docker-installed-rancher.md), it will have a name similar to (`rancher-data-backup--.tar.gz`). + +1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. + + ``` + docker run --volumes-from rancher-data \ + -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ + && tar zxvf /backup/rancher-data-backup--.tar.gz" + ``` + +1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. + ``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: + ``` + Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + + :::danger + + **_Do not_** stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. + + ::: + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. + +**Result:** Rancher is rolled back to its version and data state before upgrade. 
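+
+If you prefer to double-check from the shell before opening the browser, a small sketch like the following can confirm which image the restored server is running and let you watch it come up (it assumes the rolled-back container is the most recently created one on the host):
+
+```
+# Show the most recently created container, the image it runs, and its status
+docker ps -l --format "table {{.Names}}\t{{.Image}}\t{{.Status}}"
+
+# Follow its startup logs
+docker logs -f $(docker ps -lq)
+```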
diff --git a/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md new file mode 100644 index 0000000000..97db98432a --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md @@ -0,0 +1,407 @@ +--- +title: Upgrading Rancher Installed with Docker +weight: 1010 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The following instructions will guide you through upgrading a Rancher server that was installed with Docker. + +:::caution + +**Docker installs are not supported in production environments.** These instructions are provided for testing and development purposes only. If you have already deployed a Docker install in production and need to upgrade to a new Rancher version, we recommend [migrating to the Helm chart install](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) before upgrading. + +::: + +# Prerequisites + +- **Review the [known upgrade issues](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md#known-upgrade-issues)** section in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums](https://forums.rancher.com/c/announcements/12). Note that upgrades to or from any chart in the [rancher-alpha repository](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren’t supported. +- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version**. Follow the guide to [populate your private registry](../air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +# Placeholder Review + +During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). + +Here's an **example** of a command with a placeholder: + +``` +docker stop +``` + +In this command, `` is the name of your Rancher container. + +# Get Data for Upgrade Commands + +To obtain the data to replace the placeholders, run: + +``` +docker ps +``` + +Write down or copy this information before starting the upgrade. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | +| `` | `2018-12-19` | The date that the data container or backup was created. | +
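+
+If you like, you can capture these values in shell variables for the commands that follow. The snippet below is only an illustration: the variable names are arbitrary, and `festive_mestorf` is the example container name from the table above.
+
+```
+# Replace festive_mestorf with the container name shown by `docker ps`
+RANCHER_CONTAINER_NAME=festive_mestorf
+
+# Read the image reference the container was started from and keep only the tag
+RANCHER_CONTAINER_TAG=$(docker inspect --format '{{.Config.Image}}' "$RANCHER_CONTAINER_NAME" | cut -d: -f2)
+echo "$RANCHER_CONTAINER_TAG"
+```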
+ +You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +# Upgrade Outline + +During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: + +- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) +- [2. Create a backup tarball](#2-create-a-backup-tarball) +- [3. Pull the new Docker image](#3-pull-the-new-docker-image) +- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) +- [5. Verify the Upgrade](#5-verify-the-upgrade) +- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) + +# 1. Create a copy of the data from your Rancher server container + +1. Using a remote Terminal connection, log into the node running your Rancher server. + +1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data rancher/rancher: + ``` + +# 2. Create a backup tarball + +1. From the data container that you just created (rancher-data), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). + + This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. + + + ``` + docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** When you enter this command, a series of commands should run. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + + ``` + [rancher@ip-10-0-0-50 ~]$ ls + rancher-data-backup-v2.1.3-20181219.tar.gz + ``` + +1. Move your backup tarball to a safe location external from your Rancher server. + +# 3. Pull the New Docker Image + +Pull the image of the Rancher version that you want to upgrade to. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to. + +``` +docker pull rancher/rancher: +``` + +# 4. Start the New Rancher Server Container + +Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. + +:::danger + +**_Do not_** stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
+ +::: + +If you used a proxy, see [HTTP Proxy Configuration.](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) + +If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) + +If you are recording all transactions with the Rancher API, see [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +To see the command to use when starting the new Rancher server container, choose from the following options: + +- Docker Upgrade +- Docker Upgrade for Air Gap Installs + + + + +Select which option you had installed Rancher server + +### Option A: Default Self-Signed Certificate + +
+ Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: +``` + +Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +
+ +### Option B: Bring Your Own Certificate: Self-Signed + +
+ Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +:::note Reminder of the Cert Prerequisite: + +The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. + +::: + +Placeholder | Description +------------|------------- + `` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + --privileged \ + rancher/rancher: +``` + +Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +
+ +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
+ Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. + +:::note Reminder of the Cert Prerequisite: + +The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) + +::: + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + --privileged \ + rancher/rancher: \ + --no-cacerts +``` + +Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) +
+ +### Option D: Let's Encrypt Certificate + +
+ Click to expand + +:::caution + +Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +::: + +If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. + +:::note Reminder of the Cert Prerequisites: + +- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +::: + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. +`` | The domain address that you had originally started with + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: \ + --acme-domain +``` + +Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +
+ +
+ + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +When starting the new Rancher server container, choose from the following options: + +### Option A: Default Self-Signed Certificate + +
+ Click to expand
+
+If you have selected to use the Rancher generated self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container.
+
+Placeholder | Description
+------------|-------------
+`` | Your private registry URL and port.
+`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.
+
+```
+# Set a default private registry to be used in Rancher (CATTLE_SYSTEM_DEFAULT_REGISTRY)
+# and use the packaged Rancher system charts (CATTLE_SYSTEM_CATALOG=bundled).
+docker run -d --volumes-from rancher-data \
+  --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \
+  -e CATTLE_SYSTEM_CATALOG=bundled \
+  --privileged \
+  /rancher/rancher:
+```
+
+Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher)
+
+ +### Option B: Bring Your Own Certificate: Self-Signed + +
+ Click to expand
+
+If you have selected to bring your own self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container. You also need access to the same certificate that you originally installed with.
+
+:::note Reminder of the Cert Prerequisite:
+
+The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md)
+
+:::
+
+Placeholder | Description
+------------|-------------
+`` | The path to the directory containing your certificate files.
+`` | The path to your full certificate chain.
+`` | The path to the private key for your certificate.
+`` | The path to the certificate authority's certificate.
+`` | Your private registry URL and port.
+`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.
+
+```
+# Set a default private registry to be used in Rancher (CATTLE_SYSTEM_DEFAULT_REGISTRY)
+# and use the packaged Rancher system charts (CATTLE_SYSTEM_CATALOG=bundled).
+docker run -d --volumes-from rancher-data \
+  --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v //:/etc/rancher/ssl/cert.pem \
+  -v //:/etc/rancher/ssl/key.pem \
+  -v //:/etc/rancher/ssl/cacerts.pem \
+  -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \
+  -e CATTLE_SYSTEM_CATALOG=bundled \
+  --privileged \
+  /rancher/rancher:
+```
+Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher)
+
+ +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
+ Click to expand
+
+If you have selected to use a certificate signed by a recognized CA, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container. You also need access to the same certificates that you originally installed with.
+
+:::note Reminder of the Cert Prerequisite:
+
+The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md)
+
+:::
+
+Placeholder | Description
+------------|-------------
+`` | The path to the directory containing your certificate files.
+`` | The path to your full certificate chain.
+`` | The path to the private key for your certificate.
+`` | Your private registry URL and port.
+`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.
+
+:::note
+
+Include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher.
+
+:::
+
+```
+# Set a default private registry to be used in Rancher (CATTLE_SYSTEM_DEFAULT_REGISTRY)
+# and use the packaged Rancher system charts (CATTLE_SYSTEM_CATALOG=bundled).
+# --no-cacerts is passed after the image name because it is an argument to the container.
+docker run -d --volumes-from rancher-data \
+  --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v //:/etc/rancher/ssl/cert.pem \
+  -v //:/etc/rancher/ssl/key.pem \
+  -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \
+  -e CATTLE_SYSTEM_CATALOG=bundled \
+  --privileged \
+  /rancher/rancher: \
+  --no-cacerts
+```
+Privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher)
+
+ +
+
+ +**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. + +# 5. Verify the Upgrade + +Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. + +:::note Having network issues in your user clusters following upgrade? + +See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). + +::: + +# 6. Clean up Your Old Rancher Server Container + +Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. + +# Rolling Back + +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback](roll-back-docker-installed-rancher.md). diff --git a/docs/getting-started/installation-and-upgrade/resources/add-tls-secrets.md b/docs/getting-started/installation-and-upgrade/resources/add-tls-secrets.md new file mode 100644 index 0000000000..9cfdd52475 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/add-tls-secrets.md @@ -0,0 +1,46 @@ +--- +title: Adding TLS Secrets +weight: 2 +--- + +Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. + +Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. + +For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. +This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. + +Use `kubectl` with the `tls` secret type to create the secrets. + +``` +kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +:::note + +If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. + +::: + +# Using a Private CA Signed Certificate + +If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. + +Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. + +``` +kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem=./cacerts.pem +``` + +:::note + +The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. 
+ +::: + +# Updating a Private CA Certificate + +Follow the steps on [this page](update-rancher-certificate.md) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. \ No newline at end of file diff --git a/content/rancher/v2.6/en/installation/resources/bootstrap-password/_index.md b/docs/getting-started/installation-and-upgrade/resources/bootstrap-password.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/bootstrap-password/_index.md rename to docs/getting-started/installation-and-upgrade/resources/bootstrap-password.md diff --git a/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md new file mode 100644 index 0000000000..5dccca87fb --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md @@ -0,0 +1,110 @@ +--- +title: Choosing a Rancher Version +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements](helm-version-requirements.md) to choose a version of Helm to install Rancher. + +For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image**. + + + + +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. + +Refer to the [Helm version requirements](helm-version-requirements.md) to choose a version of Helm to install Rancher. + +### Helm Chart Repositories + +Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. + +| Type | Command to Add the Repo | Description of the Repo | +| -------------- | ------------ | ----------------- | +| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | +| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. 
|
+| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher, used to preview upcoming releases. These releases are not recommended for production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository are not supported, regardless of the other chart's repository. |
+
+
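+For example, to make the stable (production) charts available to your local Helm client, you might add the repo from the table above and then refresh your local chart cache (a minimal sketch; use the `rancher-latest` or `rancher-alpha` command instead if that is the channel you need):
+
+```plain
+helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+helm repo update
+```
+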
+Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository).
+
+:::note
+
+All charts in the `rancher-stable` repository correspond to a Rancher version that has been tagged as `stable`.
+
+:::
+
+### Helm Chart Versions
+
+Rancher Helm chart versions match the Rancher version (i.e., the chart's `appVersion`). Once you've added the repo, you can search it to list the available versions with the following command:
+    `helm search repo --versions`
+
+If you have several repos, you can narrow the search by specifying the repo and chart name, for example: `helm search repo rancher-stable/rancher --versions`.
+For more information, see https://helm.sh/docs/helm/helm_search_repo/.
+
+To fetch a specific chart version from your chosen repo, pass the `--version` parameter, as in the following example:
+    `helm fetch rancher-stable/rancher --version=2.4.8` + +### Switching to a Different Helm Chart Repository + +After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. + +:::note + +Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. + +::: + +{{< release-channel >}} + +1. List the current Helm chart repositories. + + ```plain + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + +2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. + + ```plain + helm repo remove rancher- + ``` + +3. Add the Helm chart repository that you want to start installing Rancher from. + + ```plain + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +4. Continue to follow the steps to [upgrade Rancher](../install-upgrade-on-a-kubernetes-cluster/upgrades.md) from the new Helm chart repository. + +
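+Taken together, the steps above might look like the following when switching from `rancher-latest` to `rancher-stable` (a sketch only; the actual repository names depend on which repo you originally added):
+
+```plain
+helm repo remove rancher-latest
+helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+helm repo update
+helm search repo rancher-stable/rancher --versions
+```
+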
+
+When performing [Docker installs](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher.
+
+### Server Tags
+
+Rancher Server is distributed as a Docker image, and each release has one or more tags attached to it. You can specify a tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used.
+
+| Tag | Description |
+| -------------------------- | ------ |
+| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. |
+| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. |
+| `rancher/rancher:` | You can install a specific version of Rancher by using the tag from a previous release. See what's available at DockerHub. |
+
+:::note
+
+- The `master` tag, or any tag with an `-rc` or similar suffix, is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported.
+- Want to install an alpha release for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release.
+
+:::
+
+
+
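+As a rough illustration of using a tag, a single-node Docker install pinned to the `stable` tag might be started as follows (a sketch only; refer to the Docker install documentation for the full set of options required for your Rancher version):
+
+```plain
+# Pull explicitly so a cached image is not reused, then start Rancher
+docker pull rancher/rancher:stable
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  --privileged \
+  rancher/rancher:stable
+```
+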
diff --git a/docs/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md b/docs/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md new file mode 100644 index 0000000000..a45fb23afa --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md @@ -0,0 +1,25 @@ +--- +title: About Custom CA Root Certificates +weight: 1 +--- + +If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). + +Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. + +To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. + +Examples of services that Rancher can access: + +- Catalogs +- Authentication providers +- Accessing hosting/cloud API when using Node Drivers + +## Installing with the custom CA Certificate + +For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: + +- [Docker install Custom CA certificate options](../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) + +- [Kubernetes install options for Additional Trusted CAs](../../../reference-guides/installation-references/helm-chart-options.md#additional-trusted-cas) + diff --git a/content/rancher/v2.6/en/installation/resources/helm-version/_index.md b/docs/getting-started/installation-and-upgrade/resources/helm-version-requirements.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/helm-version/_index.md rename to docs/getting-started/installation-and-upgrade/resources/helm-version-requirements.md diff --git a/docs/getting-started/installation-and-upgrade/resources/local-system-charts.md b/docs/getting-started/installation-and-upgrade/resources/local-system-charts.md new file mode 100644 index 0000000000..49d5b18af5 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/local-system-charts.md @@ -0,0 +1,15 @@ +--- +title: Setting up Local System Charts for Air Gapped Installations +weight: 120 +--- + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. + +In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag. + +# Using Local System Charts + +A local copy of `system-charts` has been packaged into the `rancher/rancher` container. 
To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. + +Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap installation](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) instructions for Docker and Helm installs. + diff --git a/docs/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/docs/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md new file mode 100644 index 0000000000..d7e111af1c --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md @@ -0,0 +1,263 @@ +--- +title: Updating the Rancher Certificate +weight: 10 +--- + +# Updating a Private CA Certificate + +Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. + +A summary of the steps is as follows: + +1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. +2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). +3. Update the Rancher installation using the Helm CLI. +4. Reconfigure the Rancher agents to trust the new CA certificate. +5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher. + +The details of these instructions are below. + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. Create/update the CA certificate secret resource + +If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca secret` in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). + +To create the initial secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem +``` + +To update an existing `tls-ca` secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 3. 
Reconfigure the Rancher deployment
+
+:::note
+
+Before proceeding, generate an API token in the Rancher UI (User > API & Keys) and save the Bearer Token, which you may need in step 4.
+
+:::
+
+This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`).
+
+It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate.
+
+To update the Helm deployment, you will need to use the same (`--set`) options that were used during the initial installation. Check them with:
+
+```
+$ helm get values rancher -n cattle-system
+```
+
+Also get the version string of the currently deployed Rancher chart:
+
+```
+$ helm ls -A
+```
+
+Upgrade the Helm application instance using the original configuration values, making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade.
+
+If the certificate was signed by a private CA, add the `--set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates.
+
+```
+helm upgrade rancher rancher-stable/rancher \
+ --namespace cattle-system \
+ --version \
+ --set hostname=rancher.my.org \
+ --set ingress.tls.source=secret \
+ --set ...
+```
+
+When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written to the `tls-ca` secret earlier.
+
+## 4. Reconfigure Rancher agents to trust the private CA
+
+This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true:
+
+- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`)
+- The root CA certificate for the new custom certificate has changed
+
+### Why is this step required?
+
+When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on the Rancher server side, the environment variable `CATTLE_CA_CHECKSUM` must be updated accordingly.
+
+### Which method should I choose?
+
+Method 1 is the easiest, but it requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (step 3).
+
+If the clusters have lost their connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.6/en/cluster-admin/cluster-access/ace/) enabled, then go with method 2.
+
+Method 3 can be used as a fallback if methods 1 and 2 are not feasible.
+
+### Method 1: Kubectl command
+
+For each cluster under Rancher management (except the `local` Rancher management cluster), run the following command using the kubeconfig file of the Rancher management cluster (RKE or K3s).
+
+```
+kubectl patch clusters.management.cattle.io -p '{"status":{"agentImage":"dummy"}}' --type merge
+```
+
+This command will cause all agent Kubernetes resources to be reconfigured with the checksum of the new certificate.
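+
+For instance, if the downstream cluster's management object is named `c-abc12` (a hypothetical ID; list the real ones first), the commands might look like:
+
+```
+# List the cluster objects to find the ID of each downstream cluster (c-abc12 is a made-up example)
+kubectl get clusters.management.cattle.io
+kubectl patch clusters.management.cattle.io c-abc12 -p '{"status":{"agentImage":"dummy"}}' --type merge
+```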
+ + +### Method 2: Manually update checksum + +Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: + +``` +$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp +$ sha256sum cacert.tmp | awk '{print $1}' +``` + +Using a Kubeconfig for each downstream cluster update the environment variable for the two agent deployments. + +``` +$ kubectl edit -n cattle-system ds/cattle-node-agent +$ kubectl edit -n cattle-system deployment/cattle-cluster-agent +``` + +### Method 3: Recreate Rancher agents + +With this method you are recreating the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. + +First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 + +Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: +https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b + +## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher + +Select 'Force Update' for the clusters within the [Continuous Delivery](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#accessing-fleet-in-the-rancher-ui) view of the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. + +### Why is this step required? + +Fleet agents in Rancher managed clusters store kubeconfig that is used to connect to the Rancher proxied kube-api in the fleet-agent secret of the fleet-system namespace. The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When changing the Rancher CA, this block needs to be updated for a successful connection of the fleet-agent to Rancher. + +# Updating from a Private CA Certificate to a Common Certificate + +:::note + +It is possible to perform the opposite procedure as shown above: you may change from a private certificate to a common, or non-private, certificate. The steps involved are outlined below. + +::: + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. Delete the CA certificate secret resource + +You will delete the `tls-ca secret` in the `cattle-system` namespace as it is no longer needed. You may also optionally save a copy of the `tls-ca secret` if desired. + +To save the existing secret: + +``` +kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml +``` + +To delete the existing `tls-ca` secret: + +``` +kubectl -n cattle-system delete secret tls-ca +``` + +## 3. 
Reconfigure the Rancher deployment + +:::note Important: + +Before proceeding, [generate an API token in the Rancher UI](../../../reference-guides/user-settings/api-keys.md#creating-an-api-key) (User > API & Keys). + +::: + +This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). + +It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. + +To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: + +``` +$ helm get values rancher -n cattle-system +``` + +Also get the version string of the currently deployed Rancher chart: + +``` +$ helm ls -A +``` + +Upgrade the Helm application instance using the original configuration values and making sure to specify the current chart version to prevent an application upgrade. + +Also make sure to read the documentation describing the initial installation using custom certificates. + +``` +helm upgrade rancher rancher-stable/rancher \ + --namespace cattle-system \ + --version \ + --set hostname=rancher.my.org \ + --set ... +``` + +On upgrade, you can either + +- remove `--set ingress.tls.source=secret \` from the Helm upgrade command, as shown above, or + +- remove the `privateCA` parameter or set it to `false` because the CA is valid: + +``` +set privateCA=false +``` + +## 4. Reconfigure Rancher agents for the non-private/common certificate + +`CATTLE_CA_CHECKSUM` environment variable on the downstream cluster agents should be removed or set to "" (an empty string). + +## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher + +Select 'Force Update' for the clusters within the [Continuous Delivery](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#accessing-fleet-in-the-rancher-ui) view of the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. + +### Why is this step required? + +Fleet agents in Rancher managed clusters store kubeconfig that is used to connect to the Rancher proxied kube-api in the fleet-agent secret of the fleet-system namespace. The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When changing the Rancher CA, this block needs to be updated for a successful connection of the fleet-agent to Rancher. diff --git a/docs/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md b/docs/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md new file mode 100644 index 0000000000..3cb767496a --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md @@ -0,0 +1,283 @@ +--- +title: Upgrading Cert-Manager +weight: 4 +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. 
[Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline.
+1. [Cert-manager is deprecating the `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/)
+
+To address these changes, this guide will do two things:
+
+1. Document the procedure for upgrading cert-manager
+1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data
+
+:::note Important:
+
+If you are upgrading cert-manager to the latest version from a version older than 1.5, follow the steps in [Option C](#option-c-upgrade-cert-manager-from-versions-15-and-below) below to do so. Note that you do not need to reinstall Rancher to perform this upgrade.
+
+:::
+
+# Upgrade Cert-Manager
+
+The namespace used in these instructions depends on the namespace that cert-manager is currently installed in. If it is in `kube-system`, use that namespace in the instructions below. You can verify this by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace that cert-manager is running in, as doing so can cause issues.
+
+In order to upgrade cert-manager, follow these instructions:
+
+### Option A: Upgrade cert-manager with Internet Access
+
+
+ Click to expand + +1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + :::note Important: + + If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + + ::: + +1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) + + ```plain + helm uninstall cert-manager + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed + + ```plain + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply --validate=false -f https://github.com/cert-manager/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + + ``` + + :::note + + If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + + ::: + +1. Create the namespace for cert-manager if needed + + ```plain + kubectl create namespace cert-manager + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
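+As an optional sanity check after the restore, you can list the same resource types that were backed up earlier and confirm they are present again:
+
+```plain
+kubectl get issuer,clusterissuer,certificates,certificaterequests --all-namespaces
+```
+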
+ +### Option B: Upgrade cert-manager in an Air-Gapped Environment + +
+ Click to expand + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://artifacthub.io/packages/helm/cert-manager/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + The Helm 3 command is as follows: + + ```plain + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + + The Helm 2 command is as follows: + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager (old and new) + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/cert-manager/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/cert-manager/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + :::note Important: + + If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + + ::: + +1. 
Delete the existing cert-manager installation + + ```plain + kubectl -n cert-manager \ + delete deployment,sa,clusterrole,clusterrolebinding \ + -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f cert-manager/cert-manager-crd-old.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + :::note Important: + + If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + + ::: + +1. Create the namespace for cert-manager + + ```plain + kubectl create namespace cert-manager + ``` + +1. Install cert-manager + + ```plain + kubectl -n cert-manager apply -R -f ./cert-manager + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
+ +### Option C: Upgrade cert-manager from Versions 1.5 and Below + +
+ Click to expand + +Previously, in order to upgrade cert-manager from an older version, an uninstall and reinstall of Rancher was recommended. Using the method below, you may upgrade cert-manager without those additional steps in order to better preserve your production environment: + +1. Install `cmctl`, the cert-manager CLI tool, using [the installation guide](https://cert-manager.io/docs/usage/cmctl/#installation). + +1. Ensure that any cert-manager custom resources that may have been stored in etcd at a deprecated API version get migrated to v1: + + ``` + cmctl upgrade migrate-api-version + ``` + Refer to the [API version migration docs](https://cert-manager.io/docs/usage/cmctl/#migrate-api-version) for more information. Please also see the [docs to upgrade from 1.5 to 1.6](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/) and the [docs to upgrade from 1.6. to 1.7](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/) if needed. + +1. Upgrade cert-manager to v1.7.1 with a normal `helm upgrade`. You may go directly from version 1.5 to 1.7 if desired. + +1. Follow the Helm tutorial to [update the API version of a release manifest](https://helm.sh/docs/topics/kubernetes_apis/#updating-api-versions-of-a-release-manifest). The chart release name is `release_name=rancher` and the release namespace is `release_namespace=cattle-system`. + +1. In the decoded file, search for `cert-manager.io/v1beta1` and **replace it** with `cert-manager.io/v1`. + +1. Upgrade Rancher normally with `helm upgrade`. + +
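+For step 3 above, the cert-manager upgrade itself is an ordinary Helm upgrade. Assuming cert-manager was installed from the Jetstack repository into the `cert-manager` namespace (as in Option A), it might look roughly like this:
+
+```plain
+# Assumes the jetstack repo is already configured and cert-manager runs in the cert-manager namespace
+helm repo update
+helm upgrade cert-manager jetstack/cert-manager \
+  --namespace cert-manager \
+  --version v1.7.1
+```
+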
+ +### Verify the Deployment + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +## Cert-Manager API change and data migration + +--- +_New in v2.6.4_ + +Rancher now supports cert-manager versions 1.6.2 and 1.7.1. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. To read more, see the [cert-manager docs](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#4-install-cert-manager). For instructions on upgrading cert-manager from version 1.5 to 1.6, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/). For instructions on upgrading cert-manager from version 1.6 to 1.7, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/). + +--- + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). 
+ diff --git a/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md b/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md new file mode 100644 index 0000000000..44cc5c7286 --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md @@ -0,0 +1,140 @@ +--- +title: Upgrading and Rolling Back Kubernetes +weight: 70 +--- + +Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. + +Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/). + +This section covers the following topics: + +- [New Features](#new-features) +- [Tested Kubernetes Versions](#tested-kubernetes-versions) +- [How Upgrades Work](#how-upgrades-work) +- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) +- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) +- [Rolling Back](#rolling-back) +- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) + - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) + - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) + - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) + - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) +- [Troubleshooting](#troubleshooting) + +# Tested Kubernetes Versions + +Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/) + +# How Upgrades Work + +RKE v1.1.0 changed the way that clusters are upgraded. + +In this section of the [RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. + + +# Recommended Best Practice for Upgrades + +When upgrading the Kubernetes version of a cluster, we recommend that you: + +1. Take a snapshot. +1. Initiate a Kubernetes upgrade. +1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade kubernetes version before restoring the etcd snapshot. + +The restore operation will work on a cluster that is not in a healthy or active state. 
+ +# Upgrading the Kubernetes Version + +:::note Prerequisites: + +- The options below are available only for [Rancher-launched RKE Kubernetes clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) and [Registered K3s Kubernetes clusters.](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#additional-features-for-registered-k3s-clusters) +- Before upgrading Kubernetes, [back up your cluster.](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md) + +::: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to upgrade and click **⋮ > Edit Config**. +1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. +1. Click **Save**. + +**Result:** Kubernetes begins upgrading for the cluster. + +# Rolling Back + +A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: + +- [Backing up a cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#how-snapshots-work) +- [Restoring a cluster from backup](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md#restoring-a-cluster-from-a-snapshot) + +# Configuring the Upgrade Strategy + +As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability) are met. + +The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. + +### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI + +From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. + +By default, the maximum number of unavailable worker is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. + +To change the default number or percentage of worker nodes, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to upgrade and click **⋮ > Edit Config**. +1. In the **Upgrade Strategy** tab, enter the **Worker Concurrency** as a fixed number or percentage. To get this number, you can take the number of nodes in your cluster and subtract the max unavailable nodes. +1. Click **Save**. + +**Result:** The cluster is updated to use the new upgrade strategy. + +### Enabling Draining Nodes During Upgrades from the Rancher UI + +By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. 
If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. + +To enable draining each node during a cluster upgrade, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to enable node draining and click **⋮ > Edit Config**. +1. Click **⋮ > Edit**. +1. In the **Upgrade Strategy** tab, go to the **Drain nodes** field and click **Yes**. Node draining is configured separately for control plane and worker nodes. +1. Configure the options for how pods are deleted. For more information about each option, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md#aggressive-and-safe-draining-options) +1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. +1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. +1. Click **Save**. + +**Result:** The cluster is updated to use the new upgrade strategy. + +:::note + +As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show state of etcd and controlplane as drained, even though they are being drained. + +::: + +### Maintaining Availability for Applications During Upgrades + +_Available as of RKE v1.1.0_ + +In [this section of the RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. + +### Configuring the Upgrade Strategy in the cluster.yml + +More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. + +For details, refer to [Configuring the Upgrade Strategy](https://rancher.com/docs/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. + +# Troubleshooting + +If a node doesn't come up after an upgrade, the `rke up` command errors out. + +No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. + +If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. + +A failed node could be in many different states: + +- Powered off +- Unavailable +- User drains a node while upgrade is in process, so there are no kubelets on the node +- The upgrade itself failed + +If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. 
diff --git a/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md new file mode 100644 index 0000000000..18d7dd505a --- /dev/null +++ b/docs/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -0,0 +1,89 @@ +--- +title: Upgrading Kubernetes without Upgrading Rancher +weight: 30 +--- + +The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. + +:::note + +The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. + +::: + +Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates**. Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. + +This table below describes the CRDs that are affected by the periodic data sync. + +:::note + +Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. + +::: + +| Resource | Description | Rancher API URL | +|----------|-------------|-----------------| +| System Images | List of system images used to deploy Kubernetes through RKE. 
| `/v3/rkek8ssystemimages` | +| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | +| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | + +Administrators might configure the RKE metadata settings to do the following: + +- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher +- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub +- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher + +### Refresh Kubernetes Metadata + +The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) + +To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **Drivers**. +1. Click **Refresh Kubernetes Metadata**. + +You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. + +### Configuring the Metadata Synchronization + +:::caution + +Only administrators can change these settings. + +::: + +The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. + +The way that the metadata is configured depends on the Rancher version. + +To edit the metadata config in Rancher, + +1. In the upper left corner, click **☰ > Global Settings**. +1. Go to the **rke-metadata-config** section. Click **⋮ > Edit Setting**. +1. You can optionally fill in the following parameters: + + - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. + - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. +1. Click **Save**. + +If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) + +However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. 
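+If you prefer the API route mentioned above, you can inspect the current values of this setting with an authenticated request (a sketch only; `rancher.example.com` and the bearer token are placeholders for your own Rancher hostname and API key):
+
+```plain
+# Both the hostname and the token below are hypothetical placeholders
+curl -sk -H "Authorization: Bearer token-abc12:mysecretkey" \
+  https://rancher.example.com/v3/settings/rke-metadata-config
+```
+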
+ +### Air Gap Setups + +Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) + +If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. + +To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) + +After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. + +1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +1. Download the OS specific image lists for Linux or Windows. +1. Download `rancher-images.txt`. +1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. + +**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. diff --git a/docs/getting-started/introduction/overview.md b/docs/getting-started/introduction/overview.md new file mode 100644 index 0000000000..6961919689 --- /dev/null +++ b/docs/getting-started/introduction/overview.md @@ -0,0 +1,66 @@ +--- +title: Overview +weight: 1 +--- + +Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. + +# Run Kubernetes Everywhere + +Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. + +# Meet IT Requirements + +Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: + +- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. +- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. 
+- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. + +# Empower DevOps Teams + +Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. + +The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. + +![Platform](/img/platform.png) + +# Features of the Rancher API Server + +The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: + +### Authorization and Role-Based Access Control + +- **User management:** The Rancher API server [manages user identities](../../pages-for-subheaders/about-authentication.md) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. +- **Authorization:** The Rancher API server manages [access control](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) and [security](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) policies. + +### Working with Kubernetes + +- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on existing nodes, or perform [Kubernetes upgrades.](../installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts](../../pages-for-subheaders/helm-charts-in-rancher.md) that make it easy to repeatedly deploy applications. +- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration](../../pages-for-subheaders/manage-projects.md) and for [managing applications within projects.](../../pages-for-subheaders/kubernetes-resources-setup.md) +- **Pipelines:** Setting up a [pipeline](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. +- **Istio:** Our [integration with Istio](../../pages-for-subheaders/istio.md) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +### Working with Cloud Infrastructure + +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) in all clusters. 
+- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) and [persistent storage](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in the cloud. + +### Cluster Visibility + +- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. +- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. +- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. + +# Editing Downstream Clusters with Rancher + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. + +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.](../../pages-for-subheaders/cluster-configuration.md) + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../../shared-files/_cluster-capabilities-table.md'; + + diff --git a/docs/getting-started/introduction/what-are-divio-docs.md b/docs/getting-started/introduction/what-are-divio-docs.md new file mode 100644 index 0000000000..0f2a412eae --- /dev/null +++ b/docs/getting-started/introduction/what-are-divio-docs.md @@ -0,0 +1,112 @@ +--- +title: What Are Divio Docs? +--- + +The [Divio documentation system](https://documentation.divio.com/) is a software documentation paradigm that is based on functionality and the premise that the best documentation is specific, concise, and purposeful. Divio traditionally consists of four main categories: tutorials, how-to guides, reference guides, and explanations. + +In our docs, we have used this guideline to craft a unique set of docs which include [getting started](../../getting-started.md), [how-to guides](../../how-to-guides.md) (including [new](../../pages-for-subheaders/new-user-guides.md) and [advanced user guides](../../pages-for-subheaders/advanced-user-guides.md)), [reference guides](../../reference-guides.md), [explanations](../../explanations.md), an [FAQ section](../../faq.md), [troubleshooting tips](../../troubleshooting.md), and the ability to [contribute to Rancher](../../contribute-to-rancher.md). 
+
+- [Getting Started](#getting-started)
+- [How-to Guides](#how-to-guides)
+  - [New User Guides](#new-user-guides)
+  - [Advanced User Guides](#advanced-user-guides)
+- [Reference Guides](#reference-guides)
+- [Explanations](#explanations)
+  - [Integrations in Rancher](#integrations-in-rancher)
+- [Other Docs Categories](#other-docs-categories)
+  - [FAQ](#faq)
+  - [Troubleshooting](#troubleshooting)
+  - [Contribute to Rancher](#contribute-to-rancher)
+- [Overlapping of Categories](#overlapping-of-categories)
+- [New Structure Goals](#new-structure-goals)
+
+
+## Getting Started
+
+To get up and running with Rancher quickly, we have included a **Getting Started** section.
+
+The goal of this section is to help users deploy Rancher and workloads, and to install or upgrade Rancher quickly and effectively.
+
+Please see the [introduction](../docs/pages-for-subheaders/introduction.md), [quick start guides](../docs/pages-for-subheaders/quick-start-guides.md), and the [installation and upgrade](../docs/pages-for-subheaders/installation-and-upgrade.md) sections for more.
+
+## How-to Guides
+
+How-to guides serve to describe practical steps for users to accomplish some task. In Rancher, we break down how-to guides further into [new user guides](#new-user-guides) and [advanced user guides](#advanced-user-guides).
+
+### New User Guides
+
+New user guides, also known as tutorials, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing".
+
+The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides help users get the work done and see the promised results immediately.
+
+The average Rancher user has a level of technical skill that is above the level of "beginner"; however, the new user guides are designed to help new users and seasoned Rancher customers equally. This is accomplished by using a combination of high-level and technical language to introduce topics and guide the user through general tasks that are essential for every Rancher user to know.
+
+A good example of a new user guide can be found [here](../../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md).
+
+### Advanced User Guides
+
+Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working.
+
+It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem.
+
+A good example of an advanced user guide can be found [here](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md).
+
+## Reference Guides
+
+Reference guides are technical descriptions of processes or products that users can study. Reference guides are designed to be "information-oriented" and their primary function is to describe.
+
+These docs may also include some usage steps in the course of description; however, their purpose is not to explain concepts nor to outline steps to achieve tasks.
+
+The users who utilize reference guides are knowledgeable about the Rancher product and how to use it. They benefit from detailed descriptions that they can consult whenever they need to look up the specifics of usage.
+
+Good examples of Rancher reference guides are the [Rancher Manager architecture](../../pages-for-subheaders/rancher-manager-architecture.md) and [cluster configuration guides](../../pages-for-subheaders/cluster-configuration.md).
+
+## Explanations
+
+Explanation docs are concerned primarily with providing theoretical knowledge for the "why" behind a task or a topic. Explanations are "understanding-oriented" in nature and will clarify a topic in order to broaden the user's knowledge. In this section, users can find additional context and background, alternatives or even opinions on topics, and often historical reasons, constraints, and insights into why a process works the way that it does.
+
+Explanatory docs do not instruct the user how to do something, as in tutorials and how-to guides, nor do they give detailed descriptions as references do. Explanations serve to give substance and background on both simple and complex topics.
+
+For our new docs, we are working to build up this section, as most of our previous documentation was process-oriented rather than discussion-oriented. Currently, we feature [Integrations in Rancher](../../pages-for-subheaders/integrations-in-rancher.md) to discuss our integrated products.
+
+### Integrations in Rancher
+
+Over time, Rancher has accrued several products and projects that have been integrated into the Rancher UI. To assist users in learning more about these [integrations](../../pages-for-subheaders/integrations-in-rancher.md), this subsection has been added under **explanations**.
+
+Examples of some of these integrations are [Harvester](../../explanations/integrations-in-rancher/harvester.md) and [NeuVector](../../explanations/integrations-in-rancher/neuvector.md).
+
+## Other Docs Categories
+
+### FAQ
+
+Our [FAQ](../../faq.md) section is designed to answer the questions our users most often ask about Rancher v2.x. The nature of these questions may be technical or non-technical.
+
+We work to continually add to and enhance this section; check back frequently for updates.
+
+### Troubleshooting
+
+The [troubleshooting section](../../troubleshooting.md) is designed to help both new and existing Rancher users troubleshoot known issues that they may encounter when using Rancher.
+
+We work to continually add to and enhance this section; check back frequently for updates.
+
+### Contribute to Rancher
+
+The Rancher Manager documentation is always a work in progress; the docs work best when they are constantly examined, updated, and improved upon. To do this more effectively, we call upon the community to assist us.
+ +This [contributing to Rancher section](../../contribute-to-rancher.md) will instruct users on the repositories used for Rancher, how to build the repositories, and what information is needed when filing an issue or creating a pull request. + +We review all contributions frequently and will provide feedback to contributors promptly. + +## Overlapping of Categories + +You may have noticed that within the confines of each category - new user guides, advanced user guides, references - there is some overlap. This is true because the flow of information is fluid, and so often docs will include data that could logically fall under more than one category. Although there is the tendency for our docs to overlap somewhat, if we keep in mind the primary functions of each category and work to make those distinct, then the documentation will be much clearer and useful for users. + +## New Structure Goals + +Our previous Rancher documentation focused on individual features and topics; the new Divio paradigm prioritizes function and cohesion. + +Because the previous docs structure was not based on the Divio paradigm, not every doc as it is written currently will fall neatly into a user guide or a reference, for example. Some docs may include elements of several kind of documentation functions. + +As such, we have worked to move our existing documentation into the new paradigm based on each doc's function. Moving forward, we will be creating, rewriting, and reshaping our docs as needed to more closely align with the Divio structure, purpose, and its design concepts. + +Ultimately, the finished product will much more cohesively and effectively assist our users by emphasizing functionality over individual topic or feature-based docs. \ No newline at end of file diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/amazon-aws-marketplace-qs/_index.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/deployment/amazon-aws-marketplace-qs/_index.md rename to docs/getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/aws.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/aws.md new file mode 100644 index 0000000000..d382b354b2 --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/aws.md @@ -0,0 +1,94 @@ +--- +title: Rancher AWS Quick Start Guide +description: Read this step by step Rancher AWS guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on AWS in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +:::caution + +Deploying to Amazon AWS will incur charges. + +::: + +- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. 
+- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. +- [IAM Policy created](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start): Defines the permissions an account attached with this policy has. +- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. + +### Example IAM Policy + +The AWS module just creates an EC2 KeyPair, an EC2 SecurityGroup and an EC2 instance. A simple policy would be: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + } + ] +} +``` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the AWS folder containing the terraform files by executing `cd quickstart/rancher/aws`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `aws_access_key` - Amazon AWS Access Key + - `aws_secret_key` - Amazon AWS Secret Key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/aws) for more information. +Suggestions include: + - `aws_region` - Amazon AWS region, choose the closest instead of the default (`us-east-1`) + - `prefix` - Prefix for all created resources + - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget + - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/aws`. + +##### Result + +Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +## What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
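+If you need the connection details again later, or want to inspect the Rancher server VM directly, the values printed by `terraform apply` can be recovered from the Terraform state. The sketch below uses standard Terraform commands with the output names shown in step 7; the `ubuntu` login user is an assumption that depends on the AMI the quickstart selects, so adjust it if needed. Run the commands from the `quickstart/rancher/aws` folder.
+
+```
+# Re-print the quickstart outputs (Rancher URL and node IPs) from the saved state
+terraform output
+
+# SSH to the Rancher server node with the generated key from step 9;
+# the login user may differ depending on the image used
+ssh -i id_rsa ubuntu@$(terraform output -raw rancher_node_ip)
+```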
+
+## Destroying the Environment
+
+1. From the `quickstart/rancher/aws` folder, execute `terraform destroy --auto-approve`.
+
+2. Wait for confirmation that all resources have been destroyed.
diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/azure.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/azure.md
new file mode 100644
index 0000000000..39b77c8aaa
--- /dev/null
+++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/azure.md
@@ -0,0 +1,83 @@
+---
+title: Rancher Azure Quick Start Guide
+description: Read this step by step Rancher Azure guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached.
+weight: 115
+---
+
+The following steps will quickly deploy a Rancher server on Azure in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached.
+
+:::caution
+
+The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md).
+
+:::
+
+## Prerequisites
+
+:::caution
+
+Deploying to Microsoft Azure will incur charges.
+
+:::
+
+- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes.
+- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet.
+- [Microsoft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant.
+- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret.
+- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure.
+
+
+## Getting Started
+
+1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`.
+
+2. Go into the Azure folder containing the terraform files by executing `cd quickstart/rancher/azure`.
+
+3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`.
+
+4. Edit `terraform.tfvars` and customize the following variables:
+   - `azure_subscription_id` - Microsoft Azure Subscription ID
+   - `azure_client_id` - Microsoft Azure Client ID
+   - `azure_client_secret` - Microsoft Azure Client Secret
+   - `azure_tenant_id` - Microsoft Azure Tenant ID
+   - `rancher_server_admin_password` - Admin password for created Rancher server
+
+5. **Optional:** Modify optional variables within `terraform.tfvars`.
+See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/azure) for more information. +Suggestions include: + - `azure_location` - Microsoft Azure region, choose the closest instead of the default (`East US`) + - `prefix` - Prefix for all created resources + - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget + - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster + - `windows_admin_password` - The admin password of the windows worker node + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/azure`. + +#### Result + +Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/rancher/azure` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md new file mode 100644 index 0000000000..ac8d7c156a --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md @@ -0,0 +1,75 @@ +--- +title: Rancher DigitalOcean Quick Start Guide +description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 120 +--- +The following steps will quickly deploy a Rancher server on DigitalOcean in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +:::caution + +Deploying to DigitalOcean will incur charges. + +::: + +- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. 
+- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/rancher/do`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `do_token` - DigitalOcean access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/do) for more information. +Suggestions include: + - `do_region` - DigitalOcean region, choose the closest instead of the default (`nyc1`) + - `prefix` - Prefix for all created resources + - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/do`. + +#### Result + +Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/rancher/do` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. 
diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md
new file mode 100644
index 0000000000..8d8c45826a
--- /dev/null
+++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md
@@ -0,0 +1,130 @@
+---
+title: Rancher Equinix Metal Quick Start
+weight: 250
+---
+
+## This tutorial walks you through the following:
+
+- Provisioning an Equinix Metal server
+- Installation of Rancher 2.x
+- Creation of your first cluster
+- Deployment of an application, Nginx
+
+:::caution
+
+The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. The Docker install is not recommended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md).
+
+:::
+
+## Quick Start Outline
+
+This Quick Start Guide is divided into different tasks for easier consumption.
+
+1. [Provision an Equinix Metal Host](#1-provision-an-equinix-metal-host)
+
+1. [Install Rancher](#2-install-rancher)
+
+1. [Log In](#3-log-in)
+
+1. [Create the Cluster](#4-create-the-cluster)
+
+
+## Prerequisites
+
+- An [Equinix Metal account](https://metal.equinix.com/developers/docs/accounts/users/)
+- An [Equinix Metal project](https://metal.equinix.com/developers/docs/accounts/projects/)
+
+### 1. Provision an Equinix Metal Host
+
+Begin by deploying an Equinix Metal host. Equinix Metal servers can be provisioned through the Equinix Metal console, API, or CLI. You can find instructions for each deployment method in the [Equinix Metal deployment documentation](https://metal.equinix.com/developers/docs/deploy/on-demand/). You can find additional documentation on Equinix Metal server types and pricing below.
+  - [Equinix Metal Server Types](https://metal.equinix.com/developers/docs/servers/about/)
+  - [Equinix Metal Pricing](https://metal.equinix.com/developers/docs/servers/server-specs/)
+
+:::note Notes:
+
+- When provisioning a new Equinix Metal server via the CLI or API, you will need to provide the following information: project-id, plan, metro, and operating-system.
+- When using a cloud-hosted virtual machine, you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud host's documentation for information regarding port configuration.
+- For a full list of port requirements, refer to [Docker Installation](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md).
+- Provision the host according to our [Requirements](../../../pages-for-subheaders/installation-requirements.md).
+
+:::
+### 2. Install Rancher
+
+To install Rancher on your Equinix Metal host, connect to it and then use a shell to install.
+
+1. Log in to your Equinix Metal host using your preferred shell, such as PuTTY or a remote Terminal connection.
+
+2. From your shell, enter the following command:
+
+   ```
+   sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher
+   ```
+
+**Result:** Rancher is installed.
+
+### 3. Log In
+
+Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations.
+
+1. Open a web browser and enter the IP address of your host: `https://<SERVER_IP>`.
+
+   Replace `<SERVER_IP>` with your host IP address.
+
+2. When prompted, create a password for the default `admin` account.
+
+3. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.

+   If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster.
+
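+Before moving on to cluster creation, it can help to confirm that the Rancher container started in step 2 is still running and has finished booting. This is an optional check using standard Docker commands; replace `<container-id>` with the ID that `docker ps` shows for the `rancher/rancher` container.
+
+```
+# List running containers and confirm the rancher/rancher container is "Up"
+docker ps
+
+# Follow the Rancher server logs until the UI becomes reachable (Ctrl+C to stop)
+docker logs -f <container-id>
+```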
+ +### 4. Create the Cluster + +Welcome to Rancher! You are now able to create your first Kubernetes cluster. + +In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. + +1. Click **☰ > Cluster Management**. +1. From the **Clusters** page, click **Create**. +2. Choose **Custom**. + +3. Enter a **Cluster Name**. + +4. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. + +5. Click **Next**. + +6. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. + +7. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. + +8. Skip the **Labels** stuff. It's not important for now. + +9. Copy the command displayed on screen to your clipboard. + +10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. + +11. When you finish running the command on your Linux host, click **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +#### Finished + +Congratulations! You have created your first cluster. + +#### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md new file mode 100644 index 0000000000..aed01fd328 --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md @@ -0,0 +1,77 @@ +--- +title: Rancher GCP Quick Start Guide +description: Read this step by step Rancher GCP guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 130 +--- +The following steps will quickly deploy a Rancher server on GCP in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +:::caution + +Deploying to Google GCP will incur charges. + +::: + +- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. +- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. 
+- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the GCP folder containing the terraform files by executing `cd quickstart/rancher/gcp`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `gcp_account_json` - GCP service account file path and file name + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/gcp) for more information. +Suggestions include: + - `gcp_region` - Google GCP region, choose the closest instead of the default (`us-east4`) + - `gcp_zone` - Google GCP zone, choose the closest instead of the default (`us-east4-a`) + - `prefix` - Prefix for all created resources + - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/gcp`. + +#### Result + +Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/rancher/gcp` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. 
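+The prerequisites above call for a GCP service account and a JSON key file (referenced by `gcp_account_json` in `terraform.tfvars`). If you prefer the CLI over the console, a key can be created with `gcloud`, roughly as follows; the account name, project ID placeholder, and the broad `roles/editor` role are illustrative assumptions, not requirements of the quickstart.
+
+```
+# Create a service account in your project (names are placeholders)
+gcloud iam service-accounts create rancher-quickstart --project=<PROJECT_ID>
+
+# Grant it permission to create the quickstart resources (scope down as appropriate)
+gcloud projects add-iam-policy-binding <PROJECT_ID> \
+  --member="serviceAccount:rancher-quickstart@<PROJECT_ID>.iam.gserviceaccount.com" \
+  --role="roles/editor"
+
+# Download the JSON key referenced by gcp_account_json
+gcloud iam service-accounts keys create key.json \
+  --iam-account=rancher-quickstart@<PROJECT_ID>.iam.gserviceaccount.com
+```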
diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md new file mode 100644 index 0000000000..f93a1ef1ce --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md @@ -0,0 +1,140 @@ +--- +title: Helm CLI Quick Start +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +These instructions capture a quick way to set up a proof-of-concept Rancher installation. + +These instructions assume you have a Linux virtual machine that you will communicate with from your local workstation. Rancher will be installed on the Linux machine. You will need to retrieve the IP address of that machine so that you can access Rancher from your local workstation. Rancher is designed to manage Kubernetes clusters remotely, so any Kubernetes cluster that Rancher manages in the future will also need to be able to reach this IP address. + +We don't recommend installing Rancher locally because it creates a networking problem. Installing Rancher on localhost does not allow Rancher to communicate with downstream Kubernetes clusters, so on localhost you wouldn't be able to test Rancher's cluster provisioning or cluster management functionality. + +Your Linux machine can be anywhere. It could be an Amazon EC2 instance, a Digital Ocean droplet, or an Azure virtual machine, to name a few examples. Other Rancher docs often use 'node' as a generic term for all of these. One possible way to deploy a Linux machine is by setting up an Amazon EC2 instance as shown in [this tutorial](../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md). + +The full installation requirements are [here](../../../pages-for-subheaders/installation-requirements.md). + + +## Install K3s on Linux + +Install a K3s cluster by running this command on the Linux machine: + +``` +curl -sfL https://get.k3s.io | sh -s - server +``` + +Save the IP of the Linux machine. + +## Save the kubeconfig to your workstation + +The kubeconfig file is important for accessing the Kubernetes cluster. Copy the file at `/etc/rancher/k3s/k3s.yaml` from the Linux machine and save it to your local workstation in the directory `~/.kube/config`. One way to do this is by using the `scp` tool and run this command on your local machine: + + + + +``` +scp root@:/etc/rancher/k3s/k3s.yaml ~/.kube/config +``` + + + + +By default, "scp" is not a recognized command, so we need to install a module first. + +In Windows Powershell: + +``` +Find-Module Posh-SSH +Install-Module Posh-SSH + +## Get the remote kubeconfig file +scp root@:/etc/rancher/k3s/k3s.yaml $env:USERPROFILE\.kube\config +``` + + + + +## Edit the Rancher server URL in the kubeconfig + +In the kubeconfig file, you will need to change the value of the `server` field to `:6443`. The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443. This edit is needed so that when you run Helm or kubectl commands from your local workstation, you will be able to communicate with the Kubernetes cluster that Rancher will be installed on. + + + + +One way to open the kubeconfig file for editing is to use Vim: + +``` +vi ~/.kube/config +``` + +Press `i` to put Vim in insert mode. To save your work, press `Esc`. Then press `:wq` and press `Enter`. 
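+If you would rather script the change than edit the file by hand, the same edit can be made with `sed`. This is a sketch under two assumptions: the copied K3s kubeconfig still points at the default `https://127.0.0.1:6443` address, and you are using GNU `sed` (on macOS, use `sed -i ''` instead of `sed -i`). Substitute your Linux machine's IP for the placeholder.
+
+```
+# Show the current API server address in the copied kubeconfig
+grep 'server:' ~/.kube/config
+
+# Point it at the Linux machine instead of the K3s default loopback address
+sed -i 's#https://127.0.0.1:6443#https://<IP_OF_LINUX_MACHINE>:6443#' ~/.kube/config
+```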
+
+In Windows Powershell, you can use `notepad.exe` for editing the kubeconfig file:
+
+```
+notepad.exe $env:USERPROFILE\.kube\config
+```
+
+Once edited, either press `Ctrl+S` or go to `File > Save` to save your work.
+
+## Install Rancher with Helm
+
+Then from your local workstation, run the following commands. You will need to have [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) and [helm](https://helm.sh/docs/intro/install/) installed.
+
+```
+helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
+
+kubectl create namespace cattle-system
+
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.crds.yaml
+
+helm repo add jetstack https://charts.jetstack.io
+
+helm repo update
+
+helm install cert-manager jetstack/cert-manager \
+  --namespace cert-manager \
+  --create-namespace \
+  --version v1.7.1
+
+# Windows Powershell
+helm install cert-manager jetstack/cert-manager `
+  --namespace cert-manager `
+  --create-namespace `
+  --version v1.7.1
+```
+
+The final command to install Rancher is below. The command requires a domain name that forwards traffic to the Linux machine. For the sake of simplicity in this tutorial, you can use a fake domain name to create your proof-of-concept. An example of a fake domain name would be `.sslip.io`.
+
+```
+helm install rancher rancher-latest/rancher \
+  --namespace cattle-system \
+  --set hostname=.sslip.io \
+  --set replicas=1 \
+  --set bootstrapPassword=
+
+# Windows Powershell
+helm install rancher rancher-latest/rancher `
+  --namespace cattle-system `
+  --set hostname=.sslip.io `
+  --set replicas=1 `
+  --set bootstrapPassword=
+```
+
+Now if you navigate to `.sslip.io` in a web browser, you should see the Rancher UI.
+
+To make these instructions simple, we used a fake domain name and self-signed certificates to do this installation. Therefore, you will probably need to add a security exception to your web browser to see the Rancher UI. Note that for production installs, you would need a high-availability setup with a load balancer, a real domain name, and real certificates.
+
+These instructions also left out the full installation requirements and other installation options. If you have any issues with these steps, refer to the full [Helm CLI installation docs.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md)
+
+To launch new Kubernetes clusters with your new Rancher server, you may need to set up cloud credentials in Rancher. For more information, see [Launching Kubernetes clusters with Rancher.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)
diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md
new file mode 100644
index 0000000000..8347bcecfa
--- /dev/null
+++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md
@@ -0,0 +1,75 @@
+---
+title: Rancher Hetzner Cloud Quick Start Guide
+description: Read this step by step Rancher Hetzner Cloud guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached.
+weight: 140 +--- +The following steps will quickly deploy a Rancher server on Hetzner Cloud in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +:::caution + +Deploying to Hetzner Cloud will incur charges. + +::: + +- [Hetzner Cloud Account](https://www.hetzner.com): You will require an account on Hetzner as this is where the server and cluster will run. +- [Hetzner API Access Key](https://docs.hetzner.cloud/#getting-started): Use these instructions to create a Hetzner Cloud API Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to Hetzner. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the Hetzner folder containing the terraform files by executing `cd quickstart/rancher/hcloud`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `hcloud_token` - Hetzner API access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Hetzner Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/hcloud) for more information. +Suggestions include: + - `prefix` - Prefix for all created resources + - `instance_type` - Instance type, minimum required is `cx21` + - `hcloud_location` - Hetzner Cloud location, choose the closest instead of the default (`fsn1`) + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/hcloud`. + +#### Result + +Two Kubernetes clusters are deployed into your Hetzner account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). 
+ +## Destroying the Environment + +1. From the `quickstart/rancher/hcloud` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md new file mode 100644 index 0000000000..9f41b777e0 --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md @@ -0,0 +1,76 @@ +--- +title: Rancher Outscale Quick Start Guide +description: Read this step by step Rancher Outscale guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 140 +--- +The following steps will quickly deploy a Rancher server on Outscale in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +:::note + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +:::caution + +Deploying to Outscale will incur charges. + +::: + +- [Outscale Account](https://en.outscale.com/): You will require an account on Outscale as this is where the server and cluster will run. +- [Outscale Access Key](https://docs.outscale.com/en/userguide/About-Access-Keys.html): Use these instructions to create an Outscale Access Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Outscale. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the Outscale folder containing the terraform files by executing `cd quickstart/rancher/outscale`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `access_key_id` - Outscale access key + - `secret_key_id` - Outscale secret key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Outscale Quickstart Readme](https://github.com/rancher/quickstart/tree/master/rancher/outscale) for more information. +Suggestions include: + - `region` - Outscale region, choose the closest instead of the default (`eu-west-2`) + - `prefix` - Prefix for all created resources + - `instance_type` - Instance type, minimum required is `tinav3.c2r4p3` + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 21 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. 
Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/rancher/outscale`. + +#### Result + +Two Kubernetes clusters are deployed into your Outscale account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/rancher/outscale` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/docs/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md b/docs/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md new file mode 100644 index 0000000000..5fc264a1a6 --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md @@ -0,0 +1,51 @@ +--- +title: Vagrant Quick Start +weight: 200 +--- +The following steps quickly deploy a Rancher Server with a single node cluster attached. + +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +::: + +## Prerequisites + +- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. +- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. +- At least 4GB of free RAM. + +### Note +- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: + + `vagrant plugin install vagrant-vboxmanage` + + `vagrant plugin install vagrant-vbguest` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the folder containing the Vagrantfile by executing `cd quickstart/rancher/vagrant`. + +3. **Optional:** Edit `config.yaml` to: + + - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) + - Change the password of the `admin` user for logging into Rancher. (`default_password`) + +4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. + +5. Once provisioning finishes, go to `https://192.168.56.101` in the browser. The default user/password is `admin/adminPassword`. + +**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. 
From the `quickstart/rancher/vagrant` folder execute `vagrant destroy -f`. + +2. Wait for the confirmation that all resources have been destroyed. diff --git a/docs/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/docs/getting-started/quick-start-guides/deploy-workloads/nodeports.md new file mode 100644 index 0000000000..154f51e5ef --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-workloads/nodeports.md @@ -0,0 +1,139 @@ +--- +title: Workload with NodePort Quick Start +weight: 200 +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. Click **☰ > Cluster Management**. +1. From the **Clusters** page, go to the cluster where the workload should be deployed and click **Explore**. +1. Click **Workload**. +1. Click **Create**. +1. Enter a **Name** for your workload. +1. From the **Container Image** field, enter `rancher/hello-world`. This field is case-sensitive. +1. Click **Add Port**. +1. From the **Service Type** drop-down, make sure that **NodePort** is selected. + + ![As a dropdown, NodePort (On every node selected)](/img/nodeport-dropdown.png) + +1. From the **Publish the container port** field, enter port `80`. + + ![Publish the container port, 80 entered](/img/container-port-field.png) + +1. Click **Create**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. + +
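+If you have `kubectl` access to the cluster, you can also confirm the NodePort that Kubernetes assigned to the service. This is an optional check; the `default` namespace is an assumption and may differ depending on where you created the workload.
+
+```
+# The PORT(S) column shows the container port and the assigned NodePort, e.g. 80:31568/TCP
+kubectl get services -n default
+```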
+ +### 2. Viewing Your Application + +From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens. + +### Attention: Cloud-Hosted Sandboxes + +When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test Nginx in an ssh session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. + +```sh +gettingstarted@rancher:~$ curl http://localhost:31568 + + + + Rancher + + + + + +

+
+[HTML response from the hello-world pod; only the visible page text is shown]
+
+Rancher
+
+Hello world!
+
+My hostname is hello-world-66b4b9d88b-78bhx
+
+k8s services found 2
+
+  INGRESS_D1E1A394F61C108633C4BD37AEDDE757  tcp://10.43.203.31:80
+  KUBERNETES  tcp://10.43.0.1:443
+ + + +gettingstarted@rancher:~$ + +``` + +### Finished + +Congratulations! You have successfully deployed a workload exposed via a NodePort. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/docs/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/docs/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md new file mode 100644 index 0000000000..1f4de821d4 --- /dev/null +++ b/docs/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md @@ -0,0 +1,74 @@ +--- +title: Workload with Ingress Quick Start +weight: 100 +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. +1. Click **Create**. +1. Click **Deployment**. +1. Enter a **Name** for your workload. +1. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. +1. Click **Add Port** and enter `80` in the **Private Container Port** field. Adding a port enables access to the application inside and outside of the cluster. For more information, see [Services](../../../pages-for-subheaders/workloads-and-pods.md#services). +1. Click **Create**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. + +
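+
+Before exposing the workload, you can optionally confirm from a terminal that the deployment and its container port are in place. This is a sketch outside the Rancher UI flow; it assumes `kubectl` access to the cluster, a workload named `hello-world`, and the `default` namespace, all of which may differ in your setup.
+
+```sh
+# The deployment should report READY 1/1 once its state is Active in the Rancher UI
+kubectl -n default get deployment hello-world
+
+# Describe the deployment to confirm that container port 80 (added above) is configured
+kubectl -n default describe deployment hello-world
+```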
+### 2. Expose The Application Via An Ingress + +Now that the application is up and running, it needs to be exposed so that other services can connect. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. + +1. Click **Service Discovery > Ingresses**. + +1. Click **Create.** + +1. When choosing **Namespace**, ensure it is the same as the one used when you created your deployment. Otherwise, your deployment will not be available when you attempt to select **Target Service**, as in Step 8 below. + +1. Enter a **Name**, such as **hello**. + +1. Specify your **Path**, such as `/hello`. + +1. In the **Target Service** field, drop down the list and choose the name that you set for your service. + +1. In the **Port** field, drop down the list and select `80`. + +1. Click **Create** at the bottom right. + +**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. + + +### View Your Application + +From the **Deployments** page, find the **Endpoints** column for your deployment and click on an endpoint. The endpoints available will depend on how you configured the port you added to your deployment. For endpoints where you do not see a randomly assigned port, append the path you specified when creating the ingress to the IP address. For example, if your endpoint looks like `xxx.xxx.xxx.xxx` or `https://xxx.xxx.xxx.xxx` change it to `xxx.xxx.xxx.xxx/hello` or `https://xxx.xxx.xxx.xxx/hello`. + +Your application will open in a separate window. + +#### Finished + +Congratulations! You have successfully deployed a workload exposed via an ingress. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/docs/how-to-guides.md b/docs/how-to-guides.md new file mode 100644 index 0000000000..87165f2a75 --- /dev/null +++ b/docs/how-to-guides.md @@ -0,0 +1,5 @@ +--- +title: How-to Guides +--- + +**How-to guides** serve to describe practical steps for users to accomplish some task. In Rancher, we break down how-to guides further into [new user guides](./pages-for-subheaders/new-user-guides.md) and [advanced user guides](./pages-for-subheaders/advanced-user-guides.md). diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md new file mode 100644 index 0000000000..c1e4a05a3b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md @@ -0,0 +1,220 @@ +--- +title: Configure Active Directory (AD) +weight: 1112 +--- + +If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. 
This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. + +Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication](../../../../../pages-for-subheaders/configure-openldap.md) integration. + +:::note + +Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +::: + +## Prerequisites + +You'll need to create or obtain from your AD administrator a new AD user to use as a service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. + +Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such a user has read-only privileges for most objects in the domain partition. + +Note, however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such cases you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. + +:::note Using TLS? + +- If the certificate used by the AD server is self-signed or not from a recognized certificate authority, make sure you have the CA certificate (concatenated with any intermediate certificates) at hand in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +- Upon an upgrade to v2.6.0, authenticating via Rancher against an Active Directory server using TLS can fail if the certificates on the AD server do not support SAN attributes. This check is enabled by default in Go v1.15. + + - The error received is "Error creating SSL connection: LDAP Result Code 200 "Network Error": x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0". + + - To resolve the error, update or replace the certificates on the AD server with new ones that support the SAN attribute. Alternatively, this error can be ignored by setting `GODEBUG=x509ignoreCN=0` as an environment variable on the Rancher server container. + +::: + +## Configuration Steps +### Open Active Directory Configuration + +1. Log into the Rancher UI using the initial local `admin` account. +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **ActiveDirectory**. The **Authentication Provider: ActiveDirectory** form will be displayed. +1. Fill out the form. For help, refer to the details on configuration options below. +1. Click **Enable**. + +### Configure Active Directory Server Settings + +In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. 
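+
+Before completing the form, it can be useful to verify from a machine that can reach the AD server that the hostname, port, and service account credentials are valid. The sketch below uses `ldapsearch` with the same placeholder values as the annex at the end of this page (`ad.acme.com`, NetBIOS domain `acme`) and a hypothetical service account `serviceaccount` with password `secret`; substitute your own values. A successful base search confirms that the bind works, while a result such as `ldap_bind: Invalid credentials (49)` typically points to a wrong username or password.
+
+```
+$ ldapsearch -x -D "acme\serviceaccount" -w "secret" -p 389 \
+-h ad.acme.com -b "dc=acme,dc=com" -s base
+```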
+ +:::note + +If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). + +::: + +**Table 1: AD Server parameters** + +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the AD server | +| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | +| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | +| Service Account Password | The password for the service account. | +| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. | +| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| + +--- + +### Configure User/Group Schema + +In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. + +Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. + +:::note + +If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. + +::: + +#### User Schema + +The table below details the parameters for the user schema section configuration. + +**Table 2: User schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. 
Otherwise, for old NetBIOS-style logon names (e.g. "jdoe"), it's usually `sAMAccountName`. | +| User Member Attribute | The attribute containing the groups that a user is a member of. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | +| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (\|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | +| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. | +| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | + +--- + +#### Group Schema + +The table below details the parameters for the group schema configuration. + +**Table 3: Group schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | +| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (\|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing the user's memberships. See `User Member Attribute`. | +| Nested Group Membership | This setting defines whether Rancher should resolve nested group memberships. 
Use this setting only if your organization makes use of such nested memberships (i.e., you have groups that contain other groups as members). We advise avoiding nested groups when possible, to avoid potential performance issues when there is a large number of nested memberships. | + +--- + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly, and the account you test with will be set as an admin. + +:::note + +The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. + +::: + +1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. +2. Click **Authenticate with Active Directory** to finalise the setup. + +**Result:** + +- Active Directory authentication has been enabled. +- You have been signed into Rancher as administrator using the provided AD credentials. + +:::note + +You will still be able to log in using the locally configured `admin` account and password in case of a disruption of LDAP services. + +::: + +## Annex: Identify Search Base and Schema using ldapsearch + +In order to successfully configure AD authentication, it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. + +The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. + +For the purpose of the example commands provided below, we will assume: + +- The Active Directory server has a hostname of `ad.acme.com` +- The server is listening for unencrypted connections on port `389` +- The Active Directory domain is `acme` +- You have a valid AD account with the username `jdoe` and password `secret` + +### Identify Search Base + +First, we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: + +``` +$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ +-h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" +``` + +This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountName=jdoe`), returning the attributes for said user: + +![](/img/ldapsearch-user.png) + +Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. + +Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, i.e., `OU=Groups,DC=acme,DC=com`. + +### Identify User Schema + +The output of the above `ldapsearch` query also allows us to determine the correct values to use in the user schema configuration: + +- `Object Class`: **person** [1] +- `Username Attribute`: **name** [2] +- `Login Attribute`: **sAMAccountName** [3] +- `User Member Attribute`: **memberOf** [4] + +:::note + +If the AD users in our organization were to authenticate with their UPN (e.g. 
jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName**. + +::: + +We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI by entering either their username or their full name. + +### Identify Group Schema + +Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: + +``` +$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ +-h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ +-s sub "CN=examplegroup" +``` + +This command shows us the attributes used for group objects: + +![](/img/ldapsearch-group.png) + +Again, this allows us to determine the correct values to enter in the group schema configuration: + +- `Object Class`: **group** [1] +- `Name Attribute`: **name** [2] +- `Group Member Mapping Attribute`: **member** [3] +- `Search Attribute`: **sAMAccountName** [4] + +Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly, we will have to set the value of the `Group Member User Attribute` parameter to this attribute. + +In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpoint the cause of the problem. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md new file mode 100644 index 0000000000..6257f175bd --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md @@ -0,0 +1,243 @@ +--- +title: Configure Azure AD +weight: 1115 +--- + +If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. + +:::note Prerequisite: + +Have an instance of Azure AD configured. + +::: + +:::note Notes + +- Azure AD integration only supports Service Provider initiated logins. + +- Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). + +::: + +## Azure Active Directory Configuration Outline + +Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. + +:::tip + +Before you start, we recommend creating an empty text file. 
You can use this file to copy values from Azure that you'll paste into Rancher later. + +::: + + + +- [1. Register Rancher with Azure](#1-register-rancher-with-azure) +- [2. Create a new client secret](#2-create-a-new-client-secret) +- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) +- [4. Add a Reply URL](#4-add-a-reply-url) +- [5. Copy Azure Application Data](#5-copy-azure-application-data) +- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) + + + +### 1. Register Rancher with Azure + +Before enabling Azure AD within Rancher, you must register Rancher with Azure. + +1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. + +1. Use search to open the **App registrations** service. + + ![Open App Registrations](/img/search-app-registrations.png) + +1. Click **New registrations** and complete the **Create** form. + + ![New App Registration](/img/new-app-registration.png) + + 1. Enter a **Name** (something like `Rancher`). + + 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. + + 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. + + :::tip + + You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + + ::: + + 1. Click **Register**. + +:::note + +It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +::: + +### 2. Create a new client secret + +From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. + +1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. + + ![Open Rancher Registration](/img/open-rancher-app.png) + +1. From the navigation pane on left, click **Certificates and Secrets**. + +1. Click **New client secret**. + + ![Create new client secret](/img/select-client-secret.png) + + 1. Enter a **Description** (something like `Rancher`). + + 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. + + 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). + + +1. Copy the key value and save it to an [empty text file](#tip). + + You'll enter this key into the Rancher UI later as your **Application Secret**. + + You won't be able to access the key value again within the Azure UI. + +### 3. Set Required Permissions for Rancher + +Next, set API permissions for Rancher within Azure. + +1. From the navigation pane on left, select **API permissions**. + + ![Open Required Permissions](/img/select-required-permissions.png) + +1. Click **Add a permission**. + +1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: + + ![Select API Permissions](/img/select-required-permissions-2.png) + +
+
+ - **Access the directory as the signed-in user** + - **Read directory data** + - **Read all groups** + - **Read all users' full profiles** + - **Read all users' basic profiles** + - **Sign in and read user profile** + +1. Click **Add permissions**. + +1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. + + :::note + + You must be signed in as an Azure administrator to successfully save your permission settings. + + ::: + + +### 4. Add a Reply URL + +To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. + + +1. From the **Setting** blade, select **Reply URLs**. + + ![Azure: Enter Reply URL](/img/enter-azure-reply-url.png) + +1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. + + :::tip + + You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + + ::: + +1. Click **Save**. + +**Result:** Your reply URL is saved. + +:::note + +It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +::: + +### 5. Copy Azure Application Data + +As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. + +1. Obtain your Rancher **Tenant ID**. + + 1. Use search to open the **Azure Active Directory** service. + + ![Open Azure Active Directory](/img/search-azure-ad.png) + + 1. From the left navigation pane, open **Overview**. + + 2. Copy the **Directory ID** and paste it into your [text file](#tip). + + You'll paste this value into Rancher as your **Tenant ID**. + +1. Obtain your Rancher **Application ID**. + + 1. Use search to open **App registrations**. + + ![Open App Registrations](/img/search-app-registrations.png) + + 1. Find the entry you created for Rancher. + + 1. Copy the **Application ID** and paste it to your [text file](#tip). + +1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. + + 1. From **App registrations**, click **Endpoints**. + + ![Click Endpoints](/img/click-endpoints.png) + + 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). + + - **Microsoft Graph API endpoint** (Graph Endpoint) + - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) + - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) + +:::note + +Copy the v1 version of the endpoints + +::: + +### 6. Configure Azure AD in Rancher + +From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. + +Enter the values that you copied to your [text file](#tip). + +1. Log into Rancher. +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **AzureAD**. +1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). + + :::note Important + + When entering your Graph Endpoint, remove the tenant ID from the URL, like below. 
+ + https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c + + ::: + + The following table maps the values you copied in the Azure portal to the fields in Rancher. + + | Rancher Field | Azure Value | + | ------------------ | ------------------------------------- | + | Tenant ID | Directory ID | + | Application ID | Application ID | + | Application Secret | Key Value | + | Endpoint | https://login.microsoftonline.com/ | + | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | + | Token Endpoint | OAuth 2.0 Token Endpoint | + | Auth Endpoint | OAuth 2.0 Authorization Endpoint | + +1. Click **Enable**. + +**Result:** Azure Active Directory authentication is configured. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md new file mode 100644 index 0000000000..1dc551a9a1 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md @@ -0,0 +1,61 @@ +--- +title: Configure FreeIPA +weight: 1114 +--- + +If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. + +:::note Prerequisites: + +- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. +- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. +- Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +::: + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **FreeIPA**. +1. Complete the **Configure an FreeIPA server** form. + + You may need to log in to your domain controller to find the information requested in the form. + + :::note Using TLS? + + If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. + + ::: + + :::note User Search Base vs. Group Search Base + + Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. + + * If your users and groups are in the same search base, complete only the User Search Base. + * If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. + + ::: + +1. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. + + :::note Search Attribute + + The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. 
Rancher specifically searches for users/groups that begin with the text entered in the search field. + + The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. + + * `uid`: User ID + * `sn`: Last Name + * `givenName`: First Name + + With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. + + ::: + +1. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. +1. Click **Enable**. + +**Result:** + +- FreeIPA authentication is configured. +- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md new file mode 100644 index 0000000000..56859c962f --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md @@ -0,0 +1,57 @@ +--- +title: Configure GitHub +weight: 1116 +--- + +In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. + +:::note Prerequisites: + +Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +::: + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **GitHub**. +1. Follow the directions displayed to set up a GitHub Application. Rancher redirects you to GitHub to complete registration. + + :::note What's an Authorization Callback URL? + + The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). + + When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. + + ::: + +1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. + + :::note Where do I find the Client ID and Client Secret? + + From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. + + ::: + +1. Click **Authenticate with GitHub**. + +1. Use the **Site Access** options to configure the scope of user authorization. + + - **Allow any valid Users** + + _Any_ GitHub user can access Rancher. We generally discourage use of this setting! + + - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** + + Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. 
+ + - **Restrict access to only Authorized Users and Organizations** + + Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. +
+1. Click **Enable**. + +**Result:** + +- GitHub authentication is configured. +- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md new file mode 100644 index 0000000000..9a0c098ae3 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md @@ -0,0 +1,112 @@ +--- +title: Configure Google OAuth +weight: 10 +--- + +If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. + +Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. + +Within Rancher, only administrators or users with the **Manage Authentication** [global role](../../manage-role-based-access-control-rbac/global-permissions.md) can configure authentication. + +# Prerequisites + +- You must have a [G Suite admin account](https://admin.google.com) configured. +- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. +- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) + +After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: +![Enable Admin APIs](/img/Google-Enable-APIs-Screen.png) + +# Setting up G Suite for OAuth with Rancher + +Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: + +1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) +1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) +1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) +1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) + +### 1. Adding Rancher as an Authorized Domain + +1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. +1. Select your project and click **OAuth consent screen**. +![OAuth Consent Screen](/img/Google-OAuth-consent-screen-tab.png) +1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. 
For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) +1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. + +**Result:** Rancher has been added as an authorized domain for the Admin SDK API. + +### 2. Creating OAuth2 Credentials for the Rancher Server + +1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) +![Credentials](/img/Google-Credentials-tab.png) +1. On the **Create Credentials** dropdown, select **OAuth client ID**. +1. Click **Web application**. +1. Provide a name. +1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs**. Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. + - Under **Authorized JavaScript origins,** enter your Rancher server URL. + - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. +1. Click on **Create**. +1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON**. Save the file so that you can provide these credentials to Rancher. + +**Result:** Your OAuth credentials have been successfully created. + +### 3. Creating Service Account Credentials +Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. + +Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. + +As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. + +This section describes how to: + +- Create a service account +- Create a key for the service account and download the credentials as JSON + +1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. +1. Click on **Create Service Account**. +1. Enter a name and click **Create**. +![Service account creation Step 1](/img/Google-svc-acc-step1.png) +1. Don't provide any roles on the **Service account permissions** page and click **Continue** +![Service account creation Step 2](/img/Google-svc-acc-step2.png) +1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. +![Service account creation Step 3](/img/Google-svc-acc-step3-key-creation.png) + +**Result:** Your service account is created. + +### 4. Register the Service Account Key as an OAuth Client + +You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. 
+ +Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: + +1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK**. This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID**. + + ![Service account Unique ID](/img/Google-Select-UniqueID-column.png) +1. Go to the [**Domain-wide Delegation** page.](https://admin.google.com/ac/owl/domainwidedelegation) +1. Add the Unique ID obtained in the previous step in the **Client Name** field. +1. In the **One or More API Scopes** field, add the following scopes: + ``` + openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly + ``` +1. Click **Authorize**. + +**Result:** The service account is registered as an OAuth client in your G Suite account. + +# Configuring Google OAuth in Rancher + +1. Sign into Rancher using a local user assigned the [administrator](../../manage-role-based-access-control-rbac/global-permissions.md) role. This user is also called the local principal. +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Google**. The instructions in the UI cover the steps to set up authentication with Google OAuth. + 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. + 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. + 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. + - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) + - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. + - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. +1. Click **Authenticate with Google**. +1. Click **Enable**. + +**Result:** Google authentication is successfully configured. 
diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc.md new file mode 100644 index 0000000000..146e834e48 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc.md @@ -0,0 +1,145 @@ +--- +title: Configure Keycloak (OIDC) +description: Create a Keycloak OpenID Connect (OIDC) client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins +weight: 1200 +--- +If your organization uses [Keycloak Identity Provider (IdP)](https://www.keycloak.org) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. Rancher supports integration with Keycloak using the OpenID Connect (OIDC) protocol and the SAML protocol. Both implementations are functionally equivalent when used with Rancher. This page describes the process to configure Rancher to work with Keycloak using the OIDC protocol. + +If you prefer to use Keycloak with the SAML protocol instead, refer to [this page](configure-keycloak-saml.md). + +If you have an existing configuration using the SAML protocol and want to switch to the OIDC protocol, refer to [this section](#migrating-from-saml-to-oidc). + +## Prerequisites + +- On Rancher, Keycloak (SAML) is disabled. +- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. +- In Keycloak, create a [new OIDC client](https://www.keycloak.org/docs/latest/server_admin/#oidc-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#oidc-clients) for help. + + Setting | Value + ------------|------------ + `Client ID` | <CLIENT_ID> (e.g. `rancher`) + `Name` | <CLIENT_NAME> (e.g. `rancher`) + `Client Protocol` | `openid-connect` + `Access Type` | `confidential` + `Valid Redirect URI` | `https://yourRancherHostURL/verify-auth` + +- In the new OIDC client, create [Mappers](https://www.keycloak.org/docs/latest/server_admin/#_protocol-mappers) to expose the users fields. + - Create a new "Groups Mapper" with the settings below. + + Setting | Value + ------------|------------ + `Name` | `Groups Mapper` + `Mapper Type` | `Group Membership` + `Token Claim Name` | `groups` + `Add to ID token` | `OFF` + `Add to access token` | `OFF` + `Add to user info` | `ON` + + - Create a new "Client Audience" with the settings below. + + Setting | Value + ------------|------------ + `Name` | `Client Audience` + `Mapper Type` | `Audience` + `Included Client Audience` | <CLIENT_NAME> + `Add to access token` | `ON` + + - Create a new "Groups Path" with the settings below. + + Setting | Value + ------------|------------ + `Name` | `Group Path` + `Mapper Type` | `Group Membership` + `Token Claim Name` | `full_group_path` + `Full group path` | `ON` + `Add to user info` | `ON` + +## Configuring Keycloak in Rancher + +1. In the Rancher UI, click **☰ > Users & Authentication**. 
+1. In the left navigation bar, click **Auth Provider**. +1. Select **Keycloak (OIDC)**. +1. Complete the **Configure a Keycloak OIDC account** form. For help with filling the form, see the [configuration reference](#configuration-reference). +1. After you complete the **Configure a Keycloak OIDC account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. + + :::note + + You may need to disable your popup blocker to see the IdP login page. + + ::: + +**Result:** Rancher is configured to work with Keycloak using the OIDC protocol. Your users can now sign into Rancher using their Keycloak logins. + +## Configuration Reference + +| Field | Description | +| ------------------------- | ------------------------------------------------------------------------------------ | +| Client ID | The `Client ID` of your Keycloak client. | +| Client Secret | The generated `Secret` of your Keycloak client. In the Keycloak console, select **Clients**, select the client you created, select the **Credentials** tab and copy the value of the `Secret` field. | +| Private Key / Certificate | A key/certificate pair used to create a secure connection between Rancher and your IdP. Required if HTTPS/SSL is enabled on your Keycloak server. | +| Endpoints | Choose whether to use the generated values for the `Rancher URL`, `Issuer`, and `Auth Endpoint` fields or to provide manual overrides if they are incorrect. | +| Keycloak URL | The URL for your Keycloak server. | +| Keycloak Realm | The name of the realm in which the Keycloak client was created. | +| Rancher URL | The URL for your Rancher Server. | +| Issuer | The URL of your IdP. | +| Auth Endpoint | The URL where users are redirected to authenticate. | + +## Migrating from SAML to OIDC + +This section describes the process to transition from using Rancher with Keycloak (SAML) to Keycloak (OIDC). + +### Reconfigure Keycloak + +1. Change the existing client to use the OIDC protocol. In the Keycloak console, select **Clients**, select the SAML client to migrate, select the **Settings** tab, change `Client Protocol` from `saml` to `openid-connect`, and click **Save**. + +1. Verify the `Valid Redirect URIs` are still valid. + +1. Select the **Mappers** tab and create a new Mapper with the settings below. + + Setting | Value + ------------|------------ + `Name` | `Groups Mapper` + `Mapper Type` | `Group Membership` + `Token Claim Name` | `groups` + `Add to ID token` | `ON` + `Add to access token` | `ON` + `Add to user info` | `ON` + +### Reconfigure Rancher + +Before configuring Rancher to use Keycloak (OIDC), Keycloak (SAML) must first be disabled. + +1. In the Rancher UI, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Auth Provider**. +1. Select **Keycloak (SAML)**. +1. Click **Disable**. + +Configure Rancher to use Keycloak (OIDC) by following the steps in [this section](#configuring-keycloak-in-rancher). + +:::note + +After configuration is completed, Rancher user permissions will need to be reapplied as they are not automatically migrated. + +::: + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration options of your OIDC client. You may also inspect the Rancher logs to help pinpoint what's causing issues. 
Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. + +All Keycloak related log entries will be prepended with either `[generic oidc]` or `[keycloak oidc]`. + +### You are not redirected to Keycloak + +When you fill the **Configure a Keycloak OIDC account** form and click on **Enable**, you are not redirected to your IdP. + + * Verify your Keycloak client configuration. + +### The generated `Issuer` and `Auth Endpoint` are incorrect + + * On the **Configure a Keycloak OIDC account** form, change **Endpoints** to `Specify (advanced)` and override the `Issuer` and `Auth Endpoint` values. To find the values, go to the Keycloak console and select **Realm Settings**, select the **General** tab, and click **OpenID Endpoint Configuration**. The JSON output will display values for `issuer` and `authorization_endpoint`. + +### Keycloak Error: "Invalid grant_type" + + * In some cases, this error message may be misleading and is actually caused by setting the `Valid Redirect URI` incorrectly. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml.md new file mode 100644 index 0000000000..db51fa5a7d --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml.md @@ -0,0 +1,187 @@ +--- +title: Configure Keycloak (SAML) +description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins +weight: 1200 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +## Prerequisites + +- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. +- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. + + Setting | Value + ------------|------------ + `Sign Documents` | `ON` 1 + `Sign Assertions` | `ON` 1 + All other `ON/OFF` Settings | `OFF` + `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 + `Client Name` | (e.g. `rancher`) + `Client Protocol` | `SAML` + `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` + + >1: Optionally, you can enable either one or both of these settings. + >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. 
+ + ![](/img/keycloak/keycloak-saml-client-configuration.png) + +- In the new SAML client, create Mappers to expose the users fields + - Add all "Builtin Protocol Mappers" + ![](/img/keycloak/keycloak-saml-client-builtin-mappers.png) + - Create a new "Group list" mapper to map the member attribute to a user's groups + ![](/img/keycloak/keycloak-saml-client-group-mapper.png) + +## Getting the IDP Metadata + + + + +To get the IDP metadata, export a `metadata.xml` file from your Keycloak client. +From the **Installation** tab, choose the **SAML Metadata IDPSSODescriptor** format option and download your file. + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. + +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + +The XML obtained contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: + +1. Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. +1. Remove the `` tag from the beginning. +1. Remove the `` from the end of the xml. + +You are left with something similar as the example below: + +``` + +.... + +``` + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. + +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + + + + +## Configuring Keycloak in Rancher + + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Keycloak SAML**. +1. Complete the **Configure Keycloak Account** form. For help with filling the form, see the [configuration reference](#configuration-reference). +1. 
After you complete the **Configure a Keycloak Account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. + + :::note + + You may have to disable your popup blocker to see the IdP login page. + + ::: + +**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. + +{{< saml_caveats >}} + +## Configuration Reference + +| Field | Description | +| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Display Name Field | The attribute that contains the display name of users.

Example: `givenName` | +| User Name Field | The attribute that contains the user name/given name. Example: `email` | +| UID Field | An attribute that is unique to every user. Example: `email` | +| Groups Field | Make entries for managing group memberships. Example: `member` | +| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.
Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | +| Rancher API Host | The URL for your Rancher Server. | +| Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | +| IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | + +:::tip + +You can generate a key/certificate pair using an openssl command. For example: + +openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert + +::: + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. + +### You are not redirected to Keycloak + +When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. + + * Verify your Keycloak client configuration. + * Make sure `Force Post Binding` set to `OFF`. + + +### Forbidden message displayed after IdP login + +You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. + + * Check the Rancher debug log. + * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. + +### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata + +This is usually due to the metadata not being created until a SAML provider is configured. +Try configuring and saving keycloak as your SAML provider and then accessing the metadata. + +### Keycloak Error: "We're sorry, failed to process response" + + * Check your Keycloak log. + * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. + +### Keycloak Error: "We're sorry, invalid requester" + + * Check your Keycloak log. + * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md new file mode 100644 index 0000000000..b39e0c951b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md @@ -0,0 +1,62 @@ +--- +title: Configure Okta (SAML) +weight: 1210 +--- + +If your organization uses Okta Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +:::note + +Okta integration only supports Service Provider initiated logins. + +::: +## Prerequisites + +In Okta, create a SAML Application with the settings below. 
See the [Okta documentation](https://developer.okta.com/standards/SAML/setting_up_a_saml_application_in_okta) for help. + +Setting | Value +------------|------------ +`Single Sign on URL` | `https://yourRancherHostURL/v1-saml/okta/saml/acs` +`Audience URI (SP Entity ID)` | `https://yourRancherHostURL/v1-saml/okta/saml/metadata` + +## Configuring Okta in Rancher + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Okta**. +1. Complete the **Configure Okta Account** form. The examples below describe how you can map Okta attributes from attribute statements to fields within Rancher. + + | Field | Description | + | ------------------------- | ----------------------------------------------------------------------------- | + | Display Name Field | The attribute name from an attribute statement that contains the display name of users. | + | User Name Field | The attribute name from an attribute statement that contains the user name/given name. | + | UID Field | The attribute name from an attribute statement that is unique to every user. | + | Groups Field | The attribute name in a group attribute statement that exposes your groups. | + | Rancher API Host | The URL for your Rancher Server. | + | Private Key / Certificate | A key/certificate pair used for Assertion Encryption. | + | Metadata XML | The `Identity Provider metadata` file that you find in the application `Sign On` section. | + + :::tip + + You can generate a key/certificate pair using an openssl command. For example: + + ``` + openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.crt + ``` + + ::: + + +1. After you complete the **Configure Okta Account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Okta IdP to validate your Rancher Okta configuration. + + :::note + + If nothing seems to happen, it's likely because your browser blocked the pop-up. Make sure you disable the pop-up blocker for your rancher domain and whitelist it in any other extensions you might utilize. + + ::: + +**Result:** Rancher is configured to work with Okta. Your users can now sign into Rancher using their Okta logins. + +{{< saml_caveats >}} diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md new file mode 100644 index 0000000000..042a20bfb0 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md @@ -0,0 +1,56 @@ +--- +title: Configure PingIdentity (SAML) +weight: 1200 +--- + +If your organization uses Ping Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +>**Prerequisites:** +> +>- You must have a [Ping IdP Server](https://www.pingidentity.com/) configured. 
+>- Following are the Rancher Service Provider URLs needed for configuration: +Metadata URL: `https:///v1-saml/ping/saml/metadata` +Assertion Consumer Service (ACS) URL: `https:///v1-saml/ping/saml/acs` +Note that these URLs will not return valid data until the authentication configuration is saved in Rancher. +>- Export a `metadata.xml` file from your IdP Server. For more information, see the [PingIdentity documentation](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Ping Identity**. +1. Complete the **Configure a Ping Account** form. Ping IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. + + 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). + + 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). + + 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). + + 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). + + 1. **Entity ID Field** (optional): The published, protocol-dependent, unique identifier of your partner. This ID defines your organization as the entity operating the server for SAML 2.0 transactions. This ID may have been obtained out-of-band or via a SAML metadata file. + + 1. **Rancher API Host**: Enter the URL for your Rancher Server. + + 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. + + You can generate one using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 1. **IDP-metadata**: The `metadata.xml` file that you [exported from your IdP server](https://documentation.pingidentity.com/pingfederate/pf83/index.shtml#concept_exportingMetadata.html). + + +1. After you complete the **Configure Ping Account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Ping IdP to validate your Rancher PingIdentity configuration. + + :::note + + You may have to disable your popup blocker to see the IdP login page. + + ::: + +**Result:** Rancher is configured to work with PingIdentity. Your users can now sign into Rancher using their PingIdentity logins. 
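Once the form has been saved, you can confirm that Rancher is serving its SAML service provider metadata at the URL listed in the prerequisites. This is only a sketch, assuming `curl` is available and substituting your own Rancher hostname; the `-k` flag skips certificate verification and is only needed when Rancher uses a self-signed certificate:

```
# Returns the Rancher SP metadata XML once the Ping configuration is saved.
curl -k https://rancher.example.com/v1-saml/ping/saml/metadata
```

An empty or error response usually means the authentication configuration has not been saved yet, as noted in the prerequisites.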
+ +{{< saml_caveats >}} diff --git a/content/rancher/v2.6/en/admin-settings/authentication/local/_index.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/local/_index.md rename to docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md new file mode 100644 index 0000000000..a98c27e2fc --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md @@ -0,0 +1,71 @@ +--- +title: Users and Groups +weight: 1 +--- + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. + +Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control](../../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md). + +## Managing Members + +When adding a user or group to a resource, you can search for users or groups by beginning to type their name. The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local](create-local-users.md) user account, you will not be able to search for GitHub users or groups. + +All users, whether they are local users or from an authentication provider, can be viewed and managed. In the upper left corner, click **☰ > Users & Authentication**. In the left navigation bar, click **Users**. + +{{< saml_caveats >}} + +## User Information + +Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. + +Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. 
+ +### Automatically Refreshing User Information + +Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. + +Two settings control this behavior: + +- **`auth-user-info-max-age-seconds`** + + This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. + +- **`auth-user-info-resync-cron`** + + This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. + +To change these settings, + +1. In the upper left corner, click **☰ > Global Settings**. +1. Go to the setting you want to configure and click **⋮ > Edit Setting**. + +:::note + +Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. + +::: +### Manually Refreshing User Information + +If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. On the **Users** page, click on **Refresh Group Memberships**. + +**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. + +:::note + +Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. + +::: + +## Session Length + +The default length (TTL) of each user session is adjustable. The default session length is 16 hours. + +1. In the upper left corner, click **☰ > Global Settings**. +1. Go to **`auth-user-session-ttl-minutes`** and click **⋮ > Edit Setting**. +1. Enter the amount of time in minutes a session length should last and click **Save**. + +**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md new file mode 100644 index 0000000000..9bde44ef33 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md @@ -0,0 +1,82 @@ +--- +title: 1. 
Configuring Microsoft AD FS for Rancher +weight: 1205 +--- + +Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. + +1. Log into your AD server as an administrative user. + +1. Open the **AD FS Management** console. Select **Add Relying Party Trust..**. from the **Actions** menu and click **Start**. + + ![](/img/adfs/adfs-overview.png) + +1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. + + ![](/img/adfs/adfs-add-rpt-2.png) + +1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. + + ![](/img/adfs/adfs-add-rpt-3.png) + +1. Select **AD FS profile** as the configuration profile for your relying party trust. + + ![](/img/adfs/adfs-add-rpt-4.png) + +1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. + + ![](/img/adfs/adfs-add-rpt-5.png) + +1. Select **Enable support for the SAML 2.0 WebSSO protocol** + and enter `https:///v1-saml/adfs/saml/acs` for the service URL. + + ![](/img/adfs/adfs-add-rpt-6.png) + +1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. + + ![](/img/adfs/adfs-add-rpt-7.png) + +1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. + + ![](/img/adfs/adfs-add-rpt-8.png) + +1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. + + ![](/img/adfs/adfs-add-rpt-9.png) + +1. After reviewing your settings, select **Next** to add the relying party trust. + + ![](/img/adfs/adfs-add-rpt-10.png) + + +1. Select **Open the Edit Claim Rules..**. and click **Close**. + + ![](/img/adfs/adfs-add-rpt-11.png) + +1. On the **Issuance Transform Rules** tab, click **Add Rule..**.. + + ![](/img/adfs/adfs-edit-cr.png) + +1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. + + ![](/img/adfs/adfs-add-tcr-1.png) + +1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: + + | LDAP Attribute | Outgoing Claim Type | + | -------------------------------------------- | ------------------- | + | Given-Name | Given Name | + | User-Principal-Name | UPN | + | Token-Groups - Qualified by Long Domain Name | Group | + | SAM-Account-Name | Name | +
+ ![](/img/adfs/adfs-add-tcr-2.png) + +1. Download the `federationmetadata.xml` from your AD server at: +``` +https:///federationmetadata/2007-06/federationmetadata.xml +``` + +**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. + +### [Next: Configuring Rancher for Microsoft AD FS](configure-rancher-for-ms-adfs.md) diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md new file mode 100644 index 0000000000..313b6b842b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md @@ -0,0 +1,55 @@ +--- +title: 2. Configuring Rancher for Microsoft AD FS +weight: 1205 +--- + +After you complete [Configuring Microsoft AD FS for Rancher](configure-ms-adfs-for-rancher.md), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. + +:::note Important Notes For Configuring Your ADFS Server: + +- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` +- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` +- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` + +::: + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **ADFS**. +1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. +1. After you complete the **Configure AD FS Account** form, click **Enable**. + + Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. + + :::note + + You may have to disable your popup blocker to see the AD FS login page. + + ::: + +**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. + +# Configuration + +| Field | Description | +|---------------------------|-----------------| +| Display Name Field | The AD attribute that contains the display name of users.

Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` | +| User Name Field | The AD attribute that contains the user name/given name. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` | +| UID Field | An AD attribute that is unique to every user. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` | +| Groups Field | Make entries for managing group memberships. Example: `http://schemas.xmlsoap.org/claims/Group` | +| Rancher API Host | The URL for your Rancher Server. | +| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL. [Certificate creation command](#cert-command) | +| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.
You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | + + + + +:::tip + +You can generate a certificate using an openssl command. For example: + +``` +openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" +``` + +::: \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md new file mode 100644 index 0000000000..746cb06ae7 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md @@ -0,0 +1,32 @@ +--- +title: Group Permissions with Shibboleth and OpenLDAP +weight: 1 +--- + +This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. + +Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. + +One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. + +### Terminology + +- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships. +- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. +- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. +- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. + +### Adding OpenLDAP Group Permissions to Rancher Resources + +The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. + +For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions view most cluster level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. + +In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning persmissions. Without OpenLDAP, the functionality to search for groups would not be supported. + +When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. + +Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. 
Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. + +![Adding OpenLDAP Group Permissions to Rancher Resources](/img/shibboleth-with-openldap-groups.svg) + \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md new file mode 100644 index 0000000000..7803f1f6b8 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md @@ -0,0 +1,43 @@ +--- +title: Cluster Drivers +weight: 1 +--- + +Cluster drivers are used to create clusters in a [hosted Kubernetes provider](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. + +If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. + +### Managing Cluster Drivers + +:::note Prerequisites: + +To create, edit, or delete cluster drivers, you need _one_ of the following permissions: + +- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Cluster Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +::: + +## Activating/Deactivating Cluster Drivers + +By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. + +1. In the upper left corner, click **☰ > Cluster Management**. + +2. In the left navigation menu, click **Drivers**. + +3. On the **Cluster Drivers** tab, select the driver that you wish to activate or deactivate and click **⋮ > Activate** or **⋮ > Deactivate**. + +## Adding Custom Cluster Drivers + +If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **Drivers**. +1. On the **Cluster Drivers** tab, click **Add Cluster Driver**. +1. Complete the **Add Cluster Driver** form. Then click **Create**. + + +### Developing your own Cluster Driver + +In order to develop cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). 
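If you manage many Rancher installations, you may prefer to script driver activation rather than clicking through the UI. The sketch below is an assumption-heavy example: it presumes `kubectl` access to the Rancher management (local) cluster and that your Rancher version exposes cluster drivers through the `kontainerdrivers.management.cattle.io` resource; the driver name is a placeholder taken from the listing output.

```
# List registered cluster drivers and whether they are active.
kubectl get kontainerdrivers.management.cattle.io

# Deactivate a driver so it is no longer offered during cluster creation.
# Replace <driver-name> with a name from the listing above.
kubectl patch kontainerdrivers.management.cattle.io <driver-name> \
  --type merge -p '{"spec":{"active":false}}'
```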
diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md new file mode 100644 index 0000000000..71e2af898b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md @@ -0,0 +1,42 @@ +--- +title: Node Drivers +weight: 2 +--- + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. + +#### Managing Node Drivers + +:::note Prerequisites: + +To create, edit, or delete drivers, you need _one_ of the following permissions: + +- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Node Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +::: + +## Activating/Deactivating Node Drivers + +By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. + +1. In the upper left corner, click **☰ > Cluster Management**. + +2. In the left navigation menu, click **Drivers**. + +2. On the **Node Drivers** tab, select the driver that you wish to activate or deactivate and click **⋮ > Activate** or **⋮ > Deactivate**. + +## Adding Custom Node Drivers + +If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **Drivers**. +1. On **Node Drivers** tab, click **Add Node Driver**. +1. Complete the **Add Node Driver** form. Then click **Create**. + +### Developing your own node driver + +Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). 
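Because a Rancher node driver is just a Docker Machine driver, you can exercise the underlying driver directly with the `docker-machine` CLI to understand what Rancher does on your behalf. This is an illustrative sketch only, using the standard DigitalOcean driver and assuming a valid API token is exported as `DO_TOKEN`:

```
# Provision a single test machine with the DigitalOcean driver.
docker-machine create \
  --driver digitalocean \
  --digitalocean-access-token "$DO_TOKEN" \
  example-node

# Remove the test machine when finished.
docker-machine rm -y example-node
```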
diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md new file mode 100644 index 0000000000..2d0f8d92ca --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md @@ -0,0 +1,65 @@ +--- +title: Access and Sharing +weight: 31 +--- + +If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. + +Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. + +When you share a template, each user can have one of two access levels: + +- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. +- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. + +If you create a template, you automatically become an owner of that template. + +If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.](manage-rke1-templates.md) + +There are several ways to share templates: + +- Add users to a new RKE template during template creation +- Add users to an existing RKE template +- Make the RKE template public, sharing it with all users in the Rancher setup +- Share template ownership with users who are trusted to modify the template + +### Sharing Templates with Specific Users or Groups + +To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Under **RKE1 configuration**, click **RKE Templates**. +1. Go to the template that you want to share and click the **⋮ > Edit**. +1. In the **Share Template** section, click on **Add Member**. +1. Search in the **Name** field for the user or group you want to share the template with. +1. Choose the **User** access type. +1. Click **Save**. + +**Result:** The user or group can create clusters using the template. + +### Sharing Templates with All Users + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE Templates**. +1. Go to the template that you want to share and click the **⋮ > Edit**. +1. Under **Share Template,** check the box for **Make Public (read-only)**. +1. Click **Save**. + +**Result:** All users in the Rancher setup can create clusters using the template. + +### Sharing Ownership of Templates + +If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. + +In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. + +To give Owner access to a user or group, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. 
Under **RKE1 configuration**, click **RKE Templates**. +1. Go to the RKE template that you want to share and click the **⋮ > Edit**. +1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. +1. In the **Access Type** field, click **Owner**. +1. Click **Save**. + +**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md new file mode 100644 index 0000000000..ff81feee70 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md @@ -0,0 +1,65 @@ +--- +title: Applying Templates +weight: 50 +--- + +You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.](access-or-share-templates.md) + +RKE templates can be applied to new clusters. + +You can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +This section covers the following topics: + +- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) +- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) +- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) + +### Creating a Cluster from an RKE Template + +To add a cluster [hosted by an infrastructure provider](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using an RKE template, use these steps: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create** and choose the infrastructure provider. +1. Provide the cluster name and node template details as usual. +1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision**. +1. Choose an RKE template and revision from the dropdown menu. +1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. +1. Click **Create** to launch the cluster. + +### Updating a Cluster Created with an RKE Template + +When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. + +- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.](../../../../pages-for-subheaders/cluster-configuration.md) +- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. 
If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. + +If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. + +An existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. + +:::note + +You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +::: + +### Converting an Existing Cluster to Use an RKE Template + +This section describes how to create an RKE template from an existing cluster. + +RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) + +To convert an existing cluster to use an RKE template, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster that will be converted to use an RKE template. Click **⋮ > Save as RKE Template**. +1. Enter a name for the template in the form that appears, and click **Create**. + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md new file mode 100644 index 0000000000..8326c06c40 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md @@ -0,0 +1,58 @@ +--- +title: Template Creator Permissions +weight: 10 +--- + +Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. + +For more information on administrator permissions, refer to the [documentation on global permissions](../manage-role-based-access-control-rbac/global-permissions.md). + +# Giving Users Permission to Create Templates + +Templates can only be created by users who have the global permission **Create RKE Templates**. + +Administrators have the global permission to create templates, and only administrators can give that permission to other users. 
+ +For information on allowing users to modify existing templates, refer to [Sharing Templates.](access-or-share-templates.md) + +Administrators can give users permission to create RKE templates in two ways: + +- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) +- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) + +### Allowing a User to Create Templates + +An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Users**. +1. Choose the user you want to edit and click **⋮ > Edit Config**. +1. In the **Built-in** section, check the box for **Create new RKE Cluster Templates** role along with any other roles the user should have. You may want to also check the box for **Create RKE Template Revisions**. +1. Click **Save**. + +**Result:** The user has permission to create RKE templates. + +### Allowing New Users to Create Templates by Default + +Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Go to the role named **Create new RKE Cluster Templates and click **⋮ > Edit Config**. +1. Select the option **Yes: Default role for new users**. +1. Click **Save**. +1. If you would like new users to also be able to create RKE template revisions, enable that role as default as well. + +**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. + +### Revoking Permission to Create Templates + +Administrators can remove a user's permission to create templates with the following steps. Note: Administrators have full control over all resources regardless of whether fine-grained permissions are selected. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Users**. +1. Choose the user you want to edit permissions for and click **⋮ > Edit Config**. +1. In the **Built-in** section, un-check the box for **Create RKE Templates** and **Create RKE Template Revisions,** if applicable. In this section, you can change the user back to a standard user, or give the user a different set of permissions. +1. Click **Save**. + +**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md new file mode 100644 index 0000000000..c4b1d9e7e9 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md @@ -0,0 +1,44 @@ +--- +title: Template Enforcement +weight: 32 +--- + +This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. + +By default, any standard user in Rancher can create clusters. 
But when RKE template enforcement is turned on, + +- Only an administrator has the ability to create clusters without a template. +- All standard users must use an RKE template to create a new cluster. +- Standard users cannot create a cluster without using a template. + +Users can only create new templates if the administrator [gives them permission.](creator-permissions.md#allowing-a-user-to-create-templates) + +After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision](apply-templates.md#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.](manage-rke1-templates.md#updating-a-template) + +# Requiring New Clusters to Use an RKE Template + +You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user](../manage-role-based-access-control-rbac/global-permissions.md) will use the Kubernetes and/or Rancher settings that are vetted by administrators. + +To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: + +1. Click **☰ > Global Settings**. +1. Go to the `cluster-template-enforcement` setting. Click **⋮ > Edit Setting**. +1. Set the value to **True** and click **Save**. + + :::note Important: + + When the admin sets the `cluster-template-enforcement` to True, they also need to share the `clusterTemplates` with users so that users can select one of these templates to create the cluster. + + ::: + +**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. + +# Disabling RKE Template Enforcement + +To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: + +1. Click **☰ > Global Settings**. +1. Go to the `cluster-template-enforcement` setting. Click **⋮ > Edit Setting**. +1. Set the value to **False** and click **Save**. + +**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md new file mode 100644 index 0000000000..bf7bf15bec --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md @@ -0,0 +1,71 @@ +--- +title: Example Scenarios +weight: 5 +--- + +These example scenarios describe how an organization could use templates to standardize cluster creation. + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. 
+- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. +- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) + + +# Enforcing a Template Setting for Everyone + +Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. + +1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. +1. The administrator makes the template public. +1. The administrator turns on template enforcement. + +**Results:** + +- All Rancher users in the organization have access to the template. +- All new clusters created by [standard users](../manage-role-based-access-control-rbac/global-permissions.md) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. +- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. + +In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. + +# Templates for Basic and Advanced Users + +Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. + +1. First, an administrator turns on [RKE template enforcement.](enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user](../manage-role-based-access-control-rbac/global-permissions.md) in Rancher will need to use an RKE template when they create a cluster. +1. The administrator then creates two templates: + + - One template for basic users, with almost every option specified except for access keys + - One template for advanced users, which has most or all options has **Allow User Override** turned on + +1. The administrator shares the advanced template with only the advanced users. +1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. + +**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. 
The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. + +# Updating Templates and Clusters Created with Them + +Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. + +In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. + +The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: + +- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. +- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. +- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades)**. This means clusters that use that revision will be able to get patch version upgrades, but not upgrades to a newer minor version of Kubernetes. + +# Allowing Other Users to Control and Share a Template + +Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. + +Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. + +To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.](access-or-share-templates.md#sharing-ownership-of-templates) + +The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: + +- [Revise the template](manage-rke1-templates.md#updating-a-template) when the best practices change +- [Disable outdated revisions](manage-rke1-templates.md#disabling-a-template-revision) of the template so that no new clusters can be created with it +- [Delete the whole template](manage-rke1-templates.md#deleting-a-template) if the organization wants to go in a different direction +- [Set a certain revision as default](manage-rke1-templates.md#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. +- [Share the template](access-or-share-templates.md) with specific users, make the template available to all Rancher users, or share ownership of the template with another user.
\ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md new file mode 100644 index 0000000000..38524924ff --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md @@ -0,0 +1,70 @@ +--- +title: RKE Templates and Infrastructure +weight: 90 +--- + +In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. + +Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. + +If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. + +### Node Templates + +[Node templates](../../../../reference-guides/user-settings/manage-node-templates.md) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. + +### Terraform + +Terraform is a server provisioning tool. It uses an infrastructure-as-code approach that lets you create almost every aspect of your infrastructure with Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. + +This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. + +Terraform allows you to: + +- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates +- Leverage catalog apps and multi-cluster apps +- Codify infrastructure across many platforms, including Rancher and major cloud providers +- Commit infrastructure-as-code to version control +- Easily repeat configuration and setup of infrastructure +- Incorporate infrastructure changes into standard development practices +- Prevent configuration drift, in which some servers become configured differently than others + +# How Does Terraform Work? + +Terraform configuration is written in files with the extension `.tf`, using HashiCorp Configuration Language (HCL), a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Terraform then makes API calls to the provider in order to efficiently create that infrastructure.
+ +To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. + +Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. + +When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. When you run `terraform apply`, the changes are deployed. + +# Tips for Working with Terraform + +- There are examples of how to provide most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) + +- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. + +- You can also modify authentication settings in the Terraform provider. + +- You can reverse engineer how to define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. + +- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. + +# Tip for Creating CIS Benchmark Compliant Clusters + +This section describes one way that you can make security and compliance-related config files standard in your clusters. + +When you create a [CIS benchmark compliant cluster,](../../../../pages-for-subheaders/rancher-security.md) you have an encryption config file and an audit log config file. + +Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. + +Then you would make sure that the `kube-api-server` flags in your RKE template point to your CIS-compliant config files. + +In this way, you can create flags that comply with the CIS benchmark. A rough sketch of what this could look like in the RKE configuration appears at the end of this page. + +# Resources + +- [Terraform documentation](https://www.terraform.io/docs/) +- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) +- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster.
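To make the CIS tip above concrete, here is a minimal, illustrative sketch of the kind of API server customization an RKE template revision could pin. The file paths and flag values below are assumptions chosen for illustration only; use the paths and settings required by your own hardening guide and your Rancher/RKE version.

```yaml
# Illustrative only: extra kube-apiserver arguments and bind mounts in the
# RKE cluster configuration that an RKE template could standardize.
# The paths are examples, not required locations.
services:
  kube-api:
    extra_args:
      audit-log-path: /var/log/kube-audit/audit-log.json
      audit-policy-file: /etc/kubernetes/audit-policy.yaml
      encryption-provider-config: /etc/kubernetes/encryption.yaml
    extra_binds:
      - "/var/log/kube-audit:/var/log/kube-audit"
```

Because these values live in the template rather than in each individual cluster, every cluster created from the template picks up the same compliance-related configuration.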
\ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md new file mode 100644 index 0000000000..33e0544565 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md @@ -0,0 +1,176 @@ +--- +title: Creating and Revising RKE Templates +weight: 32 +--- + +This section describes how to manage RKE templates and revisions. You can create, share, update, and delete templates from the **Cluster Management** view under **RKE1 Configuration > RKE Templates**. + +Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. + +Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. + +The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. + +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a template](#creating-a-template) +- [Updating a template](#updating-a-template) +- [Deleting a template](#deleting-a-template) +- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) +- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) +- [Disabling a template revision](#disabling-a-template-revision) +- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) +- [Setting a template revision as default](#setting-a-template-revision-as-default) +- [Deleting a template revision](#deleting-a-template-revision) +- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) +- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) + +### Prerequisites + +You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.](creator-permissions.md) + +You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.](access-or-share-templates.md#sharing-ownership-of-templates) + +### Creating a Template + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > RKE Templates**. +1. Click **Add Template**. +1. Provide a name for the template.
An auto-generated name is already provided for the template's first version, which is created along with this template. +1. Optional: Share the template with other users or groups by [adding them as members.](access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. +1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. + +**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. + +### Updating a Template + +When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. + +You can't edit individual revisions of a template. To prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) + +When new template revisions are created, clusters using an older revision of the template are unaffected. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE Templates**. +1. Go to the template that you want to edit and click the **⋮ > Edit**. +1. Edit the required information and click **Save**. +1. Optional: You can change the default revision of this template and also change who it is shared with. + +**Result:** The template is updated. To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) + +### Deleting a Template + +When you no longer use an RKE template for any of your clusters, you can delete it. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Click **RKE1 configuration > RKE Templates**. +1. Go to the RKE template that you want to delete and click the **⋮ > Delete**. +1. Confirm the deletion. + +**Result:** The template is deleted. + +### Creating a Revision Based on the Default Revision + +You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE Templates**. +1. Go to the RKE template that you want to clone and click the **⋮ > New Revision from Default**. +1. Complete the rest of the form to create a new revision. + +**Result:** The RKE template revision is cloned and configured. + +### Creating a Revision Based on a Cloned Revision + +When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. 
Under **RKE1 configuration**, click **RKE Templates**. +1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision**. +1. Complete the rest of the form. + +**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. + +### Disabling a Template Revision + +When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. + +You can disable the revision if it is not being used by any cluster. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE Templates**. +1. Go to the template revision you want to disable. Then select **⋮ > Disable**. + +**Result:** The RKE template revision cannot be used to create a new cluster. + +### Re-enabling a Disabled Template Revision + +If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Under **RKE1 configuration**, click **RKE Templates**. +1. Go to the template revision you want to re-enable. Then select **⋮ > Enable**. + +**Result:** The RKE template revision can be used to create a new cluster. + +### Setting a Template Revision as Default + +When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. + +To set an RKE template revision as default, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE templates**. +1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default**. + +**Result:** The RKE template revision will be used as the default option when clusters are created with the template. + +### Deleting a Template Revision + +You can delete all revisions of a template except for the default revision. + +To permanently delete a revision, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation menu, click **RKE1 Configuration > RKE templates**. +1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete**. + +**Result:** The RKE template revision is deleted. + +### Upgrading a Cluster to Use a New Template Revision + +:::note + +This section assumes that you already have a cluster that [has an RKE template applied.](apply-templates.md) + +This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. + +::: + +To upgrade a cluster to use a new template revision, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that you want to upgrade and click **⋮ > Edit Config**. +1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. +1. Click **Save**. + +**Result:** The cluster is upgraded to use the settings defined in the new template revision. + +### Exporting a Running Cluster to a New RKE Template and Revision + +You can save an existing cluster's settings as an RKE template. + +This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. 
The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](apply-templates.md#updating-a-cluster-created-with-an-rke-template) + +To convert an existing cluster to use an RKE template, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that will be converted to use an RKE template and click **⋮ > Save as RKE Template**. +1. Enter a name for the RKE template in the form that appears, and click **Create**. + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template and revision.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md new file mode 100644 index 0000000000..3efe34d5ea --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md @@ -0,0 +1,15 @@ +--- +title: Overriding Template Settings +weight: 33 +--- + +When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates whether users can override the setting. This switch marks those settings as **Allow User Override**. + +After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override**. However, if the template is [updated to a new revision](manage-rke1-templates.md) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. + +When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. + +The **Allow User Override** model of the RKE template is useful for situations such as: + +- Administrators know that some settings will need the flexibility to be frequently updated over time +- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md new file mode 100644 index 0000000000..043c7686b2 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md @@ -0,0 +1,78 @@ +--- +title: Pod Security Policies +weight: 60 +--- + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). + +If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`.
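For orientation, the sketch below shows roughly what such a policy looks like as a Kubernetes object. It is a minimal hand-written example, not one of Rancher's built-in policies, and the field values are illustrative only.

```yaml
# Minimal illustrative PodSecurityPolicy (not one of Rancher's defaults).
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-restricted
spec:
  privileged: false                 # disallow privileged containers
  allowPrivilegeEscalation: false
  runAsUser:
    rule: MustRunAsNonRoot          # pods must not run as root
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:                          # restrict mounts to core volume types
    - configMap
    - secret
    - emptyDir
    - persistentVolumeClaim
```

A pod that requests something this policy forbids, such as `privileged: true`, is rejected at admission time with the error message shown above.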
+ +- [How PSPs Work](#how-psps-work) +- [Default PSPs](#default-psps) + - [Restricted-NoRoot](#restricted-noroot) + - [Restricted](#restricted) + - [Unrestricted](#unrestricted) +- [Creating PSPs](#creating-psps) + - [Requirements](#requirements) + - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) +- [Configuration](#configuration) + +# How PSPs Work + +You can assign PSPs at the cluster or project level. + +PSPs work through inheritance: + +- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. +- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. +- You can override the default PSP by assigning a different PSP directly to the project. + +Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked for compliance with the PSP. Such workloads would need to be cloned or upgraded to be evaluated against the PSP. + +Read more about Pod Security Policies in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). + +# Default PSPs + +Rancher ships with three default Pod Security Policies (PSPs): the `restricted-noroot`, `restricted` and `unrestricted` policies. + +### Restricted-NoRoot + +This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: + +- Prevents pods from running as a privileged user and prevents escalation of privileges. +- Validates that server-required security mechanisms are in place, such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added. + +### Restricted + +This policy is a relaxed version of the `restricted-noroot` policy, with almost all the restrictions in place, except for the fact that it allows running containers as a privileged user. + +### Unrestricted + +This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. + +# Creating PSPs + +Using Rancher, you can create a Pod Security Policy in the GUI rather than by writing a YAML file. + +### Requirements + +Rancher can only assign PSPs for clusters that are [launched using RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster](../../../pages-for-subheaders/cluster-configuration.md). + +It is a best practice to set PSPs at the cluster level. + +We recommend adding PSPs during cluster and project creation instead of adding them to an existing cluster or project. + +### Creating PSPs in the Rancher UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the left navigation bar, click **Pod Security Policies**. +1. Click **Add Policy**. +1. Name the policy. +1. Complete each section of the form.
Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does. +1. Click **Create**. + +# Configuration + +The Kubernetes documentation on PSPs is [here](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/custom-branding.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/custom-branding.md new file mode 100644 index 0000000000..32b4d89e6c --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/custom-branding.md @@ -0,0 +1,221 @@ +--- +title: Custom Branding +weight: 90 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher v2.6 introduced the ability to customize Rancher’s branding and navigation links. + +- [Changing Brand Settings](#changing-brand-settings) +- [Brand Configuration](#brand-configuration) +- [Custom Navigation Links](#custom-navigation-links) +- [Link Configuration](#link-configuration) +- [Link Examples](#link-examples) + +# Changing Brand Settings + +:::note Prerequisite: + +You will need to have at least cluster member permissions. + +::: + +To configure the brand settings, + +1. Click **☰ > Global settings**. +2. Click **Branding**. + +# Brand Configuration + +### Private Label Company Name + +This option replaces “Rancher” with the value you provide in most places. Files that need to have Rancher in the name, such as “rancher-compose.yml”, will not be changed. + +### Support Links + +Use a URL to send new "File an Issue" reports instead of sending users to the GitHub issues page. Optionally show Rancher community support links. + +### Logo + +Upload light and dark logos to replace the Rancher logo in the top-level navigation header. + +### Primary Color + +You can override the primary color used throughout the UI with a custom color of your choice. + +### Fixed Banners + +<Tabs> +<TabItem value="before-v2.6.4" label="Rancher before v2.6.4"> + +Display a custom fixed banner in the header, footer, or both. + +</TabItem> +<TabItem value="v2.6.4-and-later" label="Rancher v2.6.4+"> + +Display a custom fixed banner in the header, footer, or both. + +As of Rancher v2.6.4, configuration of fixed banners has moved from the **Branding** tab to the **Banners** tab. + +To configure banner settings, + +1. Click **☰ > Global settings**. +2. Click **Banners**. + +</TabItem> +</Tabs> + +# Custom Navigation Links + +In this section, you'll learn how to configure the links in the left navigation bar of the **Cluster Dashboard**. To get to the cluster dashboard, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want custom navigation links and click **Explore**. + +It can be useful to add a link for quick access to services installed on a cluster. For example, you could add a link to the Kiali UI for clusters with Istio installed, or you could add a link to the Grafana UI for clusters with Rancher monitoring installed. + +The custom links don't affect who has access to each service. + +Links can be created at the top level and multiple links can be grouped together. + +### Adding a Custom Navigation Link + +:::note Prerequisite: + +You will need to have at least cluster member or project member permissions. + +::: + +1. Click **☰ > Cluster Management**. +1. 
On the **Clusters** page, go to the cluster where you would like to add custom navigation links and click **Explore**. +2. In the top navigation menu, click **🔍 (Resource Search)**. +3. Type **Nav** and click **Nav Links**. +4. Click **Create from YAML**. +5. The simplest way to create a navigation link is to add these fields: + + name: linkname + toURL: https://example.com + + For more details on setting up links, including optional fields, see [Link Configuration.](#link-configuration) +6. Click **Create**. + +# Link Configuration + +### `name` + +Display name for the link. Required. + +### `group` + +Name of a group of links that expands when clicked. + +Optional. If not provided, the link appears standalone. + +Groups are displayed separately from standalone links, as shown below: + +![Screenshot of group and standalone link](/img/grouped-vs-standalone-links.png) + +### `iconSrc` + +Icon source in in base64 format. + +Below is an example of the Grafana logo in base64 format: + +``` +data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAgAElEQVR4Aey9d5xkZZn3zb/P+3mffZ9nDcDAzHTuqs49PZEhCBhBJc10nO7pHKbD9PR07p5AWlEEZM2ioph3dXGNqLgCxhVBRIYRmNy5cjqnTlWdqu/7ue5zTk+DiNIsCn4suOacrjrxvq/fle/7PofX+ycDZNT/pIAUaUUmaRBKp8FMQ1L9qL6S4+VYUikIeuBrn+LppiuJNG/BvHYd7HbhbXLBZ/dB8AkwFokCHsAH6Kb8IxTHJIyBHwgDMTBT6h7yWAmb1L3sxyEjfxkYxDDQMa0nWV0vyE2slwZ5JtUO8v5JUhgk0EkRw5TnUg9sgJm03lsezkyTIU0C2VrNJU2WsdtTPVQyAmYU4mGIhDESaULAcSG5RjwJEQ8YsxgEOZoOcSxjvXsyKM8nL24QJ4UmF5TnlL7IWLure/G/3lnn/PVu9SrdaQVApO3/CCAZAYjNSLIVBrD/VMwSnsf4/B38ofWthFu3krhhPZmaLJZuyEY7vJPIV++AwEnImJwJ66qTFcMLSswkGWKkiKqtMIICwB890zL/2lwoHGLYnCIMtMqP3Md5N7mMDRDTBkhKAURAqNBs3TOdspjWERgrmkYuJbQMEPUeKdAEIBrIuSIKEiZ+B0ShADz7W/Tjv8TMLOIjybxcJwNJOUgAkjHQSFoAkedMWn2g7rXKV/9rnfZ3DRCRjgoN0ik2A0nDLgMkbYD3aU7dPcRT7ZfhadmEVldA/IZ1RN6TRahxO8f37CTyxU/B4pySvOlYHNM0sZhQRKgwTRrRWALOZc0lWksoY30n0lPkuMBCPYCcKn/Ic632Iy/ivNvy+4k2EOAamDbJ/rIKFYEhx74AWPIWcpRhv8dyu5mQTEEkmUYTjSsaMBWDiA9mjsF/foWTH76Z33zqEL6jD5IgRkTeR64valfOQQSL6My0Ap/c32qvlNJgq339v8Z5fzcAkcayBKrFkNIBCiDOD/Lj8jG2iZWOwvGHefxQNY+3beHk7grCDS7067JhRwn6dZX4d72L3zVei/6Vz0HYB/EQJINKawRTQXVP+UfYQEjdRphITBlTNIrFqBopIsqcsZnaQYww0iv5yA1XkuJ+eRJREStJvrMFhnO8A5S09ewCDoMkCQUw67KxOMQMCBkpkok4JIIQnYGf3k/s7mlO7N3Fw7VX8J2Ot3Pi/rvBXMLMJKxbx+UR5L4CEJ0IKYUbeV0xAUXDZVSrvZIGeHXPff0DRDGE9PRZPhGec8jhBWcr30uXCWEG4Xf/wW+H38ajXVUcaS7GX+dGvyYX6jeQvL6MZO1lzFx7Mc81XkPyM3eC/xlIz5LJzBIz/bbUtUyKZaksUtPUIS06wzK/LGluM6jwrVg9wkCvECDOe51lE2kL5w2drdU+Ths5bSBbMacsVMtGtKDFug5+5Q00Iw2JFOhhmD0C3/goS6M1HKvfiqfxMo7t3MLv2i7F97nDoJ+BpG45IXLysgYRgIhvJPeRu4QVibZ7LX/+rgDiNLTT58IADjM4rPI8HyXpgYc+yXODF3G0ZyPPtZSxUFeM/9p8MrUVJK4rIX5NMfEdm1jauZ1j7Vfj/exNcPoRSB2HxIICiHI+Hb4U00mYSWkP4RAhHTKiUexggfCEPJDiUOepX/5W3tN5R9m3PpZZJ6bdWbJ+kWPkto51JyaVxbBnpYtcT35XwFDXt8Ee8cJ//wj9X6c40fF2TtZU4qspJV5bidawCU/HxWgfHoTwccuhl4srE0saRnwQwwaIZQKa+BH6B0CcfnvVttIBK8jmFId5zjKEmA+WhJTIkeoYYxY+t5/FtmJ8zeUEWzdypqmM07VFhGpK0W9wYVyXh15dQLxnG/NdF/NE00V4PzgAv/0OLB5RbyWKQD2CML5wV1KMbIlmCSgkapQEkcLCNPJ72mJAsdXl+Vb7cRh+mcnlQvKl0IomUfs2mOT28rwCaiErgiW+hXWOaBSnzRSw4/Mw/wR87zN4xht55vqNzNxQQXj3VoyWzRjX5ZPcWUigrozozU0QeMbSNAnIyA0EcaQRE1N8EPWn0hoSDxSSRnntfl73GkTMAsvXsDnCYZAXMERc2dei2i0HVnWMdpro4etYuv58orUujLatLLZsZKapkqXaYqINRZi7XWQ63ASacwh2lhPtvZwjdVs4M94ETz4M8ajFjI5TLlsJLavwsu0GCA84JpX4uEAAVHBYGHa1H3lVuZaQxXgvAIh86QDFDqu+ECDSIstS3AGWnCdmUnwRjv4Y/XMHON51OSerSwjv2kCqdRta81ZiDZWwqwjq8onWFKIfrIPQs5CIKl/ekJvZDyagFJJbWKFuiQFLePwfJtZq+//PnieNLc64lUuwTYoXMITwZowMgbSu3EIjE8NMiKMdhmd/zlzrRjJ12UQb3IQaKojUbiRUW8VSQynzjQV4mtfjbz6fSNN5hBvXsrjbzXzTZjz1V/Bs0/Xw0
A9g7qRy3E3DRzARUJpKni0ZSljpEUkcCEBsQR3BYIEIC2mxw+WBV/dx3v950TP5UshpBxskqURG+cvCjtImwqyyDYZ9pDPiMKfR4hHQY5aJdPIIPPg1jrS/nZndW/E0lRJodBHY5SbYUEq4biOx2goi16+D1iLCO/PwDL0HfvdD5X9JFNwXP+vjyL2UMJDnUs8kRpzkjv4BkNX1/l9wlmiOhHL4RIbaDrA0vs0UwifSMVEkuSWJsyTxRACMIKSi8Nj3WWyphLr16PWFaPVlGDs3ou2swldXpgCy0LoWT+t5RJreqEASaMpRDGLs2E6w+u2c7mkgcM/t8IdHID2PSZAQUaJmSrkAypgXXrClaTIj5kZcRXXiKlH4ygAibXA2Yme3wUqAJDMWWDJgGEmiWgzDFL1hCRcjHkWP+kgaPkgHQUyqIz8l+fHDzLa/i0DdVrTaUmL1LsINBTZIignXVRCpK8W3cx3Jdjehnfl4970bHnsA0rpi/QWxLqUf7SiZ2pd/BBPio0kQQyVO/4LO/hsd8ro2sYQxRPYJSJSZJYwhHSBkWoJUABJRqTLRM0m08KKlPYwgiR99kZOdlRi71pOuKSS90wJIrLqK2M4yZTaEGy0N4ml9M96W89Hqs0lVu0nt2Ii+YztPv8XF8ZZ3EPrkAfj9dyF50gaJhscwEP5U0twGrZlKkEhKwDNkZ7gV27yC7ndUxYtfwkymMZMp5L7KF0pJNj2OmYiR0MMkRWsIoyY8MPsYia/fyZmxGzjdsInAjjKM6nIS1aUKJOH6UgK7LAo2FBPc5WKh9kJiYoJWF7I08G5SP7sfMrryLuZsYCjZJYlFp39EWKhIn4TBVy8gXvyN/2e/fd0DREoY4sqvsEo8lHQSCSV8Z5c8RDJJxAeRUKvmm4FEGDQ/S1+6k2NdG4g1ZWPWFJLZWUZi5wbiOypJXF9GfIcbrT6HYNNaPC0X4G2+AF0BxEVqZ7k6NvRuN96dGzjR+VZOva+LyMNfgOizYrwQJkIkY1qmhcMcSZGccTJmCF0XJ/UVAESuuRytOssY8rWjSGVf12MYsbDKyYh0z4SXyIQWIB60hIU2D7/6FjN37uVo56XMNpQSqXdj1LpIVBdj1JSi1WwgUrtBaQ4xscTc8u7O40z9hYTa3UR2uvD2vpPIA/dBJooXmLOfQwkIx7SS13X6RwIY/wDI2Y77n94TDWKVMCQtP0Q6QRpfJJRsJXyfgWhaoCFaJk5SACKM4ZnlxIemOd2+mWhTnvJB4nUlmDsqMK8vVwDRdxYr0yLSkEekIYfwrhyCjXkqmajVFlvMc0M++k43vh2lnKzfxqmpRrRvfRwWHlPVW7G0D40YyaSBGRVJLV66xSWmZPIV96yyZRQSnIy9JRDkK7m6NIES1CIYMgkykv0W3yvug+giaB7LnDrxK1LfvYeT040cqdvEUm0JdFRhtpbhr84mXF9MuL4crWYjevVG9JoKYhIKbypgsTmH0w3rCLcWkbjBTbD9SrxfuxvSfnyYLDmvpQIXKzSpoFceVJlX/9AgTjP9j2/FsYyRVKQcdWlr4QrHIzQhYUIsbSq7Py2Z8/CC0h4cO8rRw3uZb9mKvstFZJdoixISO8tJ3lCuTKxItUjSYiVNtTqXAotyVBtd6ju91gWNLpI7skheV4BevYHFhks4PnA9S/feQup3D4B+0o7WxIjrYUxdEogWs4j584o+DkCk3kku9YJIlTjikZROUsIGKfEvvJDyg+EB3wkSj3yD2dt7OTnwdk7UbySwq4pE0wYiNYUEavOJNJfiF5OqoVw55HpNOUICEKVBmgo4U7+OsBR2Xu8itvsyFu65GVIegsRUpkN1hiRNRVvY7+3UellZ9FfYBq+oAf/8ya9rE0sAItlZAclLAUTLpIkmJfQoUZoASNLrsV/x9Gg3vqaLSNcWkagtQq8rUY6nxPT9tWXKUQ/WVygJKlJUnFIHMFKSEqvPg+4K0rU5pG7IgdpSkjdUMn/NBmbarmLu1n7MR/4NPE8qkMTQCKSlhtcSnlIoqyTpn++nFz9CACKaUsiWyqI9RD5ICkLyDhF0YmYYQ1+CmJhVi7BwlNPf+zK/PtBGtPsSwlefy+w7z1P+RazrYhYbN3K6upTFunIFDvE35H0jDQVKMKg2qCtV2mWxJksFOHivi2TdxczdNQqJWaKZABKtIxO2KhZsh1we09JuVtWxFaJ/8dd7LXz7ugaINLb4FpZ/YYd5RZ07XJK0SoESaZOY5icj9dcSvZLy7Ie/y+mBBsINW0nXlJKsKVEAEfva11CCTyI0dWUogNRV2FEbkZ6lSoKKjR6pd5FoLMLcVUSqzkXyhgL09+ShS/a9fjvB7qs5PlJP8vufhugzKjEmxp4wsDyiVIorS8PGibP/oowhP4oIdrbOvi2V5VfHtLJyDhLhk5yPbmmN2Ax4j5L61bc58pGDPDTcyFNd78SoLoTq9dBUjL67grn6ck5Ul3GmpgJPQxXB+nLCDW4VwXLMSzG7YrVWW/h2ZqHX5cF1hcTrt+G5bS/EThNP+YjKmwowklEwJWJllZo4Cl4EnHqdF33h18aXr2uAqCa0TQtpaKvCVuobjGVTS3zAuCHJvCCYAZICEM2DcecowV0X2aZDKbo4ohLOFC2xTGJarKA66xjruGIideUqbyI5AUvTyPluxBwzatzqev66TXj73g2fvRWOPQpxj2KaU1qIZ2NhO5ttMbcw+LJ5btsjmYyE44QEHBKIcLZytDU2Q5KOkmqRrELELsBPZOKQDkPKC8YMnPkl+r/fydHhnfy+YRvzjdtJtl+GUVNMosaFVudW7y2ACDRUqPdR4Kh3tKZoEREK8t5WG4mppTfkE63OVu8c7ryMk/vq4fhvVeInYJorhJVIAyvULhUN8p6veXQAr2+ACCrEvLATcAlV8xNUA4WUHo+DRDWTCRnLEADTgxxDeAbz5naSNWXLwBCmf7kkibJw7VYCddvwN2zE21iKv9GlnHkV7arJg8ZKaLmUeP8NxO+cgF9+T4VUpZJ1MR1TCbuzDvXZ6FPaCYvKOzpaQrbCWYoEKOJbpfASZyEVYTERJGRKUFukdRik1mzpaXjk6yzePcLxve9ltmkbkboK0tUlZKrd6LVnGd5h/L90K8IgUZdPpCbb8km6L2W2fycc+ZXKhQTkOaV/RGWIv5XRVd9IxbAChy3cXhu64sWf4u8OIHGkFF23OiYOetQkrQoHxbySMYE6zD5FdLzOju8XK5NJpOHLJqVdJPy5Udnq3sZiPLsL8O7OIdyYrULEyVo3qcbNeOsv4Wjj2zl2yyCZn/4nhE6qkXppCSRImZZpkRPkUmkLqWdaaTeJ8xJNk4mYZMIp0mHNMhkTS6rM3Kpt8oG5CN4j8Mwj+L54JzOHezjW/HbmqrcSq5YQtovUNdkY165Hr7W1ggpGSEDiLycBiFGbpwAigsHTtZ2Z/hvg1z9WWjKsRk39AyAvDr2/xrciXYWBVmiQuCqjtgFiQDRkmSLL5obk1Z98mKW+d2PsLEHCtS8bGDaYRONIIk2rLVcOqwzTXWjJY6ElB//u
bKVJQjuyiVaLlN7CfMOlHG27iuMHOkh842Nw9BcQ84IeAc2maBhiMuYiCJEgEm3j2afh6FNw5En4/eOkn3iUzOO/hsd+Bk88BI//CJ78L3juF3Div+G3P8T4z3vwfewQj7VdzTONV6roWrLxIlXGn7khH65bR2ZHNhKJezmgWHmsACReY2kQ365Cljq3MrPnGvjJf6j8SlTU4PM0iDUGRHyjf2iQvyZARHur6Ig4wFGVEFQdk4BYWPS8HCCOokhXP+Eff4XZjiuIVxe9IoCIeZLaKTa85bB7mwqYb81jti1HgUQ0iUR+gtflEb2uGKPhIoJNlzHbdDmefTtI3L4HfnUf/Pwe+OHd8B+3wudGMT7cQ/C23XhursV7Yw3ew9V4DlzP4tS1LIy/h7nRq5nd/w7m9l1FaLgeT88OllquI9xRjdm/G/qbibVdz2LNFczVXsJ8/RYWaivw1rgxGlywuwh25UN1tkoGrmT6l7MvANGr84hU5+JpzGexfSMz3e+Cb34GdC9xGaIrALESMsrekuG/UtHwD4D8DQAiDS9DPsVZd0I6CTFLRJJJNEUAEjnJwlc+yJmWi/4HAOLG3OkiVS0h3wKVPJtvKWCmLQ/Zirnlr8kjurOA5A1ukteXkthZgV5bRbhhM77GSuYb3cwJsHa7WGh2sdTsxtPqwt/qVhRocRNsLiDYlE+gMQ9/Qw6++vV469bhq87FaN5GtHoLwasriF1VBddeDDVXQt1boe5StOrNxHZVqfox784cwrXryezOh0YpmblQDTF+OaBYeawARNuZvwyQhfYK5trfCl+4EyLzmKrU3/ZDRE5lZEShjLBcAZC/Bp+8gnu8/n0Q24GV9heAWMM4ZRyG3TGSaLYdRFI+mP8tpz48ohKEEmmSTrZCti9/K5EfAUequkAlEcUOX2h2M9viZr65GE9TMUvX55KQ7HRtKZnrCkm8NwdzRyHpWpcybxJdm9G6thDr3EqkfSPR1irCzZWEdlcQ3F2Kt86Ft64An02B2gKVxJNEnqIGF776QjUSMlBfonI2oZoygjfIwK9CNfhL8jvplgqSzUWEG7Lw1a5hqfZNeOrWqPCtFcaVUO7LI2m72I48wjtzWGrMYb6tlPnWizE/cRiC4mNJlMQ2g6WfVFhXQs82QETtv8Y/r2+ASOPakRALICKh7LCW9YX6XUq9JcRI0gfHH+H0+9rxNm9RodhXChAJkUrNkoAssEtAUcrC7nI8jRX4d1UQra9QtUzmDS7YUQA78mBnNunq9aTq85h/73pmr8ln/rpCFm9w4a0uIVgrSbhyIg1lBGqKCFbLAC6poC2y/SUr6y8l+r7GXPxt+QQ6ixR5W4tZairEU5uPrzqPeEOxer5kvYtUc5Eqq5mvXcOJujczs+tCAo0FLxsYDpCk7aLXWwBZbMpisa2IxeatpP51BHzPWZM7SD8ICRgUIOz+Wf77tY2Q1z9A7Ia2+kEST45TbneMjHKNZ0gbGsTm4dff4Ll9V+HdVUFkR54aKBWuc7EakkFWMaGaIsK1JWpUnb+uEl99FYHaKjWuRKJGcSlf2VGiRt4ZNdnEa9cRq19DtH6dun+0RiqHy4jWlqhtrKYEi4rUtbWaIixyEa92Ea8pVNtoXT6epizmWtcx15JlUy4Lu3PxNeYTqi8kVluIVmNRTLROg5yTy1xLLvPNuYhzHWhwrYqkzVKitXbmEel2M9ucQ6BlI6cHr4VnfmbNpyUdY/uIFhRWRFVe29hQT/d3ARDBiAMQ9VYKNHZ9kgKIOIoJ0Bbgh59kpu9ygo1lRHfmvyKAWKCywBGqKyNUW6lAEarZpPyC2M5NaIoq0apLFLOG6yVnsA7f7jX4G9cpYMZqyrBIgCH7Jep4BYrqEhscAhKXTWcB4m/MwrN7Hb6mdch+qEEKDHMRMGi1+QpMAhABijyvgMG3y4WnsVCRf5XgkOvI9TKNlerawW4XpxsvVLVbZ/a8Ax7/nnLUpWOk9spRIhZabCH2D4C8+i3gaGoLILYaV2aX3QkSaZTaCylBic3BFw7i6d5MrEkYNl+Vi0id0epIRtcVWyPspCSjbgOxmo3Eajaj79yKXr3ZcpKlTFyNpXCrEvHFlmzm29YiWzlfEncWSb2X7MvWrv1a3p7NaIs5p0LT9S60+jwStdnKbEPqwa4VyoNrC1QBYXqHW5mSco5VmWuFtSUCJyFu+V7GdayG5Fx2b1ZmnL87n5O7zlNaa777Cvjp1yC2ZJm4diBL+mi5ImA5+/nq88grucPrXoM42kPgIPtn292ydSURJ9l09aPvGJHbWwm3lxFvLlYRHLHjpZJ3NRRqLCLYWKK0kfgL4m9odVUqShWv2YReu4loXZUayhtoKsHb7GapJZ+FtlwW2rNYaslV58t50Xo53yJ1rYYSIg0lRO1tZFcRFp191uguqX/KJ1mXT6Y6H3bkg+Q4ri+EGwphhxuzxi7ErC9S1crisCdqyzCry1TwQMbdhxrdqyI5l8aNGLtK8HbmcqrxXPRdBYR6r4AHP2dVDcuQAztXKH1k9Y/s2QLslXDvX+Hcvz+ALKsSVd+gZsuURLoCyPHH8Ey8B6PFhdkiYz0KiO0qItq4Ogo3FRFqKlYUbSwmtqsUvaGUeH05upSIN5QTbiwluLsYX0sRvhYXntYCfK15ioLNecQaXarcXkruHbK+KyDWWIDeKMesoKZcYjbJOJbw7gKCzS517aVWF/PtFkmoWL6X90vWF6mK5UxNMUIpSXDWW88qzx3aXbQqiu0qJlVTqTSIpyub2eY3k2krIrnvbfDtj1rjTaQWzh4DvwwQmfFFjc957YPkdQ0QR3sIJqSplQax2l6VlMiUm6rOT6KKaQj88gGWBq+E1nwyLUVEanPQmtzEmopXRZHdxUR2uxXJdYTijdY21uQm3Owm0FqIt80if2uh+jvcXIjWlE+8KR+tSfbPUmx3IbHd+RYJgGyKNOcRacl5PjXnWfdoKWapvZjZzmJOdbs52eNmpsvNggyFbZH3c5PY5casd5NqKCbRUEx8V4kyM+Udws2rIzFT9etLlgGy0HYedBeT2Xcl+hduBn1WFVeenWLIcRb/AZC/gvKzAOGAYxkgdhRRaq5UqbeNIhFaz/3nF/ANXKoAQoubUHWWYh6HyVezFYYWZtd259lkMXGoNQd/Ww6ejhyWOmWbR6A1TzF+QrLYdYVkJMrU5CYkz/I8soAk4LIoX4Vy/W15rKRQSz7xxmJl4lgMbwFWQCFg9LQXstRhkewLUOU3Aa7zrrK/WlJ+3LVFGA1leLuzWGw/HzpdJHsu4rRUCcwfeXGAqBGVMsJTek0lSP4q/LKam7yGNYg03AtImH0l2X9KM4udq5xASzjZVq8M2LF/MBIcue9u/AOXQ0chtOYS3HkuWnMBEdEmq6Boi4toSwFaS766jlxL9iOt+QTb8vF15CKmh5C3M5tgezZ6cy5mYz7UCxWo84JtBTyfXATank++dhe+9gLObgsIt7owmmQ8SolNRSTFZNttPVegvQBvRwGLXfksdMlWzrfuFWktUM8uzy/7kRbXi9AL26WESMs
Kai7FU+0i1FaqAOJrPRfackk2l3F85Fr43Q9UTZb0i5hZ0keqS6WPVg7DVUEV6UyJBzsduAI49kzyZ7veCuf/NQD2NweIemm7ASwbyW5FaShFYh8JOWMhbFTY2DH0FKFYnEBaJmawf9OTEI1DPIVKEkpnhDw8dfdBwmPXIqFW/7X/C63h/6J3FBDrKH4F5LbPLUVrq0Br20CsfQORjgrCnaUEu9wEevII9uQQ6c5C78wi1ZYDLXkgJlPTeQR3WxRqXkOweR3B3VmEmnMJiXllP1u0qwShSFcZEdnvLkXvKiXVWUK6vRizrYhUq4u4Ddjobjk/l+DuHIIt2YRa8wi35RNtK0BvdxHvcBPvLCTTX0Zqj7UfFaEhxyvKJbS7AL2t1KZy9LYK9NYNyxTs3MCZvnJO9RcR6F6P2boGGtbArlx8HZt5Yu+74blHIHpazSujy8yWUuErGJCpkJYk9C6DWVIQS9jDcmWAlcxbJgdYCRQVJl4xQbhUTJiq5k7mPZaLvXqfvz1AnBj5WfFgN4wARIAhWmAFQBylImLJljwpE8JpmXTZBoiIKl2mNMkQlepYmazg+GMcOdSJv/cdBGtz0Rr/mVjLG9A684h0Fq+Kwp3FBLtKCXQJEEoJd5YT6Sgn1lFqkwBPAOQm1ulC63Ap5jTaC0i0uZBttD2HyDIJE+cqRhZmDreKlrA0RailUDndgd2FquZLEnzeXXnonUXqurF2l2J+0QYWMCxm1zrc6nfZxtoLcY6LthUSEQ3TkoO3JQtfS5YCUrglj1hrAUabm1R7MYlWtyKjrRi9vRit3Xq3SEcp/p5iTvWXcKbfRbgzF7NlPTQK5aK1lnFqz0Wc/FAXxq+/BimZ4ySKaWhktKQFDBlyvDLEJdEUGXkY91ujEJVQtPIoAgM51NIvUq4SVmR98/cMEOHpleCQ/WU9LA6FTUqb2ClZOcYGhxxtJFLEjJTS2nYLKmUkQzp1meTTnIOf/RvHB65Fb96OUZ2P2VeAv20Nka4CIp3uVZGAwtu9AU9PBd49bvx7cgj3XECs+3z0rgswOrIsBmupING8WRUW6i1b0No2Kk0T7thAcM8lBHovw997Ef6erfh6qvB1leHvKsLfUUi8v1xRYk8ZiT0VJLorMbo2kOisIt6xAb13I9G+DUR6qwjvqSDUU0mop5xgdwXB7jKWWouQ6Ja3vQRfRzGBrnL1e3jPBoL9Vcz3livy9pap4/WOEhKtxdaUq40F0JxHujWHVJul/cI96/H3rmexfy3evvWEOwrR2oqItZURtSncXkKoo4iFnjKe6d3MiffVE/7OR+HMb0Am7ktrpE2ZsClD1ATNnsPMshIMezoima9LhuxafS7yUMAhW+vj8IYwwtH2mK0AACAASURBVKv3+ZtrkBf6FC8KDtU0KxpiBaDiCQMhmR9agUa2KUvaxJUenwf9d6S+cjNL3ZdD6zaodcPQRubb1qsSiWhXEashMXVC3WUEe0oI7ikk2JtFqG8N0T3nE+9eQ7IzC7O1ALO5HHP3ZhK7t6G1bCfcth1/5za8ndsI9V9OsO8tCij+nm0Eujfh79qAv70Uf0cx3uZ8fM35BHbnK5NHImDK92kW/8ClGFGAtJIsX8XyN8I9xaykUHcRDgX2lLDQVcRiVyn+7lJlthldZUpzpFvdpFsKSLflYrZnk+jMQutZR6j3Qnz9F7K0d41633SzG5rE9NpIoHMjiz0bWNhTgWdPsSU0ess51V7BM3vfhudTY/D7H0JK1qAKq8k2ZhI6c8kUkUzGsrClRF40iZhYQmI9KCjZUUpHOFqqxP7y7xkg8m4OwysOFyDI2zsSwop2yOwXzmErtzK6Wf5W/5j2HAEx0FOaWhIMjsLv72Pp5huItGyAzougqRRz31Zm2wsJdYttvzqA6J2FinnS7VkkOnPRugqXr6V1WXZ9ol2kbw6pVrH93Yjd7unaxkzPW5jv2U5kTxnx7gLiXUWWuSQmTGs5sdZKoi2WTyO+jfgClrlUgN6Vi96VTbxrHVrHhWida5ZJNNdKCrW8iXDrm4m0nUus43x1XLz7QoyetaR6sqC3CPaUwZ5i6Cki2eNSzxPtzifcnUeoJ5/Qntw/okhPrvJj2LUBdm0m0bJBaadZ8UkGyjk9UM7inlLlR9FZgtnmxtNejveDTfDYVyEzq2Zc8WCySEZNESTTM1n9qEqwQeYSUHPAyJgFMaDtyQGFPeRPoRVy89WAyd9egyyDwgGGAwirLFqcMGuCaqfe6vlbOdoBiClDVMW3k/GrMsWPcZzQo5/lzGc6ebarglBjIXRuhLYK4n2bWOouI9xTSqy7eFUU73JBe7aiTHuBkrzJ9kqMjg3EOyuJdxUrHyfWlaUcdHHU/T1uPD3lLPRsxttdgdaejdl+gTJhku35JDrcGJ2l6nytu4pY9xYi3VsI7dlCsG8zgf6N+Pduwje4Ed9QFWe6rNzH6Z4SzuwpZqa3jJneEoRRZ/tKWdi7gYW9FcwPVDLXX6a+l9/P7CllsdtNqrecdFcRiY5Com15hNoLCHQVEuotItxfQrC3yNrfU0S0pwi9u0iBOdlZRLqtlExzJWZLFVpHJYEeuUclswMVLPaVEepxk27NgqYLQbY9JSwObufY+xsJPHQvxI+rhT/DagWqJFEzoeYPVh0qnSrLRygUSKeqjrU6X76WiMzfP0DssJ74Fyu0howXsIBhgUN8CecIZyu2qFAsmSSeThNPmWoOWjVBmRGGM0/AL77KsX9t46n9F3O6w0203QWdpdBdTqijhEhvJdGeYtXx0vkvl2KKWYpJdhSTaSuGllKbykm3laN3lePtKWa+z83sgIv5/jy8vVmEe9aS6DxfASO9ex1mSzbptnzMrnxSfS4Sg0XEhyuIjFUxt7eS0/u3cmb87cwcvJ75W1tZ+mA/3rtGmf/INOZ370V/4F7iP7gP44efJ/ngl0j9+IuY//UV0j/5Mvz062qrP/B5fP/xUc588Q6euecmnvzIAf5wRz8nJt/J3P4tnOyt4GRnKTMdxfh6KtH3biWz/2IFVKOjklS79U7yXplWi8SJj3bnEugtZKm3ksU9W/D2bCXSWYnZng9ta6BnHVrdPyEROnPsYkKjb+NY18WEpnfCN2+HM/9F2jyhpmoVq0F8SS1h8b70tQjHsyCRCejsUYoCDvHa/741iAMQx5yytgIQmQjO+k/A8XyAOOCQBtRUOtCad1eNGpRZA71HSXzn4xw9UM+xfVfyXFsZsaHNJPZWqmiS0Vem6qBifWWEbcn4csEhx4v2CfVsINwtGqOcVHupKrVQ5RYSnu0uxttTynxfKbP9xcz3F+DvzVJOvNnxz5jt5xHtKFQOdaC3iuC+LQRGLsY/fQWLN76D2Vuvgf84AN+9HX5yLzz2HXjuUVg6DiEZHSnj1kPWeHZdVqKVCNCfIRn7LueF/OA/BY/+O/z4bmJfmWLu7g5OHtzByeGrmN/7Nvx9VxDu3I7esZVU20YyrZXQWm4JgdZiUh0u5YcsDmSz2F+EV4IEXRXEO0rItOVC6zqMhjcqX0zrkQBBKaHujST7Lo
aei1jq3U7gK1Okn/0OJGfUNEWJpEnAgGDaWqBKcGCFciWaac/Q6Mx9JoygzIdXw7iyrvk3NrEEIHYo9wU+h0gOAUYsqas1ti2QpNXU/VpcxzBkjiWZORD8yTAZAUZiFk79HP2rt3D64E4W979DRYciPZuI9FQR2VNBuLeEUL+bUH8h4b5CIr1uIr3Fq6JwbxmhgYsI7NtOaGgL4X0VBAeK8PVk42lfy1LzGrQ9Vpg30laI1llEvLcUY28xWl8eS31uToxdzLGb3sviv7ahf3EafvBJ+M234blfw+yzEPFDLAq6zPUlQ4nFiXVyRLIvs+OtklKylqBMYC1zZy2A/1l4+mFS3/8c/k8eYPHWDuZGriW4753oA5eS7tsCPRXQ6YaOPOjMJT5USnS/1F/lYu5dT7L3QrSutfhac5nfXajMQ61rE2ZHBbQXQ1shtBdidhYjkbRj+7ZjfGUcnvsRhE+QNmJqfi9ZWkfm+1KznCnr24CUzOIijrvARozrV//zGgGIo0EsA8oBh4BCZpaV5Q3ipoFuRLEmfLYYJGnGmUv41YqzSgL9/rtEPjPK7ORVeIYuITl2GWLHG50Wad0bCPWV4B9w49+bq8gCyWoBUkJo72Z8e7fgFZ9gpJLAeAWBURf+gWx8XRdi9rthsBIGt2Ds3YZv8BIWxt6B933VLH1sD8YPP0ryv78Gxx4C/x9AZlrXRDNEIBg5G/wXv1XW+FieVtQywxWLCAOthjIQ0ZJoyYwValeTLAgYF8H3DMz9Gn79NTJfO8zcjdfybO9G5nsr0PZvwhypwtxXQbK3HGOPi8SeCzH6ziW+91wi+9bhHShmsbeKQM+laB1vISPRw5YKCyAdORh7soj05ZIa2Yxn5C2EPjEIR36gZoGUyR7mE7AohpcpARd5P5lXQISpRjIZQktbK7682lOXvgYAskIa2supiea0oCLbNEY6QVQLocl0OCr0JwkliWzEVJ4jkzoGj99P7GNDLAxchlcy2AMlxPaVou8pJdlVCe0bSXdUofWWEtjrYnF/LovD2QT3uoj2Fa2aRFJG9pSgDVcR2l+JZ28xnoECtJEiOLABvS9XZdElJzAz9nY8HxtSfgKnnrKWbwuKiSTTg9raQGL/GV2tgmUtj+CYmxZWBCDin0oCWvalrVb7EQmsEttSki5zc6mtRAXl6iLDpdjwGOiPw6lvk3j4Q8x+dg9P3nw1j49cwrHeKlLid7WXkt5bTniomDP7cjk5uJ6FoXxi+ysxujeT7LiEZNtbSXRcjtazmUi/i+DQm4kOvgm616vk5cz+K1n8zCippx8EM6T632OkCMQzhGPidwhARAoY6EkfYdOrVl5XczKvtgH+gvP+tgBRkQp7PT+pwzkbrX0+QJKamhldqVgjjBlcsObXTcv6GsfxPPYFTt3ejm/gbZh9WzD7iwkN5OIbyEbrc5HsLoH2ckWJnlIFioXhXIReKUCMgVJCnYUsteYQ6HRhDFVhjm9BG97AQn8R0ZveQuTO64l/aQR+/jmYewIkiGAzpRIEUhKehFjcJBo3iCY04qZGAs2eBURmApGAhQWI/8mtU8Yhgb9oKkM4lSSc0YgisSUfCRaAU8AxQDTcb0g8803+8O338+Rd7cr8MnsvJdS3nfl+GW5bxtxgEYGhIuJD5eji8HdvJdJzGYHeS/AObMSzz4V3+M1E972BVOMboK8cfeRijo69g2c/PUbm2YfU9EwChlQiSTQSJ67ZdXWyTHVGpuQOqxls/r5LTQQgCiRnnS0l1VZoEGEGNQuGWndcJK3M0O6B6DzEjhH41Sc59slWZgcuJdWzFfZUoHVmsbh3HeEDLqKDOaR686FD7GY3qT3FRAfceIfcLO13E9orf6+OIgOl+PaUEeqtwOgpJdNdCj3lsHczxtTb8PzLTvj5p+H4g5A4LXljtU6JP5ViMawzGwgRNDQFCN1MIsuzyfuKNWGaGSsq5xTwKfPCWbZM5hqWBXGseiWnGVe3XVn45wRJ4qRFi2WiJNJBYkkfkZRHMWWcKBphvJlFwv6nmP/mR/D+6xBnRq9lrutSol2byfRvxezfRKyvAt9gBYv7y5gbKWVuxM3ScD6+/XmE9uUR788j05YNXW6SQ5vxT17Oczdew4l79pF54n5IzEBKSoV04ppOMmXlwtIk1eJ7ii9e5TDW316DOAARBWIXLQqTiGRVklJpFjHD7BIEWedCZicJncH87bd47Jb3MDu+lVTfRugsJ9GWh6f9As4MrcN/MJ/ovrWk+9ZCVxZ05ZDszVcACQ6KJikj8goBstTlVgDR+qvQ+jajD10Od7TC9z8Kz8oquIvq2WViackfz6QzLKUFKhLUzBCKB4kmIyQyoiXOJkOV7y02j+NbSORGrbEh64s4AJGrnHVWpSkdLfyXbOXiGbWIj5irTjLOWqJN1i6MabJMnMwSLzPoy8TYsGgmmU0nmFeGWIq0/wQceRi+fQ/6HYOE915NqGWLKtgUcza4rxDvcBaLYxewNHoBvuF1BPflERkoUwCS+jTJvxgD5SQnt7M0spWjo5exdO8gPHW/NfF2Okg6k0A3TeIpazHSjCwrZ8gqodY7v1r//u0B4jDACwCiwCHS1JnNXCIYRggML/hP4v31jzh272HmhreSEQ3R6YIOF+E9LuYHCzgxms2Z4QuI71sD/edC77nQvwZtMJfwYAnC0JIs1KSj9hatirS9LpjcgD5UxPzQJvx3NsGDn4b5p8CIKZQLvrUURAzQhcdVT6ZVLZKmiZ0vDJ5W852bmRSptEhHK/n5ohpBhIhNZ5ljZSO+jH2ZyC0WISMr28rAfclkv4gfGE0k8IdlASA7ky2HxSGyFLAEl0j5yAI8+RDpL91B+OYmYvsvJj3ottp+4J/IDP4TqcE3EB9cp9o6vHeTigDOdJYQGChH31tKRLL2XbkEhysJvO89nP54Jzz9HYgdVwBWII2nSciO4DlqC5BXCx0yu7u8qwVCJyehQgbqO8dRtjpVDnT8BZHo9okrStXVdV7498t+eOlgub5M/iYmlTT+HERmQJ8Bz+8JPvh5fvPhcZ44cAPcciUMCkByoN+NNlLG3LCL40NZzA+vUwDJDJyLUHLvGqL7cgnvKyLeV0Wytwqjv4j4gMvqtMESooNFimKDLrS9BcQHLBIQRQdLCA+WEdpnkX9ISr3LiN1VAz+5C+Z+CvE5VWQnTq/Id6nkliWRRQEobSDOpvRwRhxhXUlwicwlUjpGUkNPxRFzSyI5MuRLcmEOOf3hbJf7ZYXHptruL/1bhI+Ej6XUPGaQihsqfK4nU8iiQ/KEihclepZIk1RIF7SnIJqBeFqZPoYCV8oSCoFZePS7ZD49RnjyCuJ788kMnAf9b1JgSQ2st9p7oJLA3q3M79uOZ/92fH3l+NrWo7WdR2ZvLsbERmYPvgX/v43CyR9BJqiESzSWsCbhcBpFAdriWcWTTjvL1uFRtSti56yGtnj+zzPnOdIIEuZLRHzWRMpGkIweJRQHX8Jad0KTq0lvSLw9IYVkMjm0tVCNDGlVK4DZW8GQREaU8ycxBzt8n5Sl+WxGE
RvbuaSEcWUFc7VMmJQTqHU8vJCYA/0EBJ6EmZ/CE1+HH3yI1OeG8XygnpMT7+TkwEai+0tIDuaTHCwkPlRIdL+L0IiL4GghoZF8tOFc4vuzFen7c4kOW8doQyUqY53oySLZl4e+v5zo+EZi4xuIjZYQGxJgZMFQLum9uUT6i1jqK2F+oJyF4U0EDmxn7rZrmPv6nYR++6AFYDH/TA0jFkXTk6TsUv5lIbTcYbYQULkfQ6U6rVUUrVYQplT9smIrESvhCetazvmy1LSMo7BzI85WKjdtyiSd1W1F4NjHiaZQJHMi2ReWrf2gspF7Oc8hW/lbfSQULLNgJERD6giYxPSSY6TfVWjNG4Hjz8ETD+O9o5fZ4Svw7CmFsUoV2ZP2jPesx9hfgXffpfgmrsI79lY8feWkutfAwAUwlEVsbw6nRyvQ75+CpafsnE8aM2pYkb+krpZ6ULP5y4vI/QUrK7MG0lRKP69MPC+/qv1Sf3pzjrycNL6KMZuy4IosmSXrOFg2pyzMIq6gxdEyCCmo1vjLxCIktJgFBFuxpFMmYhsSF3tCA01UtzSmbkkXZxZzZ/bymAf96E+JHvkRoce/jf8XX2PpwXtY+vYdeP79MIEvj3Lqrnrmb78e7/veReCmywkf2kZ0spLoWCmx0SKL4YXpV0H6UCHJvhxM0RLDZUTHNxCcqCI4WkpoqIDI3vUw7ibRn6Wknj6xhfmRbfxh/3ZCH2uFR7+szD1JWjqfRCJBLBZD0zQljZ3vX3wrvWnZ/zJ+XiJVKxnTAYnDoMK4Vu/rloMumeWUQSadIiMqSqSVSB9RVbL5c1slzUTQ2bafLXUdPpNnEZKntLFjD2Kzw+xqQRxL0zjaRiaxFPmZ8SVhdhGO/Abzm59k7uYmTvZvwz9YRmaiBEbzVHLRN1BFYPhyAiOXEhnaBMMlMFkEY9kk+y9Q/ey9/Rq0733YWutEVqtKGKTj0ioy1WzMjmbZAHEALw8u+zZABB4yq7z4U897nxfvmOVvzxEAyMupj7Lzg6QTIaJxHa9usBCPs2RohBMBjKQPM+0V11I9WIogaWbI8BzwDKSPgnEEor+DwOPgeRROPgJP/wh+9Z8gSwR/+cNkPn4Lidsnid/aS+TA9cSm30ls+h1Ep68gOnUZkaltxKY2E5ssJzRWQHgsh/D4WiJj5xMZfxORsf9DZOz/IzL6RqIj2asChwBK2+9SmeDEUBnJkSLi48X4xzewOFbJ4nAxS4NuVQ+VGK2AkQLM0VICt1xF9L4x+O13rYnolJMr89IliMfjpFIpMlK6ncmofadpX3wrjCzhSyHLbBXmdCT4SuZUHa0Uhy3BJR8kmfCMwMoKdr7YViw6iY4JU8jvcm2HrCSb46BLyMAyQZbVuxxonWidZONOvhKRICR5KusJFCdawJRdeXjh4XgKluYxHrqfUx8Z4pnxK5kbLCYxlAWj64n2riXen0+mJ59kr5vwyDYiBy4iPOrC07de8cLMyEV4b3sP/PgOCB612wuloSWJLIsiSTDheRpkBQqsuj4Bk5AVKZRXUzLkxTtm+dtzfAYE4qBpcUjIEgGiQXyoiZ5lXTtZjEXq95NnVHUskT9A6Aj4nwLvf5M59XUyz34W44kPE3zkfSx8e5JTX+jj+CdaOHlXPSduuZaZm65h6dA1BKbeTXT83ehjV5EYfhfsv4K0lB60rrOofS10rCXTfSFmz/kkxLEeW09mYh2pqTUkp8/DOPBm9ANvInbwjcQOnE90NIfYiGt1NOzGGKtSlBgrQRsrwj9RwcJ4BXOj5cwNl+Ef30RkfBOJySrSH7gavvMvajkzlctIWhJbgBGJRBRAlltWBJiYpC/1UWgQ6b1CggtzvZBWMqpjPigT12JKh4dfbCsOdixpoKVSyq8xMk4pqMXcsqSoQUwZuSJd5Vbq/nIxYXKH5L5iItt8L5aHkPxsiWo7BJ0W/8qW5vJ4coK0kyyc+uzPCX39Vv5w8F0cE59utABz34UwKIGUtUqTL+zfyqmxS5gbrWRufwmBqYsIT28jNl1B5K6rMR7+FETFz4OIWPzqGawckWJ4555yXxsBFkAERJJ0PeuHOL+/VBedE08lEScrbUSs9fOiJyzJ/8y34NHPwoN3wPdvg/tvIfXlQ0TvGWPpQwPMvL+ThVvqWRi9FN/wJvwjm/GNbVQMFZjYiH9qE/6pjYQObyZwuArfwUoWD5QyN1nEmYkCTo/msTCUA6NlsK8Y9hXBYBH0F5Lqz1cx8uhArpLy0RE3oVE3ofEiAlPF+KdL8B8sJ3SgVEmaVwKQ1HgF8fEqgpOb8E5swDfhxjPhZmZqA3OTGwlOVOKd3Er0I+3w8/vAd8Raa1ySe7bzqurCEgkr4ma3tmiUaFTc9D/zkU50ACGdqrhcfAMxfSSDbPsJDvfLMSIsbV9PggGJjJVDEfloaRFHq5hKeyjZmZHsQep5f8tKHWFSyh6I2KFcuc3zJPELASIxhhf4RkozpXTSMlow4YOMU0Wlq+XvTPFX0mIO+sHzFNGHP8cfPt7PsQNXkh7Khv1vhulcMjeWEzq4lYWJS5mfuBzPgSuUsIrfUoV5sBD/SCGhuxvgN99S682L5SPPIs8szaIwYfu/0qwWCXjEz3PIEir2j3+mc+Ac9VKiNdIRCBwn9N9f59i9+znz/muIve8KEoc2Yx7YTHzqEiITbyE49jb8I1cSGroMfe8WGK6CoVIYKoEhqTsqJL0vj8S+XLShLEJDa/HvX4t/bC2+qfV4D67Hd2M2gZtzid5UQGK8EEaKyIyWYI6VKDMmNVZOcqxSRTK00SoiYxsJj2wiNLoF/8hWvCNb8I5sVcCMjJagjbpXTfGxMiLj5XgnqlicLMc/6cY37cJ7eAPew1uZP7Ad874BePJ7EJoDQypKlz0Ba3KOdFqZVIq3bPPKMbNeqgfOduJyb2LNdCdOthTnOSTOtQUMcYSFKcR6ERNHysPjCYmAxe0QsZSKOBGbF9864WQtIwCxAjGOLyq3UcyzUhLb9xZQOiFm59klEKFAmohjGCFMQxYp8trl60FSpoDFHs8hyU0zCAkv0T/8kvl/uw3/eBXpobXo0wXoN1eg37gR7/gmTo9eyszEJSyNFhCbyiE5uZ7wcA7+qYuJfX4M89nHl4fjOs8ijynkyBILOKI1rECI1WK2KeYIJTn5JT7npPynSIdnIBmA8Ck8D3+e37x/J8+OFmMeLoDRN8LIG0mOZhMdLyI0uYHQpER7yjAmpPAvm8DeLCKDOWj78zFGXSQnXKQni8hMuUmM5dqUTWIsG2M0i8ToOoyRtcRG1xM56CZ4qJjAwRKC08X4J4vwjxcTGCkjMFKhIkcS3ZDx0+GBzeiD2zGGLiO1/0qSIxersG5srJDVUGSsSDnkgZES/KMFeMfy8EwXEjjsJnmzm+htF6F9dRKe+xnEJaMLft3qbmlXMyVh2hSmaSpSOZuXaOwX/mR1rGPDixx27CdhfWdfutkyfWTPAYcDEHVN5b+IDyN5ColWSeTKIJOMk9AiJPUoqXjMmjBBQrLyezqpQC3Xs3wJxyKRu8i9rXs6
v8tWgUcBR7SaDV47IJYwIW5CTGaXkSy8MsAkT7IEyNqQHtLxBTJSjStMLNp39jm0f59Ev+sqZqc3sTRRijblUn6nRK/OjJah7V9LYug8jLE8olOleCY3MHvr1cS+eYe1arDSThaopT3l6eV9HLJmQHkBQBzwy/bPAURlZePyItKwUcK/e4AnPtzOsclyuNUF+/8X7P9/SIydS2wyj/DBUsKHK4gfLiFxuJTgeAH+cReBCTehCTeRiSKi4y70CTfx8UIFltR4AamxfMyRPFJDOST3ZZMaXI+2fz3esRwWpnJZmipQzOmbLiI4XUJ0qhxtagPahPgIm0iObiY1shVz+CIYvkRRemSbKgpcDTjknPC4G+9IMZ4RFwHJ8I5egH86i9BNbqLv38T83deC/7dqzUDhKbF5fRlL4lpdIZ2dUqaVOOcrASKgEd/kpT/SQ1bnOSsvWdEWcTytDLbjBJ/VWfYKthkJKftAPw3h58D3NMz9Fk78itQfHiH++x8TfeKHJJ9+CPOZn6rv1e/+oxA7AfHTYMxb0UUpo3cYXkwhO6sugBVj7awTLuFKu6hSwsiqzN72n+xKY9FEEviRtXZ1Ff4JQnqRdPy0BZC0rFNoB4bk9aNPwX99kOMf2MGxye0EDlWg31RM+MZStEMu0vvXwPCFpCbdyg9ZHC9jZnIz0Q/VkfrG+6wlLdSzW0EOAbGAYzmqpt7BgYsEQ+zkohz4lwBE5a1slS0Pju7H+8h9PHf7e5mfKCA2/Ab0oX8iPHIBwSkXnuly5ibK8E+Xod20geBUGcGpCkWhyQqEIpNliqITJQjFbNLHixGSaJFFbvSJfPTJvGWKT+SzTOOFygQTMywxJtrITVJotFiR/C1AjE0Wroqik26lrbxKjWeRPHgh8Yl/JvIvJfDgYdCetMpaZGJ4ex1ykaSW0HEkvLTyS38EPLpujWER08v5SA4jri2RSPqUByDgEEhJ5wqjiQcTjmtkpP5MCjNTpyH0Gzj2Tfjlx0g/cCOBTzQS/WgN4bt34L/rOjwfeA/zt13F7C3v4Mwtb+fY9GWcOHwFc7e+E9+d16J9tJbUZ3bDfR3w5X2qnJ2nH4KFp60aNwmjSoLWtH3S9JI1mMk4qRK1GVl70IgSNNJEpTRAluU1ksrmk5GAQUAMLClVl9IaaTe1aKdjZknEzp6pRDGoVEZEn4Hf34/vi8OceP+78N6yidShtaT2/2+Sg2/CHF5HeiKPuPieN27Ff+M2jOkNykcJf+cTVhY/EcaMSvGqNf3TYjyCpoSM5YNYsEla95YucwDidMaf2J4jLyDk5DvEZDCO/YLFL45wfHoL4YlczIn1JCfy0cZF/ZUpM0tCsGJmCRhCU2VEJmxgONtlkJQRnRCywLJyG5sQoLjPgmA8n4RNFkjyiE+cBY/Yos+jyTxik/kvCQ5R2S9F/uFcQuM5hCbWEphah3nXBnigD05/H+ILymwRwSFttBwOtxX4yymWE5CIMy+RLdEulo8i/kJSMVw4GkI3pHpXACg9KEwXAP8JMid+jv6rL+K7/zBzn2rm1O3v5PTN21g4UI55axXcVAaHy0kdLCI5VUR8sgB9rIDYmGSkXepv0ebaeN7y97GRPPxjpfhueQ/zd7TgvWeU2P13wi++Bsd/CsGnWewSOQAAIABJREFUrahl4jikjoM5Axlh+6gan+PLZPALMGI6BMOkQgaiMB1eEpDMZ2TuEqvd5L2U1lUmoJPIdELWsrLvKTVoau7rh/j9wUtYGvg/MPZPcFMuifG1aENr0MfzCB3cRHi6HCZySB7exKm7u+Hx74MmGnEJUrIMuEwimCaUljCEFbZQ7SlWkoDT0RxnZdWfgAeoRKG8lLyIkPrI6Lxf3Mfs+68ndnAzTBXDaCEM5sCgC0bKYX8pZl8+KQmPThYRn7C22qSzLVLfy29/mkrQJsrRJiptKle5j9hkKdHpEsIHilRFbuigi9DBAgKH8vEfzrUpm8ChXGLTBS8JgJcChzBScjKb6NhavBPZ+O66nPSDN8HSTyB5gozhU1BQhpI0pmpQm3ntBJ/V2k7D/fFWolkCCgGIbJ19OVKYRq4dk7XSRTWpS8cgeAaO/hfmw/eyeO8w3o8147ntXSxOb2RJqgRGckiM55GeyleCy5i0BIs+nqtsdW0sh/hoLrLlcCncWAIHizEPujGnXDjHByeLmb3xLZw4fCUz0xfhnd6Cfsul8OHr4EsD8K33wZmfQeiYmi5GTC0pwg8SJYIMWJIKWystYbstjnqVOfsIJayRJSJYxMhRIJE2VOZZ1Brbk05jZsScFDEUJn3mlyx8dRrPwU1oI2tI35hN4uA64pNr0cfFhy0iNV4E09nKxD86fSVz98mIxAdAgVnK8wXESfyGGHp2vylgiI8mZDvqf9xdf/TNOVa1pl37lcpYGVnJpi88hv6Nmwh94N1KnaWH1sPgBTAsodkC2F+golWm+Bc2JScKEDImz5IwoZA2ZZEw9DJNFdmAqLSy41PlyvcIT5cSXgGQ8IECwgfzCR3KJXQo26b1hA9m/1mA6NNu/hQZ0wVwcxHBiRw8H3gLPHirFeJOLZJJBkiZcdWxwruKeUX6qQiRdKZjqyvD9I8a1vlC8iOiORz/RLYCFkVSniP952BOxpk/8wsSD3wC/z19LP3L1fimNiFCJzNZCFP5MJUH03lwoAAOFpAczyE+kWMx/VQeyelCktP5pA64SB0oIDFVsPy3hErTh4rIHHYr0Ji3lBG4uRLfjeVEDxRgjGeTHlkLYtIe2IL/pqsI3TuM/zsfRzvykLUwJ34y+EixoMaLCOOr9hHGl3yORN5kdsSkTsaQ3IP1+/OOU+UqkuiU8K+pJsEU40hpaIl0Lf0GfvAvRD70LuZG1pO8OZ/MbRXKWtCGs2HKBYfEesjDe/PFPHvL1YTvPwCen4J5EtIe9ERImaeqH6R9paHl+eTeapIQK8Ln9NOf2p6jXkYShAkdMx5VJM46yVk48QCeL/Vx5raLmBk+j8jUGzBvXoN2+Hy8h9YSuTGflHTYuE0TeTCRp+xFczJP/SYdJIwoYTztQAGxgxZFDhUgFDvgPksHC9EOCBWgH8jDmM4jMZ1DaipLkTm1HqH01FpF5lSWurZ1jnPu87f6QRcvRfFDbvzvu4zQNw/D4qNKikXSCbwJqYsS7rWljlOEqVryLwfICxteTCtHm6hybcl1BL2YR39J8PufZunTQyze9l6CUxswhBluzIcDa2HiPBg9F/avIbN/HYmRXKUlBBzxqWzVVsmDeaQOFZK+sZDMTUVkbnKp743pHLWV4+JTuep4fVLOuwB97P8lMfW/SRw4n8RNBRg3V2Hcsp34TZeh3XQ5wUOXMX/wck697z0sfWEvyUfvBd8vgTNqftyQDNu1eU/5LhLmjS9ZVdcyMExK9O2aNAGSEgiqUSypEIj6kJiXaFJxaSxTSIYzHIHffZngnVeRvHUzocMb8U4XEz6YS+qmfNKHRBMWwPu3MH9oI8++/53Ef/JBMJ6EzDwyGjMptWdyTUGpyDYFEvlHqgYkAGFVD7ywj1b+fQ4pAYc
4ZGEy8TAyDkBFtERCmrPEfvkZTt1Ty7ED2YRuPI/UbRcQuvHNzNy4Dv9tJYqRxYESElA4JMBJKHo+QFaCRMAifys6mId2MG8ZGNKpFjhyMCdzSCvKgoksGLdIvhPwvRRA/n/e3gO6ruu689b62swksS1ZYhFI9F4JgBSpFpdxquM4TuI4zngmseOZzKRMkkkmK7YlkUTvAEH0QoIA2FQs23KXYjuO4xa5SLIkq8vsRH29l9+3/vvcB0KyLUfkzGCtg/vKfe/du/f+73b22UcA+GlD2biLLXcQvffvXNOAxJLNtCrQlEYT482tMq2z4T14BJZ2FISMq5tp+hMfK+4QMF7xlwzAc18i8+URLs7+Oc93/ibnD9yB/0CTaX8+tgP+7kb4nze4499tJfP3BaQ+VkHi7hpi+2sJfqyUwN2FBO8uIbS/mMiBcqItZUQPVhBrLScukLRVEGuptOc65t6PHyyEljeTPfgm4i03E2wpZf1gLWsHGlm/R9a8nszBRuJ3KdOnJQT1vNT5qyyf+Esy/zIHF75pzTKcKlHZiwJ8zXOsQUxLE/TcZZfMs/KsiYDiwJIhkgkSS0eJe3OiTq8nHG3TF+HRY8RnPsgLd+3j/D2NRLtqibeVEd9fDa31cLDMUvLP3r2Lp0beR+KJU5A556JGscbhwf2gQKLrsdIUxSeql35t/l1nlWWZhMuZJxLmOMjUKS4x5q/8kMgX+swFibdUQlsJyXuKWLm7hrXWZvwHawgdrLQROVCJRnT/lRG7pwo3aoypYuyVUUXi7ioS98ikv2rodRubz68jcXcdybvcMCG5p+Y1LUT8YA0/bSy338nlEx+DJ78IMZXSLBGO+S17pOktBZqiw4YLYb6ry17JZ47napdeIfU//kRxh4oXFY/oT2BZWVkh9NSXiB95L/RWE7pnB/67dhBuKSPRXUeso45AazXLd5Wwur+SYGsdkY5dxDp2EWmrJXSgytzOtf1lFospPlNMpqPvYBm+gxV2zL2v5/6WcgKtVQTbKgm11xDqrGWltYLltjKbvF1rKcR/sIDAgZ2EDuwkcqCQ5b/dRuxAFfTshZ7biO/fbe5w4uBt+A69h8Sj04TOPALhZ7117EpbuwIAzY3YxKJueqOCO7cAK2OrK6XJ1YQhHlGdX2xjnkeTj6aok0skHr2XlZkPstZ1p5tMPFhF9GA1mdZy+Nib4WAeq+31fL/jLTx2/G8Jq/5PYLVCtE0+3gZABA7FKWGviu3HeZZ75Tp/yN1QOu1KTsRCmbt1FeXqiUozH/88q0f+C6udd5Bsryd9sIrwXXVu7uNgDb7WSnwtVfhaagwwAk3wYJUxUYCJSfhfIfB1JO5qMGF/NUB07mZQRQWAe2Sp6ojsbyB6T8OmY50BUlozdLDcgdPOd6DRb4dbq4m0ViNwJw6WE28pJ9xaZ77rM92/R+Kxh12aUKXqcR/B0DqBVNqyesvqGu/R44olcbZapDFxlzR4SsjccM+i67H9STCSKbLWmCFmWRbOP84LDx/l8dEPsL5/O9x1HRz4t2Rb30yybQfR9lLCHbUEOxpZ72hmtXMPa+1NJgTrrVWst5ThaynCBLqjkvWOatbaq/C1ubHeVk5uBDqr0PB3VNr7662VaKy1CBiVnG2t5XxHLcudVQS7y4h17iTevo1YyxYi+7eSaCsmcHc+vr8vspQ8B13GjL8vx/+3pTx5cB/Pz3+I0LcnYPnrNuchDS3ayL5KX28EyoYT5ZVkc1RZmySuiT4VXmqZg601d+ltNbSOJNLus5EL8J1jrE7+By7cs4fQQSmJeoKysAe2kr3rBuhvYqnnTr7R8hYufL7TpY7Da+4idCGObRvunmZpsqb+XmXVc3zzjtcJDLoZVw9qmXd3aznG68sTARKPf5zHR97PSx23EOtugP0lcFcByZYqp9F0we27CHgj2F5PpK3GzHuypYy0xsEKktLoLbtsDiXS2kS8tda5AXIFNkYN8dYfH5pA0vkbx7Yqou3FRDoKCXYWE2qvInGwieSBZvt8qKOCpdZSIr01JNuLyLTuINmxgzP3lPPs4T8h/O0vuDb7tt5bvrKIdWXWWs82GGwEExLcMACozGI15NJQXgmIKKg158FU0vbCSMdyRVspiKzC9+7n8tQfcv5gFYGOmwn2bCXYcxOhru1EOgWOYosFRKf0gVrSB+pJHqwl0eJoGekoJtSZT7B7B76efNa7K1jvqmG9qwrfxqgg0PmTxuZzquwzq91VrHZX4OsuJdhVSLRjB4n2PJLteaTbdpBsyzeQyE1ztN9FtKXZSkKiLU1mXaQYz/XcwYXjf0zyiaOQesqcVFkE0UmeZUpC5imSZEa9zlQR7pSznaS5l2zcQCWrLbmU6Pm1nkWPkkvw1KcIHvvPLB/cQ/hgLQw1EW8vhJ5yaKm1xESwpZhLfXtIffEeiDwJkRDZtRgRrb7wem35jLFayeb3MiSvQsWmp7ZgShdyBSBrriGARfwuKWGB1vJTXPiHQzw19E4utdVDWxns30HmYKkJtsy+QCGA+Dp2EeioNYGNtpcZgdMtpQgoidYqoq31hFqbCbU1GbiiEvSrGe1lRDrzCXXtINBVTLCjinjLKwHi76/h/P6bCR7YQranEH9bHi9338rlz47DyiWX1dhEkNf1UAwPu10qxcdg0jWyE4OV4wqq4Zssh+q3tCXyN+9nfeqP8Xc2Q+cW6L/BgLHeU4yvu5hgZymR9jKibRUOJC1VZq1NsYh2baXEPYUQ6nRKQdbB11lLoKPGhmggxaDvsdFRahZJVkm8yA29F+oo2wBSsLOMiM7tKCTRnm/AEDiSbYXud9sqTOFJCfram1lv32v8o10ZpUJz5852N/PS7Hu4/PB+ki9/BmIvuSJFT6/YOvtE1lZPJtTAIhs0bFyxzq4qV+DQkECvqdOKiCsARV6C7x8jevQPWWm5Bd+BagLt5QQPFpFRVq+1Cva/mfWPXk94+l3wvWMQugShhLUz1QLnFa30FGpl1mJa3++h9qcwfsOC2Ps2kSJz57Wdt2rVlNXykF6Di9/g/MJf8lLb7SQ6a6GlwPLUsg7S/rIYwXYxS2C5ApB4WymJVoHDnScLIOsRad1FtK3m6sBh1qPMNJ40b7CjglC7rEu95celcQW61FA9F+/eynrrVoLdJbzcUs3K/B/Dy/9sltHSfj+FOP+ql0XfpDCgYj0tNHMzuaYBVfekVObqM2T+aZqzh3+f8wf3EO+phwFljPJItlQ469DqrlfCHegsM42+3l1qR1+XE2RZSCmieIvucZdZy2irrPEuu29peN23rLpAJeXFwWJQMH5AI3/T2GHvyVLLXdH3ihfio7JZOX4Zz9pUSLiZv/WeEqwnoc8drLCM5lJrOc8dqOCp7lu5cOrPiH57DvzPeVbatTbS3jnmlJAgquSQVUCahnbk1ky7uWAu/ltVWbspa7li6mTztLlbvpk/4kd3NxHpamTlYzutRCjTWQWtO4jdvZPVrn28NPZBOPeoldEIDz6to1e1SQ4gWuJqrsBP5/R1uhxZHPsTsy01JofNXWhErT9jEYfg+E
UyXz/GhfEPsNzZRORgMdnWEmgpIdVWRqK9nGhHtfnPCgDDHdVEOyqJdZSTaK90o63aiBpra8BGey2x9uqrGx3l5jPHOguIttdZABdrq7PvT7bWWnYj2V1JrLcUf28BL7SW8MLgu+BbRyH2sosHbG4jR4DXd8zRWRv4JBPSd45msWQIdX20tjzrPyTw5RFeGHgnZ/dXE+2uhqF6/N3VXPhoIamWBrIHGu1owtZRTaizEn93OWu9Zaz1lrPWU4m/q5ZgZwOh9mairbegIDlxcB+Jlt0kWptJtjSSbGkg1VJHVu6H5joOVMKBcpsz4ECJc4v3F4GNAns9c7Bh4zs2f0/C6FhHzPhT6/FVvNX1uRHsrMXfXkegrY5Ie4XxOXCwiMsHyjjXfRsvj70P/5cOk9I2bEnFGBmLm0MpBeqebHoAkQCbrFrHlpAp+JxLJKoaLa29rEpTnod/WWB56kOsdr2V1YNVrHeWEuoqIbq/kExbPdGefTyx/y2saf1I+Ix9uRZ+ZjX772UlfxY49LPXuWpHl4qzqzKnW0hxacxQJkI8mySrzmJqjLz8FMmHB3l54Je4IC0lgBwsgBb5+CUeSCoJd3pDgOmsNALHBBIDiKuWzT2+FoAkOneS6Cgi1tpMrHU3UWNoJQJIpqXatEl2oJKLPSU83Xcrvi8M2T2QuAgpr0jz9eFi42wx1NyBTMra0rhIUAyIeKnOF7n8xT7OzPwel9oriXfsINtdQLgtn4utNSx37SHc3kiitYGcQG4+CuzR9gbCNhoJdjTj79iNr/MWfB37CLbfYvctcCRa3RBIHFAcWDIH67DRUmvAEXjcqCZ7sI7sgWYyB1T7tM+GyjcSLfre3UTbmu367Pc7aj2elhPtLCPaWUKoqxxfbyPB3iYHkLt3krpru1ksxSyXWhv5Yfcvc+nBj8JLD7vFd7h17FpLY4pZRPQeO4B4JfGeJZa7agAhSSq86rqvyN3yPQ/fu48LYx/kcvcdrPcXEuzdYUWvvv3NZHpvY7XvLTx39I9IvPAZNx8jFNr+h2tuojYHyg2O/viD61wkL6Z6pkRXrce6IOKEslpxBiGVsvojbrHQi1/iwvE/4+Xe20mq4rc1H1oLoa2YdHspSdPsFUS6qgh31diIdNYQ66wh3lFFsqOCdJsb+ny8Q69fxeisINlZaL+Xat1NsnUPkc46Yp1VpNrqoKXGViKGW4t5oaeWix//b7CkAsSwt1xWMx6yllf3J3qHbLbdBfZJdSuRo61KhMuPE/nnKX44/C6WDu0h1qfrvJHowZsIthbh67mF4ODb8HfvJty5y1K48fZ6Um31ZFtUHqIcvzumW3eRbNM5TQS6drPevZvVnj2s9zQR76izCcF0Ww2p9hq772R7nWUb9X3x9l02Yu1NvHok25rg4G44sAe07ufgXlIttxJvvZVI+62EO/YS7thDuLOJcFed8TPWVU68q5h4Vz6R7mJLgvh76o23sbsLSH30ZtifDy3FxPYXs9JWw/Pdt/PywodJPn7CGuhp8ZTcHO2zmrMcOtqfqollRWwSz4FDHR8jiaRrqCehlnyqgjd+iew3Frk4+X6ebdnGxfYtpjwCrXeQ6GgmPngrz/btZfWLH4Nz33OVy+qpFl8hop3Jchm23G//hON1Lpz0lkjqKjWsVMXlq0PeSjMtLU5Z0VAYwi8T+5djnJn9AKHOJmgthjYBpJBsezGpjmLineVEuioIddcQ6qqzISJLeAUgAcmBqcJe0+uvd8QNIKWk2itwQtREpLPeQCIhsaCtQynREi7M/hbZpxe9+EqqX9ZDSdxrAYgaFsQ2Mi7GOJWOX36O5FcmuDj2XlZ7dxEfLCbRl0eo4yYC7fnE+htI9N9GsGsfa917We/eY3TUHEe6VcBW4FvhuUflcLAcWitMGYmGEkh9r7+nhnRHPrRth/Y8aN9JuqNwE/2dggp0y6XbZUOg0ljrabLfNDAeEBh3kVWCo203sfY9BoxQp65rt513BSClxLsKSXXuJNmVz/KBbfjaipECTHU0uMLJllrMpTuQB4MV+A7czPP7izk/9154TIHzC+ojxLpfcyE5C5GTfEmpBND8IZsLUTtWvysYNqsTVY1XDlnhs6S+Ps6PRu/kTEcJgf47ifW9zVnezkp8PWVcPHwnwS90w+XHbFWj1qRoN6t/JUAkIF6K0wq6nN3R78tPVMQvXzDnM2ZDPrde/fJ3OHf/3Sz33EmqvYp0e7mBQwBJdxSTVEamq9wAEuiuQ0wSUGRVBJBsuwOTgPR6gbH5fFkk+31p0LY608ahrl2mWdNtVXCokXDvHmKf/xisP+Z8YNMUihY1HbgRgf0E/fHaL4lGIrI/rZy+Z3mjQVJfe4DlsT9iuaWRbG859OWRHMonNlxOeKCBgK6vvYlE5257LOENdeWArRKRGgeSlgpXaqJyk7ZSUh2lxEXX7lJCPToWQsdWaL8B2m+0x9mOPBNeaXk7r7uCQE+VgUmA2jzCXVVk25QidQAUvaRYBFRZDSm/QJfGrldYkGRnManOfJJdO8kOlRnoV/aXEm5vJttzJ3TeCm210FHkZeu2kurbwaWeal6eei/+r4zD0nPWTUV0Ew0tHthoLqHn8mpUzRG1OrbcBIR6GKtNlarP7XNKKCWeg0eHeHn4VzjX2kS4u5FYdx3J7nIYKuXiR29ibeJd8Ox9toZGa22UZRQ4za17DTZfl7swd3ruY1dWk+kiNPRlTpSEbK+kIPoMkfkP4ZPpb62GgXrSncpW7STdXUqsq5RQdxUCiLSejuHuCmNytiMfjVhHGeHO8qsaSgCkuhqtvit2oIB0Z4XNxay1NhLurCfZowRAJckj74FnPmlBuXzai+pkbskI3ZX5k69Bop/+lughJ031SM56aFL1q5wb/nNSg78BPU3QkU+m62bCfSWs99ey3ttMuKuZTFsDaNXigTxC7TuMVom+GpJ9TcS7mwm2NrC+v8YeJ7oaSHdVkeosJNN+M5n2G8h0/Dy0/zy03QBtN5Jpu4lMxzZSHZrr2WlaPiZXqLuEaGcR4bYdBFu2ET6wlcjBrTYRmGrdBl159p2a95C7muqrRNcR7Kpktb3ceGZ866rZcLGk/OQl6PxYdz7hnmIDeKBzL+H2O4m13UpaZSDyKto10/3/km7/OeL9hVzuv4VzY79H5stjth+I5E9S5/jgLIr60qXkQtkut5I397oBwnNyVAakoaJEsqrdet7iy5WhO4n3lZEarLbro7MADlWS6Skhcuo/WlcU/d6lgN+aWPxsgEg+7Cx9TDiVCLl8tK039laKCekK6LM57Cn1pqzCPw4RmX4vSwoauxtJ99aQ6Sol011MqrfMABHoqWG9VyCpI9RdgRgn10Aj1lFyVeAwUHVUk+7Za5mWRPsO0t3FRLt3E+y5lWDfLgK9Daz17IaH/hYuPm7VnBJoFUSL2BllM67hT59e9poMWhXrxSfJPjRIeOB3Sbfshu46aC8i0V1CoK+a9b4GfL3NRDsbnYZtKyPdmUei52biPQWEZRm6a1jvrGe5vZHLrY3Ehn+RcP8tbib8wA4C+28k1
no9dL8RBncQ6akk0NNowbK/bzf+3lvw9d1CoG8vgf49BHsaCPfUkuiuId1dTqazyEBLWx60bSF04I1Eu24i2b+D1EAB0d4CAp078XUU4tOkZLfc5Arjo1zmWKcUXLl5AbJmod5iAr0CUj0BuWQdil/2uoqL9lLo2k6m5Q2k2n+BdO92/B2lXGrfRWL29+Gro1bek0jEyCTlp7j2RLYNQyJFRk0Kvepbiagstv501NmSVFecouW/wIUfwmf/gtWeUtZbtsOhCtBE4oGtZFpuIDhxJ0vfOkI4eMHm8TW/8rMBol8zJSqAyCf3AiRVYOY60lg1pkCTm573Yhah/OI34B86OT/8W5xr20usrxH6q8n2FMFgGdGeMoK9lfj6am3ocbSnhHRXgQ1ZGRH+qob83r59pHoarYgy3p1Hoq+ZyNAd+AaaOdfTxNLEb9rkkm1Kk4b1jNv5QgBJZLwNQD3Cv96DeLLkbZlH/DzZR4aJDP0WdN9maxfoqUPaP6R4oW+XgTba00CyS8CpIdtZSaq/iPhgAdGBIhM2X5c3M97TZMK+2r3bQO6XsA80ERxsIDBYjb+vlIt9jVw++iHOHP0fNs4e+2vOzf01F4/9FctH/4K1o39CYOYPCYz9Dv6+X8anyb2DSgTUgaohhhoI9exgvXc7Po2ePAI9eYR68wn3FRDtKzZeiYcacVn/rkobic5qIt3VxtP1vnoCvfVEuutJdNaT6qgl1VlNQuf2VeDvyDfQxfpLCHcVsX73zcRbS4iOvxOe/gSsv+S2V1PK3eaO0taFUzsebHg43ky7BFp013DuWYqQukSav5uBHz3I+vHfZFUKoDMP5NJ/JI9Y+82sDNTy4vT7SDz9oMm5Xw2wfwbTr7MzDEa6misWxC5MwNmwMLn3BZKUazOpb9e65pcfZuXev+OZjrey0tVEqq+ajPzjoRLiPUWEe8vw9TliCiCRnjJS3QU24t1lRLsrr2pEumuJ9ewhOdBMrCePcOcW4v01RIf3cXloN88PvoXApz8CF9R0IYlaNkW9mEoaKPKvXzfzE8mo2zf3QAHlS//A2ux/InigzhV03pVHtqeGUE+TaXRpdwEl0V1FuqvSwKGj6p/8A+VEhmoID1YT7K8k0FdBeLCO8OEm/MO3sHJoHxcHbuPs0Nu5OPluAif/mNRn/ob0l/tg+XFYfdEbL8CqxnOw8jQsfx9e/BI8fi98aZDU6f9JaPwD+Pp/g/WOt7PWuY+zHRWc7ytldagS36EKAkNlhAdLLLGQGiol0V3gjRLEK4HE8auacHc9a32NrPU3EOytJdpdQaqrhHRnCQkpvZ5KQkNNLHXXsNRZRWigllRfubl0tLyJTF8Nl+Y/TPrpT0HqsqvgCF22tqZG282pYC3BSIdMLC3es5WCScLErIe1lbLY9N2L8MMp0vO/xtpHt0KPJlQriQ/UstZfy/nOBlKf/4htnaFCSc86/ET+6kUvBtFD11XDlZyYSXHoNYBonkSi4LLSKhE25Jnd0z5350k9usjzY3/AGWVAuivIdBdAXz7JXmmiUoL9VajsQ8dIXzmJ3kKSPYXEesuJyqpczeipMdMeHdxFsDuP9Y43E+0vwD9UxdnhvTw/+3vw9AMQPKPmUWSEDrOGyj5lbYbW7uOnkudnveFlW8IvEvxcK0tDbzd3KttyI3Tnk+gpJ9DfjG/AuTvR3l0ke6vMH870FhHvreJSSx3LvXsJDu4lNCRNXMx69834+rfhGypgbayWlZk7WT/1BwQ/9zGS356H5/4Zln8EIc0uez6HbmTzkIq1Xsra3FPNHc65FPczD8PXjhH+ZA/rp/6Wx9ru4OXBO1mbejuh6bcQOiw3rdw0vcAhPmZ6dprCS/YUb/BL9W3BvgazjP7+BuOfeJru2Umydyex3mLjdXBkn9VGXehsZqWnkWg4+/l8AAAgAElEQVR/FdnefOi5AQYK+VF7E2v3/xU892mIvuB6Msd91g4sF/ua/pYFSav61lkPFxXHrdG3XLJkWP2lBR25Pefg0UGCw2/D39ZoMd/64K2s9zeR6CojPfsr8MRJW1hl2bLXYPN1OQTphzcPez2XSVCmwIYKykT5K2ZOKTerk1n6Lv7PHOTy8C+ZH20A6dlOujfP5gCkGX0DNfgHqgj1a3a7mERvsZngWF8VVzOiskrdtUSG6vF1F+Lr2kp0cAvLvTfzo7HbWfrsRyHwDKgSQK37NVQ5l3VrloNZr1r0NQj0mm8py6LdWZ/7JC9O/h5Lw7eSHa0g1v4GGMoj1ZdPuL/WQKKYINTXbPeZ7Csk1b/d/P611hJLYEQE8v4qVjuLudRZyNJABcvje0k++B/gKx+BZ4+D//uuMZs1lfO692hNkDwMrU60NqNXeCO8RCPy5dWxRn661v6sQfSitwfhN4h8vp31U/+VsyPv5kzf21jpv5XwQJNZg2iLXJSd0J1Htnsn6Z4C45sUnK412F9DpLeOWE+V8TLRt5NY/3aiA3kEB4rxidejt7M2eDuXuvey1NnEuiaQ5V733ES2P8+U6bnOvayc+jN4+XOg5hCJgDU9F0A0TOIsw5rT+HpFHo1iFK2CxeKVVDrrMlsCidrePjrKub63s9631yo/ZMEZqiMy2Ezsof8Boe+49SuvwWRvHkT9ZF13buvMbh/QBag8YNXrqKElkl4DM9PC7hJXVEem89We9NkHSN37QWKDzdBbAN0qENxOoreA4EC5EUxEE1hkVQSSWF/FVYFDgBJAZJUiI40OeAMFxIffyMXuX+D8kXfAD+bNLEuRpiVU2nBFBYQJVUzJiniW8DUI9JpvqT5t7Z9JfPYj/HDoHVwYv43wdA3BwZvIDLwJDm0l3V9gbl9gYB/rg/vwDzUQGSoiPaQg+9+Q7f6/oP/nYXArkd4iVnsaWBv5VaKn/pLUI0Nw6VFYeR4CWqUXcQrSEw8THGUU1fFEbYDMRVYFhGuCLQ6aK6ljNkMsk7BCwbSWxKpTSuwiBB+3lXvh+z/CxdH3szr4a0SGfpFkT70VKtK5Ezq3G0hkSTThGeovxT9QaYou21WMMkWyHAKGf2gn64cKWD0kt62KwKFmwkP7iA7eRrB3N6tdtaz0VLA2WEZwsNDiVK0/We65lewX7rGl3lK4ygpq2wjZhNwwS2LzI86TMWWtxtsJFc+rQthlu9RoxXVb+RHhz/xPwlNvs0VW9NWTGHkLl/puJTX7a/C9Qbdy9jWY/GMA2VjgYggV0QWQNWclcl3APTdFDFjSmgkz7apYfQ6+0odv8teJDtaS7M0j03ezaVIBISyNMqhjGbF+N+KyJgOVXM0xMlhNeLiO2FgjoUNVxEeKyYy9mbNdN/Hi8T+CpW9btaYW7jjbHCezvmSbZ+bW4hvRN/Vr3WxF7b1cHOatRReznGCKMVqWPI9v+le5MHQna6O3EhirIDqyg0D3z8Ooywzp/oKDjQQGmwgYQIpJHfoFGLoO+q9zQBnaYe9dGn03a/ffBd/9NFx+CdS+VJtYumpw85py+UYrJldvrOQFSC07PuGaiGpBqbSvRElDQMlp5Ki2184kSce1BuYyRF6E
y9+Fby5wee5veLHrnSy330aqvxm6S6Bnh41M3w5iA4UWo4QGHf/oLoLuQpL9+YSH8vENF7J6uIjVw2X4hlx5vWJEhu8ge2gfkcEmUxKrY3VGKwZ2wGCxxSO+sV8n/vVJp2yzKfwh3cUVi3gFIKKAZ02sY13MmtWFBRPNk0QduKx+68JDxD7xYWJDt8LoHQRGf8lcPsYa4BMfgOQLr/BMc/x3nlWK69zPu5jDXUAOTnpNFyEYeBOJVz7t+YLq46Qafq+Vik5dfp7ww/2cHXsHofE6ksMFMFwMh+rJ9lcbIeP9+SQGK0gMVJE4VEnsUOVVHquJDCgoryYyWkh2ugRmKjnbu5tL37qXpLZgyAm4lTXrfpRk8NYh6Fb1unZa8jSVYd1r2qbHJlXm2kZtn75lEq77i0qlg0+yNPMrpMbrjPkM7CEzWGpZqdBgCb7BcvyHqu0Y6S+0ib1wa4mluePDNxDr+39gVtm+Yla6dnFp6vdJf/soqD9yKkTCr7okL7ZQutMrstNLEnaR2/iXm3U2fgm+Wrh65aO5r9h8NF4bcVyhpTbPsS0rLr1A5GunWZn7c1b63krmUJNNtikFnBgoIDZURHyohPShMjhcTXKwjsRgnfEgNFJKcLTERvhwqfE0018HfQ3QI6+ikdSAOzc4Uk5wtIjo0M0k+7ZDbyGx/jpWFn8bnj1mnktOHjPaPiOrqUFHDyW7tMAvpcV8Of6+KkRw9xeG2LPw0oNcuv8veHbg7Vwc3kd4uBr63kx48hYuP3GvWZ71RIalQNqslegkmU/FVgSQq/9TQB+3jSmTLqsl+QtFyTz1CEsP/gXnx24lMFxKWqa0Twvsy8n2FZAa2ElS5d6DHkCGy22WWTPNr2sckqtWTWSwnPj4DtKTO2Gshosjv8nFpx4l4CmZjXy1uR6qLXOiZcLn5dklVnpVR7lfAr1ZCiX2JI3am5tLXLJmz0JT2LJ3genbYLQYBhpNAOgrJSH/e6iapUN1rIzsYn2onFh/Hpkel3ZUGjw+mkf40E0wVE5q8DbiJz4M35kDdT5Mhm22WNfvmOUlAxTzKC70wKzrdYJwdTzUd+s7AllYkfepG9YXqhXt9+8n+sm/4/LAW0iN7iNzuIZY906ivfkWZ6WGS1lu3070UB3hQw2EDlcTGiknNFJCeKSE2HAJqcFyS/nTWwc9DeiYGagmcajczgmNFhAbzSc5tMMSOqn+Utan9xH+yt/C0jddBa8tB9e+NRc9d9/F4bpMgcSuVyDRCxvE0Au6mSiZ6Hnr65V85jTPzP8hL/TfYpk6+m9k7VAVz365i0T2slUeBsIZ2yrFyuGzKbJJ37UCRH58VN2RHCNFcU3uBF+C70xzfvY9LA3X45f57Soi2VNKpr/EtGxSbtZguRHrdYFiM4g8gMgnjo/l20gdqiJ0/EPEL71o/aZMw9iEjoRIvvmrAKL3tDegJyy6BblfapBpANF0rZUFrZJkBbc7t9ImPlLfmMU3uQ9Gi2BAmrIe+svRvfkP1bA8Usf6WL2lT02w+opgaBccbiA6VoJvpIz1Q7eROPmf4bE5iD1j20/okgJagOix2dSkFfFpnY4uxpvI3SwTV4MRTQLr67z14i6DKfdFTRdegDMPE/9cK6Hp9xHpaoYele5UkBwsZKW/gKWhYqLDlabUoodL0ZDl0BBPk0OVBohsfy301ZoHkRrMnV9CdKSAzHgR6ZECkoMlluZeGmnkR3O/SeCbYxBTwzq/xZGZ5IoJbA4I4pOWnOeebxw9OrhmDHGS2hE5ex7ijxH6p27OzvwmlwcaifQVsDJUxeOL7ye7/AVXaRyJWB4jJYUopyiTuVaAgPabCKXFMKFWKk8r6Fbh4jeJfKGFy7O/wuWhSlY6dxLuKSQ9VGNDLlZ8qIzkcAXxw1c5hquI9FZZajQxXkBoZIelSnnkHtsJS6tdnYvi9Y81i/cqgJjr5dYnyJNyNNdzJ4TZHECiasK8ip+EbdiiJg+XH/gYvom9ZEeKwbRlFQxUkRysxne4hpXRWoLTjYQOK41dQLa3FA7vIjVcz9qhCi6O30Lw/j+F7y+4HrWpVeu4KBHVzyp2MJAat+ROKJvo/G9dp4RkQ2leLUD0dfZF+iX9oiaLFXsG3A6za98j85VDBMd/l8TQbWQPNRDpL7YYI3K03uK+9HA+qeFikgLGSBkx8XO4iuShGlLG7yrSQxoVHr/L7DzFjAIH46UWQ64N1bI03MgLg7dx4fR/gR/e56p/CZNKh60DpQXmuHUlWrPuGCYllxvuJUcb9R9Tlx7F0IqzHib7T21cnvkNLvXWszLSzGMje0l8tx2C33W7pwVd11VVIgmA1+hiOQaqeEyz7M5HVDcJBZbL8NKX8D30V1yafSsX+vIJDOSTGd5FZqjBxSCHSkkOlxE/fJXDA4jqnGITBQRGCvGP3Q6PTdtKPtMwopRUssBiluHVAHGvb86USPu4lpVeYki3lnAdyjUxZfe59G1emv0g/pE9pA8JIIUwUAoDNSQ1nzFax/p4DdHZBmLjtbZoS6U3HK61DNDZwTrOzv82PPMA+J51lkPdTuKwnAGVUQokuq5XWJBNbta1AiQHsmgsQcKv5MXKxv7rcl8StlOvD0JPw2NHuXzs/ZzpaSR4uIHUzC0kJ6tJH94OwzeRHd5pdDCFN1xDXMmT4ToDilOApcQPbx5lBqjE4E7S45WsjdZxabiB9dE9LA81szKhXmV/CuvfMIumWbqgJnvNgiYIxZIEtPmoaTSl7jUEkisAEX2Up0yoF0D8AiSfg0ufI/bFuzk3/m5e7N/NC4criXzyt+Hl+yB0zpltTR0pJLOJQmPA1f0zAstMW38usVN7QwRA2wpr5ZbM2+MnWP/EBzk/Usn6cB6ZkV2khxotQBcxEyPl1zAqbeIpKndtogDfZDnB+d+Cs582S2baN+eOGu2037cAolvPaU73MAcQ90xkFUu89I+2G7ZOWZeJmZZdgR/cy5nx38F/uJnkIQXaO2CokKyC1uFGgnKtJqoJTVSRnKwl0VdtFQbpkRqWBqo4N/F2/A/fDckX0W5VyZTbi1zrps8m4Ly3gaqzICLw/3oLovtTuiIUD5MIr0My6LZ1SyZtbkEC5hcvZU1SzxF+bJYfHf+PnBt5C6HRvTBZC4e3wMj1cPhmGC4iPVxpFjIx3EB0pJ7oaDWRsTIi40VEx4qIjxWRGCkznqcOlxMfLCIxVsHSeB3nDzcQHN1Damw38ZFaLkrZPTEBseeNF/6E5ntjqOmDwBHJlUIYODyQbEpQiHaK2BJqdapFbNotLfgkvPhpgp89wA8G38HFsRpWj94GT6gJ9osbKT9tuKZK9mu2IMKBJqiypu/E3iiJeNQaNlou2vc80X/q5vyRJtbH8yzjkz7UQHywhuRIHYmRSuKj5Vc19FnNzCoTFh3PJ3Cknuin/gR834JUzK7LLIdTJUbkDYAYup2PoocCiAmjnlga2+u6pzcCApTmGi7YnkwkzhD+UgerU79O4NAuEkPFZIa3wKE80oPSms0EJxrwT1WxcqiA5Fg1GZVZDNSRGG/g0kgTq/d
9EF76lKVmtVgzqOF13VjKuhovPdflGJsNIF6ud1PnFfe+g/Xr/S9L6U+qC7oSxk77EtdKJm9vMz1MaxdbpTUElDMknrufyx//c/xj/x6mmmBkC4y+EQ5vM4BkDlUj/uYAEh6rJDhRQnCygOBkvgPJaAmpw5WkDlfbMThcweXxKi6N1jgXeawRFKMNlrL2iT+Giw/bfiYCSMTbFTkmoklvmdVQsC4o5LIyjpfGOq0jyRFJezqGzkLoOXj+YZYf+ihnJ27n/NSd8MQshF6EkLrVO6Wv/Mw1AySkCgDjjITIbylUlZL7c/6BOpp/7xTnjt/B6tQ2UhPSMLVunmS0kfg1AiQ2ICJXG0D8c3uIPHIXxJ81Kyba/WsAousXMe0+vM+oTseCVs1S+5VilHU8R1pBY+QF1j/xF0SPvIPAUJ2lPpPDbyZ1eItpz9jIbkKTuwhMV3OpZ5tpS1kWWc7k9G4uz7yV4MMHIKGgXLu0OsuuBgXy/kU6m7fIXVCuosEL0MVvfUYjx/vXCw6dL1Bo+zaXtXOFBrZE3wiSJOpXax53LYpMNB2npt688HFin/pzVoabLDGSHruJzIjc5zIyh2o3AaSW4EQ5vqkifNP5BKbyCU8UEM8BZLiWzESzxWMrY2WsjpVZZTCHqmC6kMxoAWfHb4UfztoqwFA8ixbwqVWpFzo6gOi6cgDxKj1yPF2JqHWQRyvpAK1n0vp47Wf/wuf5wcRv89z078Czn4DIefArRPBCh/8VABEBjY8miRIqEdxlRmVZzJu5+AOWHnwna7PbiI8VkB1zmiI62kRsrIbYePlVjcRoFYw2GUAyR8pZOnorPLvgUoK2/NW7OF2gN/t/xYJ4cYn3+gZAdK4pU1ebZmWllni/DFllRCKw8n3WTn+A6HgzsdFdJEcryIxvsZEYrSEy1oR/0lkQuRdyKZK9FWRHmgjP7GP9vt+HMw9BTFsKXJkMywm96OcEXxci5lsazfnZnqXLzYM42l8NPLzPbNDFeRf6Xl2Hl7qzZuY5QKrmyWbr0y+Q/eExzp1+PxfEg+N1ZKdrLHhnpJHMcAMRrSuZqCM0WY5/upj12QJ8MwWEpgqJjZeSHqkwDyIyupvg+C5CU8XEJgqRC8pwBRzeCqNbCY5Xc2bud2H9Sbt/TYMabHVRqo4wLSiKeRbEAOLmgXQfup/cJKnOMsKqIkTLrtWX4IXPwsuftyXSBC+5LKzR2GUyr9mC6EeNSTltbZNsTgvq4izRvvQjVj71btaObUXZJgEkeGgXIk70WgAyVgEjDUZUAWR54S2e2xIgq15KuYvTtW0IgtwFuSqvBIi0qLkZ3mdkPdTcOKNcu24ivQTZFcgmrEo2ePp3iY9XkhitMx86PbGF5MQWYmMVhMZ34RNAJmtsXiAzVkFmsJzsaCOBI3cQ+NyfweVvubZDpvlEqNyfLjYXdG4Gh3MfROsc40XfawGIZEyf1y1vFiRl8DSZakLnJThU7xTXHmuWRVsmtfYtVr8zzMr9v41vdg/B8SZSU3tgtJn0oVo313GoiNBkqQeQQtZnigkKCALIaDnJkRpCY7cYQCQX6fE8A05mRHzdbgBJjxWyMv1Wkt9dtNWAiiC1qYEtqLK9F0Q3j2YbINHzV96XQKI72pBV3ZdcrrWXwH8G1IVRnek9vidto4fAtbpYLh0qAhuVxUOvRFkmWV6rcXN9hfXPvIfgwjbSE8UwXk9guN6IGpuoJj5RcVUjOV5pAMmM1pIWQE7/GsmLX7OLyajn0WaAiB4mCDmAvLKuTEG5neEdBBYtEEvZPuhx0iq3kfOTUYDwfSKn30VqopDkWC26jtTkzSSnthKbLCM0VYt/ahcBjaEimKgETZiONeA79g4y39TWYS9dabG0YSFUfKdgfNOw9xw4JNC6vBxANhhu4vD6/+m7xCMNPXb/5SpruCSFZNC0bjpDMqJ9Gl3NV1Z7eYQeI/Hdfl4+8uucGb2T5Pw7SE40kzpUQXa0DH/fNqJTJQRmSvDNluGbrSA0XWE0yoyVkRqrJDTRbO5oZmIHjG/DvS6LXEh2fAeM7SQy2cT5U/8NVtRVft21LY2onsxZCtHFgcRztSxod5edo9VmK2LnC0O54SlPR1vJtFqnrhPj8rUBxE2oOR92g3Oe3Akg8qWNm/41Ap97D+GF7WQnq2G8Gd9IHb6pOqKTlVcFDoFKgpk9XG8WKX2kEt8nf5ek7wemDV1libeQRoTwgKvSGGvRLSJ66lcHWQsTEB1EZ7OEWmvu2h9pP1jLM4m6y08Qu/dXyU7m2TXEx6tJTu4gPrWN6HQhgekKA0hoopFAfxHYPZeQGq9hfUFroxdd5xO7rlyGKpel2lw57SYFxUn9bE7bO0Y6cXbCYRLyuv9JeLwCDvt+A6atu5CFdb+nCnJHpjjp+JrbxXajFiFkmw1d/Mzf8dzku1mefgexid3IYjJWSPzQNuKTRYSnyvBPV9sITVUSmxAAikiNlxGdqCc6UUt2PB/GbyY1XkR8ooz4RIkpICYKyEzV8OKht8D3pyFzxs2Qp5MkE25qM4dho8iGS6pXHY2kSGQhddQ92/2IcJ5M5N7XOe48RVtrpFi6doDEbPGit6xOv+RZkZxmMuD4zhL6zDuJzefB2G4Yu4P18SrWj1QSmS4nPnV1IzVRZQBhvJaUtNMX/iOpxHmb6jKOW5m3R5GfBBBHQyPYhkshCnrpdVcjKosjpyxiwaxpnaUnSdz3yzC5xZIOiYkaElNFxGfyCB/JI3CkGP9Mk9X6hAcqYKoSJnaSmq5m7eR74dIjZj0sRjNGea7Uj1kOd4GbgaHHV0AhhF39Xw4gpsjEOE3yqu5J5bCebtEuUcpxpQmTZp1Map2stxGnlWQkLpF+/iFevPeveGb47QSndsOUXKRtMJVvHkNsqsomTAPTTYSn64hPVpKdKLKhpE1istImCxFoJouIyw3zZILpMpit57JKWj75XyDwLbd5qZKLXqcTiZ0DcS5m87J9KhfxQOKx1cTxCv3c5/SegJGznTG0DEJrT66x1EQWRGJj+5YKGDmYZlxgJLNmV7T6FIGH3k78iCaVfhFGf5n16Qp8CyWEZ0qvASCVZIbrYKLBABL/yp+g7rjatsCIEPfUn2dBHCFkQVSyobUhTric0Hno1sEDSMx2kEgQsSSn61huMeHK0yTueztMv5nURDnxyRoSk6XEZnYSnNuCf26nA8jEPhKH60ECMLGV7JEqC+7xPWrmXfS5wlhdjOcimJZxVkOv/jhzJQhe4G6IvTqQODdSFlKiIffOS9/oorziA4UdcjWdIxYkk9GcTRS1EHVLYiWlZ1n/+gTPTP8WgZk9MFsGozfC7E4k9FIgKgwMTu4jPNVIfLKK7EQBTOTZMauYZLzKKZupAmIzBUSmK4lN1pCcqoIj9eZl+KZ/EZ5ZsK6Y4q+WO0sRS+w8Vjq6mKLRO1dcRd2SznG8lkA4emfScVKag9K0ne5JsZadp+RJ+NosiH4i7NUt2VoRXamG139VP2SLlC79M74H9hGd2goDvwoj78Z/pBL/qXzCs8XXBBAFhEzuMoBkvv6X5h
KpKMQAoopbj9l64bUB4gmJBxB9TABxu0gkCXo70Cq2Y/kZDyBvIjVZagCRFRRjg3M34p/bbgAJTtxOdnw3jJfD2BthoYr1+z4EgadsT8L17BXmOsY5Qde163lOs+X0ziuEwFuC6kovrg4gBrLkZbfc1eusLtdYbtcGeFXMqeBVrVWTMdsXPpnN2PqSUEaWRWCKwvkv4//8X+Of20tmpgCmboLJmwwgqnaOjN+K6BGe3G0AscLSyS0wsQ3GC0iMNxKbaDQaxmbzDCCRqTqbcE3N1MNMoZXs8MW/h7Xv2e9e2HStopX9SXFYSY7WyXi1axZPuSSMc8oEeDFaMx1K3/vNauo2teWH9iYRraUYrimLJeK4qlepm9QVjnp5Z110JrgK5z6P73QzkfGtMPjvYfRdBI9UET5ZRHS2hMR0xVUNaRdNFqand6G6oPQ3/952JpIFMYFTJZ4eSOF6LpbsnWnMTVksCaRbn+y5il54ogU4mqXQ5JQ2rzQmCCArT+N78L2EjpQRmakmOlNNcrqE5MwO4nM3EDl6M6HpZqITt8FMswlJcuwGEieauPTgn0LwaaKRDD51c93kG4spulwNPdbviY25c/SaZdqkgZQ8sGXQYrbe0U16msG0gycvGy/pwjedY0pUM2JaA67da3WPrqGF6KfH9jXWKTIO4bDtcaLYTtfn9nF3v2yZvfgZeHKeS/O/juIM5gvIjr4BpnaQnKogOtVEdHIviYm9JCbrSczuID0rEG2HiWKSE7uITzYRnykheiTfaBs8Us3KWBXRiWqYyLe6r8SJ34LlLxKN+FHxT27OSBRw9/cqgJgVcTQSONxw0xHO/njrnTIpuw31IVGJl+PDNRYrOoaI6B7hRXRx1cOKbVGWUlnGAmtzjXCsGCb3kZ64g9hMA7HZSpKzVab9FUO83pE8UmHfEZlrwH/irfDUIeuWKIFywuTRzNMIrqAy559urmLdJFtecJoTTgmC7iORWkcVpbZ4LPUCgX8Z4fuTv8ZLU00Ej1TAfBEcuxlmbyQ5sZ3YaAPp0SY4vMOC+fhiHWcW38aTn/rvEH8SUhHXpdSzwB7ZbNmAkVPVCd61KMMmbaYNZ3Rebuge3TZwcicEGC91vQkLDmy50hmvXkkf3PgSCZRgqHPcy3qmx/Zn37mJxxuAE82SxBKCk1Zpxmzb7PVHOjl/5JdJzVeTmLmR+PxNxOa3E58rJj1db/FndnIvoYVCQovbSR0phekqmKqxuZTkbA2RuQr8i4WsHy/EN1tCbKqSzFgJmdlaLszW4//a39iW1NpuIncbjre6MV19LprwvAJzEV1Bbe7WdZZS+y5oFoCuKI/NeuaaLIgxRBpG6BPh9E+/5S2V1LwnqQvwgzn8RwWQQjIzDTabnJzSsYbUNQAkfrSC8NFSfAsNrJ78JXhyzNYY54TCBEgaT4v/jLEyDXK7LE11BUSbCtxyBLxCeImJAljtuXfR7X+XfpHVJz/J149+mOen9xI6Wgzz2+HYFpjZ7vzqsToYb4DRfDJT+QQWannp1Nt47KE/Bf8/u7XQlkN1gqlr1hANbWwIonxht5+eSz++2qpIGwog7p7sS/RFHr/d/UiQ9D06Ry6Tlh97aJCMXPWfmO0t9xXvEwkyT36OCyc/RPBYI/GFbYSO30R48SaSx7aTnVFscgeM324AWDu5ndhcmQHDgvHpCpIz9YTnalg7kc/ayTz8R4tIzAggFaRmarg4V8rKlz4A4ccM2LonKQ9XO6fr+UlDVBCdHaB067KQOopUr/V3bQCRiJmZ1yo9XaT3i0Z/IVZFbpccQObqYD6P9Gwt8dldJGaqScyWkzxSdtUjfrTMABKY9wDy1Ajax0TXIZJoCByvBZDNbol47ATKASr3Hc790szrZZArkT5D4uzX+N6J/86FiWYS09vJLNxIdnEr6aNlpGeqbGUjM7Wg1YbTVQSPbmflvjqee+B9ZF7+uPWINV7mfkSCugFUWQyXTFBCQWlpCYGbzPQUkWcldK9OezpQXLlx933ufrQOxgmSA5IHEr2pm77qP31BiHD4svsa9d4JvET4iy2sCiAndxJazCO8kGcAYaYURm83gAQWClk/uZXYXCnpmQr0ngDkAFJr1mP9xA6CR4vJHK0iO15OarqSpWNFnP3UO0mc+bR5C8o+ilqiwZWkb26idTNxHUAcPa7oh591+9cOEAt0AsvFCHwAACAASURBVF4a0HNQDSDy9TU7uWTNE/xz1bC4hdTRSuJHaokdKSF2tIjUXBmpo+VXNRJz5Q4gC3WsnnobPHXYNSfecE2c16FGFFcsiLp7iICe9hCFPOHMvZYja+5oK9ekb5JrrtgtcxF8T/PiQ/fgn2qG6ZuIL9xA9MR2YvMVpI7Vw9EaOFoP07eQmakjOPMGIvflc+H0W4g92g8KjqXCcppckm4AcRZDOzA5cAgoEgBPlj1gGBA8cus67R5ymkFKVOd54NHnlbJWulpAM8DbZJpnZtypV/FfQpnAH1hxIFWAHPHBE/dy+fg+Qid3ElwsIbJQQlru50wJjN1mbnZkoYDAyW3E54tMqXCkBFVDJI7WETlWR+B4CYHjRUSPlsJRxSBlpKdKWDtWxEsnbmftu4fdQigVx+aUYY5GdvOeW5h77FVK5EiU+8z/ZoCIE8p7aFLFZ8Q3ThpAVFK2Cl4M4jtWCYs3GCBiR6uIzu0kOpdH4liJA4mA8jpHYq7MCBicr2P91C/C04OWI7/iu3vh0StcLKUynWCIWBuC5Qle7rM5cNhRfXWk7lMh0kG5WW59gf8bo6Rnb4GZm2wSdP3kTgKnikicqAbFXEf2wuxe0keqic38HKkTb8Z/ooLgwx+G4DNYxzNxSmT0LkYa0c055DSj6zbzyuvU5KJLY+r6DPy6fu8ynel00uLcCvUOUBCudkculnHrXTwUXQU0ch+JRuOEoxGi6q6iOjW1Vlp5nNVP/QbLxwsILFYSPVZO5uhOmCmCsVtgcg/RhZ2ET2whPr+T9FGBxAEl7gEkuFBOcLEMeQkGkKkyMlPFBOcKeWmunkuP/A3EvmcWLCfsuv0NgdeDVw97yVkS0cVInruRn3K8RgsizrrOJwnW3Eo7/arlkj2AJNetSZdvvhxOXI+EOjJXRfjYzYTnt5GcLyZ9rPyqRvJYuQeQWnynboOne627R07IjWCeT29CJMmR9XAGJCeTzq/yAGJafJMF0mSe+i0ZktIxsjGf279Cu+I+/0k4/jY4VkDweAHLp3fiv/dm4qdK4KiAcxvZI3tIzZaRnXsTyfl/R+z4Vvwf/yV45iFYWbkCDuOsxF0mxeXvjcE5wc8hVs+lqY3usjLuE3aJuuHceXbzkhH53SmbrwqT0tq8jbkrJSDsZ3+KcPxrXg4EEpYWjWT8ZLNhL610kdS3/orLp+vwHa8nNl8FitNmS2GiCaZ3kVjIJ3piK8mFPNLz+aTnC0nOl5I4Vkd0vp7IQhXRhQrzOCw+mSknO11kivXisUouffJ9cP5+U845gOjWc5jYeJB7IXf0LInTjD9bQfwvAEiALGvE8RkjctpL5lwzkWgfusfvZ
W2+0gASP1ZOeK6K0PzNhBa2kFhwhBFxXvc4Vk7sSCnh+Wr8p/bC0x1uUcyGL38l4HXC4Pnem+JZ08yirATqp4FEk2WpjCtcTGsWUdYkAr7vwIPvg5O7CJ0sY+X0DgL3XU/slGKtW0hO3E5qtpH4bDGczCMy8/+ROnG9CU30kTY4q/0qvASHSaMuwAuk9VDXJR2UG7lrtDe0TZlK9zyA6PP6jM7JBV5Zl/uXACkolc3JDfUEU4pep1/1nzqze1NNccFO/bb0Q9EAnJth+aFfZvXELgeQI+UwXQGTdTBTRXp+J4njW0kubietGGUhn8R8KbH5GgNIdL6GxLEqMrOVruTkaDnM5BM7utOs0tp9byf5WDdwecPF+tcCxBFKZ2uIaD/97xoBoh/QlHzAupuIEcZMS/OK8VrXHDKA+I5Vw/EbiM9VWpYiOJ9HaH7btQFEBD1SQuRYNcFTzfB0K6TPmxrZUBgeWFyFqjfP8WqAmEBtAoho5lkefY/ejicypHNd9bJpsmqynDxD8rN/Dff/EqHT1ayf2kbw3jcSO7mV+Gw9sfFbSM40EZ0qgvsLWZ/8v0mduhHfXBmB0x+A5z/t9loxRomWHjhela7dDN7cfclF0CdeARAxW7PhueGVWogvSn1LdnNDDpyjyWsLyE8XHUejXN9oS0OnI07m1Jwv+lVWHv5Dlk7tIrpQDrM1lso1kBwpJDOfR1JJjQUHElmU2EIpUVmO+ToDSvJYFdmjFdYBhvlSOLKTxNGdxE+UEzrZSOSrfw7ZM0YH0ws/yYJ4vNywKAYIj9b/+wGiX48Ty4RYT/qNWQbnhJo56JKV+QmR+fYCsZO7SR65gfBMuQVhMd3kwg4SC8XmZsnVev2j1Fyz1Ik6/MdryHzzv9uiJk3+iiAJW5TuzK6yOJbl8LSxCb64bzNfV5ILRj9PCnPCuPlohNbn9GJqDZa/QuJz/5W1kzUkPr6D0OIbCc/dQPZkDSzuIT27l+RcvblWodNvImiZnRpiR29nae79pH74WQhdtM4dmpHOxCOkUgnDhG4jlFaoriDbyZ6oamlgZay9+Sa954ot5ZopFrgyg6wO9loguBJO4Yu7NKeyYQnNxMuGaH/FTMZGNpu1phGvCYrNb+bcVytHcXM1Ya3I04XFniHx/S7OLNYRO14Es41WxY2s6dFtJI9uIXnsJtKLWwwoscWdRI4XE1msMJAkjtWQnquCuTKYK7KJR+YLyS6UkjleRvJEGZfu/RVY/66xIpFME405axzTfSZc4bW9uZmBUj5KUGyY5ddWENdoQUzCiGcS1oI+Z0HEn1AsSkp9YONr8Ogs8ePNJKevJzpTQXxxF4lTNQTnCzyAyM0SQF7vsZjssQoyJ2os+E1/408grd1rnQCr2lMEEjDcDPoVCyJBs4kiESznkohWr9I4r6Cthwu9Zn8qsfA/D49PEPjEW/EvbiN5agepE3nEFgvJnqgnfWS3ASR0Mg/fvVtZO1VOaLEGZitIHLmdcw/8BclnP2kd/qzDeWydYGgdXzyCL6vlrqou04y+A4mu29DjTa/ruQO/nCclTLyOJBbHZAhFde+OJFpfZDeogFrd1JVlzAgkVw8Q0ctau3qxTiBqUmA9gONPHuPsqSZiJ/JhugkmG+FoPtn5LaSOaW7kJjILW8yKRI/nEzpeTOh4KZHFcmQ9MnPlFt9xbAcs7iS7WAiLVbBYAcdvZv30bbD0Vbu7dCpBQp0WBPtEBtWpWkX3qxmoC/4/CRAZCi29EFnECAFTfRuCwSDx8GXX3PnRw8QXmkhMXE9itpTMyWbS9zYRPFpCbKHYAjbFIjKzr+coUDFfSWaxitDJEmJf/U9uua2shGbPdbTyAjkUShp4KterYVRcYieKkhqmXTwUGKmvIOLVdDaQ6PxYABUfJr/631iaqyB5qhTuKyZ8bCeZE5WmBcXs4IlSVk+Vs3KqjpAYfGQrHMnn/HQTa5/9Y/jBGCz/o9sLnCTBDJyNWR5QuUDWSVviN6kYSF0y5CuZPCgF7FkOA0cOIK57pLXv9DS9aVNtkJk8A2vfIqG9XdIqQLxKgIg80jkxZ3k00+JPu6JOKZ3Ui1/k4ifuIHpyB8zsghlNFheY1UgJJPNbPIBsJ7pYSPBEsQ1ZEiVgsnPFsLATjm8nfSKP1PFi0ov1MC93fQvhU03wo4ecxUyGScmdV0ugZBrNwUrvbeZbjqWeuvCsiGmNK2+96tE1WxBrTOilUS2TIqaF427f8PQyxB6Df2kltdBAavx6kmq1eV8z3L+X4NEy4oslJBcLrmqkFmR6nUaJnCok8MhvQ+AJd/+qp/EQq5lm5W9+HCAyyR5IcsccRV9FKD3NvbX5aCDMLMOzR1i7/zfwK7g8WWL3FZ8vILOQj64zdLyW9ZMNrJ1qIHq8DI69CY69gfjCDpYWGrh477sIf+0gXPoyiG4qlMzmaqOyrKvlaTpMUn2e1FZJpR1S3RvaMGdBZEW0j4abuNVOssJTStZUiQVtDbD+T0S+P8xL2l8ktXL1ABHwFJtph9e0RSH4iFoiQG4gS4+z8vl3EjldADPVcHQXmYUiEioxOb7NgMLCNjKLO4ieKDElFzpVQPREAZn5Emc9FreRPbGFxGmXHUwu7IGFBlh8s3khsafnIbPutHJc27ElkTVRvJhbcGhu6Sb+/R8FiMmV/H1ZEYFDWw2oc7hptYtw+eMkv/6nZBY12XM96aN58EADPHALIU0aCiDH869qpBYLYEGFcaXETu1k9TO/CivfdvcvcJg/Yo1trC731QDR5JmbgZX4vwoor3j+E9DiEdw8CpWB+n5A6ls9nJ+/Df9CGdn76okc2UbmxPWkjm8lutBAaGGvAcVcjoU3wOK/IXP852w+QDHM2ifehf/Lf0/iBydh5QnH9HQuThCYZadlOhRnBLxq1Zya1A2LAa73sJ7p7KglGFQ2HHYZvsSj8OQQlz71+zzzwIcgffaaAGLOfjwqBFpdmCJRVQObcQudIfyP/4nwac2GF8HxalKL5cidSp/aTnrxBgwgSvkeLyNwqoTA6R1ET95MZqEA5nfAyTeTPn0Dkfu3E7q3jMTx22H+Fli8kezJKgL/Mux6XqmJtxoWqrIjE7e19IpBJQI5UdBjBxbRUu6F3vnfaUGkJfT9qkpVoy0pMRWvZdVHVcszn2XlsTYCj7wbTpbB9PVkj94I95fDx/cQPVZN4ngpqRMFVzXSx51Pmj1WQvz0Di4++Fay5796RUHYvcv9CFi7HgtkJTWeiyWAKA5xhHMpUWdRRLhXj80AcoDR7UtDxzQ5Jq1+8R9Z+9yHCdx3KzxYR3zuTWRP/QLpk1vMgibmbyG1WEXqxE7Sp64nffrnSJ1+I4lTNxK7txj/6V1cPvGLXLrv9wl/tRWefdAqf4mqe7sAoXuR6GkyUW6jHO1NfN6It5TCzcHFuRpypUg9C+cWWP38B7h8qonAI38AqeevHiD6cbl8aW14rmVVMes8qcXJcnYkC4lv/iXhU+Wkj26HUyU2iRo9XkH6vjxTHizkWWwRO1FJ8HQ5wfvyiNy7jezxnbCoz7yJ5H1v
IPjAVgL3VxLVvNP87bBwo8nU0lfarMsMGVUmqLGGytxjqLFXPOEqEF7NSSlKDVeaYkLykzXgtXY1MZ8jnbGsogxHPCIOnQdeIBl8maUXP8sTX/gAlx/aC6eLYe4GMnM/D/fmw8ebiS/UGkASJwq4mmEAOV4Nx4qI33szZ+69lcTL/+BkW0DYAIhm+VUOc6UOX6DQXI3iEEdAN8MqojnC6VUv5bWhbXKkdmARQMICiAJmE9RlePE42X/4ANmPF8Dpf+uAcCKPrCzdXA3MVZJdLCB83434HthC7BPVhE5W2ERj/HQhiVOFRBaKiJ9qJv3J98BjnfDsKbj4bQircYRL2arzoqINSxbqXnVpnjHR01xaN6wgOp0iE3yJ1I8+ZZWwZxabid5fAI99ANJPXj1AFIOpEljzHxpErOOI1uPo+lTgmf3ufoKnakgeeyM8kE/8RD2h4/VkH1CM9iYEEHkAkRNVBE6XE7h/O5F7t5A9ngeL2+D0z5P4+C/g/8QWfA9UEznxKzD/Vpi/CY6Xcf5zH3HN4DLnXZeS/5+99wCvK7vuey+HBexEryQI9jKcrhLLlhRFUYrlOLFjRYmduDyX51jOs2PHzy+2FSdOHMe27Eh2JCeO7TiSZoYVBAESIFhBdICoLGCvYEXH7fee8sv33/scEBwNZ4ag9OaT7MtvcV/ce8655+y9/qvttdfWPimKEmn9TkZCxGrSQC4GvrICFyFI3hUgQc/qgUKNoFZkR/3x3p/5zp6nxFgJEa2nMWkG3g3w+0kPH+Rq62/Q9+bL3Nu7GjQgXyvA+4vl8GYF7H2FzNd2PBNAHGkQOcL/ew3ZXWUMv/4iXN4T2OjGQw8yYZXwJ6n7iOctQEJwSAmGAJEm0bO9Ez0CiNZsG7lu1JBWil2G/t9hatcLUFNBamcpmTdWY8D81XXwlxvwv7aO6d2rGd9bSaz6BSbf2GzylvzqCqguwH9zqZWuezbh7PsY3pGfgP4/hLuHIXnZaESbfBgkmYWKZbbVYMZPs3j3IN5L+vrr3G/+Ja7v/hRX/mIb6Vr11S+aiolzdtJNBFAMoHX00hvTJuImD2pC/aF0mL4vMfn6R0h+dSXUFJHauY2YUnH2VuK/XmCjU1/dgNUgG4jvKie1s8Q45Xy9BH9XLpm9uUxXlzG1dwupNz6K/7W/AV8thTe2cefAL8Jkn82yVgV4E8lThzikJLUDs0qjKWGouZ/3Cg6dG1FKulmPrQ7VQ4V8IR4wyzoVU5ddp/yfADPqGMMWWjgfOMNG2GoBzg0YP0i29xe5Vf0Sk3VFZGqWwe4iUDRj52rYuQHvzZdxRbu24O5eP2ealk9TvRn2V5KQ3d/4y5A6C9nbSqNDm6rIbZVE1eOFzyAnUo8a4j1sTY+GBz2xtUfpfHlbijKNKe6uPsjEYLIbzvxH4oe/l7Fdm4ntrSSzbzXe3rW4O9eTfXMLqV3Pk9i9nWT1RpLVlST3lhDfk0ty7zKy1ctwDuTi1JYQ21/JWPU27u77EMO1f5/Rpp8ideY/wJ3/CeMHIN4N0SsQGzPBEeOiyOSTuZHthZt/SLL9hxmv/zATh17l9u5XuPS1DxNv1sKtertPn32cp/9fnRbyCbIcVJrVztZLYpsgwnAz0T0/QqZ6De6BxWRqVxPbWYH7xhp4vQp2rjf84O7ahLN7A+7uKkPsqsLQ7tW4e1bj7Kkis2cD7s7n8XZugzcrcXa+yoM9Pws3DgfFHMaIE2XKi5NUiXYDYPGt5sBsJFOC0pimM/f+zo8dUYRnBiB62BAkRgJoMY11euxcQdghYg2hNGvX6MjgNQDRhu7tOKd/k9G672GqutyCozoHdufDLgFkA7z5PO7OF3F37kAdM1eAqENjr6/F27ce9pfh7NxItu5HYKIR3CtkmWQKz6h7yRITZVOniYLGasp37qQnfasu0nUFQJXFlDa1/TAJY21w7U94eOQHGa3/AGP7q5jcW0Jq7xq8vZtgz0uw+wXYtwl33xqye8rJ7Com9WYuyTdXkti5koTs75oy4nvLGNtZxv3X13D/jY2M73uFaP3HSDV+H+7Rn8A98StkW36fdMefEe38KhMtX2Hi5K8TO/rPyTZ+GK++Cu9gCfGaNdza9SpXq/8JsbN/CpnLgTnypCd8D58bgOjBh4F7xoQROyjEapK0bvUxtednSO5fS6ZuIdmDhcR2FeMKHPJLJSx3rZ8TJXe9xujuH4aLX7fzX0wHAtEnrXQgSSyFMs2CMIX5Y0E0U3wdKAMB5R1ej4d5jRgN5gTCuQFdaDaZC4ZXDxlCnSHtcRku/1eGD34vt9/YTqx6E44xr4pgT67VIm9uw3vzRZxdz+Ps3mKkhrNnPXOi3RvI7N5GRtJmf4H5reje74NbO8G5Y6I+kvCKqsxokFByCCDmed+hd97tq/BainKmPDzVtVXXCDlmYdZNkre+yoPun+PSgRe5truA2IEiOFQJ+7QKcT18bQN8bRN8XZNfWyxpnuSrG+z3e7fDni2wax3+m3Luy3Dk4O4rxzlQydiu1Yzs38BI/YuMHHmVh0df4cHhrYzUljCxbxmxNyL4e5ea4yd27uBuzQ8ycfr38cds1XSL6Hd70Hf4Xs9rQs1Wg0hzPAKIA7d6mNr706Rq1pOpW0TmUCHx3SV4byg/7dkAkt69g5E9fwuv9z9C9Kzpc5Ut0+/LjEopcBIKRFum26xR8iXcQ0Vg+PnJzxeZYX7zRpe2mmHGHwnBYa4RRnoeuSjupBbxqlTMLbzLf8ndY/+Y4dpXmTrwMun9L+Lu3o6/ewPsLYU95fi7tuLu3kF2z1Yy+zbi7t0wd9oj0+pVUm+sxd+/HKe6kLGd3w1n/hhSDw2zChgCiRk0PUPIwCbi8+SOeU/f6Fq6sCjj4afTpNJZEloKaraukFq5Rfx+Dfd7/jW3Gz/JWN1WsjUVsEtOZoExFYxW3bUZdj0PO1+ANxTnfwG+qkVmz5tiD3x9E7y5DnZXwp4y2FcK1SVk9xeTqCliqjaPsbqlTNTnEDu8AKdxHhyeD/ueg+oiMw5Te/8uifbfgtFmM4v+XqI479oPBiD6TzPzY2/RIA7c7mZq708agGTrlpA5VPw4QHYpJL5hTpTZu5XJ/a8wffJHYVg75Gr9kd3xV0ZVLJMwOLAYkJYL54qCWgUCybsCRAfp+YxtoAtYBycEiMn7MULRLuSxSxxtRXdjxEdVCmIYRvfzoOknuLhnB6N1W3GPvIpb+xLerpfwd76Av3uz6QR37zoy1etIVW80lN0ngKybGwkgNR+0E0v7c0jvX8bIzhdItfwaTN6dCecKJNZJD8KiQce8S9+8K2/Y5w9imhKdXoqEN8GoO8KIM82Yp0rAQmIC4lfg+k7iLf83o9UvMbVzMfHdEZyGlWQO5ZOpKzLmlHyV7M4qk2vkfG0L3td3wNdfwn/dkv5Of3ULib/cQPx/rYc9r8Gul3B2bia+s5LpXcXE9xXg7s+D/dLcq6H6Faj/AWj7DbhVD9mHZng10uqKZ3qpE01HWic91CAS3CbfY7iV6L4fJ1OzCad2BdlDZSR2l+FrScCbmwx
PeHvWMRcSH8Xrt/Cw9ruh5/dg+oKZb1AQS+WKNNFqx143Ewh/41dbnjf3+C5MEDGDrPNNjdhAe+hPhbi1LmKmGohmozWfoOWfds2RERfJcXjQSKbvl7hT/z3c2rve2NvZui0QAuTN1/B3voK7ZyvZ6jWkasqJ799gHNTMMwFkI96+l40GcaoXk6pZzET1ZkYbfgLuXLRYN4l0EixKD1e0JeAKo0H0xzO8dLrsN8kVk+ikvpEdPMUUUSbNAiXbl7avJuBBA5z5fxk/9ho39y5j5GABD2vzeLg/l4f78pjYV0S8uoJMzQao3YovU2zvVti7w5AmIOWoOm9ux1M0aOfL8OYOM7ckXyZRXczk3gLGdxUz+vpaxt/4AG79j0L/H8Gdk6AFX1m7F59qQH3zACJPzFaqFytagCThbhPR6h8xAHHrVpE9VE5sT5kJVjwrQLL71pKuX8+DvVtIHFZ29Fdh/ApMJMzWcvIPZT2ofJMBiHwSw7wWIIGcfEcGiFgnRikLIhtINxGeIIIoiaDQWMasGNTKQaVsWDVmBv1hO5z+PA8PfZwHNZuYrN1IbH8Vmeq1ULcdf8/zsOtV/F0fxNnzItmaNaRqy4gfWE+iZiPZ/etxqqvmSBvJ7txKdvc6MvuWkqhZSrxhHXdqvheuttjZKj2AAYPmQSatsDMC4R375T1+6eE7SbxsAleFkM3MvECiPlL8TJVGAuFlR8ok8ZHpxbn9RUZO/xhjJ/8eE43fxcO6l7i/bxOj1WuJ1a3Bb6yE42tIV6/A2bfMkLdvJX51oBkOlMEBTaSthF1L8WqeI1kfYeJohAfHl3P/2HpuH/448bZfhbO74e51u1+3wKx5PUWkg4Vj7/Fh3/Yw48eZb3Rhm4elrBYjiBTRu9dAouYzFiC1BTj1FcT3lpvADLs2wt71+Puq5kTim/SBDUzv20h0/wtwSnuu1MDEPTN3qTuysA3sYIFDKiUYFv35LgqEiPHwTY2lIIw7S2uE6tKGxkIG088Gxyrdu//3SDR8L8NvbuDhntWk6jbi12+GgxvgwEb8vZsMSLzdL+PsfYF0TRWJ2jXEa6sMZfdX4eyvtAB52rZ6vdUee6pI7c0nXrOC9LE13K7+EFzYA9H4jEulaTWl/tkZ1PfQM2/LDo9/aFfraalQnKnsJAnHrs03PyqzypTCCUJd6rZQQWu2lxtAD9z8czj728Ta/hX3Gz7D7eq/yZ3qVxk9sI3pQ+tIHV5Dur6EVF0+yZpckjX5ZA8U49aVwcE1UF2Jt7+CWG0po42rGWnZzlj/p5i69C9JXP0CRLshqRBwIE7VamCVGiSz/BlfhsH0nzFd5BrbyxspqjX892tIHPh+sgc249YWBQApMwAxvocBSGUAkKdrFb1M7Nlorh3bnUviwA4499swNWhuQjETC5AgvPgNAJGn8s5WRMSIEhMKCyWt1UIWHCrXrIJpGVw/Y2qymlTd7Ah+8gZMH+b+wU+QqN1MsuZF0vt34NSsw62pxN+/FqrXQfVGQ17NetyazWRqXiB14HmSB6tIHlqDW7sW74Co6m1bt2atud7bt2vN6j1qN+HUrCVzsIL08RLuHtjCaOMvwd0ztockQJgmyR0eRm+RNqkINrbwLPyh/g75TuVJVWjBqi3lJgWaI5BYxsWzCtrwpz42zGX2c7wNYwNw6yjZ83/B9On/xFjL5xg98c+41/ApHjR8jJGGv8FYwwcZa3iNcVH9Bxk/9D1EG/4xsaM/Q6Ll10gP/BHO1Wq7tcLUdUjIaQ2KgolHdE+iQIOa33+WDtClpJ3N9SQ0fbPxpX7CAuQBXPwTotWfNP4R9atNQCG2rwRqt8C+DbB/PdSshZqqp2/3byaz60VczYMdWkaqeinj1R+Bi182m+RIu92Pxon6Lqr270xmbPqBKS3rEc3Kong3gISdFhhkOkErD2xVjRgJN246wfrwStWYAO8q7tgBJgd/gYn6DWTqCnD2b8WpfhF3/6YAHAJIJdSsgQMVeLUVZGvXk6l5idSBl0geXEvqUBlu7ZoAGCFQHm8Ftnei7G519nrky6TqqsicLOZhXQXjDZ+BK/tgUqvbNGJavz3CpDOM1k+LO73A/Jorj6jrHgHEbjFtfkyTqmLIwC80bWDs6yfDuROda8ZH32mFT3oSErfsjrfRdpg+Qvbqn+Fc+zL+tS/CtS/Atd+Ha79r6eofwd39JgeMBwMwph2SMvam9NshKAJA6Oc0vtKidjZZUcm5v3SuwZveCCkz/l7wXM4wXPg9YjUfNRVdLEAKie8vhroNVoDOAEMgeUravxktmzA+Wl0O1D5H4kAVbudPwugpM5OvqKI2/jGRaPGBOt8YQFlSbuI9AEQjpo40QFJ+ihxxrfO9h8MYWSWhqbNlQ0tAph7AZD0T5/8frh1+nujhlbgHF+PuX4e7bwf+vu341QJJJf6BEqgrwj9YhFe3Gq92A86BHWRrt5M9WIlzsNx+iNxyWgAAIABJREFUXleJ9wRyDqzmSeTWrMZTuLNmrVG1ybqNOKdKGT9cyOjBD+F1fR7GtB2b/CsFGSZJ8ZCoP4JWnRkp9wwcolOtpp2xWiy36QsxvbhnFpMaHAS40TgJINNmpl+bwqiCvNXWnjbpEYWJgEpTl6bJToEzYZfpOqopO4HvTpF1EmZDmWzaMRntM4drvPSjgeYSKFRjK8U0CcZNa1cizg0keszw8ULNFMpbA5jsFTjz6yRrP0hS0bRDFcRri+xc0EFN7lbBgSr82so5ETVKen0Z9m2DmuVwKIfsoVwmG14mPvAbJsUGd1ZIVzyuLcudJI6XfU8r8iNGus0ARPlKIzgMGzLLddQLAocy47Sr6EQP8b7Pc+Poa9xtzCd1eAkcXAZyGvfLnHoeb/8WozG8ukL8Q3kBQKrwajcFtM4C4z0ARBrmSSStRE2ZAWNszwYSBy1Aoo25jNRUMnXkH8GNg5AYMdJEjnMKpSOMkxBoxDx6vmd46XQTCg9MDRPnEEOK9JkJENif0s/ZteTKIrabKkSJMUXMGIBy6xUWNqFJreEIuU9t+F4caC9kri0MKJCmIVKuqOZfdJ7Bls4xzyetoXI/KvIQI8UkCSMs5DPpgnN76dLKhlYxa7PMOdAguj1T8Do5iN/zOdIHXzKpNn5dGfHaAkPGfzqwZk7ACAHFgXVwQDlvyu3Kx9+/FL9+KWM1hYwc/bv4V/8bZIZnKQBFZWM4fswsNdbYvNsrYkSg6SOpW0VfBJAHRnuYkpZCnVFNHkzfhPN/wp2Df4fhA4VkmwvIHsrHqyvGq8vHqy3Fq9mIe2Ajbl0x7sFc/Pp8/EPlJmTJgW1QVwEHpWLLg/dr8A8+mdzaCp5EBiC1miRcS6x6PbH6KtInS4keWcF47UpGa1+CPjltKlOpNAPVhRKDxNEaZpM79W499I7fiwMlOZTNaplY2lYRIpn+0i6PAhzKeQu9dHW4zpVWGwm09YiJss3YR8pk0DWFlpCkyeV5aofXjLaHU4DACxxRa9EZ5jSo1Y3rBFWdUXBC46qJvOngnuzl3wOPPLEHZK4pwqm0DpNmMwMQDy8zZd
KO0m0/TKZ+G87+CqgtJXEw35B3sAxqy22gQcGGuVDdWji0CQ5uxtldQWpXHv6hXBIHcxmv28B002fhYYOZ95GNZarSe6rTr37ROAWq9YlPqF1u1aPGvLILi6yJpTmDQLJIa5g9z4fh0k4mjv4YD/dvJ3E4F5oEDqnIdVZLHMpDD+4cKsOpV0ivALe+HP+gkL4NDmyGulI4mA91AUgOVrwjQIxpJvPsbUiShAPrze/L6Y83VBI/WkyscQWx+kVM1FUwffKfw/B+8GQ6iv0yaJcpMYZjp7vfoXve7St1tFIVVSvLmlSKlivmod+ypL4ctyWQjFEVLJZWv2vQXG31NULWH8fxdJ1gIZRSVVRqXNmgIam0p8iohwwyxaQTQs2T9rWKzsNxHLOXeNrXdpcqxyRgKEQvsMjcnLm5Z9KgFiBBxUbFdmcAksVT5GzsKNMnvx+ncaPxQ2Vyi3mTh1aZ3DAOCiASmHMFSAXUr4H6dXj7NpLctcYIaw4Xkj5YxKiiWmd/C8aVOTBNwneY9jX6Fhi++PpdJETESixZw7bOklSmBtYkmylOrm18YsMwXM10288b2z7VsA4aC6FOam0HXs2Lxodw6nNxGueTPTKfbEMe2UOrceu24tdug9qNIMRLatQWQ22eBcrBclxFs55AzsHVPIkETmoEuk1kG9cSP1LK1KE84oeX4xxdQOpIIQ/qP0ri7Bcg8cDIApkk0pMaXLNB57t00DtDRKiQJpBmUB+K9F5kxH3wffDeJNAFzKnTgnXT0jTqcwk0Mbg2qElnYmTMfoCPrqfUEOEqBJ/RFvoj9CNVFE9FGHx5M3ESZvmrZmVmfFOrNnQ74a3q/Dm+LEDSpPwk2cCn0z2ZxVyJh/j3DzF65JM4R6qMteDVFBpwpBpW4R4shEMSls8AEAMwCdtSOLQDr+5F3AProaES6guJ7i8n3vSPyF79Y9zUJZR4orCvFLIBhgSQ1Q5P7IGIzaGRhLOTPDpZp5n13Or41Bjc3k+i++eZOPZxEkd34BxbS7ZhJXHlPx3YbgDi1FXh1OeRPTLPkgBysAq3bpsh/+BajMSQ31BbasBlNIkBSEUAkG9sLTjKA5A83kqrULsODm3AO1ZF8shqJmuKiB/Kh5OL8JpWcL9uPZMdPwsT/Qb1inzLsJEv4D1m2AfqVoB5K5lONGp2piPDQ+wH4jJBT9pEqyHU6u9gSwIdPDv5U1wUAEQ8rbQIs2RZZpmksOeazUMdXwaU9vASqWio/tnx0TjJjDNRGXn8Kd9qGhNREUDSZgY5hK05XvehC+iPbwJALJ95qJBENp0x2knmi5ZQuLHbcGcfIw0fgSMSjJX4e0vJ1JWQqi8kVZ9PtrHImN/+IQuSp20NPx1YhX+gAI6+CMc/aMx7o5GOFuDUrWKiYQuJgZ/FGzthxkQjpa4y5rUZUv1n1wBJAIVkgaP9QdwL4N03kkn9LItapL1nTOdPNOIP/hzRxh1m0so7+SLJk5t5cHIZd48uJN24Gk8MWl+J11CGd7jAkt7rs0PrLdUL1astqUPqywKqwGuowKtfPYe2DBoKoKEEt76KbP0WMod2kG1YT/bYQjInIsRPFDJxXFt3/RkkboIW9gdCdzKrFGhxipg5ULe2v6ykNe8FHKlSda3pUYMfKylDDa3P9b06LaTg+BBJT2hnO/LhIRZ04Y2Eg/Z4oqh+X8eb/3SoOXn2OXYBmO5CNHP8o0NmfWh/8an/12+q+0yKm2z7MaLcI+3fhvR1HrT/LtOHP2yiV1RvsNGmA9vM+ERPFDB9MpdsY4kZd/GG+OBpWuor4FCZvf6hqhleM583LsM9ugSvpYzx4x/GH/oSxIcgOYyTiptKPNFE0G3BpkrihTAlySwBIU3ELJF1Y6YCRMyz4FDI0Umrds8AXPk8tH836YNL4NBiOLKZ5PGtPGjK5cGp5WSOluI3rJk7HV6NP1dqLIYjK/AbC/AaNuIdegHn0Es4DRtxji0ne2IBiWPLiB/fQbrj5+DmPltWx1UMHKKeCg1I6UokBAAJOT/g70c5ajJ1JH5tp4aHGSZ9as76DjlBYAtki8cUSYZJowW3Kit0nJHWf0288RU4pIRJpZVshwPbcQ5vYPJUIZPNeZZ/5jj+NJRDQymmrV+L3yBSmk4x/tHleMeW4p0sJN30Aqn2H4crfwqxQcjGSacgqjVVZjwVvbVmaTj/p4CKQBIxYy7fL6mCW5YFHH8YEtUw/pskWz9K9nAZ1ETMRIx1qDYYcyZ1fA1O4zOAQw9jOqcc//AcSB0RAqR+qwWIwCGt1liKc6SYeH0BiRObud/wYaZO/yKkZGrFSCWSJnPK5mfJeQ0UxGMiV3+IA2ymqhWXdnLNdux3CKPP9TFkqpj6COqTSZL+/UCI3CF2/Q8Ybf00ySNbLUBq1oGobi1OYznRk8VMNRWSlYCdy9gfLofDxXA4DznlHLIpTv7hUrwjefhH8gwPZA9vxm1+jQdHNnO/9R+Zbau1dimbcEwdcw28xlK+t5YxK/iufwqLi8x6EBN5kWUQ6uJUL/7Ir+Fe/hST9ZW4tQVQuxRqta5Ys+Mb4dB6aNxgESsmD5n9ads5do7pVAOQPPzGQryGDTgNmw1gDUAa1uEcXke8roTEkUruH67iYcvfgzt/AdnL4GgpsawMJRfK1LKZy+E8hkWMNIY0jAASgkQdJdH51y8xlqpqJh2bnJlOaxJTxfmGGOn/UUabt5I5oiiTQvqiUuM8e0cKiZ8oJnailOyROQjGkGdkQTSugsY8oz28hnV4jeV44gsB5fBa3MPbcI5v537jSm4c3Ub6whdtpRhV59dgy4IOMtY1stYHF1gUrPKIRLlPQkvtLYxAD/mwlvTQDzPduo2E0tYP7oD656FuG+zfDvuVyr4eDlaCNMiRCvy5tjr3SNmcicYyOFKCf6QE92gJ7pE1uI3r8Ru24TdsIXmwlOThIlKtFUy2v8h474/C2C6zTUJoVdn5ChsMmq1AbM0s63Q+8s6kUWRu6ci/2kDRVECcaSY91X1Ut2gtfBRGD/Kg5UNMnVqFe7QIDhdBQyE05MORlfjHckkfLzOJpe4zjL/GnSOFcCQXjq3AP1qIf3ijGXsatsDh9XBsDenGQmInyhk9uZV77Z+G+18CX0JSmylZB01NCBALEssPkYdcYJrbJlpialqNNeFc+vdEOz7CZGMp2cMb4ejLcOQDcPAFqN0OhzbbUJom/BpD5g4Z/Wnb8Py5tBVwuMoA1D2+EufECpxjZThH1hvJIYBogU5aczany0h2r+P28RdJX/lVu5FmQuvtbUc8Aomteh7+bftPjrK80TCEIa2iCNU3IV/821gRaeJ1iklGmcTsMSQOmxyAi/+esRNVJE8shKO50JgPjSuNv2gY+ViuHaejFbhH5zLu4TkCSBEcXYl/IgfxgNu4EbfhRfz6HdCwEY7bbA+/ZS2pjm3cPL6O6NA/hWSdWQVrXIxZ4XaBwwQFA20SGeE8ca7hch9nopn4lV9lsvtjTDVVkjieS+p4Hl5TKeljVcTq15I6vBZOrYNTRbhHlsOxYjhaCkfL59jq3
DnSkUo4vBWOrMM7uRS3aRHuiSLco2vxjmyynzeU4B/Nh5Z8Uq0FPDhRznjH98L1P4eoainZ2sICgt3yTHudaDsHK1FCoEhaKnxpTS2p578GiGbRJ5hmTCaqOEuK9eGfE2/7hNEQcpQ5mmfp+HI4sRKOFbwNz8xx/MV75nqr8E6swj1RgHusEq9RAvwFOLwJGpfhH1sEp8rJNK/l4ckiRjtewb/xKyYbnawmZAMtEqxAFEBCikS5RUZrE9whssP/i8nuTzF2tJh4YwE0lxA/lkPs5BLGGldy9+AqJo8V4Xeuga5csicW4R2f9cAhWN5rO1dghOeFAGncYDuoaRFOkzRJEd7R1SBqLIKThWQOLyJ+dAmp1jU8PPYSMc2NxNpmOkeTXjaLQLPOsRl1GwJFALKhP4FExQCCGXGZpn9FX7LTJ0iaYnFW7I7BvX/DpMyeYyU2BG8kfAHeiZW4J1fhHRMYVsORUjgSCte5AqQIjq2C46vwjhfhHi+1dGw13pEN0LgWrYOnKRfv6ApSR1eRbClnpKmKye6PwvgfgDKOBRAZA8EiwVCLCCSRTDZqioulbuxntOVzJJtfhpMlODUqCFBEtG4BY4efY+T4Ukab8phoKSTenofTtRg6l+E15eKfKMY/XvK2LSdKzfXettV3z0LHK+HIFji6BY5X4jcV4LUuNERTnr22BuJoARxfin8yl8zJKmInX2Sy6XsYPqnatNfMwiY/JSfclkBS/ELLUdVRIoHEmlryOYI5Eb0VfYcDJJlM4rrGw5gRA0plUbE59Uk438h0FG7W4PR+F7GGCN6BFXB0HWgW/dhqOzanivCPr4Nj66zFIRAdfxYeKIETeXCyAP9EFd7JKmPteKfyMON/UsK7HATKk0vh1EqyJzcRP7WFyY4S7rV/GGInbE2xVAIv7ZqJWo23lOG0r1ysWILszS7GOn6V0caPkT2sybw8qF8LTdugZQ20l5LtLCPRUUa8o5hE6yrSzYtwTi0OAFJowXHiG1uBjZOS4m/X6rOA3glIOubtvj8hLbEBjm6CYxvwmkpw2ubjtEfwmxcbyYGcRCPFVhnV7h3bSPbEVuLN6xhp+Qjc/e8Q7wHtXJuJ4SY8m2wYzLiGGkSmlmETASLAyaPZtxne+Y57k1UF+VkvgSOVSpHJZMxkm9GsWpE60YV76TdJd1bhHo9Ag6JLSknaEoxNqVmK4J3YYAEipj1WBCeeYfwNXxSYa4QA0XIHp2UlfstCaM6xADy22gLpRAH+8U2kT1UR75rHWOdquP3FIMviAXhR9LwSjgKIwB9h/C7x7v/O2PHvMrOOnFA4Ls+mghypgJPr4dQGaN0E7evw2yugvQRayuCUkFlkpIPfNJdW54RUErx/760BzbEqK6mOS3qU4LTm4LRF8Noi0LwQjss5FEgqbMqDgHSykmzLSqbaS7nb8Qmc2/8Zkn2QiJuisr7WUwWJd9bcUgasXU6qyKARnValfMdrkFnYMFojnU4jraLWJHuqH5K38O/9IdG+v028qQhOSVrLzKqAxu2g9KSmSpymciPlpe2NVDcAeYbxF0CkgY6X4mvsT5WSPbXaZJmLB/zWCDTJTy4H45NughOr8ZpXku6MMNWZS7Tvx+HO18HrM9u5+dkJ4umMqYov6yHC6HmiLb/JyPHV0LMYWjT5kgtHFkDjAlAlioPFpOvLSB8pIXN8hbH3OVYGRyugqdR2hjpkTlQMp+ZI+u3ja+DYWjhRgd9cjNu6Cqd9MW5HBNrmQdMSOFYIjZq3kaapNOrXb1tAqnMxd04UMNb1d+HG/4DoDSs2BBAzy6p1G9q9V6T3wfJSM2nyV8PECgGi7dlkakmDhCaWieIlJ01KefLSZxlrryB6LA9OFUCr+KECjm6DYy/gN63DbS6DpjI4KbOq2FLTHMdePKPxP1Fh6VQxbnMxmZZSMq35OO0L8NvnQctiOKmMi+ct6bdbF+F2Rkh0LmOi9WOkz/0bmHzd1ghQqkx6ykasDUCifcT6/h13W9cy0TmPZPNyaFkCp8VgEWhZhH8qzyDUbcrHPbUQ9+RCM/egORBOlUFLwZOpOR/eieYEqgCM6tzjZdb8kt3ZnIfTIhVbGHRQBP/UAjiuUKMiGpIgCj4shrbncDqeI9a8gPGmtaS7fgzu7oLkHe1nYBas+07UOO42/UCTR3Yy0ZhYf0V8kLCwtVqB5LGXKqmPt8Dl32SyZzvjrYtJyKIwTFgIHRqbdXBiI37TGiPANEacWgUnCi3jPuv4Gx9WwKvAbS4l07aCdMcy0vKTW/OgdbH9vaObQdSkiOYi/LbnSLevINYmf+Rv41xWTed9wEV8Z8xsnaDxjpC+Cnd3M33lJ5m88Alig99Dov9lol1FjJzIIdO5AbdtB27LDvzW9fgdi6H9OWjRepBiKykkLZ5E7wQefddcNHc6Jd9GpOvkGpC6LavJtpbjti/Ha8+xdujJXDhaaemkgCSALIT2+dC6ALelkHTLh/H6fgZG3gDnnNlsJjl536TFS3M8CveGVpU+lX0upHznvkKN8VZH3Txx5oZZJ+/2fpqp9nyi7QtJN5eQPVlmwdBRbiKhSHM0yyzPh9YlVgCfyoemkrmPvfjmlPwXgbAcmlbjtZSQ6VxKunM5mbYysq1l0Cq+yIWT5ZbEJ61LoXUlbls+XncZE80bmOz6Xrj/e+B2KlZt19QYJ13FAlLjuNNncSaPQ3QPzv0vMXH+3zLe86+Y6vgcsbafJtn8z0m3fJxM1woy3REyHRGy0jDtK6BVKvUJpE55Ij0jQKSZZO/KzhRYTpXjtpTbzmkrwW0rwD+10n5/cjmcXAUGIOqkJQFp0EqMepY0yV74LEx+GZIDZKeUOvFoD0aBRGkpZnvrmXV8Asp37is0q6RBwpc+m5qaInG/iWj3D+C0VxqJnWpbbswbmTlO8xoj0WlbDm3q65XQovcSSvOtQNOYPZOAVPBGjC8AFuC15ZLpyCfZWUC6XQCRv1EZgFPAXGjvwQjmcgMoTq8g1Z7PWPsHmTr3L2F8f1CIW7V9IaI1I1pPnFbRQVMoSTVWr4FzEbyLMNIM9w7BzTfg8m+RufgPiJ17kYnezYx3bCTd+jJOy6s4LS/itm7DbduI37YaX458Rz62g4TYgNRJLeqs3MD0eitIZLIV4bWtxGtbjteabx7EPuhqa8pJKrQKeJIOS+CUACJptBq3tYxsW4mRHnrvyR4WSKQ1TikmLpNPYNY9rIQuResK8ZoWku5YQqy3itjQ98OD/2krhodhLNXaDXK3lOCoORNVC58Jos947gJMSKEdFrLWW9vw+0fM99YjvvFvHTv7+sERb7F+vvG8Wcfp2JCMFgwnPYPU/mBdhCZHwyW11gOTxozhJm8yMdzC5MUv8PDoFtzWZdBZQKYtj3RrLm5HCdmWSpInCvDalwSaejm0CiALLZnx19iHJLBIy4iK8FpFBUbKu225hvnFC9ac17EKEgXgMD7PcmM1CCDpjiIyhgfK8Zqr7LHmd2X5iBdK4FQVXrP4NA+vp5ypzud50PV9pG79PtBrsibcdErp7rM6y3SyOkFB
rqQtja9dUc1S0HG0nx3eADgdkGmD1Ammu3+HVOevkez4HIn2Hyba/kmibS8Saysn0bYMvzcHTs+DLmkbRZbmwcn5cCwHjkj654LUrbRBSwlKCXAUVj49n2xPhGRbDpm2cpyWl8k0bSPVtIx0i663AnpLwNiZYvhi/NZSQ15bMV6bfe+3hWZgqOH0d/CZAhIKNhzXdZZC53y8vgWk+0pJnP4E453K/r0KsYytjKCZI8OeWr6kPKQkqewkjjONp6WyBjBxfD+K40Uxc0yGmR9lAAdsOhMrDhfoaKJy9lA89kf4hTHnwvFREFKMHVxKuAmPe1Ib4nF2kCGtPdqDfdqVm5SeJp0cIZoeYcofNxOBYyZbz8wMmN3DmK4hdeHfMNr0UTJtimzm4bcJDCEVojHw2wrx21fhy8poywtoFbSJgvFoXmV8RxMRba7Ab60w45/pLCTVvYrk6aUkexaTOr2ITPdSnI5VZmy9lg34LeusCdW2xJjTbscSAxIDKAnW1gBIBnShQC2E5lJoroDmdbintnGvbjX3jlYy2vMhRs78E+LDvwsZ1TEYeRuAPKFzTeKjQp8qjZX2SKdc/MQUZJWuMQSJdhivgTtfxr/6a2Qv/ATZc/+QB03PM9a0jqlTFSSay0g3y4kuNSFlMTRdq/E6y030IXGqmERTKanWPLI9C/AGFpLpWoXftQE6PoDb+hLptkJSbYtwupbC6VVmYASCuRD6fWO/llrV27nQBid6l+L2rCfd9Sm48QV4cBQS9yFjCiqZ6Q8rQlwyaKGQdiHRbubSKAk8L2FLkqrqvUkPDpg3QIedtbf1qbRG3G4oGWiRsP9DZp7dmu/0gUAi1faWRV76Ssc8FkV4y0rJ8HohSJJBKSEJQbN92SiOPxqUBkrywFHdSNcsozXrPKL7cK/8IomeD5HUOLYXBmNQ8NStAY20T+uKwDKwgi3bUUyqq4Dk6RUGHMmeBaR6FpDpXozTucIAT/NzApPAkO1YOousJnHb5WMoB68YTpdCt3itFDpk2ZSYuT3a1kLXazjN20l2ljPds5oH3duZOP9ZmPg6+L08vj/II/H2De9kXphCG9peN+uS0QaJaS3JUpEBbUg/aXdl1TpqLdNNaOuvq7ao2d1quPUVvKufJzX0fxEd/D7G+j7CWM8Oome2MN27jrG29Yw2lTHZstSEX+nLh4FKaFday0Y4vRm6NuO0bTKZuYnOJWROL8KXGddeNDdqk6ottoMjqdaeCx1F0FmB172WdO9Wov0fxLn+4zD9v8A7axfOyMIxeTvSIirZM2FaUw5ADKjvQ69erfg5OEcdqzRqFQ5QqoYFR3hAwMxvZeKQmdXqOuHfBhCBRjG5YdIooU04u9Uxb0M6X1pR92h8qmEy3CLBXVOlS1eQTDC/qe2nJ0+QvfILTHa/xlR7LunORfid6rP8OZLOlXmWAxJOXTn4nctxO3NxO/Mtda00n9G5HDpWgpjemNgy11YabZPqqCDVWUqqU8DKM0I1073MaJx01xIe0VLrQ3etItOVZzQe/VXQVwI9EdLdEaY7lzLR/Tzu5Z+H+J+/O0AEjJBCgGgPakfFBVzHbHWVcFLGoTECVuMQMofpYY2N6uA8hPQgJI5A9HX86S/jTf4W0Zs/TvTqP2bq/A8x2fdJpk9vJdFdQbZD4FhrHT1FyLoK8bvKcNu3km7fQrxbEmaZ6UTaCwOAzKHVtY26z8NeZzV0VOJ1VZDpLWaqbwWT59YRv/YDMPoFSPWCmEUZJ1qVSNqYIZOoBI9vhIhhKPVByKN6L0YTQwZvw69kYs0s6Q21TQgQtSEYZrch0+ozk1EcACNYjz5jIpu5YJliYUJIYDrPmNCPcmm0Pj/JQ6a4yzSjpsCBWQKh8Uw8hFvVpPp/gWjnh4h2FpLuysE5LYZeOUdw5Btw6Xy/axm+wBEAxABOQk8CrK3IRkhbQodeJpL8xxzr/LeXmCCB07Eap6MCp7MEp6sQpyufbHcusZYc4go/ty0h2b6UlELAinLJMjGgWwYdEdxmmfMR3IEI8Z4iJrv+Jt7Fn353gMzUGnjrxlOqwOFbASTrW6TiZSFpd1UzZZ/FgEemmeeEeymo5pO83jH8dDskD8LUXnjwP+Dar+Ke/Qzprg+Sbi0j0zwfpyVCtjWC07aEbMdqMt2bSfZUkTxdjttVbBxEOiX5C3jqVuAyWiiUgrpeMV5XAU7PCtKDOUwN5jAyWMjEhe8iO/xvYbIBEhNGEIj39cyBR2Aqk5jl66GgCJk5ZPqA58OPH/Mhws42TC/zaRbjP/Ze380qHxSCT79hflzw0x3J5HvkG1ng6MZ0QmCO6TIKZHqPjtaZGipzmPacvPtnuAN/n2RrCamWZZaxThfA6aDPu/JgDmT6uLsUp6sct7PcmNq0r4Y2BWPWQnOQOd6kQMoaE8o1Gl9zKc2Knsq3CfwZ+Zptpfbc1rXQKv8kIJlSum57CX5HIZ78mI4VZsrCO6G5sgiJpgjxtgjZMxGifUsZ73wJZ+gH3h0g0haimbELNEooCbWb6Zg/kwQeBD89s/zd1uNQ7SbPTN0nHEhqP/UEJGKgTUgd1XkyVUDugXcJEi0w8jrc/E9w9V8YU8fpXkCyPUKiPUK6vYRM90bSfevJ9K3F7Q6BIXA8JWlQu1daEsA6iq0W6cjF61xKtlt+0HxSg88xPfgco70FjPZ+F6kLKox9GOIjNgYc+g1GSfhknCzxRIppVZcPlnTObi13iptnuQvSBobpW/u1AAAgAElEQVTBw0iSmFwSX+wqCqW/PhdApHmCc2bODZjecLaAIAqON5GpxwMBOl3Va6TgtUZf1T5mLC79XvYc2cu/Ded/EPrXQvcC6BDlQFe+te3nAIwQTJ7ModMFZE4X48gX7VDe3xpoXWNzAAWQpg3QpOUVa6BZ5rAm/wSMFdAus2uJpfaV0C5zW2NYZq/TVgkdG6FjA7Svx2+rxGkuI3WqhGRTEfGTinZuxu3cQra3nNRgKfGBKsb7XyN16Sdg/D+/O0A0UaS4dzijOns2VR0cDqM6Vqu8tVN2nAfEuE2Cm6S4Tdps7jhhlLhklzHZxAeu3Wdc+1QYZ1bF0hRvFnomJmCyBy7+PAy9QmZgAfFuAaSMbNdG3L5S/L58/O58mCudXgH986FP6l3Om6RMkYm6eJ0RXEXe+nLw+paT6VtJ7HQhk53rifV+DIZ+Dq5+Be6dhrG7kBizRd/MRpEJprwkDzOquWs1jPJ6JArUZ9Zx12KtoPxpaD4ZgIQH6Q91jCgAkzk3+D441jr5ykLW/Iwt56/fMBQIs3AjJNO+xWrzFVTw0wYoRvkId6lpstNv4D78Wcb7N+D0F0LfGji9Gjplii4BZXP3LA8ETC50Pz35PcvIDkQMub0r8but9raCShpB0UUBpgI6ZGYvg57n8PsieL05NkLasxB65sPpoO3VeNoxpW8x9OVCfwl+/zr8/m3Q9xp+/3dB/8dxej8DF34Xd+jLcPO/woMvkb31X0nf/EtTYhfn3nsHSAgSI/Vm/acCcyILHMFFal2p45pPuYNnViuqHcFjHOkWVQT
0Xc9YWcbq0vyC4QchJli4p+IYIknqG79AamgLsf4ckt0FuKfL8HuKoEca4OkHJjxHA8Tgc/j98w3QvM5S/I4AIB3zcZXL02s1TLYzD6e7FG9gLe7AJjI9zxNt+YTdj+LqmzB2DNyztqIHWnSl1XYm99HssiugCCTCgl0LHdQoDRjdcrTtWBvynV3uZ5Yror4K+svCR9saq2qkStbY2YoQVo8BIzxvFkCM/2PGSoWwo5CagOlrZO/tYfziP+Nezxrigwtx+iK2n00EqBDkd3QrfC9aMecx8HtW4PblkO1fhNe7BL9nCfSE11WrlKcV+KdzcXsKSfWWEOtbw1R/FRP9m5ns20ayZysp0entJHu2E+/ZQaz3RaZ7X2Gq50Okzv0d0kM/gHv5x+DmL8Dd34SRL8DEV2ByP4xfIHvnEkQvgn/Njp+CTupETRTO4vU5vBUyZMTOIv09Q2FUJTQVQps4XLoqyRdIOyP9BLBAaEolhYffbyZ55WeZPqsiYItwJSVkTyoPTCZWz6q5Ue9y/L7F+P05RlPQuxxkW0ubGCqC7lXQLSCWQPdq6KrE75FEkgRbSeJUFdnOT8LQ5+D+VyDeaje1N7UxrPYI/TOjZV3XhMiduEs2ljUVFNPOFBlPJULF6KpwKVJwVaVoRCqXak2gpI8xV5Ne0JpAlN0YT6asjpMuMUVrfG2HbPfsMAWmFa71M2Qczd/ESKfDQndX7bzWw68Qv/gjTPR+gMmuzcS7V5s5oWz/KpyBHDzzzHm2j06vhNPqrzn2vcasu5xsy3YTmfT7lsPZCJyP4A3KUY4w2RkhdSFC8sJSYhe3Ebv4aaYv/Sumr/4Hpm7+B6K3fpvYtf9C4voXyd7+U3iwEyZk+nZCesguqXVGzF4hpjq+CnVoBaFK+GjORwWFJbFMirYV7D7jQSEP64c9I0AEs3BCUVPy+kFbKcIgcLbpICfG2MQCi25GIAm26TXhTlUWETPYpEBpFqPydfhkGu/OHuKX/qlxmJNKpOyQk7jeDpak/BzJ712FGRxjaklNSzqWQKfs10roEkgEkIKABMjlMBCx1CtToxS371WcMz+Ie+XX4OHXINkO/k18Rs0KxaSXJOlmTd1ps/4o0BxZL0nW1NC1GyGEhctsETNVXFHFS81oPwqKGbAEGkmgC0ldpe8eeR2BSacKHdkkmeQkWWkJV7pNY3AP6IP4Trw7nyd1/h+QHqjC61sJfUUwUIHTU066v4jk2RxSZxeSHViO17fKAuNZwBEAhK4PQOfL+D0FeAPzcM5FSJ2PED0zj9GBFSRvvED85qeJ3/4lUvf/B9mxo/iJPvDOABfsgjd3GNwR+1wh0wcaYCaiqh0lAtktv8vUT9beQmHcyFS/j5tKxyr7E/Zx5Bsmld46yfSOf+suQi9Erf7WyIcOp1UP8jmkKozvEah4fWTUh0BkTpF5oOrrsrJkuz8we1joigZX02M4t/6EyfNVTPVG8HsXwflSAwwx+JxI4JA2kLlm7NWF0KsJSGmRSujURFJpICVzQJqrV8eEtNisrNQMb6p7Gan+IjLnN+Fc+wT+7Z/CHf7PkD4JqSugOSP5zDJJg6Wd5tlCFaruMCnlvgmfy9FPOtrfS7P2tuC2ClUn3/Jv0plmyomZIngJ3xZTNf2tPhUJWWHgylTxiEL6ISRuQ6KD2J3PE7v+A8TOvkqmrwp61tgMhe4VZl5C/ldqYBWxMwXEzq4ieXYx2QEJBYEkz7TSpHMhc43+EhhcA4MbcPrKiPYuY2KwgOjlV0ne+imI1sPEdZiSbyrhK6tDGla1q4JAhfrQc021+2wmTiodJZGeJpqaCjYLst0gDn0raQysHBf/2cqKj8xVlR41vRj25tO2spPtTHCYKhEqDdMGdm94E2pnkwFJeEJQh0gAiZJkmltEGTaAMbeoJxs5TfL6DzN1tojU4AI4vwA0WP3LoF8O91O2koS9hdBTDL35oL/NZ7n2MzMLq+/0uX7nORiYB/0LoHeFOTbVtYBk7zwy/RGcs/Pwzi3COZtHZnAj8b6Pw93fgIf/G6ZaIH0zcOS1eUvW1OD1ZDNJBejBxcxhZwVDoWohUspWfEibqD69FnBpjYpMMn1jB9lEGnWNUEmbCIFCh5q3eQDuZUi2wsguYtf+iPGhX+Th4HczMbiGeO9yXK0HklaQ+dS1yKTe+P0ryAzkkzhTSvxsMalzS3DOLLL9HfbXXNv+JXjdETi3DC5WkT2zgdHTVYye+QjZe78M6Xq7kWKgIhXpVoFvdVnMc4m5jonAKZ9Q2M9mfLPSMZNNkHaiyHQN657ZfrLWibpodldbGaUIn0o8CYA2amgrKz4DQGzKxONMbwAgtRXSrJvRoiO7tkKFEe6ZTTWN3hNIBKYA4eoPYVll/RNmB4yslYKxEbMpZOrazzBxMY/xM2LK56B/SQCOubSLoE+OpsKYMtnKLEgGnoPBSHBtgUHREIFQQFF0ZAX+wDK8C4twLi4ge24h7uCSQJLmQW8xfs8asn1byJ79CM7lz+Ld/XUY/7pNy3HugqJ2GqnZYi3U7eFnihQnbAVDL+0bU0m5X9o2IasJSwFJnR5eR+CwHWjb2ChM9cCD1+H2b5C69hmmL36IyaF1TA+VkDxbQuZMAU7vIrPKLq3VmFps1rsAzhZBr8zHMpz+YgMUZyAXd2AZDEg4LXjUP6Zv1D9PQQOLyZ6O4A5GyAxFSFwoIXrxk2Tu/DokjoP/wFTz1opeJ4UpF6qa0Yp6ahMgU+ZUyHgrxxsmnFENto8MCtRZYehbroFsrVC6BBZN+L2ZdFXp0W8FQGZpCv1+KOGsWpR61IYuD5BDZMwsHSB//y1jLR4xm904U2aDeFIqqHoPRvYyeePT3DuziszZlTCwdI60BAYWghzEnnLoqYRe2d5LrcMop3FwCd5gLt5AkSEGVsLgUhhcjndmCdmhRTiXFsOlfLhQCufLYKDISGLNEDunl5DoXs5EVyljp19kcvAfkrr6y3D/yza12u8B9xw4lyGj4toPQflRiRTE3EfgERNkpU60h7N8Ny17jEImjukXTS5pdZ9209I+h8mzkFQm9p/DzX9n0vhj/a8x1VNMtH8B6XMRGFK4VH2QB2dXwJnnyPZFSPdE8Pol2eXbCSRltu0rAFH/SnMsZ2z/mD5RvzwtDS6BcxHivRHGTkeIDr0AD38dsifBVdjcagWlnaeUca6ijYGbq02BTJhTfaF+MTRr7/JQwDwGFplockQUPAoA8tj3gVkq4ATHRd5G/s9i7/DsJ7UhGnU3eh8au0Jq8LL6y3L/W9+H0k+XD7SIjb9oNyYb01fJRy+TxpeZoH23tVdf7AqJe/uYvvxLJBXbVkfPhfqXW80gp3Qgx4R8zaCfmR8AYyXeQB7uQBHuQImlwQLcMyvxzi7EPbuQZJ9MkGI4VwEXVsOFcjhTjNujXKXlMFiC119CuqeIeHehSWNI9pfinFtL8uJ2Jm/8LcZv/xDT93+S9Oj/B9NfhMROSJ+ATD8412xCaHYEU9hPrcwlkXcrAFcbJPeTnfxjEg9+hanhzzJ57buZurKVyaFyEu
fzyJ5bhX9mFQzkQ48icuUm58wsk+4shDOFcGkV/pUcspeso5wxgQgFLpbbYEWnzivARPsG50PQTwwoaDEHOpODfz5CvC+HidOvkLn4CzDRaKteah2OapYZ5ah6AFpeoK0lFPtXHyjJUoEGmUWWFMOTESVBrKkEV6onVABv5T39Lf4LNUjY6jOT8mN5O9Aguo2QW5+21U1Ir4tC2M4Cy+z8ovAm1M7GXPi5btpcR4FRXctmz5pGE4jZuO21dJLM5D0y9w8x1vd9JAfXkRkswT2z3A7aGZlHOVYTDIpJl+KdEc0C0oDMseXQK6koiRh5pDXOKtS43IJDgAgoa9o88zvuufkGIJwXcFS1cRVRrSfpzCGl+ZUzBTBURqYnD29A70vgSjFcWW4kt8yK6b4Ik5dzGL2Sz/iVtUxce4mpqx8nevUHiV/5KWKXfxnvzn/Bu/0l3Nt/gnvrT8ne/jMyt//MhDWzw3/M9LXfYPL6zzNx7UcYu/JJRi5t4eHFlYwMqdxAhNGBCNGBiIkOcUFh1FwYkLbcAF3roXuNiQhmOpeQ6FtI9sIS3GuL8a9H4FLE9ouWLGi+qUuRw1zokwZdaISE6degj42Qmq3N1e/qx8GVVgsP6rdX2n7vk+ZaTrwvzwQI0hc/B7e/DmMXIWHNJpM5bgAi01xb1N0K9pdX1oXdnFOTnOH+KWFrLRWBJvSPQ/4OzKgQLGL1t5LhQZkzOkfFq81LR+k1l1bnhNz+1l8L7CZdOryp2a0On/23uQd7YzN3Hn4fPokBXPgAw8Qe/jfunP8h7g++SOJSMdmhYGCv5sKFIvzBfOM0Z8+tIHt+Kd65pXBGJlng0A/KtJBJtQDOzTetf24R/tml+GeXPUbezN9Lsccstt+fWYEvmvl+1nmPfW7Ps781D//8PJyh+WSHFpAdWkxmaBmZ83lkzhaTOVNBZnAdqcGNpAY3kxrcSmpgu6HkYNhuJXlmM/Gz60meW0PyfCmpoVVkhhaTvTCf7IUIzoUI3lDESGr7fIvs8+q5B6VRVoHpg2Wmb7xzi3HOL8I7vwDf9IeOXwyDmlQNyPTXYnO8NJP61j+72PbhwCIQnVEfLsYZWk7qfB6pc6U4Z8vgrLSVwsjSTCXcaf0oYxd/Bab2WG2onXy1D6MsoWCLd+tiaApAQQlpEjnRllcfTarKyVbQSK2lGR6a4WvDYI/+C3krbB99M4OcZ5wHeeyK78Mfk2TSzTy8818YufpZoldfJHrWJpxxYQWcL8Y7U0j2bKFhnNTQMpzzGkgNYuC3nBWTv090TqCcZ+xw2eJzITGxf07MPFeyjDyXPpDAyJ7LI3s23wgII2gGF4EijGd1P4vInFtC4twK4mfzyZ4rsZpUWqx/vqm1Nn7mXzI9/HVIdmH3qlG6keVPhaslLi1ANBckTaKoXbBNYCBfv5WM920OEEmSGzjJg2Qe/g7pW59lerCS6e75NtJyvgD3bBHOuRLSQ/lkLyzHHVpow8NnckB0LgfOv1+ke3nOzB7PrdW5CnfrOnOlZ3j2czJp7YSiWbszKA2x/LE+VUg4O5hDdnAhvo6/WAgX8412muqvxB/5HfyYIlZK9VAoOmXTxGUkBJFNa5/IDxE47D6Nssol+L/Vr29zgMjXUSRMdmsjjH2BzOVPm5VhSUWmhlbinCsykit7vojs0ErcC2IoSevn4OxCOCfGegYmeaZz9du6HzH6XCiYC5ozOJ7x2c/Jj5A2CEAyWIF7Lh/3/DLcoRz8oQX4Z+bBWaslvbPPkT6zgtT5CjJXXiB54zPgVQPnbA6UP24mVJ1k1gaRAh9aGiPc0MZuSRGA4/8HhHybA0TLSpQHFofMXRvWfPgfyVz4W8QHC3AuLsW5UIBzvtSQO7QKLs7DOqvz4Px8OL8ILryPJI32rHRhIcyZnuHZBUwBQNGsc4vh/AqcoWIyFwpxLy6Ciza3yrSXrB80PbiUiaEPkb3972HyEDAUVBEZA2cSJxkjGU+YZd2O6xt/QuB45HAHfu3b+g3ffH3ybQ0Q9VEyyBq3AbQHkKiD4Z8mfqGM9JX5ZC6uMoMmkLhDeXbQNHBy5gUQMef7CpBFAUDm2M4ZGCGongEgQ4H/pPki9eeF+TgXVpG5mGcBoiiY+vpqBK5F8C+tJHbhg8Su/zKMd9mJUi3ZVl6eF8XNREmnEqb2bzqbQbvnWsfbAsR6JIFtFcaDvsVa5NseIJkw3V7uiJYxpgfgweeJXqkgcT1C5tJynKFC3POleOdnA2TeI8n9zEwWMtvTtrOYc2ixBerTtu/bvetZA2AYcFgweJciuJfmmwwDo60FjqsREzb2br8Gd34fJs7a1BpZyCYY5Zg9610nY0ubellcX4ZVOF0QtoH20DlyQtT+NUCerDbVN4pwKK0lE7VFOUwh5YdfYPL6ahK3IqQvLzKawztXDAKIBtVoEJk2YsqnZepv9vHSHIvnRkbzfbPv52muJ60R9Kf69DGah3dpHt61CPGLS5m8uJXM8Ocg3mUHLVwVocltOeOhoDM8H4ZsBQyFtNTOWkH51wB5MihmfxMCRGsjTDxQc5XOBKS/xtjtSkavRshcXYB3cSWcL7STZBpEqf4LS/AvrMC/mIN/acH7RIvs719cgj8n0r0vep/uXX22CG4sw72cg2f8uQVwcQlcXoh/KULsfITJC0VMX/t7ZLQvYKzT1uCSz+hlTI0CZTYrSjV7rtgu5HoLOKQuNOAijXdIsxniW/D+29zEUjzcIe27NpVewsZVbtJeRu+s5+H1COnr8+CSIlrSHsvh0jy4PM/Yw97lVfiXF5sB1aC+L3Rpsb2HS0ufukU5YJdz3p/7vrwQ70oO8cs5pK6thGv5cKUALmqCdjnOhRyiQ2vI3v8XpB9+hezEOUgZO9gWkvAn8bLKywusgEDGWQyEk8WB5ngrOEKQqP0Wv77NAaKYuGZYlbvl4GeVyKe850OM3N3G6M0QIDKllsIFMVMErszDubyK7JU8vCuL8a/Mf59oocl9EqMJqE/bKm/Kv6JrPAvN/dmz1+YzdT1CbHgp7s0KuFJhtfKFUvwrH8e9qYrpbZAew/fs/IWiUWaxljuFrw2LvgEgdgnFjGllEfNIY4RFML7FwAgv/20NEClnVTPMKn6ueirelF0qljnF6L0PMHYrggaRSznW11CI93IE/6qK2q8kbQAiJps7kzzrud5VK4kFjqemq88CDJ37bM+tvo3fWcz0neVMX80jeqGY5MX1cOP7YOSLEDsNWWUdWjNK/qIAYUBiFnBoCfAjDTLznQ1JfqPfEVhZOk6LpRT+tcgJ2fmb336bA8TD99K4qupoikQoy1MiqZ+Je59i4lYOzjXZxEpHj1jtEURU0tdXkL6+yiTmce053he6ugD/2iLcOZLO5apowbPRHJ9ffZu6XcbEjXxGrkcYv1lK+s6/gIk3ID5uI1WBby1RpjWAqqWmajZmnLQc4zGAyDmXnSxnUl58wP86XuCYmVlXzq6WI2vlvQb8W/f69gaIOtBRRpvWWA/bNdYSKpnrTN39DNO3i3CursLY6gLIFRuPd29ESN1YQuLmC
sOc7ws4rj2Hb2gB7vUFFiRzaN8vcKjP3KsrSF5ea9Lqo3eex5n6p5B5E3zt92gDUI7WcHguGT9lSNEqJSEaVaLIVYAVtdb8CjPDNQEcHKcvtRRmJjdLK/VjM6b1tw4ez1zV5Ft5a+/h2gKIBI4q0JlU6LvBwDwgevcnid3egHe1CC4vseBQTP5GBO9WhMRwDonbS3FvLAQ58u8LzYfr8/FuiBbOoV0I156Rruse5vj8V3Nxr+yAG/8ERv4Akk1mrYakuoYl5vlmWWzKyZr14mZfu0BDSFPITJL8F+m91gLNFMkzi5ZmAWSWtlFJpYTZy16ZvZKI37rXt78Gyfi2IIJZTKNUaYFmgqm7v8T07VfIXiuxTBTM5gog7q0IyeH5hrybFjT6/BGJYcQ4Ak9Ilpm5MW/WcW85R98F5z1i+Hl4TzxH54fnBNc357/H999wXd3bbGafHwBoCVxdakkmp55p9rla+xHchwVreB2dn2OvoWMM5cD1HNRv2WtFMPxD8FD+Rp9d3aiSs6aMaYqEaoR4abP2/tEiJIFAzrnW1atVnlWoPULzSu1jDsfbahBpkb82sd5ROEj2hLULg8VVEiiph7jRL5Gd+H4SwyW4dyJwOwK3xAg5cHMZ7vB8nLsR0Hez6XYE//ZC/FvLcG8tnyH97d/KgdvPwXDE0h0da8m9GcG58RzpazmkruaQuLqI+JUFZG8vIDs8D2c4gnMngjdsSeeZ+7n5HNycPze6FcG/E8G9Z1u9N/cWXts8rwCRC0ZQlMD1fNBz6BkeO34+7u0cnGHRQtzh5+zz3iiAaysMOPxrEdwruTg38snciTA9vB7YbXaHxZ0GN2N3AJgpMiGtELzMG9lMoc7Qd/Z7e0w4saHv9X7Wef+nvXMNte266vg65+xzzj7Pe+7Nuff03pt3G1objG1ME7W9IpVQCTVSqpZUKkZMiSik2pp+CKgfzAcpVBFbNdoiFB/UQCBSIaCxRaGlEUKj1UaTe85+v9+P9V4/+c+51r773ua2+ZDmZF+yYbDW2WudNedce/zHGHOOMcfQDenqlT21pSNMVpP5e7P/eQ2Pi61BjLpo2fyFenN6twKI1yAYfgGv+wCj8gFBykAGJIe7cLhHUlw2zDVj9pTpzffFVcssxQ3C4gaRoXUiAUf/V7IMaRhMIBOVVgxDhUeb+IfbeEebuEdr+KVlw0xBeQ4gApWYV9pLx1ekZSjM0woUrqDisgG/gB6lNGN68/xlOBRAduHidRYcR1skxRU79jmA6DuBIyht4JUFEgsYLp6Bl09YgBw6hC/t4R+ewa+t06v+KPCNWVraLD3t/PE15NVjedSCA0RBbG0SJfkUQLIZn9cmGn+JsP8rDCoHeLWUoSVZ5wEiUBxtw+EpC5qjbZLChmEgaZ2M6SSZDSikPTIm1bmeV1mGSg7Ka1DKQ3ELSlvEpTxROWe0lJ5zuXRfsYx7tGo1wByDZ22a9tUHUUm0bLSeQJqRwBsW9ggLu/Z6phHNuASOdduOjuqrKAW3GY/uS7/XM/W8oLx+OUAO94y3XCBOjhyiQwmNMwT1c4waHzTlyuYBceX5sXD1a9jowgNEkaAJw0srI9IickxNvkwyfJhh5Rx+PZXyxSU4OgFHJ43Ej8s5OHwLXLzJHgsnoZg31y4zu8pLICqtXKJiDor6O2OyHJRWobgOZdEqSWWZuOIQV+fMOPWhoBCNbShsEpdXCGvWTAqrVx6XiCpLhFUdVwirK5cdo3KeqLBvSIBUW7N+q39qR6Q2pS10vWr7Y8ZeEqgtaOLSGmF5E7+Sx6+uEVbWDMiTw004WjX3xUUHX1TZJ27eRdB+xGQ3VMK7q9FryKvH8qhrACADU/4sVLmRTIOoypVqjkw/ybBy/rsBUjhBXF5CTBEfnic+vIm4cNL8PQNCYQcO9y1wdCxsg5hODJWZJjqXmXSZqSTQrBmQJBULkkQAEXOKStI8q1DYgKNdKJwkKl5HXLiOqHiSuHiCqLRDLO90OY8YWeBQfzXXENiiWnrUuUBS2jb3azy2jysWFCnzZyDIwGPGXs4ZcGZjEcAMQKpr+FVpPgFkjeRIgmDJ9F3j8CoOXv0MtD8Cg88DxauCQ6BZ9M+CA0TqYmQAIoetqvVaM0t5tP4VvN9nVLmZoGElp2XOXSjuoh87ruaIi6dJCgck5S0SmUoymaQBCqfg6ACOzkJhH4oblrkFDgMQSdVV4osOiZaOj7R8rIntBklph6SyTVzbJK6vkEiDZSSwGI0iIOVtO4XTcHTatqN2i3tQ3IGS2pQ2kgknU+6SFphpA2mfWo64sk5S3ki119y92f9Iu5W27DjLW8QpeM1zypqPbRFVdghqqwS1HFFV4F414zL3qJ26w7Tu4LbOQ+dRGP9zmsl/0WFw9f4vPEC0dq4sF0EYm4TEBiDBANxvgv8ZhtVb8RsOSU2MLem9C5LQ6Q8e1WQGrVqGETBKYsydOQYVYFKmFrPpXPMMAefoPMHhJkEhT1DcJSjvE1UPiOpvIWyeIWrv4TVX8VoOQdtS3HKImw5JMwWNeZ6kdEoFmWnSMK90FHA1n0iPpRxx3TECQMxrxqL5lsYqyjSXQCYAHp0nKZwnKe4bQOkdWNBdDSA5Qk329ZyWQ9R0GDUdJu3boPtn4B7aBGxX56+Fv7LgALGhCcqDpGTP0iIGICai97/A/3OGldssQIzUFhMKAFsWIA2HSFRPtUl5awYgM58QIPR/lzGbJvGnSApniUoHTKs7TGu7TGvXM629E7f2Ptz6B/DqP4vfuJ9h6f0My+9lXHkX49oPMandilu/Aa9+lrB+ykr8kky3V0tWc2VzHb+2wbSxjltfNeOUttSYYgkFM/fSsvSOAXRydKMBiLRmXNkgrl7SNJdpkPqy1SBVh1CTeD2n5eA3Hfoth3H73baisetjizIuPA6uOoCFB4h2nYmU7yJK4nQeotpuSjL2JOPq7YQtB2RmVVahvA2VzRlAdM1I9NoqSXUTypv2PgFDjNFO/7eYJzw8QVg4RVw5CXrZgg4AAAtoSURBVO1NktEq3bbDoJ/HG78L/Acg/AxET0P0gu1DUAXvJZh+E8ZfgcET0H2cpPVJks4DJK0fgc710DpH3NwnrO/h17bwqmu41WW8umx+B6/x3TSt5Um6dzOu30W3dJpBZQWv48DQgZEDfS3drpKUdqFyGqpnoLZPXN4lKGziyjko8NeXobpDUjtB1FgjauZIGjk7fr23ngNdh2HNodtycMf3wvg/bKzVnMviqly2wBcWHCByKcndZGM7zTZNpThX2Luv3K1fYVK7IwWImECTZwEkTyIp23TMtVAmT32JpLYC1ZRpxDiipv7OQ/mAuHQ9fuU00/oWk7ZDv+vQ7p+mN76bIHwQ+CPgqxBXLfMoM2ZGZiuEijMewvQ5m5zZ+xuYPgbex8F9CNwHYfIxkvFHiEcfJhrdz7DxfobNCwxb72XY+rHLqf4zMHiUuPfbBIMP4vVvZ9o9ybTtGLNO4Pc1udcSdG0DGinVNohKm4SFHMjUayxBfZu4sUvcXCdurUBTpPELaBlAlui0tnHH
D8BEmeJTjb3AAPh+XV98gJgANoFEOkSe3DSpq+KzkmcZ1d5NIKmqH7y6DtUtqK0bE0TAkLQUGUbINI00R00T9h2oHED5RpLqzfiNc0xaJ+h3lul2HarNk/T6v8Zw9DmI/xH4lg2YNHn6bbIVw0RZaQOFsmq/ihIuKwI5Ua5ZBVkeXkEXbTkwlQTzXgTvf9KjzkWqN/J/4L4I4+fBfxaSv4LwEdz+Bbr1M3SqDoOqw7TmmFU8CQOjDVtL0MpBbQ2qcyCob84BRPdYs8ocew5xZ4lBbYtu++1Mp58CL417e1ODfD+MHd/1zDeo1V3l3FNu1kRhpAohVV3j+BvG/Ai7S9CUFM1DbRsa6ySadIrqW0SSrGIgMYWOAkhVK0L7UD5PVLkBt3aGcWufQfeAweAWBoMfptv7RfzpU/iT75AombRSYipUNTH1XAh8peyPbck1PyJRhgl5+udJ4eAaQLZMHdnVOFVDMqtyuq65laK/rzjOIgdUU8/klnoK3MeYdO6jU7uJTjXHpOnga5wyFWe0DNIS+jsDQiNP3Ngmaq0TtZcu3avrPYewk6PfOE2v89P43mchTOv4Lf5K7vdk4IXWIBlAxDeKH1X5MlOlT3HRRv0/z6R+D1FvGdrr1sSo70Azbxggbmsyu2fImFIZQBrSNntQvc7Y5V4jz6jt0Ovs0u/dyWT0q0STJ0imXzf17xJhMiviog2NgUK8lSAwxo083GiKH3oEQWTvU6oi5eFWOq8AVB5bKf7NMbT15ZXuX3Xm9RyVZ1ZMplFEc0cpJBNBYMKYhKgaRP8O7ufwex9l1Lwdt3OCsOsQSYvOAJKedx3Ivm+tkbS2iNt54s6y/V7XDUBWCDp5hs23Mug+SBT+A9A3+zNM+9+TxRb74jUEEC33XgmQbzNt/ARxfwU6eWhuQmPbAqTjkHQEkG2o71oTbAYQzTs2jV0uDeO39ph0rmc6uIA3fgS8v4egOB93Zwq8yKpTni5PlY5izYtk9qkYp6JOxzY1v8K2Vc5R6wjTtE55Visv3RCUKhS7IGeUi4LzsgC9YC7LoA0X97VXQgGwZhVPCRFeAO8JGD5E3LsLBmegl7NMn40xM7kEGoGklSNpbbwyQDrrBJ0NRq07mPQfI4m+ZnZwqp/XuALJsrsvJsqNBpltotEOs4HdU6BfTltEoheZNt9HPJREzENrywKktW5XZnoOSVvmRg5km18pYVs5osZJovZ7oKeUNV8A7zkIW2memjTwVB2RRSdGjxLCWKtq6o9KgGXFgurEtEzcWKLiLaaSZEzsTYi9MYk/MWSrsGpvvW/J7ItIH64G5klGZTRmjGtSH6kP1nyTSvoOeE9C52EY/DgMdu2YBRBjQq5BffWSpmivQnuDpLNB0l0xq1ZaudJ7STqb+J0dxq27mfY/C8l/ktAzuzmUSf1a/iy+BplJ3WzDjSoHmdJUEB/Rb96HPxRzbEB7DVoyr1ZIeg7RwCHpWk0ibWKo6xB3HWNSuK0bGFcvEDQ/AcOnIaxdmj+oqJFKHqkSkeFKa+jZdTUFUSqZRB8vruFTMRSmZecS7Z8XghPXFoFRIRjZW7Fr615okm9SqgagIhmKo8lIdpUSganZOGIYdEyZumy/t9EiMi81B4vK0Pw76PwOcfcCQec0QTN1LMr7LoAIBKIUIFq+pnNJgFjBsYHbyzPs3sN08NeQaGGhk9by07iv3c9CA0RAMOE+EmLSGjqa8lppYQma+NGniZO7jEc7C/kwXmzXIZSvYOLA2B4j18FzHSbuHsPBXXRbvwxodep/LcOKKWecqMYyu0bnGYlZdG73O1hO1n0Z6SGXwDQ7NzPwbD/qFUcB/pXIlAHQ8oTmX5bsttW0CfVV5L4Mk7/EG/wS7vBtuON1omkOvHW8wSphb90KjqY07J4xtYKuJubWl8JwnUnPYTB8D/CMWYyIgxrE3VQ4vAmQN+wbmAEkE+RGoqcOQwZMkj8k4l6C3qaVlNmk1F3DFUCCJZg6uAP5Ndbo925hNLmPIPhd4lCZxwvWrMmePzO89YVE9fFJUJk3mulYcNgkBjo3XbQYtQARSLxvEbt/QeB+FHf6TsajLYajJcbjNYLhBnRWQaZnW9p225il8UDaJQ+DTaaDJUbjnySOv27BqoDQWJrw+Mb/ejDlYmuQbJIoZsgY+DKAjHD5Y+B+kuGe9TDL6dVeIh6fYNhdI/KW8CcO49Eqo94dTFufhsHTELycMoCYQMtOWWNiCGkDgUNHXTiejwWIACESWNJIm6yrpm/qZ5r1JX4egi8RTn7DLFO3uuuM/FWCqQMCgzzm/TUQYCZ5GG9D6xR0zuIODphMfoFQBUfN+5apl6YsOZ7hvy6tLjRAZHWIKYz5MbNabLoY41tgyjT+PAk/TzLcTwGi5cwNgv4NdJtnGfZPMRnfSOz/FISPgvcv4CoaODVPDP8LfQKD3OFyjeuo744PHJY7sjiCOXCkczKZWppIJ5RT56Wck/JdfBvcv8V1H2Ls3s3QO4fnrs8BxCEZrRBNNmC8B63T0LqFYHAn/vS3CIKL1m+TCST9CNfw55oAiPmNMvEp71qWewmPSfBF4vhjBL2zYPwh20T1A/zunXRq9zDs30sw+TgEXwSesxNQ1S9XbXIpjxkOBAyFr6RfqtHjxkfaB5mZOrUaRWDRUvDYACSmSmKqwk7AV7HxLgT/DclTwOOMph/And5KMsgZEzToOfgDB3+cIx5uQ/skNG8j6X+I2PtT/KBpMi3JspL/xzT8JkDemG9ATJFVHJrZF2ZZNPVOyzoPv0wS/Tpe+3oSrWI194kb74DRzzHuChh/AsE/QfCSRYRQEcuZEZo0v+a5BggyVQQO1dhOwaHvdX5cH7UtzWnUqLqiRGpKhdMzWUOSNHtIGA8J/AmhOzXLyoQaR9XsJ/emjxO5H4bRLdBfwu87TAcOk5FDMNIqV564+TYYPAzBk3hB2zgtzWt+EyDH9cu/unYlMTVFNdktLFpsPIYWgQzTyAR5BpJH8btvNev5tG+G3gWIPwHTL0DUtNaTvG3hBCLtb1fohkgJ6dKJuJ4vEijm6dV19Qdzl/ohJpWWM1pEDskyCSUSswwbGLdJpKGFIUE0IIx7JhvlDPj+C2ZbANMPwehGgvEy3sRhqlU+1yHpO8Stm2D0KYifwY2qxtgMtYqctvuDGdwb46kLbmLNAyQtMGEcaRlA9JL/Dfg9wt7boasVmnfA+D7gD4BnLf9rSiFGk3POVDxSphTFVknKyqxKJ+kCXSaxj1t7aGjqg6ZGM0kuR+nLJLyUAiSe1d2QqIgZEtEhSnzj+Y+MM1UhKl8F/zfBvYPE3cL3HfzQIQwdkoEins/B+DHga0ySkpmeyRUj18yxatDXAUP/Dzulf7mPSc8LAAAAAElFTkSuQmCC +``` + +### `annotation` + +A Kubernetes annotation for the NavLink custom resource. + +### `label` + +A Kubernetes label for the NavLink custom resource. 
+ +### `sideLabel` + +Label that appears in the left navigation bar + +### `target` + +Sets the target property of the link's anchor tag (``), which (depending on browsers) determines if it opens in a new window or in an existing tab. + +The default value is `_self`, which opens the link on the current tab. To open the link in a new window or tab, set the target to `_blank`. + +For more information about the target property, see [this page.](https://www.w3schools.com/tags/att_a_target.asp) + +### `toService` + +Has five fields that are constructed to create a URL like the following: `https:///k8s/clusters//k8s/namespace//service/::/proxy/` + +For example, a link to a monitoring service can be set up as follows: + +- name: `rancher-monitoring-grafana` +- namespace: `cattle-monitoring-system` +- path: `proxy/?orgId=1` +- port: `"80"` +- scheme: `http` + +It is required to provide either the `toService` directive or the `toURL` directive. + +### `toUrl` + +Can be any link, even to links outside of the cluster. + +It is required to provide either the `toService` directive or the `toURL` directive. + +# Link Examples + +### Example of Link with `toUrl` + +This example NavLink YAML shows an example of configuring a NavLink to a Grafana dashboard: + +```yaml +apiVersion: ui.cattle.io/v1 +kind: NavLink +metadata: + name: grafana +spec: + group: "Monitoring Dashboards" + toURL: https:///api/v1/namespaces/cattle-monitoring-system/services/http:rancher-monitoring-grafana:80/proxy/?orgId=1 +``` + +Adding the above YAML results in a link to Grafana being created, as shown in the following screenshot: + +![Screenshot of Grafana Link](/img/example-grafana-link.png) + +### Example of Link with `toService` + +This example YAML shows an example of `toService` used for the link target: + +```yaml +apiVersion: ui.cattle.io/v1 +kind: NavLink +metadata: + annotations: + key: annotation + labels: + key: label + name: navlinkname +spec: + description: This is a description field # Optional. + group: "group1" # Optional. If not provided, the links appear standalone. + iconSrc: data:image/jpeg;base64,[icon source string is clipped for brevity] + label: This is a label # Optional. + sideLabel: A side label. # Optional. + target: _blank #Optional. _blank opens the link in a new tab or window. + toService: # toService or #toUrl needs to be provided. + name: rancher-monitoring-grafana + namespace: cattle-monitoring-system + path: proxy/?orgId=1 + port: "80" + scheme: http +``` + +Adding the `toService` parameters above results in a link to Grafana being created, as shown in the following screenshot: + +![Screenshot of Grafana Link](/img/example-service-link.png) + diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md new file mode 100644 index 0000000000..c1baf083b3 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md @@ -0,0 +1,38 @@ +--- +title: Configuring a Global Default Private Registry +weight: 40 +--- + +You might want to use a private container registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the container images that are used in your clusters. 
+ +There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. + +For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air-gapped installation guide](../../../pages-for-subheaders/air-gapped-helm-cli-install.md). + +If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. + +# Setting a Private Registry with No Credentials as the Default Registry + +1. Log into Rancher and configure the default administrator password. +1. Click **☰ > Global Settings**. +1. Go to the setting called `system-default-registry` and choose **⋮ > Edit Setting**. +1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. + +**Result:** Rancher will use your private registry to pull system images. + +# Setting a Private Registry with Credentials when Deploying a Cluster + +You can follow these steps to configure a private registry when you create a cluster: + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Choose a cluster type. +1. In the **Cluster Configuration** go to the **Registries** tab and click **Pull images for Rancher from a private registry**. +1. Enter the registry hostname and credentials. +1. Click **Create**. + +**Result:** The new cluster will be able to pull images from the private registry. + +The private registry cannot be configured after the cluster is created. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates.md new file mode 100644 index 0000000000..acccba1c94 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates.md @@ -0,0 +1,140 @@ +--- +title: Cluster Templates +weight: 100 +--- + +Cluster templates encompass both Kubernetes configuration and node pool configuration, allowing a single template to contain all the information Rancher needs to provision new nodes in a cloud provider and install Kubernetes on those nodes. 
+ +- [Overview](#overview) +- [RKE2 Cluster Template](#rke2-cluster-template) +- [Adding a Cluster Template to Rancher](#adding-a-cluster-template-to-rancher) +- [Creating a Cluster from a Cluster Template](#creating-a-cluster-from-a-cluster-template) +- [Updating a Cluster Created from a Cluster Template](#updating-a-cluster-created-from-a-cluster-template) +- [Deploying Clusters from a Template with Fleet](#deploying-clusters-from-a-template-with-fleet) +- [Uninstalling Cluster Templates](#uninstalling-cluster-templates) +- [Configuration Options](#configuration-options) + +# Overview + +Cluster templates are provided as Helm charts. To use them, you will need to clone and fork the templates, change them according to your use case, and then install the Helm charts on the Rancher management cluster. When the Helm chart is installed on the Rancher management cluster, a new cluster resource is created, which Rancher uses to provision the new cluster. + +After the cluster is provisioned using the template, no changes to the template will affect the cluster. After the cluster is created from the cluster template, its configuration and infrastructure can change, because no restrictions are enforced by cluster templates. + +### Kubernetes Distribution + +Cluster templates can use any Kubernetes distribution. For now, we provide an example with an RKE2 Kubernetes cluster. We may provide more examples in the future using other Kubernetes distributions. + +### Versioning + +Rancher doesn't manage version control for cluster templates. Version control is handled in the repository containing the template's Helm chart. + +# RKE2 Cluster Template + +The example repository for an RKE2 cluster template is [here](https://github.com/rancher/cluster-template-examples). As of Rancher v2.6.0, we provide an RKE2 cluster template and may add more in the future. + +# Adding a Cluster Template to Rancher + +In this section, you'll learn how to add the cluster template to the `local` cluster's chart repo list. The result is that Rancher will include the cluster template as an option when users install new Kubernetes clusters. + +:::note Prerequisites: + +- You will need permission to install Helm charts on the `local` Kubernetes cluster that Rancher is installed on. +- In order for the chart to appear in the form for creating new clusters, the chart must have the annotation `catalog.cattle.io/type: cluster-template`. + +::: + +1. Go to a cluster template example repository. Rancher's examples are in [this GitHub repository.](https://github.com/rancher/cluster-template-examples) As of Rancher v2.6.0, we provide an RKE2 cluster template and add to more in the future. +1. Fork the repository. +1. Optional: Edit the cluster options by editing the `values.yaml` file. For help editing the file, see the cluster template's Helm chart README. +1. Add the chart repository to Rancher. Click **☰ > Cluster Management**. +1. Go to the `local` cluster and click **Explore.** +1. In the left navigation bar, click **Apps & Marketplace > Chart Repositories.** +1. Click **Create.** +1. Enter a name for the cluster template repository. +1. Click **Git Repository containing Helm chart definitions.** +1. In the **Git Repo URL** field, enter the URL for the repository. For example, `https://github.com/rancher/cluster-template-examples.git`. +1. 
In the **Git Branch** field, enter the branch to use as the source for the template. Rancher's example repository uses `main`. +1. Click **Create.** + +**Result:** The cluster template available from the **Apps & Marketplace** in Rancher's `local` cluster. It can now be used to deploy clusters. + +:::note Restricted Admin access: + +If you are a restricted admin and don’t have access to the `local` cluster, you may still add new RKE2 templates and manage cluster repositories. To navigate to the chart repository, go to the left navigation bar and click **☰ > Cluster Management > Advanced > Repositories**. You will bypass steps 1 - 6 above, then proceed to follow steps 7 - 12 to create the cluster template. + +::: + +# Creating a Cluster from a Cluster Template + +:::note Prerequisites: + +- You will need permission to provision new Kubernetes clusters. +- You will need cloud credentials for provisioning infrastructure using the template. +- In order to show in the form for creating new clusters, the cluster template's Helm chart must have the `catalog.cattle.io/type: cluster-template` annotation. + +::: + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create.** +1. Click the name of your cluster template. +1. Finish installing the Helm chart. + +**Result:** After Rancher provisions the new cluster, it is managed in the same way as any other Rancher-launched Kubernetes cluster. You can configure any options through the UI if the cluster template has options for the user to choose from. + +# Updating a Cluster Created from a Cluster Template + +You can update any clusters using a template from the **Apps & Marketplace > Installed Apps** page, given there is a new version of a template being used by those clusters. + +# Deploying Clusters from a Template with Fleet + +:::note Prerequisites: + +- You will need permission to provision new Kubernetes clusters. +- You will need cloud credentials for provisioning infrastructure using the template. +- In order to show in the form for creating new clusters, the cluster template's Helm chart must have the `catalog.cattle.io/type:cluster-template` annotation. +- In order to use a template as part of continuous delivery/GitOps, the cluster template needs to be deployed in the `fleet-local` namespace of the `local` cluster. +- All values must be set in the `values.yaml` of the template. +- Fleet repositories must follow the guidelines on [this page.](http://fleet.rancher.io/gitrepo-structure/) For RKE2 cluster templates, that means a `fleet.yaml` file must be added to the repository. + +::: + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create.** +1. Click **Create Cluster from Template.** + +**Result:** After Rancher provisions the new cluster, it is managed by Fleet. + +# Uninstalling Cluster Templates + +1. Click **☰ > Cluster Management**. +1. Go to the `local` cluster and click **Apps & Marketplace > Chart Repositories.** +1. Go to the chart repository for your cluster template and click **⋮ > Delete.** +1. Confirm the deletion. + +**Result:** The cluster template is uninstalled. This action does not affect clusters created with the cluster template. + +An admin with access to the `local` cluster can also remove a cluster deployed via cluster templates through the **Apps & Marketplace > Installed Apps** page. 
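+
+For reference, chart repositories added through **Apps & Marketplace** are typically represented in the `local` cluster as `ClusterRepo` objects (`catalog.cattle.io/v1`), and that object is what gets removed when you delete the repository above. The following is only a rough sketch of such an object, assuming the example repository and `main` branch used earlier; the metadata name is an arbitrary example:
+
+```yaml
+apiVersion: catalog.cattle.io/v1
+kind: ClusterRepo
+metadata:
+  name: cluster-templates # example name
+spec:
+  # Git repository and branch that contain the cluster template Helm chart
+  gitRepo: https://github.com/rancher/cluster-template-examples.git
+  gitBranch: main
+```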
+ +# Configuration Options + +Cluster templates are flexible enough that they can be used to configure all of the following options: + +- Node configuration +- Node pools +- Pre-specified cloud credentials +- Enable/configure an authorized cluster endpoint to get kubectl access to the cluster without using Rancher as a proxy +- Install Rancher V2 monitoring +- Kubernetes version +- Assign cluster members +- Infrastructure configuration such as AWS VPC/subnets or vSphere data center +- Cloud provider options +- Pod security options +- Network providers +- Ingress controllers +- Network security configuration +- Network plugins +- Private registry URL and credentials +- Add-ons +- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services + +For details on how to configure the template, refer to the cluster template's Helm chart README. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md new file mode 100644 index 0000000000..55908e358a --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md @@ -0,0 +1,240 @@ +--- +title: Cluster and Project Roles +weight: 1127 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Cluster and project roles define user authorization inside a cluster or project. + +To manage these roles, + +1. Click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles** and go to the **Cluster** or **Project/Namespaces** tab. + +### Membership and Role Assignment + +The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. + +When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. + +:::note + +Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. + +::: + +### Cluster Roles + +_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. + +- **Cluster Owner:** + + These users have full control over the cluster and all resources in it. + +- **Cluster Member:** + + These users can view most cluster level resources and create new projects. + +#### Custom Cluster Roles + +Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. 
+ +#### Cluster Role Reference + +The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. + +| Built-in Cluster Role | Owner | Member | +| ---------------------------------- | ------------- | --------------------------------- | +| Create Projects | ✓ | ✓ | +| Manage Cluster Backups             | ✓ | | +| Manage Cluster Catalogs | ✓ | | +| Manage Cluster Members | ✓ | | +| Manage Nodes [(see table below)](#Manage-Nodes-Permissions)| ✓ | | +| Manage Storage | ✓ | | +| View All Projects | ✓ | | +| View Cluster Catalogs | ✓ | ✓ | +| View Cluster Members | ✓ | ✓ | +| View Nodes | ✓ | ✓ | + +#### Manage Nodes Permissions + +The following table lists the permissions available for the `Manage Nodes` role in RKE and RKE2. + +| Manage Nodes Permissions | RKE | RKE2 | +|-----------------------------|-------- |--------- | +| SSH Access | ✓ | ✓ | +| Delete Nodes | ✓ | ✓ | +| Scale Clusters Up and Down | ✓ | * | +***In RKE2, you must have permission to edit a cluster to be able to scale clusters up and down.** +
+ +For details on how each cluster role can access Kubernetes resources, you can look them up in the Rancher UI: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Click the **Cluster** tab. +1. Click the name of an individual role. The table shows all of the operations and resources that are permitted by the role. + +:::note + +When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +::: + +### Giving a Custom Cluster Role to a Cluster Member + +After an administrator [sets up a custom cluster role,](custom-roles.md) cluster owners and admins can then assign those roles to cluster members. + +To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. + +To assign the role to a new cluster member, + + + + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to assign a role to a member and click **Explore**. +1. Click **RBAC > Cluster Members**. +1. Click **Add**. +1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create**. + + + + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to assign a role to a member and click **Explore**. +1. Click **Cluster > Cluster Members**. +1. Click **Add**. +1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create**. + + + + +**Result:** The member has the assigned role. + +To assign any custom role to an existing cluster member, + +1. Click **☰ > Users & Authentication**. +1. Go to the member you want to give the role to. Click the **⋮ > Edit Config**. +1. If you have added custom roles, they will show in the **Custom** section. Choose the role you want to assign to the member. +1. Click **Save**. + +**Result:** The member has the assigned role. + +### Project Roles + +_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. + +- **Project Owner:** + + These users have full control over the project and all resources in it. + +- **Project Member:** + + These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. + + :::note + + By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + + ::: + +- **Read Only:** + + These users can view everything in the project but cannot create, update, or delete anything. + + :::note danger + + Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. 
Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + + ::: + +#### Custom Project Roles + +Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. + +#### Project Role Reference + +The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. + +| Built-in Project Role | Owner | Member | Read Only | +| ---------------------------------- | ------------- | ----------------------------- | ------------- | +| Manage Project Members | ✓ | | | +| Create Namespaces | ✓ | ✓ | | +| Manage Config Maps | ✓ | ✓ | | +| Manage Ingress | ✓ | ✓ | | +| Manage Project Catalogs | ✓ | | | +| Manage Secrets | ✓ | ✓ | | +| Manage Service Accounts | ✓ | ✓ | | +| Manage Services | ✓ | ✓ | | +| Manage Volumes | ✓ | ✓ | | +| Manage Workloads | ✓ | ✓ | | +| View Secrets | ✓ | ✓ | | +| View Config Maps | ✓ | ✓ | ✓ | +| View Ingress | ✓ | ✓ | ✓ | +| View Project Members | ✓ | ✓ | ✓ | +| View Project Catalogs | ✓ | ✓ | ✓ | +| View Service Accounts | ✓ | ✓ | ✓ | +| View Services | ✓ | ✓ | ✓ | +| View Volumes | ✓ | ✓ | ✓ | +| View Workloads | ✓ | ✓ | ✓ | + +:::note Notes: + +- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. +- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. +- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. + +::: + +### Defining Custom Roles +As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. + +When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. + +### Default Cluster and Project Roles + +By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. 
+ +There are two methods for changing default cluster/project roles: + +- **Assign Custom Roles**: Create a [custom role](custom-roles.md) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. + +- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. + + For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). + +:::note + +- Although you can [lock](locked-roles.md) a default role, the system still assigns the role to users who create a cluster/project. +- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. + +::: + +### Configuring Default Roles for Cluster and Project Creators + +You can change the cluster or project role(s) that are automatically assigned to the creating user. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Click the **Cluster** or **Project/Namespaces** tab. +1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit Config**. +1. In the **Cluster Creator Default** or **Project Creator Default** section, enable the role as the default. +1. Click **Save**. + +**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. + +If you want to remove a default role, edit the permission and select **No** from the default roles option. + +### Cluster Membership Revocation Behavior + +When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: + +- Access the projects they hold membership in. +- Exercise any [individual project roles](#project-role-reference) they are assigned. + +If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md new file mode 100644 index 0000000000..414cfea570 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md @@ -0,0 +1,129 @@ +--- +title: Custom Roles +weight: 1128 +--- + +Within Rancher, _roles_ determine what actions a user can make within a cluster or project. + +Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. + +:::tip + +It is possible for a custom role to enable privilege escalation. 
For details, see [this section.](#privilege-escalation) + +::: + +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a custom role](#creating-a-custom-role) +- [Creating a custom role that inherits from another role](#creating-a-custom-role-that-inherits-from-another-role) +- [Deleting a custom role](#deleting-a-custom-role) +- [Assigning a custom role to a group](#assigning-a-custom-role-to-a-group) +- [Privilege escalation](#privilege-escalation) + +# Prerequisites + +To complete the tasks on this page, one of the following permissions are required: + + - [Administrator Global Permissions](global-permissions.md). + - [Custom Global Permissions](global-permissions.md#custom-global-permissions) with the [Manage Roles](global-permissions.md) role assigned. + +# Creating A Custom Role + +While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. + +The steps to add custom roles differ depending on the version of Rancher. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Select a tab to determine the scope of the role you're adding. The tabs are: + + - **Global:** The role is valid for allowing members to manage global scoped resources. + - **Cluster:** The role is valid for assignment when adding/managing members to clusters. + - **Project/Namespaces:** The role is valid for assignment when adding/managing members to projects or namespaces. + +1. Click **Create Global Role,** **Create Cluster Role** or **Create Project/Namespaces Role,** depending on the scope. +1. Enter a **Name** for the role. +1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. + + > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. + +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. + + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + +1. Use the **Inherit from** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. + +1. Click **Create**. 
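+
+For illustration, cluster- and project-scoped custom roles created through this form are generally stored as `RoleTemplate` objects (`management.cattle.io/v3`) in the Rancher management cluster. The sketch below is an assumed example rather than output of the steps above: the role name, display name, and rule are hypothetical, and the **Inherit from** selections would correspond to the `roleTemplateNames` field:
+
+```yaml
+apiVersion: management.cattle.io/v3
+kind: RoleTemplate
+metadata:
+  name: view-nodes-example # hypothetical name
+displayName: View Nodes (Example)
+context: cluster # determines whether the role appears on the Cluster or Project/Namespaces tab
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+      - watch
+roleTemplateNames: [] # names of roles this custom role inherits from, if any
+```
+
+If in doubt, inspecting the YAML of an existing role in your own Rancher installation shows the exact fields used by your version.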
+ +# Creating a Custom Role that Inherits from Another Role + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. + +The custom role can then be assigned to a user or group so that the role takes effect the first time the user or users sign into Rancher. + +To create a custom role based on an existing role, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Click the **Cluster** or **Project/Namespaces** tab. Click **Create Cluster Role** or **Create Project/Namespaces Role** depending on the scope. Note: Only cluster roles and project/namespace roles can inherit from another role. +1. Enter a name for the role. +1. In the **Inherit From** tab, select the role(s) that the custom role will inherit permissions from. +1. In the **Grant Resources** tab, select the Kubernetes resource operations that will be enabled for users with the custom role. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. +1. Optional: Assign the role as default. +1. Click **Create**. + +# Deleting a Custom Role + +When deleting a custom role, all global role bindings with this custom role are deleted. + +If a user is only assigned one custom role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. + +Custom roles can be deleted, but built-in roles cannot be deleted. + +To delete a custom role, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +2. Go to the custom global role that should be deleted and click **⋮ (…) > Delete**. +3. Click **Delete**. + +# Assigning a Custom Role to a Group + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. + +When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom role that was assigned to the group. They would continue to have their individual Standard User role. + +:::note Prerequisites: + +You can only assign a global role to a group if: + +* You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +* The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +* You have already set up at least one user group with the authentication provider + +::: + +To assign a custom role to a group, follow these steps: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Groups**. +1. 
Go to the existing group that will be assigned the custom role and click **⋮ > Edit Config**. +1. If you have created roles, they will show in the **Custom** section. Choose any custom role that will be assigned to the group. +1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. +1. Click **Save.**. + +**Result:** The custom role will take effect when the users in the group log into Rancher. + +# Privilege Escalation + +The `Configure Catalogs` custom permission is powerful and should be used with caution. When an admin assigns the `Configure Catalogs` permission to a standard user, it could result in privilege escalation in which the user could give themselves admin access to Rancher provisioned clusters. Anyone with this permission should be considered equivalent to an admin. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md new file mode 100644 index 0000000000..ccbaa2ebc4 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md @@ -0,0 +1,264 @@ +--- +title: Global Permissions +weight: 1126 +--- + +_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. + +Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are four default global permissions: `Administrator`, `Restricted Admin`,`Standard User` and `User-base`. + +- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. + +- **Restricted Admin:** These users have full control over downstream clusters, but cannot alter the local Kubernetes cluster. + +- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. + +- **User-Base:** User-Base users have login-access only. + +You cannot update or delete the built-in Global Permissions. + +This section covers the following topics: + +- [Restricted Admin](#restricted-admin) +- [Global permission assignment](#global-permission-assignment) + - [Global permissions for new local users](#global-permissions-for-new-local-users) + - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) +- [Custom global permissions](#custom-global-permissions) + - [Custom global permissions reference](#custom-global-permissions-reference) + - [Configuring default global permissions for new users](#configuring-default-global-permissions) + - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) + - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + - [Refreshing group memberships](#refreshing-group-memberships) + +# Restricted Admin + +A new `restricted-admin` role was created in Rancher v2.5 in order to prevent privilege escalation from the local Rancher server Kubernetes cluster. 
This role has full administrator access to all downstream clusters managed by Rancher, but it does not have permission to alter the local Kubernetes cluster. + +The `restricted-admin` can create other `restricted-admin` users with an equal level of access. + +A new setting was added to Rancher to set the initial bootstrapped administrator to have the `restricted-admin` role. This applies to the first user created when the Rancher server is started for the first time. If the environment variable is set, then no global administrator would be created, and it would be impossible to create the global administrator through Rancher. + +To bootstrap Rancher with the `restricted-admin` as the initial user, the Rancher server should be started with the following environment variable: + +``` +CATTLE_RESTRICTED_DEFAULT_ADMIN=true +``` +### List of `restricted-admin` Permissions + +The following table lists the permissions and actions that a `restricted-admin` should have in comparison with the `Administrator` and `Standard User` roles: + +| Category | Action | Global Admin | Standard User | Restricted Admin | Notes for Restricted Admin role | +| -------- | ------ | ------------ | ------------- | ---------------- | ------------------------------- | +| Local Cluster functions | Manage Local Cluster (List, Edit, Import Host) | Yes | No | No | | +| | Create Projects/namespaces | Yes | No | No | | +| | Add cluster/project members | Yes | No | No | | +| | Deploy MulticlusterApp in local cluster | Yes | No | No | | +| | Global DNS | Yes | No | No | | +| | Access to management cluster for CRDs and CRs | Yes | No | Yes | | +| | Save as RKE Template | Yes | No | No | | +| Security | | | | | | +| Enable auth | Configure Authentication | Yes | No | Yes | | +| Roles | Create/Assign GlobalRoles | Yes | No (Can list) | Yes | Auth webhook allows creating globalrole for perms already present | +| | Create/Assign ClusterRoles | Yes | No (Can list) | Yes | Not in local cluster | +| | Create/Assign ProjectRoles | Yes | No (Can list) | Yes | Not in local cluster | +| Users | Add User/Edit/Delete/Deactivate User | Yes | No | Yes | | +| Groups | Assign Global role to groups | Yes | No | Yes | As allowed by the webhook | +| | Refresh Groups | Yes | No | Yes | | +| PSP's | Manage PSP templates | Yes | No (Can list) | Yes | Same privileges as Global Admin for PSPs | +| Tools | | | | | | +| | Manage RKE Templates | Yes | No | Yes | | +| | Manage Global Catalogs | Yes | No | Yes | Cannot edit/delete built-in system catalog. 
Can manage Helm library | +| | Cluster Drivers | Yes | No | Yes | | +| | Node Drivers | Yes | No | Yes | | +| | GlobalDNS Providers | Yes | Yes (Self) | Yes | | +| | GlobalDNS Entries | Yes | Yes (Self) | Yes | | +| Settings | | | | | | +| | Manage Settings | Yes | No (Can list) | No (Can list) | | +| Apps | | | | | | +| | Launch Multicluster Apps | Yes | Yes | Yes | Not in local cluster | +| User | | | | | | +| | Manage API Keys | Yes (Manage all) | Yes (Manage self) | Yes (Manage self) | | +| | Manage Node Templates | Yes | Yes (Manage self) | Yes (Manage self) | Can only manage their own node templates and not those created by other users | +| | Manage Cloud Credentials | Yes | Yes (Manage self) | Yes (Manage self) | Can only manage their own cloud credentials and not those created by other users | +| Downstream Cluster | Create Cluster | Yes | Yes | Yes | | +| | Edit Cluster | Yes | Yes | Yes | | +| | Rotate Certificates | Yes | | Yes | | +| | Snapshot Now | Yes | | Yes | | +| | Restore Snapshot | Yes | | Yes | | +| | Save as RKE Template | Yes | No | Yes | | +| | Run CIS Scan | Yes | Yes | Yes | | +| | Add Members | Yes | Yes | Yes | | +| | Create Projects | Yes | Yes | Yes | | +| Feature Charts since v2.5 | | | | | | +| | Install Fleet | Yes | | Yes | Should not be able to run Fleet in local cluster | +| | Deploy EKS cluster | Yes | Yes | Yes | | +| | Deploy GKE cluster | Yes | Yes | Yes | | +| | Deploy AKS cluster | Yes | Yes | Yes | | + + +### Changing Global Administrators to Restricted Admins + +If Rancher already has a global administrator, they should change all global administrators over to the new `restricted-admin` role. + +This can be done through **Security > Users** and moving any Administrator role over to Restricted Administrator. + +Signed-in users can change themselves over to the `restricted-admin` if they wish, but they should only do that as the last step, otherwise they won't have the permissions to do so. + +# Global Permission Assignment + +Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. + +### Global Permissions for New Local Users + +When you create a new local user, you assign them a global permission as you complete the **Add User** form. + +To see the default permissions for new users, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. The **Roles** page has tabs for roles grouped by scope. Each table lists the roles in that scope. In the **Global** tab, in the **New User Default** column, the permissions given to new users by default are indicated with a checkmark. + +You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) + +### Global Permissions for Users with External Authentication + +When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. + +To see the default permissions for new users, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. The **Roles** page has tabs for roles grouped by scope. Each table lists the roles in that scope. In the **New User Default** column on each page, the permissions given to new users by default are indicated with a checkmark. 
+ +You can [change the default permissions to meet your needs.](#configuring-default-global-permissions) + +Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) + +You can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. + +# Custom Global Permissions + +Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. + +When a user from an [external authentication source](../../../../pages-for-subheaders/about-authentication.md) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. + +However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. + +The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. + +Administrators can enforce custom global permissions in multiple ways: + +- [Changing the default permissions for new users](#configuring-default-global-permissions) +- [Configuring global permissions for individual users](#configuring-global-permissions-for-individual-users) +- [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + +### Custom Global Permissions Reference + +The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. + +| Custom Global Permission | Administrator | Standard User | User-Base | +| ---------------------------------- | ------------- | ------------- |-----------| +| Create Clusters | ✓ | ✓ | | +| Create RKE Templates | ✓ | ✓ | | +| Manage Authentication | ✓ | | | +| Manage Catalogs | ✓ | | | +| Manage Cluster Drivers | ✓ | | | +| Manage Node Drivers | ✓ | | | +| Manage PodSecurityPolicy Templates | ✓ | | | +| Manage Roles | ✓ | | | +| Manage Settings | ✓ | | | +| Manage Users | ✓ | | | +| Use Catalog Templates | ✓ | ✓ | | +| User-Base (Basic log-in access) | ✓ | ✓ | | + +For details on which Kubernetes resources correspond to each global permission, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. If you click the name of an individual role, a table shows all of the operations and resources that are permitted by the role. + +:::note Notes: + +- Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. 
For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. +- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +::: + +### Configuring Default Global Permissions + +If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. + +:::note + +Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. + +::: + +To change the default global permissions that are assigned to external users upon their first log in, follow these steps: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. On the **Roles** page, make sure the **Global** tab is selected. +1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit Config**. +1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. If you want to remove a default permission, edit the permission and select **No**. + +**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. + +### Configuring Global Permissions for Individual Users + +To configure permission for a user, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Users**. +1. Go to the user whose access level you want to change and click **⋮ > Edit Config**. +1. In the **Global Permissions** and **Built-in** sections, check the boxes for each permission you want the user to have. If you have created roles from the **Roles** page, they will appear in the **Custom** section and you can choose from them as well. +1. Click **Save**. + +**Result:** The user's global permissions have been updated. + +### Configuring Global Permissions for Groups + +If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. + +After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. + +For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) + +For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. 
By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default**. Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. + +:::note Prerequisites: + +You can only assign a global role to a group if: + +* You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +* The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +* You have already set up at least one user group with the authentication provider + +::: + +To assign a custom global role to a group, follow these steps: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Groups**. +1. Go to the group you want to assign a custom global role to and click **⋮ > Edit Config**. +1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. +1. Click **Create**. + +**Result:** The custom global role will take effect when the users in the group log into Rancher. + +### Refreshing Group Memberships + +When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. + +To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. + +An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. + +To refresh group memberships, + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Users**. +1. Click **Refresh Group Memberships**. + +**Result:** Any changes to the group members' permissions will take effect. diff --git a/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md new file mode 100644 index 0000000000..e91770b429 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md @@ -0,0 +1,39 @@ +--- +title: Locked Roles +weight: 1129 +--- + +You can set roles to a status of `locked`. Locking roles prevent them from being assigned to users in the future. + +Locked roles: + +- Cannot be assigned to users that don't already have it assigned. +- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. +- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. 
+
+  **Example:** Let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy.
+
+  To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects. Then, you use this new custom role when adding users to a cluster.
+
+Roles can be locked by the following users:
+
+- Any user assigned the `Administrator` global permission.
+- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role.
+
+
+## Locking/Unlocking Roles
+
+If you want to prevent a role from being assigned to users, you can set it to a status of `locked`.
+
+You can lock roles in two contexts:
+
+- When you're [adding a custom role](custom-roles.md).
+- When you're editing an existing role (see below).
+
+Cluster roles and project/namespace roles can be locked, but global roles cannot.
+
+1. In the upper left corner, click **☰ > Users & Authentication**.
+1. In the left navigation bar, click **Roles**.
+1. Go to the **Cluster** tab or the **Project/Namespaces** tab.
+1. From the role that you want to lock (or unlock), select **⋮ > Edit Config**.
+1. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**.
diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md
new file mode 100644
index 0000000000..2eb74ba76b
--- /dev/null
+++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md
@@ -0,0 +1,41 @@
+---
+title: Configure Alerts for Periodic Scan on a Schedule
+weight: 8
+---
+
+It is possible to run a ClusterScan on a schedule.
+
+A scheduled scan can also specify if you should receive alerts when the scan completes.
+
+Alerts are supported only for a scan that runs on a schedule.
+
+The CIS Benchmark application supports two types of alerts:
+
+- Alert on scan completion: This alert is sent out when the scan run finishes. The alert includes details such as the ClusterScan's name and the ClusterScanProfile name.
+- Alert on scan failure: This alert is sent out if there are some test failures in the scan run or if the scan is in a `Fail` state.
+
+:::note Prerequisite
+
+Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.](monitoring-alertincis-scans/configuration)
+
+While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.](monitoring-alertincis-scans/configuration/receiver/#example-route-config-for-cis-scan-alerts)
+
+:::
+
+To configure alerts for a scan that runs on a schedule,
+
+1. [Enable alerts on the `rancher-cis-benchmark` application.](enable-alerting-for-rancher-cis-benchmark.md)
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to run a CIS scan and click **Explore**.
+1. Click **CIS Benchmark > Scan**.
+1. Click **Create**.
+1. Choose a cluster scan profile.
The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Choose the option **Run scan on a schedule**. +1. Enter a valid [cron schedule expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) in the field **Schedule**. +1. Check the boxes next to the Alert types under **Alerting**. +1. Optional: Choose a **Retention Count**, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. +1. Click **Create**. + +**Result:** The scan runs and reschedules to run according to the cron schedule provided. Alerts are sent out when the scan finishes if routes and receiver are configured under `rancher-monitoring` application. + +A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md new file mode 100644 index 0000000000..167d23785b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -0,0 +1,10 @@ +--- +title: Create a Custom Benchmark Version for Running a Cluster Scan +weight: 9 +--- + +There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. + +It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. + +For details, see [this page.](../explanations/integrations-in-rancher/cis-scans/custom-benchmark.md) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md new file mode 100644 index 0000000000..0877fec3eb --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -0,0 +1,21 @@ +--- +title: Enable Alerting for Rancher CIS Benchmark +weight: 7 +--- + +Alerts can be configured to be sent out for a scan that runs on a schedule. + +:::note Prerequisite: + +Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.](monitoring-alertincis-scans/configuration) + +While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. 
An example route configuration is [here.](monitoring-alertincis-scans/configuration/receiver/#example-route-config-for-cis-scan-alerts) + +::: + +While installing or upgrading the `rancher-cis-benchmark` Helm chart, set the following flag to `true` in the `values.yaml`: + +```yaml +alerts: + enabled: true +``` \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md new file mode 100644 index 0000000000..7c9cb11059 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -0,0 +1,12 @@ +--- +title: Install Rancher CIS Benchmark +weight: 1 +--- + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to install CIS Benchmark and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace > Charts**. +1. Click **CIS Benchmark** +1. Click **Install**. + +**Result:** The CIS scan application is deployed on the Kubernetes cluster. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md new file mode 100644 index 0000000000..3b04bef267 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -0,0 +1,21 @@ +--- +title: Run a Scan Periodically on a Schedule +weight: 4 +--- + +To run a ClusterScan on a schedule, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to run a CIS scan and click **Explore**. +1. Click **CIS Benchmark > Scan**. +1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Choose the option **Run scan on a schedule**. +1. Enter a valid cron schedule expression in the field **Schedule**. +1. Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. +1. Click **Create**. + +**Result:** The scan runs and reschedules to run according to the cron schedule provided. The **Next Scan** value indicates the next time this scan will run again. + +A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. + +You can also see the previous reports by choosing the report from the **Reports** dropdown on the scan detail page. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md new file mode 100644 index 0000000000..1825f97b17 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -0,0 +1,23 @@ +--- +title: Run a Scan +weight: 3 +--- + +When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. + +:::note + +There is currently a limitation of running only one CIS scan at a time for a cluster. 
If you create multiple ClusterScan custom resources, they will be run one after the other by the operator, and until one scan finishes, the rest of the ClusterScan custom resources will be in the "Pending" state. + +::: + +To run a scan, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to run a CIS scan and click **Explore**. +1. Click **CIS Benchmark > Scan**. +1. Click **Create**. +1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Click **Create**. + +**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md new file mode 100644 index 0000000000..cd0a1d9ef9 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -0,0 +1,35 @@ +--- +title: Skip Tests +weight: 5 +--- + +CIS scans can be run using test profiles with user-defined skips. + +To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to run a CIS scan and click **Explore**. +1. Click **CIS Benchmark > Profile**. +1. From here, you can create a profile in multiple ways. To make a new profile, click **Create** and fill out the form in the UI. To make a new profile based on an existing profile, go to the existing profile and click **⋮ Clone**. If you are filling out the form, add the tests to skip using the test IDs, using the relevant CIS Benchmark as a reference. If you are creating the new test profile as YAML, you will add the IDs of the tests to skip in the `skipTests` directive. You will also give the profile a name: + + ```yaml + apiVersion: cis.cattle.io/v1 + kind: ClusterScanProfile + metadata: + annotations: + meta.helm.sh/release-name: clusterscan-operator + meta.helm.sh/release-namespace: cis-operator-system + labels: + app.kubernetes.io/managed-by: Helm + name: "" + spec: + benchmarkVersion: cis-1.5 + skipTests: + - "1.1.20" + - "1.1.21" + ``` +1. Click **Create**. + +**Result:** A new CIS scan profile is created. + +When you [run a scan](#running-a-scan) that uses this profile, the defined tests will be skipped during the scan. The skipped tests will be marked in the generated report as `Skip`. diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md new file mode 100644 index 0000000000..65b6ccf7b1 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -0,0 +1,10 @@ +--- +title: Uninstall Rancher CIS Benchmark +weight: 2 +--- + +1. From the **Cluster Dashboard,** go to the left navigation bar and click **Apps & Marketplace > Installed Apps**. +1. 
Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. +1. Click **Delete** and confirm **Delete**. + +**Result:** The `rancher-cis-benchmark` application is uninstalled. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md new file mode 100644 index 0000000000..72b6f4f189 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -0,0 +1,13 @@ +--- +title: View Reports +weight: 6 +--- + +To view the generated CIS scan reports, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to run a CIS scan and click **Explore**. +1. Click **CIS Benchmark > Scan**. +1. The **Scans** page will show the generated reports. To see a detailed report, go to a scan report and click the name. + +One can download the report from the Scans list or from the scan detail page. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md new file mode 100644 index 0000000000..9c438d9b2b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -0,0 +1,30 @@ +--- +title: 1. Enable Istio in the Cluster +weight: 1 +--- + +:::note Prerequisites: + +- Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. +- If you have pod security policies, you will need to install Istio with the CNI enabled. For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md) +- To install Istio on an RKE2 cluster, additional steps are required. For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md) +- To install Istio in a cluster where project network isolation is enabled, additional steps are required. For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md) + +::: + +1. Click **☰ > Cluster Management**. +1. Go to the where you want to enable Istio and click **Explore**. +1. Click **Apps & Marketplace**. +1. Click **Charts**. +1. Click **Istio**. +1. If you have not already installed your own monitoring app, you will be prompted to install the rancher-monitoring app. Optional: Set your Selector or Scrape config options on rancher-monitoring app install. +1. Optional: Configure member access and [resource limits](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. +1. Optional: Make additional configuration changes to values.yaml if needed. +1. Optional: Add additional resources or configuration via the [overlay file.](../../../pages-for-subheaders/configuration-options.md#overlay-file) +1. Click **Install**. + +**Result:** Istio is installed at the cluster level. 
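+
+As a quick verification sketch (assuming the chart was installed into the default `istio-system` namespace), you can list the Istio system pods from the kubectl shell:
+
+```
+# All pods, such as istiod and istio-ingressgateway, should reach the Running state
+kubectl -n istio-system get pods
+```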
+ +# Additional Config Options + +For more information on configuring Istio, refer to the [configuration reference.](../../../pages-for-subheaders/configuration-options.md) diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md new file mode 100644 index 0000000000..ed6c93471b --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -0,0 +1,53 @@ +--- +title: 2. Enable Istio in a Namespace +weight: 2 +--- + +You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. + +This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. + +:::note Prerequisite: + +To enable Istio in a namespace, the cluster must have Istio installed. + +::: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Go to the namespace where you want to enable Istio and click **⋮ > Enable Istio Auto Injection**. Alternately, click the namespace, and then on the namespace detail page, click **⋮ > Enable Istio Auto Injection**. + +**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. + +### Verifying that Automatic Istio Sidecar Injection is Enabled + +To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. + +### Excluding Workloads from Being Injected with the Istio Sidecar + +If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: + +``` +sidecar.istio.io/inject: “false” +``` + +To add the annotation to a workload, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. +1. Go to the workload that should not have the sidecar and edit as yaml +1. Add the following key, value `sidecar.istio.io/inject: false` as an annotation on the workload +1. Click **Save**. + +**Result:** The Istio sidecar will not be injected into the workload. + +:::note + +If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. + +::: + + +### [Next: Add Deployments with the Istio Sidecar ](use-istio-sidecar.md) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md new file mode 100644 index 0000000000..da00a48ea0 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -0,0 +1,27 @@ +--- +title: 6. Generate and View Traffic +weight: 7 +--- + +This section describes how to view the traffic that is being managed by Istio. 
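+
+If you need to generate some sample traffic first, one simple approach is to request the BookInfo product page in a loop through the Istio ingress gateway. The snippet below is only a sketch; `GATEWAY_URL` is a placeholder for your own ingress gateway host and port:
+
+```
+# Replace the placeholder with your Istio ingress gateway host and port
+GATEWAY_URL=<ingress-gateway-host>:<port>
+for i in $(seq 1 100); do
+  curl -s -o /dev/null "http://${GATEWAY_URL}/productpage"
+done
+```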
+ +# The Kiali Traffic Graph + +The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you are able to view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. + +:::note Prerequisites: + +To enable traffic to show up in the graph, ensure you have prometheus installed in the cluster. Rancher-istio installs Kiali configured by default to work with the rancher-monitoring chart. You can use rancher-monitoring or install your own monitoring solution. Optional: you can change configuration on how data scraping occurs by setting the [Selectors & Scrape Configs](../../../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) options. + +::: + +To see the traffic graph, + +1. In the cluster where Istio is installed, click **Istio** in the left navigation bar. +1. Click the **Kiali** link. +1. Click on **Graph** in the side nav. +1. Change the namespace in the **Namespace** dropdown to view the traffic for each namespace. + +If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. The control panel on the right side of the graph lets you configure details including how many minutes of the most recent traffic should be shown on the graph. + +For additional tools and visualizations, you can go to Grafana, and Prometheus dashboards from the **Monitoring** **Overview** page diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md new file mode 100644 index 0000000000..d80ea26e64 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -0,0 +1,147 @@ +--- +title: 4. Set up the Istio Gateway +weight: 5 +--- + +The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. + +You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. + +To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two Ingresses. + +You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. + +You can route traffic into the service mesh with a load balancer or use Istio's NodePort gateway. This section describes how to set up the NodePort gateway. + +For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) + +![In an Istio-enabled cluster, you can have two Ingresses: the default Nginx Ingress, and the default Istio controller.](/img/istio-ingress.svg) + +# Enable an Istio Gateway + +The ingress gateway is a Kubernetes service that will be deployed in your cluster. 
The Istio Gateway allows for more extensive customization and flexibility. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Istio > Gateways**. +1. Click **Create from Yaml**. +1. Paste your Istio Gateway yaml, or **Read from File**. +1. Click **Create**. + +**Result:** The gateway is deployed, and will now route traffic with applied rules. + +# Example Istio Gateway + +We add the BookInfo app deployments in services when going through the Workloads example. Next we add an Istio Gateway so that the app is accessible from outside your cluster. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Istio > Gateways**. +1. Click **Create from Yaml**. +1. Copy and paste the Gateway yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +``` + +Then to deploy the VirtualService that provides the traffic routing for the Gateway: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Istio > VirtualServices**. +1. Copy and paste the VirtualService yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 +``` + +**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. + +Confirm that the resource exists by running: +``` +kubectl get gateway -A +``` + +The result should be something like this: +``` +NAME AGE +bookinfo-gateway 64m +``` + +### Access the ProductPage Service from a Web Browser + +To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: + +`http://:/productpage` + +To get the ingress gateway URL and port, + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. +1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. + +**Result:** You should see the BookInfo app in the web browser. 
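+
+If you prefer the command line, the HTTP node port can usually be read from the `istio-ingressgateway` service directly. This is only a sketch and assumes the upstream default port naming, where the plain HTTP port is named `http2`:
+
+```
+# Prints the node port that serves plain HTTP traffic on the Istio ingress gateway
+kubectl -n istio-system get service istio-ingressgateway \
+  -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}'
+
+# Combine it with the address of any node: http://<node-ip>:<node-port>/productpage
+```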
+ +For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) + +# Troubleshooting + +The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. + +### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller + +You can try the steps in this section to make sure the Kubernetes gateway is configured properly. + +In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. + +### [Next: Set up Istio's Components for Traffic Management](set-up-traffic-management.md) diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md new file mode 100644 index 0000000000..8b7bd4c233 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -0,0 +1,76 @@ +--- +title: 5. Set up Istio's Components for Traffic Management +weight: 6 +--- + +A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. + +- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. +- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. + +This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. + +In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. 
+ +After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. + +To deploy the virtual service and destination rules for the `reviews` service, +1. Click **☰ > Cluster Management**. +1. Go to the cluster where Istio is installed and click **Explore**. +1. In the cluster where Istio is installed, click **Istio > DestinationRules** in the left navigation bar. +1. Click **Create**. +1. Copy and paste the DestinationRule yaml provided below. +1. Click **Create**. +1. Click **Edit as YAML** and use this configuration: + + ```yaml + apiVersion: networking.istio.io/v1alpha3 + kind: DestinationRule + metadata: + name: reviews + spec: + host: reviews + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 + ``` +1. Click **Create**. + +Then to deploy the VirtualService that provides the traffic routing that utilizes the DestinationRule: + +1. Click **VirtualService** in the side nav bar. +1. Click **Create from Yaml**. +1. Copy and paste the VirtualService yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: reviews +spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + subset: v1 + weight: 50 + - destination: + host: reviews + subset: v3 + weight: 50 +--- +``` + +**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. + +### [Next: Generate and View Traffic](generate-and-view-traffic.md) diff --git a/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md new file mode 100644 index 0000000000..9eb7ed930c --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -0,0 +1,360 @@ +--- +title: 3. Add Deployments and Services with the Istio Sidecar +weight: 4 +--- + +:::note Prerequisite: + +To enable Istio for a workload, the cluster and namespace must have the Istio app installed. + +::: + +Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. + +To inject the Istio sidecar on an existing workload in the namespace, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. +1. Click **Workload**. +1. Go to the workload where you want to inject the Istio sidecar and click **⋮ > Redeploy**. When the workload is redeployed, it will have the Envoy sidecar automatically injected. + +Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see `istio-proxy` alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. + +### Add Deployments and Services + +There are a few ways to add new **Deployments** in your namespace: + +1. Click **☰ > Cluster Management**. +1. 
Go to the cluster that you created and click **Explore**. +1. Click **Workload**. +1. Click **Create**. +1. Click **Deployment**. +1. Fill out the form, or **Edit as Yaml**. +1. Click **Create**. + +To add a **Service** to your namespace: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Service Discovery > Services**. +1. Click **Create**. +1. Select the type of service that you want. +1. Fill out the form, or **Edit as Yaml**. +1. Click **Create** + +You can also create deployments and services using the kubectl **shell** + +1. Run `kubectl create -f .yaml` if your file is stored locally in the cluster +1. Or run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. + +### Example Deployments and Services + +Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the top navigation bar, open the kubectl shell. +1. Run `cat<< EOF | kubectl apply -f -` +1. Copy the below resources into the the shell. +1. Run `EOF` + +This will set up the following sample resources from Istio's example BookInfo app: + +Details service and deployment: + +- A `details` Service +- A ServiceAccount for `bookinfo-details` +- A `details-v1` Deployment + +Ratings service and deployment: + +- A `ratings` Service +- A ServiceAccount for `bookinfo-ratings` +- A `ratings-v1` Deployment + +Reviews service and deployments (three versions): + +- A `reviews` Service +- A ServiceAccount for `bookinfo-reviews` +- A `reviews-v1` Deployment +- A `reviews-v2` Deployment +- A `reviews-v3` Deployment + +Productpage service and deployment: + +This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. + +- A `productpage` service +- A ServiceAccount for `bookinfo-productpage` +- A `productpage-v1` Deployment + +### Resource YAML + +```yaml +# Copyright 2017 Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: 
bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +``` + +### [Next: Set up the Istio Gateway](set-up-istio-gateway.md) diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md new file mode 100644 index 0000000000..b79d86653c --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md @@ -0,0 +1,67 @@ +--- +title: Adding Users to Clusters +weight: 2020 +--- + +If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. + +:::tip + +Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members](../../manage-projects/add-users-to-projects.md) instead. + +::: + +There are two contexts where you can add cluster members: + +- Adding Members to a New Cluster + + You can add members to a cluster as you create it (recommended if possible). + +- [Adding Members to an Existing Cluster](#editing-cluster-membership) + + You can always add members to a cluster after a cluster is provisioned. + +## Editing Cluster Membership + +Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster you want to add members to and click **⋮ > Edit Config**. +1. In the **Member Roles** tab, click **Add Member**. +1. Search for the user or group that you want to add to the cluster. + + If external authentication is configured: + + - Rancher returns users from your [external authentication](../../../../pages-for-subheaders/about-authentication.md) source as you type. + + :::note Using AD but can't find your users? + + There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5](../../authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md). + + ::: + + - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. + + :::note + + If you are logged in as a local user, external users do not display in your search results. 
For more information, see [External Authentication Configuration and Principal Users](../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + + ::: + +1. Assign the user or group **Cluster** roles. + + [What are Cluster Roles?](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + :::tip + + For Custom Roles, you can modify the list of individual roles available for assignment. + + - To add roles to the list, [Add a Custom Role](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + - To remove roles from the list, [Lock/Unlock Roles](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + + ::: + +**Result:** The chosen users are added to the cluster. + +- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md new file mode 100644 index 0000000000..d1c74d7274 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -0,0 +1,44 @@ +--- +title: How the Authorized Cluster Endpoint Works +weight: 2015 +--- + +This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +### About the kubeconfig File + +The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). + +This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. + +After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. + +If admins have [enforced TTL on kubeconfig tokens](../../../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](authorized-cluster-endpoint.md) to be present in your PATH. + + +### Two Authentication Methods for RKE Clusters + +If the cluster is not an [RKE cluster,](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. 
+ +For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: + +- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. +- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. + +This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. + +To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page](../../../../pages-for-subheaders/rancher-manager-architecture.md#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. + +### About the kube-api-auth Authentication Webhook + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) which is only available for [RKE clusters.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. + +The scheduling rules for `kube-api-auth` are listed below: + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
`node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md new file mode 100644 index 0000000000..fe8062263d --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -0,0 +1,111 @@ +--- +title: "Access a Cluster with Kubectl and kubeconfig" +description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." +weight: 2010 +--- + +This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. + +For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). + +- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) +- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) +- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) +- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) + - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) + - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) + + +### Accessing Clusters with kubectl Shell in the Rancher UI + +You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster you want to access with kubectl and click **Explore**. +1. In the top navigation menu, click the **Kubectl Shell** button. Use the window that opens to interact with your Kubernetes cluster. + +### Accessing Clusters with kubectl from Your Workstation + +This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. + +This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. + +:::note Prerequisites: + +These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +::: + +1. Log into Rancher. Click **☰ > Cluster Management**. +1. Go to the cluster that you want to access with kubectl and click **Explore**. +1. In the top navigation bar, click **Download KubeConfig** button. +1. Save the YAML file on your local computer. Move the file to `~/.kube/config`. 
Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: + ``` + kubectl --kubeconfig /custom/path/kube.config get pods + ``` +1. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. + + +### Note on Resources Created Using kubectl + +Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. + +# Authenticating Directly with a Downstream Cluster + +This section intended to help you set up an alternative method to access an [RKE cluster.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +This method is only available for RKE clusters that have the [authorized cluster endpoint](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](authorized-cluster-endpoint.md) + +We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. + +:::note Prerequisites: + +The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) + +::: + +To find the name of the context(s) in your downloaded kubeconfig file, run: + +``` +kubectl config get-contexts --kubeconfig /custom/path/kube.config +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* my-cluster my-cluster user-46tmn + my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn +``` + +In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. + +With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with an downstream RKE cluster directly. + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. + +When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. + +### Connecting Directly to Clusters with FQDN Defined + +If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. 
The context will be named `-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. + +Assuming the kubeconfig file is located at `~/.kube/config`: + +``` +kubectl --context -fqdn get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context -fqdn get pods +``` + +### Connecting Directly to Clusters without FQDN Defined + +If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `-`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. + +Assuming the kubeconfig file is located at `~/.kube/config`: +``` +kubectl --context - get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context - get pods +``` diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md new file mode 100644 index 0000000000..823e00ae32 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md @@ -0,0 +1,40 @@ +--- +title: Adding a Pod Security Policy +weight: 80 +--- + +:::note Prerequisite: + +The options below are available only for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +::: + +When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy](../authentication-permissions-and-global-configuration/create-pod-security-policies.md), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. + +You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster to which you want to apply a pod security policy and click **⋮ > Edit Config**. +1. From **Pod Security Policy Support**, select **Enabled**. + + :::note + + This option is only available for clusters [provisioned by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + + ::: + +4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. + + Rancher ships with [policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) as well. + +5. Click **Save**. + +**Result:** The pod security policy is applied to the cluster and any projects within the cluster. + +:::note + +Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. + +To check if a running workload passes your pod security policy, clone or upgrade it. 
+ +::: \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md new file mode 100644 index 0000000000..a26b152f50 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md @@ -0,0 +1,26 @@ +--- +title: Assigning Pod Security Policies +weight: 2260 +--- + +_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). + +## Adding a Default Pod Security Policy + +When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. + +:::Prerequisite: + +Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +::: + +:::note + +For security purposes, we recommend assigning a PSP as you create your clusters. + +::: + +To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. + +When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md new file mode 100644 index 0000000000..7d9f0caa6a --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md @@ -0,0 +1,308 @@ +--- +title: Removing Kubernetes Components from Nodes +description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually +weight: 2055 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. + +When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. + +When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. + +## What Gets Removed? + +When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. 
+ +| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Registered Nodes][4] | +| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | +| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | +| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | +| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | +| Rancher Deployment | ✓ | ✓ | ✓ | | +| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | +| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | +| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | + +[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +[2]: ../../../pages-for-subheaders/use-existing-nodes.md +[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md + +## Removing a Node from a Cluster by Rancher UI + +When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +## Removing Rancher Components from a Cluster Manually + +When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. + +:::danger + +The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. + +::: + +### Removing Rancher Components from Registered Clusters + +For registered clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. + +After the registered cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. + + + + +:::danger + +This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. + +::: + +After you initiate the removal of a registered cluster using the Rancher UI (or API), the following events occur. + +1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. + +1. 
Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. + +1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. + +**Result:** All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +Rather than cleaning registered cluster nodes using the Rancher UI, you can run a script instead. + +:::note Prerequisite: + +Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +::: + +1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. + +1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: + + ``` + chmod +x user-cluster.sh + ``` + +1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. + + If you don't have an air gap environment, skip this step. + +1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): + + :::tip + + Add the `-dry-run` flag to preview the script's outcome without making changes. + ``` + ./user-cluster.sh rancher/rancher-agent: + ``` + + ::: + +**Result:** The script runs. All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +### Windows Nodes + +To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. + +To run the script, you can use this command in the PowerShell: + +``` +pushd c:\etc\rancher +.\cleanup.ps1 +popd +``` + +**Result:** The node is reset and can be re-added to a Kubernetes cluster. + +### Docker Containers, Images, and Volumes + +Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) + +**To clean all Docker containers, images and volumes:** + +``` +docker rm -f $(docker ps -qa) +docker rmi -f $(docker images -q) +docker volume rm $(docker volume ls -q) +``` + +### Mounts + +Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. + +Mounts | +--------| +`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | +`/var/lib/kubelet` | +`/var/lib/rancher` | + +**To unmount all mounts:** + +``` +for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done +``` + +### Directories and Files + +The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. + +:::note + +Depending on the role you assigned to the node, some of the directories will or won't be present on the node. 
+ +::: + +Directories | +--------| +`/etc/ceph` | +`/etc/cni` | +`/etc/kubernetes` | +`/opt/cni` | +`/opt/rke` | +`/run/secrets/kubernetes.io` | +`/run/calico` | +`/run/flannel` | +`/var/lib/calico` | +`/var/lib/etcd` | +`/var/lib/cni` | +`/var/lib/kubelet` | +`/var/lib/rancher/rke/log` | +`/var/log/containers` | +`/var/log/kube-audit` | +`/var/log/pods` | +`/var/run/calico` | + +**To clean the directories:** + +``` +rm -rf /etc/ceph \ + /etc/cni \ + /etc/kubernetes \ + /opt/cni \ + /opt/rke \ + /run/secrets/kubernetes.io \ + /run/calico \ + /run/flannel \ + /var/lib/calico \ + /var/lib/etcd \ + /var/lib/cni \ + /var/lib/kubelet \ + /var/lib/rancher/rke/log \ + /var/log/containers \ + /var/log/kube-audit \ + /var/log/pods \ + /var/run/calico +``` + +### Network Interfaces and Iptables + +The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. + +### Network Interfaces + +:::note + +Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. + +::: + +Interfaces | +--------| +`flannel.1` | +`cni0` | +`tunl0` | +`caliXXXXXXXXXXX` (random interface names) | +`vethXXXXXXXX` (random interface names) | + +**To list all interfaces:** + +``` +# Using ip +ip address show + +# Using ifconfig +ifconfig -a +``` + +**To remove an interface:** + +``` +ip link delete interface_name +``` + +### Iptables + +:::note + +Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. + +::: + +Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
+ +Chains | +--------| +`cali-failsafe-in` | +`cali-failsafe-out` | +`cali-fip-dnat` | +`cali-fip-snat` | +`cali-from-hep-forward` | +`cali-from-host-endpoint` | +`cali-from-wl-dispatch` | +`cali-fw-caliXXXXXXXXXXX` (random chain names) | +`cali-nat-outgoing` | +`cali-pri-kns.NAMESPACE` (chain per namespace) | +`cali-pro-kns.NAMESPACE` (chain per namespace) | +`cali-to-hep-forward` | +`cali-to-host-endpoint` | +`cali-to-wl-dispatch` | +`cali-tw-caliXXXXXXXXXXX` (random chain names) | +`cali-wl-to-host` | +`KUBE-EXTERNAL-SERVICES` | +`KUBE-FIREWALL` | +`KUBE-MARK-DROP` | +`KUBE-MARK-MASQ` | +`KUBE-NODEPORTS` | +`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | +`KUBE-SERVICES` | +`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | + +**To list all iptables rules:** + +``` +iptables -L -t nat +iptables -L -t mangle +iptables -L +``` diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md new file mode 100644 index 0000000000..6e1f87e79e --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md @@ -0,0 +1,111 @@ +--- +title: Cloning Clusters +weight: 2035 +--- + +If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. + +Duplication of registered clusters is not supported. + +| Cluster Type | Cloneable? | +|----------------------------------|---------------| +| [Nodes Hosted by Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) | ✓ | +| [Hosted Kubernetes Providers](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | ✓ | +| [Custom Cluster](../../../pages-for-subheaders/use-existing-nodes.md) | ✓ | +| [Registered Cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) | | + +:::caution + +During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, **_not_** wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. + +::: + +## Prerequisites + +Download and install [Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md). Remember to [create an API bearer token](../../../reference-guides/user-settings/api-keys.md) if necessary. + + +## 1. Export Cluster Config + +Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. + +1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. + +1. Enter the following command to list the clusters managed by Rancher. + + + ./rancher cluster ls + + +1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. + +1. Enter the following command to export the configuration for your cluster. + + + ./rancher clusters export + + + **Step Result:** The YAML for a cloned cluster prints to Terminal. + +1. Copy the YAML to your clipboard and paste it in a new file. 
Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). + +## 2. Modify Cluster Config + +Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. + +:::note + +Cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher) + +::: + +1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. + + :::caution + + Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. + + ::: + + +1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. + + ```yml + Version: v3 + clusters: + : # ENTER UNIQUE NAME + dockerRootDir: /var/lib/docker + enableNetworkPolicy: false + rancherKubernetesEngineConfig: + addonJobTimeout: 30 + authentication: + strategy: x509 + authorization: {} + bastionHost: {} + cloudProvider: {} + ignoreDockerVersion: true + ``` + +1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. + + ```yml + nodePools: + : + clusterId: do + controlPlane: true + etcd: true + hostnamePrefix: mark-do + nodeTemplateId: do + quantity: 1 + worker: true + ``` + +1. When you're done, save and close the configuration. + +## 3. Launch Cloned Cluster + +Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: + + ./rancher up --file cluster-template.yml + +**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md new file mode 100644 index 0000000000..29d5a38aa9 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md @@ -0,0 +1,38 @@ +--- +title: GlusterFS Volumes +weight: 5000 +--- + +:::note + +This section only applies to [RKE clusters.](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +::: + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. 
There are two requirements before you can change the cluster configuration: + +- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) +- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) + +``` +docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version +``` + +:::caution + +Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +::: + +``` +services: + kubelet: + extra_binds: + - "/usr/bin/systemd-run:/usr/bin/systemd-run" +``` + +After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: + +``` +Detected OS with systemd +``` \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md new file mode 100644 index 0000000000..0a0f6a2b32 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md @@ -0,0 +1,80 @@ +--- +title: How Persistent Storage Works +weight: 1 +--- + +A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. + +There are two ways to use persistent storage in Kubernetes: + +- Use an existing persistent volume +- Dynamically provision new persistent volumes + +To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. + +For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. + +![Setting Up New and Existing Persistent Storage](/img/rancher-storage.svg) + +For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) + +This section covers the following topics: + +- [About persistent volume claims](#about-persistent-volume-claims) + - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) +- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) + - [Binding PVs to PVCs](#binding-pvs-to-pvcs) +- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) + +# About Persistent Volume Claims + +Persistent volume claims (PVCs) are objects that request storage resources from your cluster. 
They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workloads as a volume so that the workload can claim its specified share of the persistent storage. + +To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. + +Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes**. You can reuse these PVCs when creating deployments in the future. + +### PVCs are Required for Both New and Existing Persistent Storage + +A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. + +If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. + +If a workload should request new storage, the workload mounts PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. + +Rancher lets you create as many PVCs within a project as you'd like. + +You can mount PVCs to a deployment as you create it, or later, after the deployment is running. + +# Setting up Existing Storage with a PVC and PV + +Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. + +PVs can represent a physical disk or file system that you host on premise, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +:::note Important: + +PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. + +::: + +### Binding PVs to PVCs + +When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. 
+ +In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PV that has at least the amount of disk space required by the PVC. + +To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. + +# Provisioning New Storage with a PVC and Storage Class + +Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. + +For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. + +The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. + diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md new file mode 100644 index 0000000000..232511bb33 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md @@ -0,0 +1,88 @@ +--- +title: Dynamically Provisioning New Storage in Rancher +weight: 2 +--- + +This section describes how to provision new persistent storage for workloads in Rancher. + +This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../explanations/integrations-in-rancher/longhorn.md) + +To provision new storage for your workloads, follow these steps: + +1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) +2. [Use the Storage Class for Pods Deployed with a StatefulSet.](#2-use-the-storage-class-for-pods-deployed-with-a-statefulset) + +### Prerequisites + +- To set up persistent storage, the `Manage Volumes` [role](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. +- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](../../../../../pages-for-subheaders/set-up-cloud-providers.md) +- Make sure your storage provisioner is available to be enabled. 
+ +The following storage provisioners are enabled by default: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.](../../../../../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) + +### 1. Add a storage class and configure it to use your storage + +These steps describe how to set up a storage class at the cluster level. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to dynamically provision persistent storage volumes and click **Explore**. +1. Click **Storage > Storage Classes**. +1. Click **Create**. +1. Enter a name for your storage class. +1. From the **Provisioner** drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. +1. In the **Parameters** tab, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. +1. Click **Create**. + +**Result:** The storage class is available to be consumed by a PVC. + +For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). + +### 2. Use the Storage Class for Pods Deployed with a StatefulSet + +StatefulSets manage the deployment and scaling of Pods while maintaining a sticky identity for each Pod. In this StatefulSet, we will configure a VolumeClaimTemplate. Each Pod managed by the StatefulSet will be deployed with a PersistentVolumeClaim based on this VolumeClaimTemplate. The PersistentVolumeClaim will refer to the StorageClass that we created. Therefore, when each Pod managed by the StatefulSet is deployed, it will be bound to dynamically provisioned storage using the StorageClass defined in its PersistentVolumeClaim. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to add use the StorageClass for a workload and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Click **Create**. +1. Click **StatefulSet**. +1. In the **Volume Claim Templates** tab, click **Add Claim Template**. +1. Enter a name for the persistent volume. +1. In the **StorageClass* field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch**. + +**Result:** When each Pod managed by the StatefulSet is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to Pod with a compatible PVC. + +To attach the PVC to an existing workload, + +1. Click **☰ > Cluster Management**. +1. 
Go to the cluster where you want to use the StorageClass for a workload and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Go to the workload that will use storage provisioned with the StorageClass that you created and click **⋮ > Edit Config**. +1. In the **Volume Claim Templates** section, click **Add Claim Template**. +1. Enter a persistent volume name. +1. In the **StorageClass** field, select the StorageClass that will dynamically provision storage for pods managed by this StatefulSet. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save**. + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md new file mode 100644 index 0000000000..12f8d0f8e2 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md @@ -0,0 +1,34 @@ +--- +title: iSCSI Volumes +weight: 6000 +--- + +In [Rancher Launched Kubernetes clusters](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. + +Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. + +If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: + +| Platform | Package Name | Install Command | +| ------------- | ----------------------- | -------------------------------------- | +| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | +| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | + + +After installing the initiator tool on your nodes, edit the YAML for your cluster, updating the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. + +:::note Notes + +- Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes.
If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +- The example YAML below does not apply to K3s, but only to RKE clusters. Since the K3s kubelet does not run in a container, adding extra binds is not necessary. However, all iSCSI tools must still be installed on your K3s nodes. + +::: + +``` +services: + kubelet: + extra_binds: + - "/etc/iscsi:/etc/iscsi" + - "/sbin/iscsiadm:/sbin/iscsiadm" +``` diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md new file mode 100644 index 0000000000..53fe7c9297 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md @@ -0,0 +1,86 @@ +--- +title: Setting up Existing Storage +weight: 1 +--- + +This section describes how to set up existing persistent storage for workloads in Rancher. + +:::note + +This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +::: + +To set up storage, follow these steps: + +1. [Set up persistent storage.](#1-set-up-persistent-storage) +2. [Add a PersistentVolume that refers to the persistent storage.](#2-add-a-persistentvolume-that-refers-to-the-persistent-storage) +3. [Use the PersistentVolume for Pods Deployed with a StatefulSet.](#3-use-the-persistentvolume-for-pods-deployed-with-a-statefulset) + +### Prerequisites + +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +### 1. Set up persistent storage + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../provisioning-storage-examples/vsphere-storage.md) [NFS,](../provisioning-storage-examples/nfs-storage.md) or Amazon's [EBS.](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../explanations/integrations-in-rancher/longhorn.md) + +### 2. Add a PersistentVolume that refers to the persistent storage + +These steps describe how to set up a PersistentVolume at the cluster level in Kubernetes. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a persistent volume and click **Explore**. +1. 
In the left navigation bar, click **Storage > Persistent Volumes**. +1. Click **Create**. +1. Enter a **Name** for the persistent volume. +1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have an Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. +1. Enter the **Capacity** of your volume in gigabytes. +1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor or disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. +1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This option sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. +1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. +1. Click **Create**. + +**Result:** Your new persistent volume is created. + + +### 3. Use the PersistentVolume for Pods Deployed with a StatefulSet + +StatefulSets manage the deployment and scaling of Pods while maintaining a sticky identity for each Pod. In this StatefulSet, we will configure a VolumeClaimTemplate. Each Pod managed by the StatefulSet will be deployed with a PersistentVolumeClaim based on this VolumeClaimTemplate. The PersistentVolumeClaim will refer to the PersistentVolume that we created. Therefore, when each Pod managed by the StatefulSet is deployed, it will be bound to a PersistentVolume as defined in its PersistentVolumeClaim. + +You can configure storage for the StatefulSet during or after workload creation. + +The following steps describe how to assign existing storage to a new StatefulSet: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure storage for the StatefulSet and click **Explore**. +1. In the left navigation bar, click **Workload > StatefulSets**. +1. Click **Create**. +1. Choose the namespace where the workload will be deployed. +1. Enter a name for the StatefulSet. +1. On the **Volume Claim Templates** tab, click **Add Claim Template**. +1. Click **Use an existing Persistent Volume**. +1. In the Persistent Volumes field, select the Persistent Volume that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch**. + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +The following steps describe how to assign persistent storage to an existing workload: + +1. Click **☰ > Cluster Management**. +1.
Go to the cluster where you want to configure storage for the StatefulSet and click **Explore**. +1. In the left navigation bar, click **Workload > StatefulSets**. +1. Go to the workload that you want to add the persistent storage to. Click **⋮ > Edit**. +1. On the **Volume Claim Templates** tab, click **Add Claim Template**. +1. Click **Use an existing Persistent Volume**. +1. In the Persistent Volumes field, select the Persistent Volume that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch**. + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md new file mode 100644 index 0000000000..9fe5c3a214 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md @@ -0,0 +1,437 @@ +--- +title: Using an External Ceph Driver +weight: 10 +--- + +These instructions are about using the external Ceph driver in an RKE2 cluster. If you are using RKE, additional steps are required. For details, refer to [this section.](#using-the-ceph-driver-with-rke) + +- [Requirements](#requirements) +- [Using the Ceph Driver with RKE](#using-the-ceph-driver-with-rke) +- [Installing the ceph-csi driver on an RKE2 cluster](#installing-the-ceph-csi-driver-on-an-rke2-cluster) +- [Install the ceph-csi driver using Helm](#install-the-ceph-csi-driver-using-helm) +- [Creating RBD Ceph Resources](#creating-rbd-ceph-resources) +- [Configure RBD Ceph Access Secrets](#configure-rbd-ceph-access-secrets) + - [User Account](#user-account) + - [Admin Account](#admin-account) +- [Create RBD Testing Resources](#create-rbd-testing-resources) + - [Using RBD in Pods](#using-rbd-in-pods) + - [Using RBD in Persistent Volumes](#using-rbd-in-persistent-volumes) + - [Using RBD in Storage Classes](#using-rbd-in-storage-classes) + - [RKE2 Server/Master Provisioning](#rke2-server-master-provisioning) + - [RKE2 Agent/Worker provisioning](#rke2-agent-worker-provisioning) +- [Tested Versions](#tested-versions) +- [Troubleshooting](#troubleshooting) + +# Requirements + +Make sure ceph-common and xfsprogs packages are installed on SLE worker nodes. + +# Using the Ceph Driver with RKE + +The resources below are fully compatible with RKE based clusters, but there is a need to do an additional kubelet configuration for RKE. + +On RKE clusters, the kubelet component is running in a Docker container and doesn't have access to the host's kernel modules as rbd and libceph by default. + +To solve this limitation, you can either run `modprobe rbd` on worker nodes, or configure the kubelet containers to automatically mount the `/lib/modules` directory from the host into the container. + +For the kubelet configuration, put the following lines into the `cluster.yml` file prior to RKE cluster provisioning. You can also modify the `cluster.yml` later in the Rancher UI by clicking on **Edit Cluster > Edit as YAML** and restarting the worker nodes. 
+ +```yaml +services: + kubelet: + extra_binds: + - '/lib/modules:/lib/modules:ro' +``` + +For more information about the `extra_binds` directive, refer to [this section.](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds) + +# Installing the ceph-csi driver on an RKE2 cluster + +:::note + +These steps are needed for dynamic RBD provisioning only. + +::: + +For more information about the `ceph-csi-rbd` chart, refer to [this page.](https://github.com/ceph/ceph-csi/blob/devel/charts/ceph-csi-rbd/README.md) + +To get details about your SES cluster, run: + +``` +ceph mon dump +``` + +Read its output: + +``` +dumped monmap epoch 3 +epoch 3 +fsid 79179d9d-98d8-4976-ab2e-58635caa7235 +last_changed 2021-02-11T10:56:42.110184+0000 +created 2021-02-11T10:56:22.913321+0000 +min_mon_release 15 (octopus) +0: [v2:10.85.8.118:3300/0,v1:10.85.8.118:6789/0] mon.a +1: [v2:10.85.8.123:3300/0,v1:10.85.8.123:6789/0] mon.b +2: [v2:10.85.8.124:3300/0,v1:10.85.8.124:6789/0] mon.c +``` + +Later you'll need the fsid and mon addresses values. + +# Install the ceph-csi Driver Using Helm + +Run these commands: + +``` +helm repo add ceph-csi https://ceph.github.io/csi-charts +helm repo update +helm search repo ceph-csi -l +helm inspect values ceph-csi/ceph-csi-rbd > ceph-csi-rbd-values.yaml +``` + +Modify the `ceph-csi-rbd-values.yaml` file and keep there only the required changes: + +```yaml +# ceph-csi-rbd-values.yaml +csiConfig: + - clusterID: "79179d9d-98d8-4976-ab2e-58635caa7235" + monitors: + - "10.85.8.118:6789" + - "10.85.8.123:6789" + - "10.85.8.124:6789" +provisioner: + name: provisioner + replicaCount: 2 +``` + +Make sure the ceph monitors are reachable from the RKE2 cluster, for example, by ping. 
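+
+One way to check reachability is to run a quick test from one of the RKE2 nodes against the monitor addresses reported by `ceph mon dump`. The snippet below is a minimal sketch that assumes `ping` and `nc` are available on the node; if ICMP is blocked in your environment, probing the monitor port is the more reliable signal.
+
+```
+# Ping each ceph monitor address (substitute your own monitor IPs)
+for mon in 10.85.8.118 10.85.8.123 10.85.8.124; do
+  ping -c 1 "$mon"
+done
+
+# Alternatively, verify that the v1 monitor port is open
+nc -vz 10.85.8.118 6789
+```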
+ +``` +kubectl create namespace ceph-csi-rbd +helm install --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml +kubectl rollout status deployment ceph-csi-rbd-provisioner -n ceph-csi-rbd +helm status ceph-csi-rbd -n ceph-csi-rbd +``` + +in case you'd like to modify the configuration directly via Helm, you may adapt the `ceph-csi-rbd-values.yaml` file and call: + +``` +helm upgrade \ + --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml +``` + +# Creating RBD Ceph Resources + +``` +# Create a ceph pool: +ceph osd pool create myPool 64 64 + +# Create a block device pool: +rbd pool init myPool + +# Create a block device image: +rbd create -s 2G myPool/image + +# Create a block device user and record the key: +ceph auth get-or-create-key client.myPoolUser mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=myPool" | tr -d '\n' | base64 +QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== + +# Encode the ceph user myPoolUser into a bash64 hash: +echo "myPoolUser" | tr -d '\n' | base64 +bXlQb29sVXNlcg== + +# Create a block device admin user and record the key: +ceph auth get-or-create-key client.myPoolAdmin mds 'allow *' mgr 'allow *' mon 'allow *' osd 'allow * pool=myPool' | tr -d '\n' | base64 +QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== + +# Encode the ceph user myPoolAdmin into a bash64 hash: +echo "myPoolAdmin" | tr -d '\n' | base64 +bXlQb29sQWRtaW4= +``` +# Configure RBD Ceph Access Secrets + +### User Account + +For static RBD provisioning (the image within the ceph pool must exist), run these commands: + +``` +cat > ceph-user-secret.yaml << EOF +apiVersion: v1 +kind: Secret +metadata: + name: ceph-user + namespace: default +type: kubernetes.io/rbd +data: + userID: bXlQb29sVXNlcg== + userKey: QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== +EOF + +kubectl apply -f ceph-user-secret.yaml +``` + +### Admin Account + +For dynamic RBD provisioning (used for automatic image creation within a given ceph pool), run these commands: + +``` +cat > ceph-admin-secret.yaml << EOF +apiVersion: v1 +kind: Secret +metadata: + name: ceph-admin + namespace: default +type: kubernetes.io/rbd +data: + userID: bXlQb29sQWRtaW4= + userKey: QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== +EOF + +kubectl apply -f ceph-admin-secret.yaml +``` + +# Create RBD Testing Resources + +### Using RBD in Pods + +``` +# pod +cat > ceph-rbd-pod-inline.yaml << EOF +apiVersion: v1 +kind: Pod +metadata: + name: ceph-rbd-pod-inline +spec: + containers: + - name: ceph-rbd-pod-inline + image: busybox + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /mnt/ceph_rbd + name: volume + volumes: + - name: volume + rbd: + monitors: + - 10.85.8.118:6789 + - 10.85.8.123:6789 + - 10.85.8.124:6789 + pool: myPool + image: image + user: myPoolUser + secretRef: + name: ceph-user + fsType: ext4 + readOnly: false +EOF + +kubectl apply -f ceph-rbd-pod-inline.yaml +kubectl get pod +kubectl exec pod/ceph-rbd-pod-inline -- df -k | grep rbd +``` + +### Using RBD in Persistent Volumes + +``` +# pod-pvc-pv +cat > ceph-rbd-pod-pvc-pv-allinone.yaml << EOF +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ceph-rbd-pv +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + rbd: + monitors: + - 10.85.8.118:6789 + - 10.85.8.123:6789 + - 10.85.8.124:6789 + pool: myPool + image: image + user: myPoolUser + secretRef: + name: ceph-user + fsType: ext4 + readOnly: false +--- +kind: 
PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ceph-rbd-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: ceph-rbd-pod-pvc-pv +spec: + containers: + - name: ceph-rbd-pod-pvc-pv + image: busybox + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /mnt/ceph_rbd + name: volume + volumes: + - name: volume + persistentVolumeClaim: + claimName: ceph-rbd-pvc +EOF + +kubectl apply -f ceph-rbd-pod-pvc-pv-allinone.yaml +kubectl get pv,pvc,pod +kubectl exec pod/ceph-rbd-pod-pvc-pv -- df -k | grep rbd +``` + +### Using RBD in Storage Classes + +This example is for dynamic provisioning. The ceph-csi driver is needed. + +``` +# pod-pvc-sc +cat > ceph-rbd-pod-pvc-sc-allinone.yaml < /root/.bashrc << EOF +export PATH=$PATH:/var/lib/rancher/rke2/bin/ +export KUBECONFIG=/etc/rancher/rke2/rke2.yaml +EOF + +cat /var/lib/rancher/rke2/server/node-token +token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED +``` + +### RKE2 Agent/Worker provisioning + +``` +mkdir -p /etc/rancher/rke2/ + +cat > /etc/rancher/rke2/config.yaml << EOF +server: https://10.100.103.23:9345 +token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED +EOF + +curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +systemctl enable --now rke2-agent.service +``` + +To import the cluster into Rancher, click **☰ > Cluster Management**. Then on the **Clusters** page, click **Import Existing**. Then run the provided kubectl command on the server/master node. + +# Tested Versions + +OS for running RKE2 nodes: JeOS SLE15-SP2 with installed kernel-default-5.3.18-24.49 + +``` +kubectl version +Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-22T12:00:00Z", GoVersion:"go1.13.11", Compiler:"gc", Platform:"linux/amd64"} +Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7+rke2r1", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-20T01:50:52Z", GoVersion:"go1.15.5b5", Compiler:"gc", Platform:"linux/amd64"} + +helm version +version.BuildInfo{Version:"3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.12"} +``` + +Kubernetes version on RKE2 cluster: v1.19.7+rke2r1 + +# Troubleshooting + +In case you are using SUSE's ceph-rook based on SES7, it might be useful to expose the monitors on hostNetwork by editing `rook-1.4.5/ceph/cluster.yaml` and setting `spec.network.hostNetwork=true`. 
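+
+For orientation, the relevant fragment of `rook-1.4.5/ceph/cluster.yaml` would then look roughly like the sketch below. This is a minimal, illustrative excerpt of a CephCluster resource; all fields other than the network setting are omitted.
+
+```yaml
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  network:
+    # Expose the ceph monitors on the host network so they are
+    # reachable from outside the rook-managed cluster
+    hostNetwork: true
+```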
+ +Also for operating the ceph-rook cluster, it is useful to deploy a toolbox on the Kubernetes cluster where ceph-rook is provisioned by `kubectl apply -f rook-1.4.5/ceph/toolbox.yaml` Then all the ceph related commands can be executed in the toolbox pod, for example, by running `kubectl exec -it -n rook-ceph rook-ceph-tools-686d8b8bfb-2nvqp -- bash` + +Operating with the ceph - basic commands: + +``` +ceph osd pool stats +ceph osd pool delete myPool myPool --yes-i-really-really-mean-it +rbd list -p myPool +> csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 +> image +``` + +Delete the image: `rbd rm csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -p myPool` + +CephFS commands in rook toolbox: + +``` +ceph -s +ceph fs ls +ceph fs fail cephfs +ceph fs rm cephfs --yes-i-really-mean-it +ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it +ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it +``` + +To prepare a cephfs filesystem, you can run this command on a rook cluster: + +``` +kubectl apply -f rook-1.4.5/ceph/filesystem.yaml +``` \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md new file mode 100644 index 0000000000..ff3707aa1c --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md @@ -0,0 +1,76 @@ +--- +title: NFS Storage +weight: 3054 +--- + +Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. + +:::note + +- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage](../../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md). + +- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. + +::: + +:::note Recommended: + +To simplify the process of managing firewall rules, use NFSv4. + +::: + +1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. + +1. Enter the following command: + + ``` + sudo apt-get install nfs-kernel-server + ``` + +1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. + + ``` + mkdir -p /nfs && chown nobody:nogroup /nfs + ``` + - The `-p /nfs` parameter creates a directory named `nfs` at root. + - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. + +1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. + + 1. Open `/etc/exports` using your text editor of choice. + 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster. 
Follow each address and its accompanying parameters with a single space that is a delimiter. + + ``` + /nfs (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) + ``` + + :::tip + + You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` + + ::: + + 1. Update the NFS table by entering the following command: + + ``` + exportfs -ra + ``` + +1. Open the ports used by NFS. + + 1. To find out what ports NFS is using, enter the following command: + + ``` + rpcinfo -p | grep nfs + ``` + 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: + + ``` + sudo ufw allow 2049 + ``` + +**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. + +## What's Next? + +Within Rancher, add the NFS server as a storage volume and/or storage class. After adding the server, you can use it for storage for your deployments. diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md new file mode 100644 index 0000000000..97b725a39f --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md @@ -0,0 +1,16 @@ +--- +title: Creating Persistent Storage in Amazon's EBS +weight: 3053 +--- + +This section describes how to set up Amazon's Elastic Block Store in EC2. + +1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes**. +1. Click **Create Volume**. +1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. +1. Click **Create Volume**. +1. Click **Close**. + +**Result:** Persistent storage has been created. + +For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.](../manage-persistent-storage/set-up-existing-storage.md) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md new file mode 100644 index 0000000000..a4c2baf99e --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md @@ -0,0 +1,78 @@ +--- +title: vSphere Storage +weight: 3055 +--- + +To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a PersistentVolumeClaim. 
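+
+For reference, the StorageClass created through the steps below corresponds to a manifest along these lines. This is a minimal sketch: the name and the `diskformat` parameter are illustrative, not required values.
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: vsphere-storage   # illustrative name
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+  # Optional vSphere-specific parameter; thin-provisions the backing VMDK
+  diskformat: thin
+```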
+ +In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../../pages-for-subheaders/vsphere-cloud-provider.md) + +- [Prerequisites](#prerequisites) +- [Creating a StorageClass](#creating-a-storageclass) +- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) +- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) +- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) + +### Prerequisites + +In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), the [vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md). + +### Creating a StorageClass + +:::tip + +The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. + +::: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to provide vSphere storage. +1. In the left navigation bar, click **Storage > StorageClasses**. +1. Click **Create**. +3. Enter a **Name** for the StorageClass. +4. Under **Provisioner**, select **VMWare vSphere Volume**. + + ![](/img/vsphere-storage-class.png) + +5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. +5. Click **Create**. + +### Creating a Workload with a vSphere Volume + +1. In the left navigation bar, click **Workload**. +1. Click **Create**. +1. Click **StatefulSet**. +1. In the **Volume Claim Templates** tab, click **Add Claim Template**. +1. Enter a persistent volume name. +1. In the Storage Class field, select the vSphere StorageClass that you created. +6. Enter the required **Capacity** for the volume. Then click **Define**. +7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. +8. Click **Create**. + +### Verifying Persistence of the Volume + +1. In the left navigation bar, click **Workload > Pods**. +1. Go to the workload you just created and click **⋮ > Execute Shell**. +2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). +3. Create a file in the volume by executing the command `touch //data.txt`. +4. Close the shell window. +5. Click on the name of the workload to reveal detail information. +7. Click **⋮ > Delete**. +8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. +9. Once the replacement pod is running, click **Execute Shell**. +10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. 
+ + ![workload-persistent-data](/img/workload-persistent-data.png) + +### Why to Use StatefulSets Instead of Deployments + +You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. + +Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. + +Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. + +### Related Links + +- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) +- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md new file mode 100644 index 0000000000..00e1e08014 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md @@ -0,0 +1,584 @@ +--- +title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups +weight: 1 +--- + +This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. + +- [Prerequisites](#prerequisites) +- [1. Create a Custom Cluster](#1-create-a-custom-cluster) +- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) +- [3. Deploy Nodes](#3-deploy-nodes) +- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) + - [Parameters](#parameters) + - [Deployment](#deployment) +- [Testing](#testing) + - [Generating Load](#generating-load) + - [Checking Scale](#checking-scale) + +# Prerequisites + +These elements are required to follow this guide: + +* The Rancher server is up and running +* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles + +### 1. Create a Custom Cluster + +On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. 
Once cluster is created we need to get: + +* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag +* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag +* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster + + ```sh + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum + ``` + +### 2. Configure the Cloud Provider + +On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. + +1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. + * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:DescribeTags", + "autoscaling:DescribeLaunchConfigurations", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. + * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + + * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` + * Security group: `K8sMasterSg` More info at[RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H 
"X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--etcd --controlplane" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. + * IAM profile: Provides cloud_provider worker integration. + This profile is called `K8sWorkerProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } + ] + } + ``` + + * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` + * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--worker" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +More info is at [RKE clusters on AWS](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) + +### 3. Deploy Nodes + +Once we've configured AWS, let's create VMs to bootstrap our cluster: + +* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. 
More info is at [the recommendations for production-ready clusters.](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md) + * IAM role: `K8sMasterRole` + * Security group: `K8sMasterSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` + +* worker: Define an ASG on EC2 with the following settings: + * Name: `K8sWorkerAsg` + * IAM role: `K8sWorkerRole` + * Security group: `K8sWorkerSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` + * Instances: + * minimum: 2 + * desired: 2 + * maximum: 10 + +Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. + +### 4. Install Cluster-autoscaler + +At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. + +#### Parameters + +This table shows cluster-autoscaler parameters for fine tuning: + +| Parameter | Default | Description | +|---|---|---| +|cluster-name|-|Autoscaled cluster name, if available| +|address|:8085|The address to expose Prometheus metrics| +|kubernetes|-|Kubernetes master location. Leave blank for default| +|kubeconfig|-|Path to kubeconfig file with authorization and master location information| +|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| +|namespace|"kube-system"|Namespace in which cluster-autoscaler run| +|scale-down-enabled|true|Should CA scale down the cluster| +|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| +|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| +|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| +|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| +|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| +|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| +|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| +|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| +|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| +|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| +|max-nodes-total|0|Maximum number of nodes in all node groups. 
Cluster autoscaler will not grow the cluster beyond this number| +|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +cloud-provider|-|Cloud provider type| +|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| +|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| +|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| +|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| +|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| +|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| +|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| +|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| +|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: `::`| +|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`| +|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| +|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| +|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| +|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| +|write-status-configmap|true|Should CA write status information to a configmap| +|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| +|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| +|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| +|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| +|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| +|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| +|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. 
Pods with null priority (PodPriority disabled) are non expendable| +|regional|false|Cluster is regional| +|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| +|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| +|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| +|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. AWS only| +|profiling|false|Is debug/pprof endpoint enabled| + +#### Deployment + +Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: + + +```yml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list", "get", "update"] + - apiGroups: [""] + resources: + - "pods" + - "services" + - "replicationcontrollers" + - "persistentvolumeclaims" + - "persistentvolumes" + verbs: ["watch", "list", "get"] + - apiGroups: ["extensions"] + resources: ["replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch", "list"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resourceNames: ["cluster-autoscaler"] + resources: ["leases"] + verbs: ["get", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create","list","watch"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] + verbs: ["delete", "get", "update", "watch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler + labels: + 
k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8085' + spec: + serviceAccountName: cluster-autoscaler + tolerations: + - effect: NoSchedule + operator: "Equal" + value: "true" + key: node-role.kubernetes.io/controlplane + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + containers: + - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + imagePullPolicy: "Always" + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" + +``` + +Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): + +```sh +kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml +``` + +:::note + +Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) + +::: + +# Testing + +At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. + +### Generating Load + +We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replicas to be sure you exhaust the Kubernetes cluster resources:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hello-world
  name: hello-world
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hello-world
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - image: rancher/hello-world
        imagePullPolicy: Always
        name: hello-world
        ports:
        - containerPort: 80
          protocol: TCP
        resources:
          limits:
            cpu: 1000m
            memory: 1024Mi
          requests:
            cpu: 1000m
            memory: 1024Mi
```

Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead):

```
kubectl -n default apply -f test-deployment.yaml
```

### Checking Scale

Once the Kubernetes cluster's resources are exhausted, cluster-autoscaler should scale up worker nodes so that the pods that failed to be scheduled can run. It should continue scaling up until all pods are scheduled. You should see the new nodes in the ASG and in the Kubernetes cluster. Check the logs of the cluster-autoscaler pod in the `kube-system` namespace.

Once scale-up is verified, check scale-down. To do so, reduce the replica count on the test deployment until you release enough Kubernetes cluster resources to trigger a scale-down. You should see nodes disappear from the ASG and from the Kubernetes cluster. Check the logs of the cluster-autoscaler pod in the `kube-system` namespace.

diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md
new file mode 100644
index 0000000000..2e3e5b3b0d
--- /dev/null
+++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md
@@ -0,0 +1,204 @@
---
title: Nodes and Node Pools
weight: 2030
---

After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available.

:::note

If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters](../../../pages-for-subheaders/cluster-configuration.md).
+ +::: + +This section covers the following topics: + +- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) + - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) + - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) + - [Registered nodes](#registered-nodes) +- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) +- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) +- [Deleting a node](#deleting-a-node) +- [Scaling nodes](#scaling-nodes) +- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) +- [Cordoning a node](#cordoning-a-node) +- [Draining a node](#draining-a-node) + - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) + - [Grace period](#grace-period) + - [Timeout](#timeout) + - [Drained and cordoned state](#drained-and-cordoned-state) +- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) + +# Node Options Available for Each Cluster Creation Option + +The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. + +| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Registered EKS Nodes][4] | [All Other Registered Nodes][5] | Description | +| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | -------------------| ------------------------------------------------------------------ | +| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable. | +| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable _and_ evicts all pods. | +| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | ✓ | ✓ | Enter a custom name, description, label, or taints for a node. | +| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | ✓ | ✓ | View API data. | +| [Delete](#deleting-a-node) | ✓ | ✓ | | * | * | Deletes defective nodes from the cluster. | +| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | | Download SSH key in order to SSH into the node. | +| [Node Scaling](#scaling-nodes) | ✓ | | | ✓ | | Scale the number of nodes in the node pool up or down. | + +[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +[2]: ../../../pages-for-subheaders/use-existing-nodes.md +[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +[4]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md +[5]: ../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md + +\* Delete option accessible via View API + + +### Nodes Hosted by an Infrastructure Provider + +Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +Clusters provisioned using [one of the node pool options](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) can be scaled up or down if the node pool is edited. 
+ +A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. + +Rancher uses [node templates](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. + +### Nodes Provisioned by Hosted Kubernetes Providers + +Options for managing nodes [hosted by a Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. + +### Registered Nodes + +Although you can deploy workloads to a [registered cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. + +# Managing and Editing Individual Nodes + +Editing a node lets you: + +* Change its name +* Change its description +* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + +To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**..**.). + +# Viewing a Node in the Rancher API + +Select this option to view the node's [API endpoints](../../../pages-for-subheaders/about-the-api.md). + +# Deleting a Node + +Use **Delete** to remove defective nodes from the cloud provider. + +When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) + +:::tip + +If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. + +::: + +# Scaling Nodes + +For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) by using the scale controls. This option isn't available for other cluster types. + +# SSH into a Node Hosted by an Infrastructure Provider + +For [nodes hosted by an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to SSH into a node and click the name of the cluster. +1. On the **Machine Pools** tab, find the node that you want to remote into and click **⋮ > Download SSH Key**. A ZIP file containing files used for SSH will be downloaded. +1. Extract the ZIP file to any location. +1. 
Open Terminal. Change your location to the extracted ZIP file. +1. Enter the following command: + + ``` + ssh -i id_rsa root@ + ``` + +# Cordoning a Node + +_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. + +# Draining a Node + +_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. + +- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. + +- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. + +You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. + +However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. + +### Aggressive and Safe Draining Options + +When you configure the upgrade strategy for the cluster, you will be able to enable node draining. If node draining is enabled, you will be able to configure how pods are deleted and rescheduled. + +- **Aggressive Mode** + + In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. + + Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. + +- **Safe Mode** + + If a node has standalone pods or ephemeral data it will be cordoned but not drained. +### Grace Period + +The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. + +### Timeout + +The amount of time drain should continue to wait before giving up. + +:::note Kubernetes Known Issue: + +The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12. + +::: + +### Drained and Cordoned State + +If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. + +If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. 
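
For reference, the cordon, drain, and uncordon operations that Rancher drives from the UI correspond to standard `kubectl` commands. A minimal sketch follows, where the node name and all flag values are assumptions; on kubectl versions older than v1.20 the `--delete-emptydir-data` flag is spelled `--delete-local-data`:

```sh
# Illustrative only: the node name and values below are assumptions.
# Mark the node unschedulable (cordon):
kubectl cordon worker-node-1

# Evict pods (drain); --delete-emptydir-data roughly corresponds to "aggressive mode",
# while --grace-period and --timeout match the options described above:
kubectl drain worker-node-1 --ignore-daemonsets --delete-emptydir-data --grace-period=120 --timeout=300s

# Make the node schedulable again once maintenance is done (uncordon):
kubectl uncordon worker-node-1
```
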
+ +Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. + +**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). + +# Labeling a Node to be Ignored by Rancher + +Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. + +Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. + +In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. + +You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. + +:::note + +There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. + +::: + + +### Labeling Nodes to be Ignored with kubectl + +To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: + +``` +cattle.rancher.io/node-status: ignore +``` + +**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. + +If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. + +If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. + +If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings in the Rancher API under `v3/settings/ignore-node-name`. diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md new file mode 100644 index 0000000000..8ca8666898 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md @@ -0,0 +1,197 @@ +--- +title: Projects and Kubernetes Namespaces with Rancher +description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces +weight: 2032 +--- + +A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. + +A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +:::note + +As of Rancher v2.6, projects are de-emphasized on the UI because it is no longer required to create any Kubernetes resources within a project scope. However, resources such as [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md#creating-secrets-in-projects) can still be created in a project scope if the legacy feature flag is enabled. + +::: + +This section describes how projects and namespaces work with Rancher. 
It covers the following topics: + +- [About namespaces](#about-namespaces) +- [About projects](#about-projects) + - [The cluster's default project](#the-cluster-s-default-project) + - [The system project](#the-system-project) +- [Project authorization](#project-authorization) +- [Pod security policies](#pod-security-policies) +- [Creating projects](#creating-projects) +- [Switching between clusters and projects](#switching-between-clusters-and-projects) + +# About Namespaces + +A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) + +:::note + +Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. + +::: + +Namespaces provide the following functionality: + +- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. +- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. + +You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. + +You can assign the following resources directly to namespaces: + +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) + +To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. + +For more information on creating and moving namespaces, see [Namespaces](../manage-projects/manage-namespaces.md). + +### Role-based access control issues with namespaces and kubectl + +Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. + +This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. + +If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](../manage-projects/manage-namespaces.md) to ensure that you will have permission to access the namespace. 
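
As background, Rancher records which project a namespace belongs to using a `field.cattle.io/projectId` annotation on the namespace. A minimal sketch of what this looks like from `kubectl` is shown below; the namespace name and the cluster and project IDs are placeholders, and annotating a namespace this way still requires sufficient permissions on the target project:

```sh
# Sketch only: namespace name and IDs are placeholders.
# A namespace created directly with kubectl is not associated with any project:
kubectl create namespace team-a

# Rancher associates a namespace with a project through an annotation of the form
# field.cattle.io/projectId: <cluster ID>:<project ID>
kubectl annotate namespace team-a field.cattle.io/projectId=c-abc12:p-xyz34
```
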
+ +If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. + +# About Projects + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. + +You can use projects to perform actions such as: + +- Assign users to a group of namespaces (i.e., [project membership](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md)). +- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). +- Assign resources to the project. +- Assign Pod Security Policies. + +When you create a cluster, two projects are automatically created within it: + +- [Default Project](#the-cluster-s-default-project) +- [System Project](#the-system-project) + +### The Cluster's Default Project + +When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. + +If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. + +If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. + +### The System Project + +When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. + +To open it, open the cluster view and click **Cluster > Projects/Namespaces**. This view shows all of the namespaces in the `system` project. + +The `system` project: + +- Is automatically created when you provision a cluster. +- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. +- Allows you to add more namespaces or move its namespaces to other projects. +- Cannot be deleted because it's required for cluster operations. + +:::note + +In RKE clusters where the project network isolation option is enabled, the `system` project overrides the project network isolation option so that it can communicate with other projects, collect logs, and check health. + +::: + +# Project Authorization + +Standard users are only authorized for project access in two situations: + +- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. +- Standard users can access projects that they create themselves. 
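
Once access has been granted, a project member can sanity-check their effective permissions with standard `kubectl`; in this sketch, `team-a` is an assumed namespace that belongs to the project:

```sh
# Illustrative check of project-scoped permissions; "team-a" is an assumed
# namespace belonging to the project.
kubectl auth can-i create deployments --namespace team-a
kubectl auth can-i delete pods --namespace team-a
kubectl auth can-i list namespaces    # cluster-scoped actions typically remain forbidden
```
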
+ +# Pod Security Policies + +Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) at the project level in addition to the cluster level. However, as a best practice, we recommend applying Pod Security Policies at the cluster level. + +# Creating Projects + +This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. + +1. [Name a new project.](#1-name-a-new-project) +2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) +3. [Recommended: Add project members.](#3-recommended-add-project-members) +4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) + +### 1. Name a New Project + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to project in and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Click **Create Project**. +1. Enter a **Project Name**. + +### 2. Optional: Select a Pod Security Policy + +This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +Assigning a PSP to a project will: + +- Override the cluster's default PSP. +- Apply the PSP to the project. +- Apply the PSP to any namespaces you add to the project later. + +### 3. Recommended: Add Project Members + +Use the **Members** section to provide other users with project access and roles. + +By default, your user is added as the project `Owner`. + +:::note Notes on Permissions: + +- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + +- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + +- Choose `Custom` to create a custom role on the fly: [Custom Project Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#custom-project-roles). + +::: + +To add members: + +1. In the **Members** tab, click **Add**. +1. From the **Select Member** field, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. +1. In the **Project Permissions** section, choose a role. For more information, refer to the [documentation on project roles.](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + +### 4. Optional: Add Resource Quotas + +Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas](projects-and-namespaces.md). 
+ +To add a resource quota, + +1. In the **Resource Quotas** tab, click **Add Resource**. +1. Select a **Resource Type**. For more information, see [Resource Quotas.](projects-and-namespaces.md). +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. +1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit](../../../pages-for-subheaders/manage-project-resource-quotas.md) +1. Click **Create**. + +**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. + +| Field | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------- | +| Project Limit | The overall resource limit for the project. | +| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. | \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md b/docs/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md new file mode 100644 index 0000000000..e4bf50dad4 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md @@ -0,0 +1,82 @@ +--- +title: Certificate Rotation +weight: 2040 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::caution + +Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. + +::: + +By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. + +Certificates can be rotated for the following services: + + + + +- etcd +- kubelet (node certificate) +- kubelet (serving certificate, if [enabled](https://rancher.com/docs/rke/latest/en/config-options/services/#kubelet-options)) +- kube-apiserver +- kube-proxy +- kube-scheduler +- kube-controller-manager + + + + +- admin +- api-server +- controller-manager +- scheduler +- rke2-controller +- rke2-server +- cloud-controller +- etcd +- auth-proxy +- kubelet +- kube-proxy + + + + +:::note + +For users who didn't rotate their webhook certificates, and they have expired after one year, please see this [page](../../../troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md) for help. + +::: + +### Certificate Rotation + +Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to rotate certificates for amd click **⋮ > Rotate Certificates**. +1. Select which certificates that you want to rotate. 
+ + * Rotate all Service certificates (keep the same CA) + * Rotate an individual service and choose one of the services from the drop-down menu + +1. Click **Save**. + +**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. + +### Additional Notes + + + + +Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher launched Kubernetes clusters. + + + + +In RKE2, both etcd and control plane nodes are treated as the same `server` concept. As such, when rotating certificates of services specific to either of these components will result in certificates being rotated on both. The certificates will only change for the specified service, but you will see nodes for both components go into an updating state. You may also see worker only nodes go into an updating state. This is to restart the workers after a certificate change to ensure they get the latest client certs. + + + diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md b/docs/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md new file mode 100644 index 0000000000..6f798aca62 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md @@ -0,0 +1,63 @@ +--- +title: Adding Users to Projects +weight: 2505 +--- + +If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. + +You can add members to a project as it is created, or add them to an existing project. + +:::tip + +Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) instead. + +::: + +### Adding Members to a New Project + +You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.](../manage-clusters/projects-and-namespaces.md) + +### Adding Members to an Existing Project + +Following project creation, you can add users as project members so that they can access its resources. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to add members to a project and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Go to the project where you want to add members and click **⋮ > Edit Config**. +1. In the **Members** tab, click **Add**. +1. Search for the user or group that you want to add to the project. + + If external authentication is configured: + + - Rancher returns users from your external authentication source as you type. + + - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. + + :::note + + If you are logged in as a local user, external users do not display in your search results. + + ::: + +1. Assign the user or group **Project** roles. + + [What are Project Roles?](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + :::note Notes: + + - Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. 
However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create or delete namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + + - By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + + - For `Custom` roles, you can modify the list of individual roles available for assignment. + + - To add roles to the list, [Add a Custom Role](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + - To remove roles from the list, [Lock/Unlock Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + + ::: + +**Result:** The chosen users are added to the project. + +- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md b/docs/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md new file mode 100644 index 0000000000..9121696179 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md @@ -0,0 +1,16 @@ +--- +title: Rancher's CI/CD Pipelines +description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users +weight: 4000 +--- +Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +For details, refer to the [pipelines](../../../pages-for-subheaders/pipelines.md) section. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md new file mode 100644 index 0000000000..9d7753a24f --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -0,0 +1,75 @@ +--- +title: Namespaces +weight: 2520 +--- + +Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. + +Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. 
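+
+For orientation, the sketch below shows how a namespace is tied to a Rancher project behind the scenes, using the `field.cattle.io/projectId` annotation that is also described in the project resource quota documentation. The namespace name and the ID values are placeholders, and creating namespaces through the Rancher UI (rather than applying YAML like this) is the recommended path, as the note further down explains.
+
+```yaml
+# Illustrative sketch only: a namespace associated with a Rancher project.
+# Replace the annotation value with your own [your-cluster-ID]:[your-project-ID].
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+  annotations:
+    field.cattle.io/projectId: [your-cluster-ID]:[your-project-ID]
+```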
+
+Resources that you can assign directly to namespaces include:
+
+- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md)
+- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md)
+- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md)
+- [Persistent Volume Claims](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md)
+- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md)
+- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md)
+- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md)
+- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md)
+
+To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned at the project level instead, and permissions are automatically inherited by any namespace owned by that project.
+
+:::note
+
+If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace.
+
+:::
+
+### Creating Namespaces
+
+Create a new namespace to isolate apps and resources in a project.
+
+:::tip
+
+When working with project resources that you can assign to a namespace (i.e., [workloads](../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md), [certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md), [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md), etc.), you can create a namespace on the fly.
+
+:::
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to create a namespace and click **Explore**.
+1. Click **Cluster > Projects/Namespaces**.
+1. Go to the project where you want to add a namespace and click **Create Namespace**. Alternatively, go to **Not in a Project** to create a namespace not associated with a project.
+
+1. **Optional:** If your project has [Resource Quotas](../../../pages-for-subheaders/manage-project-resource-quotas.md) in effect, you can override the default resource **Limits** (which place a cap on the resources that the namespace can consume).
+
+1. Enter a **Name** and then click **Create**.
+
+**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace.
+
+### Moving Namespaces to Another Project
+
+Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application.
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to move a namespace and click **Explore**.
+1. Click **Cluster > Projects/Namespaces**.
+1. Go to the namespace you want to move and click **⋮ > Move**.
+
+1. Select the namespace(s) that you want to move to a different project. You can move multiple namespaces at once.
+
+   :::note Notes:
+
+   - Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking.
+   - You cannot move a namespace into a project that already has a [resource quota](../../../pages-for-subheaders/manage-project-resource-quotas.md) configured.
+   - If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace.
+
+   :::
+
+1. Choose a new project for the namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**.
+
+**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attaches the resources from the new project.
+
+### Editing Namespace Resource Quotas
+
+You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources.
+
+For more information, see how to [edit namespace resource quotas](./manage-project-resource-quotas/override-default-limit-in-namespaces.md).
\ No newline at end of file
diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
new file mode 100644
index 0000000000..6fa3506c69
--- /dev/null
+++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
@@ -0,0 +1,40 @@
+---
+title: Pod Security Policies
+weight: 5600
+---
+
+:::note
+
+These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md).
+
+:::
+
+You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation.
+
+### Prerequisites
+
+- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instructions, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md).
+- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster](../manage-clusters/add-a-pod-security-policy.md).
+
+### Applying a Pod Security Policy
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to apply a pod security policy and click **Explore**.
+1. Click **Cluster > Projects/Namespaces**.
+1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit Config**.
+1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project.
+   Assigning a PSP to a project will:
+
+   - Override the cluster's default PSP.
+   - Apply the PSP to the project.
+   - Apply the PSP to any namespaces you add to the project later.
+
+1. Click **Save**.
+
+**Result:** The PSP is applied to the project and any namespaces added to the project.
+
+:::note
+
+Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP.
+ +::: \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md new file mode 100644 index 0000000000..a061b33831 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -0,0 +1,61 @@ +--- +title: How Resource Quotas Work in Rancher Projects +weight: 1 +--- + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. + +In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. + +In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. + +Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace +![Native Kubernetes Resource Quota Implementation](/img/kubernetes-resource-quota.svg) + +Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. + +The resource quota includes two limits, which you set while creating or editing a project: + + +- **Project Limits:** + + This set of values configures a total limit for each specified resource shared among all namespaces in the project. + +- **Namespace Default Limits:** + + This set of values configures the default quota limit available for each namespace for each specified resource. + When a namespace is created in the project without overrides, this limit is automatically bound to the namespace and enforced. + + +In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. + +Rancher: Resource Quotas Propagating to Each Namespace +![Rancher Resource Quota Implementation](/img/rancher-resource-quota.png) + +Let's highlight some more nuanced functionality for namespaces created **_within_** the Rancher UI. 
If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. + +Before creating a namespace in a project, Rancher compares the amounts of the project's available resources and requested resources, regardless of whether they come from the default or overridden limits. +If the requested resources exceed the remaining capacity in the project for those resources, Rancher will assign the namespace the remaining capacity for that resource. + +However, this is not the case with namespaces created **_outside_** of Rancher's UI. For namespaces created via `kubectl`, Rancher +will assign a resource quota that has a **zero** amount for any resource that requested more capacity than what remains in the project. + +To create a namespace in an existing project via `kubectl`, use the `field.cattle.io/projectId` annotation. To override the default +requested quota limit, use the `field.cattle.io/resourceQuota` annotation. +``` +apiVersion: v1 +kind: Namespace +metadata: + annotations: + field.cattle.io/projectId: [your-cluster-ID]:[your-project-ID] + field.cattle.io/resourceQuota: '{"limit":{"limitsCpu":"100m", "limitsMemory":"100Mi", "configMaps": "50"}}' + name: my-ns +``` + +The following table explains the key differences between the two quota types. + +| Rancher Resource Quotas | Kubernetes Resource Quotas | +| ---------------------------------------------------------- | -------------------------------------------------------- | +| Applies to projects and namespace. | Applies to namespaces only. | +| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | +| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md new file mode 100644 index 0000000000..05f4c3b682 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -0,0 +1,34 @@ +--- +title: Overriding the Default Limit for a Namespace +weight: 2 +--- + +Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. + +In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. 
Therefore, the administrator [raises the namespace limits](../../manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. + +Namespace Default Limit Override +![Namespace Default Limit Override](/img/rancher-resource-quota-override.svg) + +How to: [Editing Namespace Resource Quotas](../../manage-clusters/projects-and-namespaces.md) + +### Editing Namespace Resource Quotas + +If there is a resource quota configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to edit a namespace resource quota and click **Explore**. +1. Click **Cluster > Projects/Namespaces**. +1. Find the namespace for which you want to edit the resource quota. Click **⋮ > Edit Config**. +1. Edit the resource limits. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. + + For more information about each **Resource Type**, see [the type reference](resource-quota-types.md). + + :::note + + - If a resource quota is not configured for the project, these options will not be available. + - If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. + + ::: + +**Result:** Your override is applied to the namespace's resource quota. diff --git a/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md new file mode 100644 index 0000000000..f111346b07 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md @@ -0,0 +1,28 @@ +--- +title: Resource Quota Type Reference +weight: 4 +--- + +When you create a resource quota, you are configuring the pool of resources available to the project. You can set the following resource limits for the following resource types. + +| Resource Type | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CPU Limit* | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the project/namespace.1 | +| CPU Reservation* | The minimum amount of CPU (in millicores) guaranteed to the project/namespace.1 | +| Memory Limit* | The maximum amount of memory (in bytes) allocated to the project/namespace.1 | +| Memory Reservation* | The minimum amount of memory (in bytes) guaranteed to the project/namespace.1 | +| Storage Reservation | The minimum amount of storage (in gigabytes) guaranteed to the project/namespace. | +| Services Load Balancers | The maximum number of load balancers services that can exist in the project/namespace. | +| Services Node Ports | The maximum number of node port services that can exist in the project/namespace. | +| Pods | The maximum number of pods that can exist in the project/namespace in a non-terminal state (i.e., pods with a state of `.status.phase in (Failed, Succeeded)` equal to true). 
| +| Services | The maximum number of services that can exist in the project/namespace. | +| ConfigMaps | The maximum number of ConfigMaps that can exist in the project/namespace. | +| Persistent Volume Claims | The maximum number of persistent volume claims that can exist in the project/namespace. | +| Replications Controllers | The maximum number of replication controllers that can exist in the project/namespace. | +| Secrets | The maximum number of secrets that can exist in the project/namespace. | + +:::note ***** + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. A container default resource limit can be set at the same time to avoid the need to explicitly set these limits for every workload. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. + +::: \ No newline at end of file diff --git a/content/rancher/v2.6/en/project-admin/resource-quotas/override-container-default/_index.md b/docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/resource-quotas/override-container-default/_index.md rename to docs/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md new file mode 100644 index 0000000000..a0bc1a2994 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -0,0 +1,152 @@ +--- +title: Persistent Grafana Dashboards +weight: 6 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. + +- [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) +- [Known Issues](#known-issues) + +# Creating a Persistent Grafana Dashboard + + + + +:::note Prerequisites: + +- The monitoring application needs to be installed. +- To create the persistent dashboard, you must have at least the **Manage Config Maps** Rancher RBAC permissions assigned to you in the project or namespace that contains the Grafana Dashboards. This correlates to the `monitoring-dashboard-edit` or `monitoring-dashboard-admin` Kubernetes native RBAC Roles exposed by the Monitoring chart. +- To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#users-with-rancher-cluster-manager-based-permissions) + +::: + +### 1. Get the JSON model of the dashboard that you want to persist + +To create a persistent dashboard, you will need to get the JSON model of the dashboard you want to persist. You can use a premade dashboard or build your own. 
+ +To use a premade dashboard, go to [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards), open up its detail page, and click on the **Download JSON** button to get the JSON model for the next step. + +To use your own dashboard: + +1. Click on the link to open Grafana. On the cluster detail page, click **Monitoring**. +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. + + :::note + + Regardless of who has the password, in order to access the Grafana instance, you still need at least the Manage Services or View Monitoring permissions in the project that Rancher Monitoring is deployed into. Alternative credentials can also be supplied on deploying or upgrading the chart. + + ::: + +1. Create a dashboard using Grafana's UI. Once complete, go to the dashboard's settings by clicking on the gear icon in the top navigation menu. In the left navigation menu, click **JSON Model**. +1. Copy the JSON data structure that appears. + +### 2. Create a ConfigMap using the Grafana JSON model + +Create a ConfigMap in the namespace that contains your Grafana Dashboards (e.g. cattle-dashboards by default). + +The ConfigMap should look like this: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards # Change if using a non-default namespace +data: + .json: |- + +``` + +By default, Grafana is configured to watch all ConfigMaps with the `grafana_dashboard` label within the `cattle-dashboards` namespace. + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, refer to [this section.](#configuring-namespaces-for-the-grafana-dashboard-configmap) + +To create the ConfigMap in the Rancher UI, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. +1. Click **More Resources > Core > ConfigMaps**. +1. Click **Create**. +1. Set up the key-value pairs similar to the example above. When entering the value for `.json`, click **Read from File** to upload the JSON data model as the value. +1. Click **Create**. + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. + +Dashboards that are persisted using ConfigMaps cannot be deleted or edited from the Grafana UI. + +If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +### Configuring Namespaces for the Grafana Dashboard ConfigMap + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: + +``` +grafana.sidecar.dashboards.searchNamespace=ALL +``` + +Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. + + + + +:::note Prerequisites: + +- The monitoring application needs to be installed. +- You must have the cluster-admin ClusterRole permission. + +::: + +1. 
In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to configure the Grafana namespace and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Grafana**. +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. + + :::note + + Regardless of who has the password, cluster administrator permission in Rancher is still required to access the Grafana instance. + + ::: + +1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. +1. In the left navigation menu, click **JSON Model**. +1. Copy the JSON data structure that appears. +1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. Paste the JSON into the ConfigMap in the format shown in the example below: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards + data: + .json: |- + + ``` + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. + +Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +To prevent the persistent dashboard from being deleted when Monitoring v2 is uninstalled, add the following annotation to the `cattle-dashboards` namespace: + +``` +helm.sh/resource-policy: "keep" +``` + + + + +# Known Issues + +For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the `cattle-dashboards` namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. + +This annotation will be added by default in the new monitoring chart released by Rancher v2.5.8, but it still needs to be manually applied for users of earlier Rancher versions. diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md new file mode 100644 index 0000000000..ad266ba8c1 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -0,0 +1,41 @@ +--- +title: Customizing Grafana Dashboards +weight: 5 +--- + +In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. + +### Prerequisites + +Before you can customize a Grafana dashboard, the `rancher-monitoring` application must be installed. + +To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#users-with-rancher-cluster-manager-based-permissions) + +### Signing in to Grafana + +1. In the Rancher UI, go to the cluster that has the dashboard you want to customize. +1. In the left navigation menu, click **Monitoring.** +1. 
Click **Grafana.** The Grafana dashboard should open in a new tab. +1. Go to the log in icon in the lower left corner and click **Sign In.** +1. Log in to Grafana. The default Admin username and password for the Grafana instance is `admin/prom-operator`. (Regardless of who has the password, cluster administrator permission in Rancher is still required access the Grafana instance.) Alternative credentials can also be supplied on deploying or upgrading the chart. + + +### Getting the PromQL Query Powering a Grafana Panel + +For any panel, you can click the title and click **Explore** to get the PromQL queries powering the graphic. + +For this example, we would like to get the CPU usage for the Alertmanager container, so we click **CPU Utilization > Inspect.** + +The **Data** tab shows the underlying data as a time series, with the time in first column and the PromQL query result in the second column. Copy the PromQL query. + + ``` + (1 - (avg(irate({__name__=~"node_cpu_seconds_total|windows_cpu_time_total",mode="idle"}[5m])))) * 100 + + ``` + +You can then modify the query in the Grafana panel or create a new Grafana panel using the query. + +See also: + +- [Grafana docs on editing a panel](https://grafana.com/docs/grafana/latest/panels/panel-editor/) +- [Grafana docs on adding a panel to a dashboard](https://grafana.com/docs/grafana/latest/panels/add-a-panel/) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/memory-usage/_index.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/guides/memory-usage/_index.md rename to docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md new file mode 100644 index 0000000000..2244470709 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -0,0 +1,75 @@ +--- +title: Enable Monitoring +weight: 1 +--- + +As an [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. + +This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. + +You can enable monitoring with or without SSL. + +# Requirements + +- Make sure that you are allowing traffic on port 9796 for each of your nodes because Prometheus will scrape metrics from here. +- Make sure your cluster fulfills the resource requirements. The cluster should have at least 1950Mi memory available, 2700m CPU, and 50Gi storage. A breakdown of the resource limits and requests is [here.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) +- When installing monitoring on an RKE cluster using RancherOS or Flatcar Linux nodes, change the etcd node certificate directory to `/opt/rke/etc/kubernetes/ssl`. 
+- For clusters provisioned with the RKE CLI and the address is set to a hostname instead of an IP address, set `rkeEtcd.clients.useLocalhost` to `true` during the Values configuration step of the installation. The YAML snippet will look like the following: + +```yaml +rkeEtcd: + clients: + useLocalhost: true +``` + +:::note + +If you want to set up Alertmanager, Grafana or Ingress, it has to be done with the settings on the Helm chart deployment. It's problematic to create Ingress outside the deployment. + +::: + +# Setting Resource Limits and Requests + +The resource requests and limits can be configured when installing `rancher-monitoring`. To configure Prometheus resources from the Rancher UI, click **Apps & Marketplace > Monitoring** in the upper left corner. + +For more information about the default limits, see [this page.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) + +# Install the Monitoring Application + +### Enable Monitoring for use without SSL + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Cluster Tools** (bottom left corner). +1. Click **Install** by Monitoring. +1. Optional: Customize requests, limits and more for Alerting, Prometheus, and Grafana in the Values step. For help, refer to the [configuration reference.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md) + +**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. + +### Enable Monitoring for use with SSL + +1. Follow the steps on [this page](../../new-user-guides/kubernetes-resources-setup/secrets.md) to create a secret in order for SSL to be used for alerts. + - The secret should be created in the `cattle-monitoring-system` namespace. If it doesn't exist, create it first. + - Add the `ca`, `cert`, and `key` files to the secret. +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to enable monitoring for use with SSL and click **Explore**. +1. Click **Apps & Marketplace > Charts**. +1. Click **Monitoring**. +1. Click **Install** or **Update**, depending on whether you have already installed Monitoring. +1. Check the box for **Customize Helm options before install** and click **Next**. +1. Click **Alerting**. +1. In the **Additional Secrets** field, add the secrets created earlier. + +**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. + +When [creating a receiver,](../monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md#creating-receivers-in-the-rancher-ui) SSL-enabled receivers such as email or webhook will have a **SSL** section with fields for **CA File Path**, **Cert File Path**, and **Key File Path**. Fill in these fields with the paths to each of `ca`, `cert`, and `key`. The path will be of the form `/etc/alertmanager/secrets/name-of-file-in-secret`. + +For example, if you created a secret with these key-value pairs: + +```yaml +ca.crt=`base64-content` +cert.pem=`base64-content` +key.pfx=`base64-content` +``` + +Then **Cert File Path** would be set to `/etc/alertmanager/secrets/cert.pem`. 
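+
+For reference, a minimal sketch of such a secret is shown below. The secret name `alertmanager-tls` is an assumption (use whatever name you then reference under **Additional Secrets**), and the values are the base64-encoded contents of your certificate files:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: alertmanager-tls                # assumed name; reference it in the Additional Secrets field
+  namespace: cattle-monitoring-system   # must be in the same namespace as Alertmanager
+type: Opaque
+data:
+  ca.crt: base64-content
+  cert.pem: base64-content
+  key.pfx: base64-content
+```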
diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md new file mode 100644 index 0000000000..76478d44f7 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md @@ -0,0 +1,138 @@ +--- +title: Migrating to Rancher v2.5+ Monitoring +weight: 9 +--- + +If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no automatic upgrade path for switching to the new monitoring/alerting solution. Before deploying the new monitoring solution via Cluster Explore, you will need to disable and remove all existing custom alerts, notifiers and monitoring installations for the whole cluster and in all projects. + +- [Monitoring Before Rancher v2.5](#monitoring-before-rancher-v2-5) +- [Monitoring and Alerting via Cluster Explorer in Rancher v2.5](#monitoring-and-alerting-via-cluster-explorer-in-rancher-v2-5) +- [Changes to Role-based Access Control](#changes-to-role-based-access-control) +- [Migrating from Monitoring V1 to Monitoring V2](#migrating-from-monitoring-v1-to-monitoring-v2) + - [Migrating Grafana Dashboards](#migrating-grafana-dashboards) + - [Migrating Alerts](#migrating-alerts) + - [Migrating Notifiers](#migrating-notifiers) + - [Migrating for RKE Template Users](#migrating-for-rke-template-users) + +# Monitoring Before Rancher v2.5 + +As of v2.2.0, the global view in the legacy Rancher UI allowed users to enable Monitoring & Alerting V1 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) independently within a cluster. + +When Monitoring is enabled, Monitoring V1 deploys [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/docs/grafana/latest/getting-started/what-is-grafana/) onto a cluster to monitor the state of processes of your cluster nodes, Kubernetes components, and software deployments and create custom dashboards to make it easy to visualize collected metrics. + +Monitoring V1 could be configured on both a cluster-level and on a project-level and would automatically scrape certain workloads deployed as Apps on the Rancher cluster. + +When Alerts or Notifiers are enabled, Alerting V1 deploys [Prometheus Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) and a set of Rancher controllers onto a cluster that allows users to define alerts and configure alert-based notifications via Email, Slack, PagerDuty, etc. Users can choose to create different types of alerts depending on what needs to be monitored (e.g. System Services, Resources, CIS Scans, etc.); however, PromQL Expression-based alerts can only be created if Monitoring V1 is enabled. + +# Monitoring and Alerting via Cluster Explorer in Rancher 2.5 + +As of v2.5.0, Rancher's Cluster Explorer now allows users to enable Monitoring & Alerting V2 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) together within a cluster. + +Unlike in Monitoring & Alerting V1, both features are packaged in a single Helm chart found [here](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring). 
The behavior of this chart and configurable fields closely matches [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), a Prometheus Community Helm chart, and any deviations from the upstream chart can be found in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md) maintained with the chart. + +Monitoring V2 can only be configured on the cluster level. Project-level monitoring and alerting is no longer supported. + +For more information on how to configure Monitoring & Alerting V2, see [this page.](../../../pages-for-subheaders/monitoring-v2-configuration-guides.md) + +# Changes to Role-based Access Control + +Project owners and members no longer get access to Grafana or Prometheus by default. If view-only users had access to Grafana, they would be able to see data from any namespace. For Kiali, any user can edit things they don’t own in any namespace. + +For more information about role-based access control in `rancher-monitoring`, refer to [this page.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md) + +# Migrating from Monitoring V1 to Monitoring V2 + +While there is no automatic migration available, it is possible to manually migrate custom Grafana dashboards and alerts that were created in Monitoring V1 to Monitoring V2. + +Before you can install Monitoring V2, Monitoring V1 needs to be uninstalled completely. In order to uninstall Monitoring V1: + +* Remove all cluster and project specific alerts and alerts groups. +* Remove all notifiers. +* Disable all project monitoring installations under Cluster -> Project -> Tools -> Monitoring. +* Ensure that all project-monitoring apps in all projects have been removed and are not recreated after a few minutes +* Disable the cluster monitoring installation under Cluster -> Tools -> Monitoring. +* Ensure that the cluster-monitoring app and the monitoring-operator app in the System project have been removed and are not recreated after a few minutes. + +#### RKE Template Clusters + +To prevent V1 monitoring from being re-enabled, disable monitoring and in future RKE template revisions via modification of the RKE template yaml: + +```yaml +enable_cluster_alerting: false +enable_cluster_monitoring: false +``` + +#### Migrating Grafana Dashboards + +You can migrate any dashboard added to Grafana in Monitoring V1 to Monitoring V2. In Monitoring V1 you can export an existing dashboard like this: + +* Sign into Grafana +* Navigate to the dashboard you want to export +* Go to the dashboard settings +* Copy the [JSON Model](https://grafana.com/docs/grafana/latest/dashboards/json-model/) + +In the JSON Model, change all `datasource` fields from `RANCHER_MONITORING` to `Prometheus`. You can easily do this by replacing all occurrences of `"datasource": "RANCHER_MONITORING"` with `"datasource": "Prometheus"`. + +If Grafana is backed by a persistent volume, you can now [import](https://grafana.com/docs/grafana/latest/dashboards/export-import/) this JSON Model into the Monitoring V2 Grafana UI. 
+It is recommended to provide the dashboard to Grafana with a ConfigMap in the `cattle-dashboards` namespace that has the label `grafana_dashboard: "1"`: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-dashboard + namespace: cattle-dashboards + labels: + grafana_dashboard: "1" +data: + custom-dashboard.json: | + { + ... + } +``` + +Once this ConfigMap is created, the dashboard will automatically be added to Grafana. + +### Migrating Alerts + +It is only possible to directly migrate expression-based alerts to Monitoring V2. Fortunately, the event-based alerts that could be set up to alert on system component, node or workload events, are already covered out-of-the-box by the alerts that are part of Monitoring V2. So it is not necessary to migrate them. + +To migrate the following expression alert + +![](/img/monitoring/migration/alert_2.4_to_2.5_source.png) + +you have to either create a PrometheusRule configuration like this in any namespace + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: custom-rules + namespace: default +spec: + groups: + - name: custom.rules + rules: + - alert: Custom Expression Alert + expr: prometheus_query > 5 + for: 5m + labels: + severity: critical + annotations: + summary: "The result of prometheus_query has been larger than 5 for 5m. Current value {{ $value }}" +``` + +or add the Prometheus Rule through the Cluster Explorer + +![](/img/monitoring/migration/alert_2.4_to_2.5_target.png) + +For more details on how to configure PrometheusRules in Monitoring V2 see [Monitoring Configuration](../../../pages-for-subheaders/monitoring-v2-configuration-guides.md). + +### Migrating Notifiers + +There is no direct equivalent for how notifiers work in Monitoring V1. Instead you have to replicate the desired setup with Routes and Receivers in Monitoring V2. + + +### Migrating for RKE Template Users + +If the cluster is managed using an RKE template, you will need to disable monitoring in future RKE template revisions to prevent legacy monitoring from being re-enabled. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md new file mode 100644 index 0000000000..fa13cc4c52 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards.md @@ -0,0 +1,8 @@ +--- +title: Customizing Grafana Dashboards +weight: 3 +--- + +Grafana dashboards are customized the same way whether it's for rancher-monitoring or for Prometheus Federator. + +For instructions, refer to [this page](../customize-grafana-dashboard.md). 
\ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md new file mode 100644 index 0000000000..6264d66c42 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator.md @@ -0,0 +1,70 @@ +--- +title: Enable Prometheus Federator +weight: 1 +--- + +- [Requirements](#requirements) +- [Install the Prometheus Federator Application](#install-the-prometheus-federator-application) + +# Requirements + +By default, Prometheus Federator is configured and intended to be deployed alongside [rancher-monitoring](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/), which deploys Prometheus Operator alongside a Cluster Prometheus that each Project Monitoring Stack is configured to federate namespace-scoped metrics from by default. + +For instructions on installing rancher-monitoring, refer to [this page](../enable-monitoring.md). + +The default configuration should already be compatible with your rancher-monitoring stack. However, to optimize the security and usability of Prometheus Federator in your cluster, we recommend making these additional configurations to rancher-monitoring: + +- [Ensure the cattle-monitoring-system namespace is placed into the System Project](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). +- [Configure rancher-monitoring to only watch for resources created by the Helm chart itself](#configure-rancher-monitoring-to-only-watch-for-resources-created-by-the-helm-chart-itself). +- [Increase the CPU / memory limits of the Cluster Prometheus](#increase-the-cpu--memory-limits-of-the-cluster-prometheus). + +## Ensure the cattle-monitoring-system namespace is placed into the System Project (or a similarly locked down Project that has access to other Projects in the cluster) + +Prometheus Operator's security model expects that the namespace it is deployed into (e.g., `cattle-monitoring-system`) has limited access for anyone except Cluster Admins to avoid privilege escalation via execing into Pods (such as the Jobs executing Helm operations). In addition, deploying Prometheus Federator and all Project Prometheus stacks into the System Project ensures that each Project Prometheus is able to reach out to scrape workloads across all Projects, even if Network Policies are defined via Project Network Isolation. It also provides limited access for Project Owners, Project Members, and other users so that they're unable to access data that they shouldn't have access to (i.e., being allowed to exec into pods, set up the ability to scrape namespaces outside of a given Project, etc.). 
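+
+The move is typically done from the Rancher UI (**Projects/Namespaces > ⋮ > Move**). For reference, the underlying association is the `field.cattle.io/projectId` annotation on the namespace, the same annotation described in the project resource quota documentation. The sketch below is illustrative only; replace the placeholder with the ID of your System Project (or similarly locked-down project):
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: cattle-monitoring-system
+  annotations:
+    # Placeholder value: use your own [your-cluster-ID]:[your-project-ID]
+    field.cattle.io/projectId: [your-cluster-ID]:[your-project-ID]
+```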
+ +## Configure rancher-monitoring to only watch for resources created by the Helm chart itself + +Since each Project Monitoring Stack will watch the other namespaces and collect additional custom workload metrics or dashboards already, it's recommended to configure the following settings on all selectors to ensure that the Cluster Prometheus Stack only monitors resources created by the Helm Chart itself: + +``` +matchLabels: + release: "rancher-monitoring" +``` + +The following selector fields are recommended to have this value: +- `.Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector` +- `.Values.prometheus.prometheusSpec.serviceMonitorSelector` +- `.Values.prometheus.prometheusSpec.podMonitorSelector` +- `.Values.prometheus.prometheusSpec.ruleSelector` +- `.Values.prometheus.prometheusSpec.probeSelector` + +Once this setting is turned on, you can always create ServiceMonitors or PodMonitors that are picked up by the Cluster Prometheus by adding the label `release: "rancher-monitoring"` to them, in which case they will be ignored by Project Monitoring Stacks automatically by default, even if the namespace in which those ServiceMonitors or PodMonitors reside in are not system namespaces. + +:::note + +If you don't want to allow users to be able to create ServiceMonitors and PodMonitors that aggregate into the Cluster Prometheus in Project namespaces, you can additionally set the namespaceSelectors on the chart to only target system namespaces (which must contain `cattle-monitoring-system` and `cattle-dashboards`, where resources are deployed into by default by rancher-monitoring; you will also need to monitor the `default` namespace to get apiserver metrics or create a custom ServiceMonitor to scrape apiserver metrics from the Service residing in the default namespace) to limit your Cluster Prometheus from picking up other Prometheus Operator CRs. In that case, it would be recommended to turn `.Values.prometheus.prometheusSpec.ignoreNamespaceSelectors=true` to allow you to define ServiceMonitors that can monitor non-system namespaces from within a system namespace. + +::: + +## Increase the CPU / memory limits of the Cluster Prometheus + +Depending on a cluster's setup, it's generally recommended to give a large amount of dedicated memory to the Cluster Prometheus to avoid restarts due to out-of-memory errors (OOMKilled) usually caused by churn created in the cluster that causes a large number of high cardinality metrics to be generated and ingested by Prometheus within one block of time. This is one of the reasons why the default Rancher Monitoring stack expects around 4GB of RAM to be able to operate in a normal-sized cluster. However, when introducing Project Monitoring Stacks that are all sending `/federate` requests to the same Cluster Prometheus and are reliant on the Cluster Prometheus being "up" to federate that system data on their namespaces, it's even more important that the Cluster Prometheus has an ample amount of CPU / memory assigned to it to prevent an outage that can cause data gaps across all Project Prometheis in the cluster. + +:::note + +There are no specific recommendations on how much memory the Cluster Prometheus should be configured with since it depends entirely on the user's setup (namely the likelihood of encountering a high churn rate and the scale of metrics that could be generated at that time); it generally varies per setup. + +::: + +# Install the Prometheus Federator Application + +1. Click **☰ > Cluster Management**. +1. 
Go to the cluster that you want to install Prometheus Federator and click **Explore**. +1. Click **Apps -> Charts**. +1. Click the **Prometheus Federator** chart. +1. Click **Install**. +1. On the **Metadata** page, click **Next**. +1. In the **Project Release Namespace Project ID** field, the `System Project` is used as the default but can be overridden with another project with similarly [limited access](#ensure-the-cattle-monitoring-system-namespace-is-placed-into-the-system-project-or-a-similarly-locked-down-project-that-has-access-to-other-projects-in-the-cluster). + +### Display CPU and Memory Metrics for a Workload + +Displaying CPU and memory metrics with Prometheus Federator is done the same way as with rancher-monitoring. For instructions, refer [here](../set-up-monitoring-for-workloads.md#display-cpu-and-memory-metrics-for-a-workload). + +### Setting up Metrics Beyond CPU and Memory + +Setting up metrics beyond CPU and memory with Prometheus Federator is done the same way as with rancher-monitoring. For instructions, refer [here](../set-up-monitoring-for-workloads.md#setting-up-metrics-beyond-cpu-and-memory). + + diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/uninstall-prom-fed/_index.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/uninstall-prom-fed/_index.md rename to docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/monitoring-workloads/_index.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/monitoring-workloads/_index.md rename to docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md new file mode 100644 index 0000000000..66da5a42fe --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md @@ -0,0 +1,20 @@ +--- +title: Uninstall Monitoring +weight: 2 +--- + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace**. +1. Click **Installed Apps**. +1. Go to the `cattle-monitoring-system` namespace and check the boxes for `rancher-monitoring-crd` and `rancher-monitoring`. +1. Click **Delete**. +1. Confirm **Delete**. + +**Result:** `rancher-monitoring` is uninstalled. + +:::note Persistent Grafana Dashboards: + +For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the cattle-dashboards namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. This annotation is added by default in Monitoring V2 v14.5.100+ but can be manually applied on the cattle-dashboards namespace before an uninstall if an older version of the Monitoring chart is currently installed onto your cluster. 
+ +::: \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md new file mode 100644 index 0000000000..c532bee560 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -0,0 +1,44 @@ +--- +title: Alertmanager Configuration +weight: 1 +--- + +It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. + +When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#3-how-alertmanager-works) + +::: + +# About the Alertmanager Custom Resource + +By default, Rancher Monitoring deploys a single Alertmanager onto a cluster that uses a default Alertmanager Config Secret. + +You may want to edit the Alertmanager custom resource if you would like to take advantage of advanced options that are not exposed in the Rancher UI forms, such as the ability to create a routing tree structure that is more than two levels deep. + +It is also possible to create more than one Alertmanager in a cluster, which may be useful if you want to implement namespace-scoped monitoring. In this case, you should manage the Alertmanager custom resources using the same underlying Alertmanager Config Secret. + +### Deeply Nested Routes + +While the Rancher UI only supports a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager YAML. + +### Multiple Alertmanager Replicas + +As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret. + +This Secret should be updated or modified any time you want to: + +- Add in new notifiers or receivers +- Change the alerts that should be sent to specific notifiers or receivers +- Change the group of alerts that are sent out + +By default, you can either choose to supply an existing Alertmanager Config Secret (i.e. any Secret in the `cattle-monitoring-system` namespace) or allow Rancher Monitoring to deploy a default Alertmanager Config Secret onto your cluster. + +By default, the Alertmanager Config Secret created by Rancher will never be modified or deleted on an upgrade or uninstall of the `rancher-monitoring` chart. This restriction prevents users from losing or overwriting their alerting configuration when executing operations on the chart. 
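+As a rough sketch, the Alertmanager Config Secret stores the Alertmanager configuration file under the `alertmanager.yaml` key. The Secret name below assumes the default Alertmanager deployed by `rancher-monitoring`; confirm the name that your Alertmanager actually references, and note that the receiver shown is only illustrative:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: alertmanager-rancher-monitoring-alertmanager
+  namespace: cattle-monitoring-system
+stringData:
+  alertmanager.yaml: |-
+    global:
+      resolve_timeout: 5m
+    route:
+      receiver: "null"
+      group_by: ["job"]
+      group_wait: 30s
+      group_interval: 5m
+      repeat_interval: 4h
+    receivers:
+      - name: "null"
+```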
+ +For more information on what fields can be specified in the Alertmanager Config Secret, please look at the [Prometheus Alertmanager docs.](https://prometheus.io/docs/alerting/latest/alertmanager/) + +The full spec for the Alertmanager configuration file and what it takes in can be found [here.](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md new file mode 100644 index 0000000000..da04c25932 --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -0,0 +1,20 @@ +--- +title: Prometheus Configuration +weight: 1 +--- + +It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +::: + +# About the Prometheus Custom Resource + +The Prometheus CR defines a desired Prometheus deployment. The Prometheus Operator observes the Prometheus CR. When the CR changes, the Prometheus Operator creates `prometheus-rancher-monitoring-prometheus`, a Prometheus deployment based on the CR configuration. + +The Prometheus CR specifies details such as rules and what Alertmanagers are connected to Prometheus. Rancher builds this CR for you. + +Monitoring V2 only supports one Prometheus per cluster. However, you might want to edit the Prometheus CR if you want to limit monitoring to certain namespaces. \ No newline at end of file diff --git a/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md new file mode 100644 index 0000000000..1683f3e4de --- /dev/null +++ b/docs/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -0,0 +1,81 @@ +--- +title: Configuring PrometheusRules +weight: 3 +--- + +A PrometheusRule defines a group of Prometheus alerting and/or recording rules. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +::: + +### Creating PrometheusRules in the Rancher UI + +:::note Prerequisite: + +The monitoring application needs to be installed. + +::: + +To create rule groups in the Rancher UI, + +1. Go to the cluster where you want to create rule groups. Click **Monitoring** and click **Prometheus Rules**. +1. Click **Create**. +1. Enter a **Group Name**. +1. Configure the rules. In Rancher's UI, we expect a rule group to contain either alert rules or recording rules, but not both. For help filling out the forms, refer to the configuration options below. +1. Click **Create**. + +**Result:** Alerts can be configured to send notifications to the receiver(s). 
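+For reference, a rule group created through the form is stored as a PrometheusRule resource in the cluster. A minimal sketch containing a single alerting rule (the resource name, namespace, and PromQL expression are only examples):
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: example-rules
+  namespace: cattle-monitoring-system
+spec:
+  groups:
+    - name: example.rules
+      rules:
+        - alert: PodRestartingTooOften
+          expr: increase(kube_pod_container_status_restarts_total[10m]) > 3
+          for: 10m
+          labels:
+            severity: warning
+          annotations:
+            summary: "Pod {{ $labels.pod }} restarted more than 3 times in the last 10 minutes"
+```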
+ +### About the PrometheusRule Custom Resource + +When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive this Alert. For example, an Alert with the label `team: front-end` will be sent to all Routes that match on that label. + +Prometheus rule files are held in PrometheusRule custom resources. A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: + +- The name of the new alert or record +- A PromQL expression for the new alert or record +- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) +- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. + +For more information on what fields can be specified, please look at the [Prometheus Operator spec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec) + +Use the label selector field `ruleSelector` in the Prometheus object to define the rule files that you want to be mounted into Prometheus. + +For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) + +# Configuration + +### Rule Group + +| Field | Description | +|-------|----------------| +| Group Name | The name of the group. Must be unique within a rules file. | +| Override Group Interval | Duration in seconds for how often rules in the group are evaluated. | + + +### Alerting Rules + +[Alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) allow you to define alert conditions based on PromQL (Prometheus Query Language) expressions and to send notifications about firing alerts to an external service. + +| Field | Description | +|-------|----------------| +| Alert Name | The name of the alert. Must be a valid label value. | +| Wait To Fire For | Duration in seconds. Alerts are considered firing once they have been returned for this long. Alerts which have not yet fired for long enough are considered pending. | +| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and all resultant time series will become pending/firing alerts. For more information, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md) | +| Labels | Labels to add or overwrite for each alert. | +| Severity | When enabled, labels are attached to the alert or record that identify it by the severity level. 
| +| Severity Label Value | Critical, warning, or none | +| Annotations | Annotations are a set of informational labels that can be used to store longer additional information, such as alert descriptions or runbook links. A [runbook](https://en.wikipedia.org/wiki/Runbook) is a set of documentation about how to handle alerts. The annotation values can be [templated.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#templating) | + +### Recording Rules + +[Recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) allow you to precompute frequently needed or computationally expensive PromQL (Prometheus Query Language) expressions and save their result as a new set of time series. + +| Field | Description | +|-------|----------------| +| Time Series Name | The name of the time series to output to. Must be a valid metric name. | +| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and the result will be recorded as a new set of time series with the metric name as given by 'record'. For more information about expressions, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md) | +| Labels | Labels to add or overwrite before storing the result. | diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md new file mode 100644 index 0000000000..261a1a86f6 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md @@ -0,0 +1,71 @@ +--- +title: Backing up Rancher Installed with Docker +shortTitle: Backups +weight: 3 +--- + +After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. + +## Before You Start + +During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher +``` + +In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. 
| +| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | The name of your Rancher container. | +| `<RANCHER_VERSION>` | `v2.0.5` | The version of Rancher that you're creating a backup for. | +| `<DATE>` | `9-27-18` | The date that the data container or backup was created. | +
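+For example, plugging the sample values from the table into the backup command shown above gives the following (your own tag, container name, and date will differ):
+
+```
+docker run --volumes-from rancher-data-9-27-18 -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz /var/lib/rancher
+```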
+ +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. + +## Creating a Backup + +This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. + + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data- rancher/rancher: + ``` + +1. From the data container that you just created (rancher-data-<DATE>), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). Use the following command, replacing each placeholder: + + ``` + docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** A stream of commands runs on the screen. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. + +1. Restart Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker start + ``` + +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs](restore-docker-installed-rancher.md) if you need to restore backup data. diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md new file mode 100644 index 0000000000..94774115f1 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -0,0 +1,166 @@ +--- +title: Backing up a Cluster +weight: 2045 +--- + +In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. + +Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. + +Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
+ +This section covers the following topics: + +- [How snapshots work](#how-snapshots-work) +- [Configuring recurring snapshots](#configuring-recurring-snapshots) +- [One-time snapshots](#one-time-snapshots) +- [Snapshot backup targets](#snapshot-backup-targets) + - [Local backup target](#local-backup-target) + - [S3 backup target](#s3-backup-target) + - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) + - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) +- [Viewing available snapshots](#viewing-available-snapshots) +- [Safe timestamps](#safe-timestamps) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +# How Snapshots Work + +### Snapshot Components + +When Rancher creates a snapshot, it includes three components: + +- The cluster data in etcd +- The Kubernetes version +- The cluster configuration in the form of the `cluster.yml` + +Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. + +The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. + +It's always recommended to take a new snapshot before any upgrades. + +### Generating the Snapshot from etcd Nodes + +For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. + +The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. + +In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. + +### Snapshot Naming Conventions + +The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. + +When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: + +- `m` stands for manual +- `r` stands for recurring +- `l` stands for local +- `s` stands for S3 + +Some example snapshot names are: + +- c-9dmxz-rl-8b2cx +- c-9dmxz-ml-kr56m +- c-9dmxz-ms-t6bjb +- c-9dmxz-rs-8gxc8 + +### How Restoring from a Snapshot Works + +On restore, the following process is used: + +1. The snapshot is retrieved from S3, if S3 is configured. +2. The snapshot is unzipped (if zipped). +3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. +4. 
The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. +5. The cluster is restored and post-restore actions will be done in the cluster. + +# Configuring Recurring Snapshots + +Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. + +By default, [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. + +During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. + +In the **Advanced Cluster Options** section, there are several options available to configure: + +| Option | Description | Default Value| +| --- | ---| --- | +| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. Options are either local or in S3 | local| +|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| +| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | +| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | + +# One-Time Snapshots + +In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, navigate to the cluster where you want to take a one-time snapshot. +1. Click **⋮ > Take Snapshot**. + +**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. + +# Snapshot Backup Targets + +Rancher supports two different backup targets: + +* [Local Target](#local-backup-target) +* [S3 Target](#s3-backup-target) + +### Local Backup Target + +By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. + +### S3 Backup Target + +The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. 
+ +| Option | Description | Required| +|---|---|---| +|S3 Bucket Name| S3 bucket name where backups will be stored| *| +|S3 Region|S3 region for the backup bucket| | +|S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | +|S3 Access Key|S3 access key with permission to access the backup bucket|*| +|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| +| Custom CA Certificate | A custom certificate used to access private S3 backends || + +### Using a custom CA certificate for S3 + +The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. + +### IAM Support for Storing Snapshots in S3 + +The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: + + - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. + - The cluster etcd nodes must have network access to the specified S3 endpoint. + - The Rancher Server worker node(s) must have an instance role that has read/write to the designated backup bucket. + - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. + + To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +# Viewing Available Snapshots + +The list of all available snapshots for the cluster is available in the Rancher UI. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the **Clusters** page, go to the cluster where you want to view the snapshots and click its name. +1. Click the **Snapshots** tab to view the list of saved snapshots. These snapshots include a timestamp of when they were created. + +# Safe Timestamps + +Snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. + +The option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. + +This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. + +# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 + +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restore-rancher-launched-kubernetes-clusters-from-backup.md). 
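+For reference, the recurring snapshot and S3 options described above map to the etcd `backup_config` section of the cluster's RKE configuration when the cluster is edited as YAML. A sketch with placeholder values (adjust the interval, retention, and S3 details to your environment):
+
+```yaml
+services:
+  etcd:
+    backup_config:
+      enabled: true
+      interval_hours: 12
+      retention: 6
+      safe_timestamp: true
+      s3backupconfig:
+        access_key: "<S3_ACCESS_KEY>"
+        secret_key: "<S3_SECRET_KEY>"
+        bucket_name: "<S3_BUCKET_NAME>"
+        region: "<S3_REGION>"
+        endpoint: "s3.amazonaws.com"
+```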
diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md new file mode 100644 index 0000000000..2142323dd1 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md @@ -0,0 +1,93 @@ +--- +title: Backing up Rancher +weight: 1 +--- + +In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer the instructions for [single node backups](back-up-docker-installed-rancher.md) + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. + +Note that the rancher-backup operator version 2.x.x is for Rancher v2.6.x. + +::: caution + +When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. The Kubernetes version should also be considered when restoring a backup, since the supported apiVersion in the cluster and in the backup file could be different. + +::: + +### Prerequisites + +The Rancher version must be v2.5.0 and up. + +Refer [here](migrate-rancher-to-new-cluster.md#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. + +### 1. Install the Rancher Backups operator + +The backup storage location is an operator-level setting, so it needs to be configured when the Rancher Backups application is installed or upgraded. + +Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. The `local` cluster runs the Rancher server. +1. Click **Apps & Marketplace > Charts**. +1. Click **Rancher Backups**. +1. Click **Install**. +1. Configure the default storage location. For help, refer to the [storage configuration section.](../../../reference-guides/backup-restore-configuration/storage-configuration.md) +1. Click **Install**. + +:::note + +There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here](../deploy-apps-across-clusters/fleet.md#troubleshooting) for a workaround. + +::: + +### 2. Perform a Backup + +To perform a backup, a custom resource of type Backup must be created. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. +1. In the left navigation bar, click **Rancher Backups > Backups**. +1. Click **Create**. +1. Create the Backup with the form, or with the YAML editor. +1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/backup-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md#backup) +1. For using the YAML editor, we can click **Create > Create from YAML**. Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3. 
The app uses the `credentialSecretNamespace` value to determine where to look for the S3 backup secret: + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Backup + metadata: + name: s3-recurring-backup + spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 10 + ``` + + :::note + + When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` + + ::: + + For help configuring the Backup, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/backup-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md#backup) + + :::caution + + The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. + + ::: + +1. Click **Create**. + +**Result:** The backup file is created in the storage location configured in the Backup custom resource. The name of this file is used when performing a restore. + diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md new file mode 100644 index 0000000000..b3ac971ca9 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md @@ -0,0 +1,141 @@ +--- +title: Migrating Rancher to a New Cluster +weight: 3 +--- + +If you are migrating Rancher to a new Kubernetes cluster, you don't need to install Rancher on the new cluster first. If Rancher is restored to a new cluster with Rancher already installed, it can cause problems. + +### Prerequisites + +These instructions assume you have [created a backup](back-up-rancher.md) and you have already installed a new Kubernetes cluster where Rancher will be deployed. + +:::caution + +It is required to use the same hostname that was set as the server URL in the first cluster. If not done, downstream clusters will show as unavailable in the cluster management page of the UI, and you won't be able to click inside the cluster or on the cluster's Explore button. + +::: + +Rancher version must be v2.5.0 and up + +Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes clusters such as Amazon EKS clusters. For help installing Kubernetes, refer to the documentation of the Kubernetes distribution. One of Rancher's Kubernetes distributions may also be used: + +- [RKE Kubernetes installation docs](https://rancher.com/docs/rke/latest/en/installation/) +- [K3s Kubernetes installation docs](https://rancher.com/docs/k3s/latest/en/installation/) + +### 1. Install the rancher-backup Helm chart +Install version 2.x.x of the rancher-backup chart. 
The following assumes a connected environment with access to DockerHub: +``` +helm repo add rancher-charts https://charts.rancher.io +helm repo update +helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace --version $CHART_VERSION +helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system --version $CHART_VERSION +``` +
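+If you are unsure which chart version to use for `$CHART_VERSION`, you can list the versions published in the repository first, for example:
+
+```
+helm search repo rancher-charts/rancher-backup --versions
+```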
+For an **air-gapped environment**, use the option below to pull the `backup-restore-operator` image from your private registry when installing the rancher-backup-crd helm chart. +``` +--set image.repository $REGISTRY/rancher/backup-restore-operator +``` + +### 2. Restore from backup using a Restore custom resource + +:::note Important: + +Kubernetes v1.22, available as an experimental feature of v2.6.3, does not support restoring from backup files containing CRDs with the apiVersion `apiextensions.k8s.io/v1beta1`. In v1.22, the default `resourceSet` in the rancher-backup app is updated to collect only CRDs that use `apiextensions.k8s.io/v1`. There are currently two ways to work around this issue: + +1. Update the default `resourceSet` to collect the CRDs with the apiVersion v1. +1. Update the default `resourceSet` and the client to use the new APIs internally, with `apiextensions.k8s.io/v1` as the replacement. + +- Note that when making or restoring backups for v1.22, the Rancher version and the local cluster's Kubernetes version should be the same. The Kubernetes version should be considered when restoring a backup since the supported apiVersion in the cluster and in the backup file could be different. + +::: + +If you are using an S3 store as the backup source and need to use your S3 credentials for restore, create a secret in this cluster using your S3 credentials. The Secret data must have two keys - `accessKey` and `secretKey` - that contain the S3 credentials. + +:::caution + +The values `accessKey` and `secretKey` in the example below must be base64-encoded first when creating the object directly. If not encoded first, the pasted values will cause errors when you are attempting to backup or restore. + +::: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: s3-creds +type: Opaque +data: + accessKey: + secretKey: +``` + +This secret can be created in any namespace; with the above example, it will get created in the default namespace. + +In the Restore custom resource, `prune` must be set to false. + +Create a Restore custom resource like the example below: + +```yaml +# migrationResource.yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-migration +spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + prune: false + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: backup-test + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com +``` + +:::note Important: + +The field `encryptionConfigSecretName` must be set only if your backup was created with encryption enabled. Provide the name of the Secret containing the encryption config file. If you only have the encryption config file, but don't have a secret created with it in this cluster, use the following steps to create the secret: + +::: + +1. The encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. So save your `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: +``` +kubectl create secret generic encryptionconfig \ + --from-file=./encryption-provider-config.yaml \ + -n cattle-resources-system +``` + +1. 
Apply the manifest, and watch for the Restore resources status: + + Apply the resource: +``` +kubectl apply -f migrationResource.yaml +``` + + Watch the Restore status: +``` +kubectl get restore +``` + + Watch the restoration logs: +``` +kubectl logs -n cattle-resources-system --tail 100 -f rancher-backup-xxx-xxx +``` + +Once the Restore resource has the status `Completed`, you can continue the Rancher installation. + +### 3. Install cert-manager + +Follow the steps to [install cert-manager](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#5-install-cert-manager) in the documentation about installing cert-manager on Kubernetes. + +### 4. Bring up Rancher with Helm + +Use the same version of Helm to install Rancher, that was used on the first cluster. + +``` +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname= \ +``` diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md new file mode 100644 index 0000000000..d5ca32d6af --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md @@ -0,0 +1,74 @@ +--- +title: Restoring Backups—Docker Installs +shortTitle: Restores +weight: 3 +--- + +If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. + +## Before You Start + +During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from -v $PWD:/backup \ +busybox sh -c "rm /var/lib/rancher/* -rf && \ +tar pzxvf /backup/rancher-data-backup--" +``` + +In this command, `` and `-` are environment variables for your Rancher deployment. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version number for your Rancher backup. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
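+For example, with the sample values above, the restore command used later in this procedure would look like the following (your container name, version, and date will differ):
+
+```
+docker run --volumes-from festive_mestorf -v $PWD:/backup \
+busybox sh -c "rm /var/lib/rancher/* -rf && \
+tar pzxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"
+```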
+ +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Restoring Backups + +Using a [backup](back-up-docker-installed-rancher.md) that you created earlier, restore Rancher to its last known healthy state. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker stop + ``` +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs](back-up-docker-installed-rancher.md) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Creating Backups—Docker Installs](back-up-docker-installed-rancher.md), it will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. + + :::danger + + This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. + + ::: + + ``` + docker run --volumes-from -v $PWD:/backup \ + busybox sh -c "rm /var/lib/rancher/* -rf && \ + tar pzxvf /backup/rancher-data-backup--.tar.gz" + ``` + + **Step Result:** A series of commands should run. + +1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. + + ``` + docker start + ``` + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md new file mode 100644 index 0000000000..dc28c051d1 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -0,0 +1,85 @@ +--- +title: Restoring a Cluster from Backup +weight: 2050 +--- + +etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. + +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). + +Clusters can also be restored to a prior Kubernetes version and cluster configuration. 
+ +This section covers the following topics: + +- [Viewing Available Snapshots](#viewing-available-snapshots) +- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) +- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +## Viewing Available Snapshots + +The list of all available snapshots for the cluster is available. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the **Clusters** page, go to the cluster where you want to view the snapshots and click the name of the cluster. +1. Click the **Snapshots** tab. The listed snapshots include a timestamp of when they were created. + +## Restoring a Cluster from a Snapshot + +If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. + +Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. + +When rolling back to a prior Kubernetes version, the [upgrade strategy options](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. + +:::note Prerequisite: + +To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots) + +::: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. In the **Clusters** page, go to the cluster where you want to view the snapshots and click the name of the cluster. +1. Click the **Snapshots** tab to view the list of saved snapshots. +1. Go to the snapshot you want to restore and click **⋮ > Restore Snapshot**. +1. Click **Restore**. + +**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. + +## Recovering etcd without a Snapshot + +If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: + +1. Keep only one etcd node in the cluster by removing all other etcd nodes. + +2. On the single remaining etcd node, run the following command: + + ``` + $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd + ``` + + This command outputs the running command for etcd, save this command to use later. + +3. 
Stop the etcd container that you launched in the previous step and rename it to `etcd-old`. + + ``` + $ docker stop etcd + $ docker rename etcd etcd-old + ``` + +4. Take the saved command from Step 2 and revise it: + + - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. + - Add `--force-new-cluster` to the end of the command. + +5. Run the revised command. + +6. After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster](../../../pages-for-subheaders/use-existing-nodes.md) and you want to reuse an old node, you are required to [clean up the nodes](../../advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before attempting to add them back into a cluster. + +# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 + +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restore-rancher-launched-kubernetes-clusters-from-backup.md). diff --git a/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md new file mode 100644 index 0000000000..91c6d76549 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md @@ -0,0 +1,93 @@ +--- +title: Restoring Rancher +weight: 2 +--- + +This page outlines how to perform a restore with Rancher. + +:::note Important: + +* Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.](migrate-rancher-to-new-cluster.md) +* While restoring rancher on the same setup, the operator will scale down the rancher deployment when restore starts, and it will scale back up the deployment once restore completes. So Rancher will be unavailable during the restore. +* If you need to restore Rancher to a previous version after an upgrade, see the [rollback documentation.](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md) + +::: + +### Additional Steps for Rollbacks with Rancher v2.6.4+ + +In Rancher v2.6.4, the cluster-api module has been upgraded from v0.4.4 to v1.0.2 in which the apiVersion of CAPI CRDs are upgraded from `cluster.x-k8s.io/v1alpha4` to `cluster.x-k8s.io/v1beta1`. This has the effect of causing rollbacks from Rancher v2.6.4 to any previous version of Rancher v2.6.x to fail because the previous version the CRDs needed to roll back are no longer available in v1beta1. + +To avoid this, the Rancher resource cleanup scripts should be run **before** the restore or rollback is attempted. Specifically, two scripts have been created to assist you: one to clean up the cluster (`cleanup.sh`), and one to check for any Rancher-related resources in the cluster (`verify.sh`). 
Details on the cleanup script can be found in the [rancherlabs/support-tools repo](https://github.com/rancherlabs/support-tools/tree/master/rancher-cleanup). + +:::caution + +Rancher will be down as the `cleanup` script runs as it deletes the resources created by rancher. + +::: + +The additional preparations: + +1. Follow these [instructions](https://github.com/rancherlabs/support-tools/blob/master/rancher-cleanup/README.md) to run the scripts. +1. Follow these [instructions](https://rancher.com/docs/rancher/v2.6/en/backups/migrating-rancher/) to install the rancher-backup Helm chart on the existing cluster and restore the previous state. + 1. Omit Step 3. + 1. When Step 4 is reached, install the required Rancher v2.6.x version on the local cluster you intend to roll back to. + +### Create the Restore Custom Resource + +A restore is performed by creating a Restore custom resource. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. The `local` cluster runs the Rancher server. +1. In the left navigation bar, click **Rancher Backups > Restores**. +1. Click **Create**. +1. Create the Restore with the form, or with YAML. For creating the Restore resource using form, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) +1. For using the YAML editor, we can click **Create > Create from YAML**. Enter the Restore YAML. + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Restore + metadata: + name: restore-migration + spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + ``` + + For help configuring the Restore, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) + +1. Click **Create**. + +**Result:** The rancher-operator scales down the rancher deployment during restore, and scales it back up once the restore completes. The resources are restored in this order: + +1. Custom Resource Definitions (CRDs) +2. Cluster-scoped resources +3. Namespaced resources + +### Logs + +To check how the restore is progressing, you can check the logs of the operator. Run this command to follow the logs: + +``` +kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f +``` + +### Cleanup + +If you created the restore resource with kubectl, remove the resource to prevent a naming conflict with future restores. + +### Known Issues +In some cases, after restoring the backup, Rancher logs will show errors similar to the following: +``` +2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing +``` +This happens because one of the resources that was just restored has finalizers, but the related resources have been deleted so the handler cannot find it. 
+ +To eliminate the errors, we need to find and delete the resource that causes the error. See more information [here](https://github.com/rancher/rancher/issues/35050#issuecomment-937968556) diff --git a/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md new file mode 100644 index 0000000000..770773157d --- /dev/null +++ b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md @@ -0,0 +1,76 @@ +--- +title: Fleet - GitOps at Scale +weight: 1 +--- + +Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. + +Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. + +- [Architecture](#architecture) +- [Accessing Fleet in the Rancher UI](#accessing-fleet-in-the-rancher-ui) +- [Windows Support](#windows-support) +- [GitHub Repository](#github-repository) +- [Using Fleet Behind a Proxy](#using-fleet-behind-a-proxy) +- [Helm Chart Dependencies](#helm-chart-dependencies) +- [Troubleshooting](#troubleshooting) +- [Documentation](#documentation) + +# Architecture + +For information about how Fleet works, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md) + +# Accessing Fleet in the Rancher UI + +Fleet comes preinstalled in Rancher and is managed by the **Continous Delivery** option in the Rancher UI. For additional information on Continuous Delivery and other Fleet troubleshooting tips, refer [here](https://fleet.rancher.io/troubleshooting/). + +Users can leverage continuous delivery to deploy their applications to the Kubernetes clusters in the git repository without any manual operation by following **gitops** practice. + +Follow the steps below to access Continuous Delivery in the Rancher UI: + +1. Click **☰ > Continuous Delivery**. + +1. Select your namespace at the top of the menu, noting the following: + - By default,`fleet-default` is selected which includes all downstream clusters that are registered through Rancher. + - You may switch to `fleet-local`, which only contains the `local` cluster, or you may create your own workspace to which you may assign and move clusters. + - You can then manage clusters by clicking on **Clusters** on the left navigation bar. + +1. Click on **Gitrepos** on the left navigation bar to deploy the gitrepo into your clusters in the current workspace. + +1. Select your [git repository](https://fleet.rancher.io/gitrepo-add/) and [target clusters/cluster group](https://fleet.rancher.io/gitrepo-structure/). You can also create the cluster group in the UI by clicking on **Cluster Groups** from the left navigation bar. + +1. Once the gitrepo is deployed, you can monitor the application through the Rancher UI. 
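+As an alternative to the UI steps above, a gitrepo can also be registered declaratively as a Fleet `GitRepo` resource. A minimal sketch that deploys the `simple` example from the public fleet-examples repository to every cluster in the current workspace (the repository, path, and target selector are illustrative):
+
+```yaml
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+  name: simple-example
+  namespace: fleet-default
+spec:
+  repo: https://github.com/rancher/fleet-examples
+  branch: master
+  paths:
+    - simple
+  targets:
+    - clusterSelector: {}
+```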
+ +# Windows Support + +For details on support for clusters with Windows nodes, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md) + + +# GitHub Repository + +The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) + + +# Using Fleet Behind a Proxy + +For details on using Fleet behind a proxy, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md) + +# Helm Chart Dependencies + +In order for Helm charts with dependencies to deploy successfully, you must run a manual command (as listed below), as it is up to the user to fulfill the dependency list. If you do not do this and proceed to clone your repository and run `helm install`, your installation will fail because the dependencies will be missing. + +The Helm chart in the git repository must include its dependencies in the charts subdirectory. You must either manually run `helm dependencies update $chart` OR run `helm dependencies build $chart` locally, then commit the complete charts directory to your git repository. Note that you will update your commands with the applicable parameters. + +# Troubleshooting + +--- +* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backups-operator). We will update the community once a permanent solution is in place. + +* **Temporary Workaround:**
+By default, user-defined secrets are not backed up in Fleet, so they must be recreated when performing a disaster recovery restore or a migration of Rancher into a fresh cluster. To modify the `resourceSet` to include extra resources you want to back up, refer to the documentation [here](https://github.com/rancher/backup-restore-operator#user-flow).
+
+---
+
+# Documentation
+
+The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/)
diff --git a/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
new file mode 100644
index 0000000000..3911d56a63
--- /dev/null
+++ b/docs/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md
@@ -0,0 +1,193 @@
+---
+title: Multi-cluster Apps
+weight: 2
+---
+
+> As of Rancher v2.5, multi-cluster apps are deprecated. We now recommend using [Fleet](fleet.md) for deploying apps across clusters.
+
+Typically, most applications are deployed on a single Kubernetes cluster, but at times you may want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_ is an application deployed across multiple clusters using a Helm chart. Deploying the same application this way avoids repeating the same steps on each cluster, which could introduce user error during application configuration. You can keep the same configuration across all projects and clusters, or override it for specific target projects. Because a multi-cluster application is treated as a single application, it is easy to manage and maintain.
+
+Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications.
+
+After creating a multi-cluster application, you can program a global DNS entry to make it easier to access the application.
+ +- [Prerequisites](#prerequisites) +- [Launching a multi-cluster app](#launching-a-multi-cluster-app) +- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) + - [Targets](#targets) + - [Upgrades](#upgrades) + - [Roles](#roles) +- [Application configuration options](#application-configuration-options) + - [Using a questions.yml file](#using-a-questions-yml-file) + - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) + - [Members](#members) + - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) +- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) +- [Multi-cluster application management](#multi-cluster-application-management) +- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) + +# Prerequisites + +### Permissions + +To create a multi-cluster app in Rancher, you must have at least one of the following permissions: + +- A [project-member role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the clusters(s) that include the target project(s) + +### Enable Legacy Features + +Because multi-cluster apps were deprecated and replaced with Fleet in Rancher v2.5, you will need to enable multi-cluster apps with a feature flag. + +1. In the upper left corner, click **☰ > Global Settings**. +1. Click **Feature Flags**. +1. Go to the `legacy` feature flag and click **Activate**. + +# Launching a Multi-Cluster App + +1. In the upper left corner, click **☰ > Multi-cluster Apps**. +1. Click **Launch**. +1. Find the application that you want to launch. +1. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. +1. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. +1. Select a **Template Version**. +1. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). +1. Select the **Members** who can [interact with the multi-cluster application](#members). +1. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. +1. Review the files in the **Preview** section. When you're satisfied, click **Launch**. + +**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: + +# Multi-cluster App Configuration Options + +Rancher has divided the configuration option for the multi-cluster application into several sections. + +### Targets + +In the **Targets** section, select the projects that you want the application to be deployed in. 
The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. + +### Upgrades + +In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. + +* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. + +* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. + +### Roles + +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications](../../../pages-for-subheaders/helm-charts-in-rancher.md), that specific user's permissions are used for creation of all workloads/resources that is required by the app. + +For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. + +Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. + +- **Project** - This is the equivalent of a [project member](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), a [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or a [project owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles), then the user is considered to have the appropriate level of permissions. + +- **Cluster** - This is the equivalent of a [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. 
While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), then the user is considered to have the appropriate level of permissions. + +When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. + +:::note + +There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. + +::: + +# Application Configuration Options + +For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. + +:::note Example + +When entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). + +::: + +### Using a questions.yml file + +If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. + +### Key Value Pairs for Native Helm Charts + +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a custom Helm chart repository, answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. + +### Members + +By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. + +1. Find the user that you want to add by typing in the member's name in the **Member** search box. + +2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. + + - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. 
+ + - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). Only trusted users should be provided with this access type. + + - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. + + :::caution + + Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. + + ::: + +### Overriding Application Configuration Options for Specific Projects + +The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. + +1. In the **Answer Overrides** section, click **Add Override**. + +2. For each override, you can select the following: + + - **Scope**: Select which target projects you want to override the answer in the configuration option. + + - **Question**: Select which question you want to override. + + - **Answer**: Enter the answer that you want to be used instead. + +# Upgrading Multi-Cluster App Roles and Projects + +- **Changing Roles on an existing Multi-Cluster app** +The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. + +- **Adding/Removing target projects** +1. The creator and any users added with access-type "owner" to a multi-cluster app, can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on multi-cluster app, in the new projects they want to add. The roles checks are again relaxed for global admins, cluster-owners and project-owners. +2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from targets list. + + +# Multi-Cluster Application Management + +One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. + +:::note Prerequisite: + +The `legacy` feature flag needs to be enabled. + +::: + +1. In the upper left corner, click **☰ > Multi-cluster Apps**. + +2. 
Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options: + + * **Clone**: Creates another multi-cluster application with the same configuration. By using this option, you can easily duplicate a multi-cluster application. + * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members). + * **Rollback**: Rollback your application to a specific version. If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the targets(s) affected by the upgrade issue. + +# Deleting a Multi-Cluster Application + +:::note Prerequisite: + +The `legacy` feature flag needs to be enabled. + +::: + +1. In the upper left corner, click **☰ > Multi-cluster Apps**. + +2. Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. + + :::note + + The applications in the target projects, that are created for a multi-cluster application, cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. + + ::: \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md new file mode 100644 index 0000000000..a5c38f4206 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps.md @@ -0,0 +1,140 @@ +--- +title: Creating Apps +weight: 400 +--- + +Rancher's App Marketplace is based on Helm Repositories and Helm Charts. You can add HTTP based standard Helm Repositories as well as any Git Repository which contains charts. + +:::tip + +For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. + +::: + +- [Chart types](#chart-types) + - [Helm charts](#helm-charts) + - [Rancher charts](#rancher-charts) +- [Chart directory structure](#chart-directory-structure) +- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) + - [questions.yml](#questions-yml) + - [Min/Max Rancher versions](#min-max-rancher-versions) + - [Question variable reference](#question-variable-reference) +- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) + +# Chart Types + +Rancher supports two different types of charts: Helm charts and Rancher charts. + +### Helm Charts + +Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you' can provide the chart's parameter values in a YAML editor. + +### Rancher Charts + +Rancher charts are native helm charts with two files that enhance user experience: `app-readme.md` and `questions.yaml`. 
Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) + +Rancher charts add simplified chart descriptions and configuration forms to make the application deployment easy. Rancher users do not need to read through the entire list of Helm variables to understand how to launch an application. + +# Chart Directory Structure + +You can provide Helm Charts in a standard, HTTP based Helm Repository. For more information see the [Chart Repository Guide](https://helm.sh/docs/topics/chart_repository) in the official Helm documentation. + +Alternatively you can organize your charts in a Git Repository and directly add this to the App Marketplace. + +The following table demonstrates the directory structure for a Git repository. The `charts` directory is the top level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. + +``` +/ + │ + ├── charts/ + │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name + │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI + │ │ │ ├── Chart.yaml # Required Helm chart information file. + │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* + │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. + │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. + │ │ │ ├── values.yml # Default configuration values for the chart. + │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. +``` + +# Additional Files for Rancher Charts + +Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. + +- `app-readme.md` + + A file that provides descriptive text in the chart's UI header. + +- `questions.yml` + + A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using a values YAML config, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). + + +
Rancher Chart with questions.yml (top) vs. Helm Chart without (bottom)
+ + ![questions.yml](/img/rancher-app-2.6.png) + ![values.yaml](/img/helm-app-2.6.png) + + +### Chart.yaml annotations + +Rancher supports additional annotations that you can add to the `Chart.yaml` file. These annotations allow you to define application dependencies or configure additional UI defaults: + +| Annotation | Description | Example | +| --------------------------------- | ----------- | ------- | +| catalog.cattle.io/auto-install | If set, will install the specified chart in the specified version before installing this chart | other-chart-name=1.0.0 | +| catalog.cattle.io/display-name | A display name that should be displayed in the App Marketplace instead of the chart name | Display Name of Chart | +| catalog.cattle.io/namespace | A fixed namespace where the chart should be deployed in. If set, this can't be changed by the user | fixed-namespace | +| catalog.cattle.io/release-name | A fixed release name for the Helm installation. If set, this can't be changed by the user | fixed-release-name | +| catalog.cattle.io/requests-cpu | Total amount of CPU that should be unreserverd in the cluster. If less CPU is available, a warning will be shown | 2000m | +| catalog.cattle.io/requests-memory | Total amount of memory that should be unreserverd in the cluster. If less memory is available, a warning will be shown | 2Gi | +| catalog.cattle.io/os | Restricts the OS where this chart can be installed. Possible values: `linux`, `windows`. Default: no restriction | linux | + +### questions.yml + +Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. + +### Min/Max Rancher versions + +For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. + +:::note + +Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. + +::: + +``` +rancher_min_version: 2.3.0 +rancher_max_version: 2.3.99 +``` + +### Question Variable Reference + +This reference contains variables that you can use in `questions.yml` nested under `questions:`. + +| Variable | Type | Required | Description | +| ------------- | ------------- | --- |------------- | +| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | +| label | string | true | Define the UI label. | +| description | string | false | Specify the description of the variable.| +| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| +| required | bool | false | Define if the variable is required or not (true \| false)| +| default | string | false | Specify the default value. | +| group | string | false | Group questions by input value. | +| min_length | int | false | Min character length.| +| max_length | int | false | Max character length.| +| min | int | false | Min integer length. | +| max | int | false | Max integer length. | +| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
- "ClusterIP"
- "NodePort"
- "LoadBalancer"| +| valid_chars | string | false | Regular expression for input chars validation. | +| invalid_chars | string | false | Regular expression for invalid input chars validation.| +| subquestions | []subquestion | false| Add an array of subquestions.| +| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | +| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| + +:::note + +`subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. + +::: diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md new file mode 100644 index 0000000000..7a89f169b8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md @@ -0,0 +1,183 @@ +--- +title: Setting up Amazon ELB Network Load Balancer +weight: 5 +--- + +This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. + +These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. + +This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. + +Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. + +# Setting up the Load Balancer + +Configuring an Amazon NLB is a multistage process: + +1. [Create Target Groups](#1-create-target-groups) +2. [Register Targets](#2-register-targets) +3. [Create Your NLB](#3-create-your-nlb) +4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) + +# Requirements + +These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. + +# 1. Create Target Groups + +Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. + +Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. + +1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. 
+1. Click **Create target group** to create the first target group, regarding TCP port 443. + +:::note + +Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) + +::: + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. + +| Option | Setting | +|-------------------|-------------------| +| Target Group Name | `rancher-tcp-443` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `443` | +| VPC | Choose your VPC | + +Health check settings: + +| Option | Setting | +|---------------------|-----------------| +| Protocol | TCP | +| Port | `override`,`80` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. + +| Option | Setting | +|-------------------|------------------| +| Target Group Name | `rancher-tcp-80` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `80` | +| VPC | Choose your VPC | + + +Health check settings: + +| Option |Setting | +|---------------------|----------------| +| Protocol | TCP | +| Port | `traffic port` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +# 2. Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +![](/img/ha/nlb/edit-targetgroup-443.png) + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
+ +![](/img/ha/nlb/add-targets-targetgroup-443.png) + +*** +**Screenshot Added targets to target group TCP port 443**
+ +![](/img/ha/nlb/added-targets-targetgroup-443.png) + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +# 3. Create Your NLB + +Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. Then complete each form. + +- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) +- [Step 2: Configure Routing](#step-2-configure-routing) +- [Step 3: Register Targets](#step-3-register-targets) +- [Step 4: Review](#step-4-review) + +### Step 1: Configure Load Balancer + +Set the following fields in the form: + +- **Name:** `rancher` +- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. +- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. +- **Availability Zones:** Select Your **VPC** and **Availability Zones**. + +### Step 2: Configure Routing + +1. From the **Target Group** drop-down, choose **Existing target group**. +1. From the **Name** drop-down, choose `rancher-tcp-443`. +1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +### Step 3: Register Targets + +Since you registered your targets earlier, all you have to do is click **Next: Review**. + +### Step 4: Review + +Look over the load balancer details and click **Create** when you're satisfied. + +After AWS creates the NLB, click **Close**. + +# 4. Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to..**. + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. + +# Health Check Paths for NGINX Ingress and Traefik Ingresses + +K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. + +For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. + +- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. +- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. 
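+
+A quick way to verify these endpoints from a workstation, before relying on them for load balancer health checks, is with a few curl probes (the node IP and Rancher hostname below are placeholders):
+
+```
+# Traefik (K3s): the Ingress answers /ping on port 80
+curl -i http://<node-ip>/ping
+
+# NGINX Ingress (RKE): the default backend answers /healthz on port 80
+curl -i http://<node-ip>/healthz
+
+# Adding the Rancher hostname as the Host header returns a response from the Rancher pods instead
+curl -ik -H "Host: rancher.example.com" https://<node-ip>/ping
+```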
+ +To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md new file mode 100644 index 0000000000..2301e4f7ed --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md @@ -0,0 +1,74 @@ +--- +title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' +weight: 1 +--- + +This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. + +The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. + +For more information about each installation option, refer to [this page.](../../../pages-for-subheaders/installation-and-upgrade.md) + +:::note Important: + +These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +::: + +To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. We recommend MySQL. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. + +For a high-availability K3s installation, you will need to set a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. + +When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the MySQL database, refer to this [tutorial](mysql-database-in-amazon-rds.md) for setting up MySQL on Amazon's RDS service. + +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. 
+ +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](amazon-elb-load-balancer.md) + +:::caution + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
+ +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md new file mode 100644 index 0000000000..ef68da140b --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md @@ -0,0 +1,65 @@ +--- +title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' +weight: 2 +--- + +This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. + +:::note Important: + +These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +::: + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. 
+ +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](amazon-elb-load-balancer.md) + +:::caution + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md new file mode 100644 index 0000000000..376df4d2ad --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md @@ -0,0 +1,59 @@ +--- +title: 'Set up Infrastructure for a High Availability RKE2 Kubernetes Cluster' +weight: 1 +--- + +This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. 
+
+The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on an RKE2 Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container.
+
+:::note Important:
+
+These nodes must be in the same region. You may place these servers in separate availability zones (datacenter).
+
+:::
+
+To install the Rancher management server on a high-availability RKE2 cluster, we recommend setting up the following infrastructure:
+
+- **Three Linux nodes,** typically virtual machines, in the infrastructure provider of your choice.
+- **A load balancer** to direct traffic to the three nodes.
+- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it.
+
+### 1. Set up Linux Nodes
+
+Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md)
+
+For an example of one way to set up Linux nodes, refer to this [tutorial](nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2.
+
+### 2. Set up the Load Balancer
+
+You will also need to set up a load balancer to direct traffic to the Rancher replica on all nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server.
+
+When Kubernetes gets set up in a later step, the RKE2 tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames.
+
+When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster.
+
+For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer:
+
+- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment.
+- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step.
For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](amazon-elb-load-balancer.md) + +:::caution + +Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +::: + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md new file mode 100644 index 0000000000..f05998b800 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md @@ -0,0 +1,34 @@ +--- +title: Setting up a MySQL Database in Amazon RDS +weight: 4 +--- +This tutorial describes how to set up a MySQL database in Amazon's RDS. + +This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. + +1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. In the left panel, click **Databases**. +1. Click **Create database**. +1. In the **Engine type** section, click **MySQL**. +1. In the **Version** section, choose **MySQL 5.7.22**. +1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. +1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. +1. Click **Create database**. + +You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. + +To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. + +- **Username:** Use the admin username. +- **Password:** Use the admin password. +- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. 
+- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. +- **Database name:** Confirm the name by going to the **Configuration** tab. The name is listed under **DB name**. + +This information will be used to connect to the database in the following format: + +``` +mysql://username:password@tcp(hostname:3306)/database-name +``` + +For more information on configuring the datastore for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md new file mode 100644 index 0000000000..6486c2d8b1 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md @@ -0,0 +1,91 @@ +--- +title: Setting up an NGINX Load Balancer +weight: 4 +--- + +NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. + +In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. + +:::note + +Do not use one of your Rancher nodes as the load balancer. + +::: + +> These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. + +## Install NGINX + +Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation on how to install and enable the NGINX `stream` module on your operating system. + +## Create NGINX Configuration + +After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses for your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. From `nginx.conf`, replace both occurrences (port 80 and port 443) of ``, ``, and `` with the IPs of your nodes. + + :::note + + See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. + + ::: + +
Example NGINX config
+ ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + stream { + upstream rancher_servers_http { + least_conn; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + } + server { + listen 80; + proxy_pass rancher_servers_http; + } + + upstream rancher_servers_https { + least_conn; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers_https; + } + + } + ``` + + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +## Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` diff --git a/docs/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/docs/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md new file mode 100644 index 0000000000..a244f99642 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md @@ -0,0 +1,75 @@ +--- +title: Setting up Nodes in Amazon EC2 +weight: 3 +--- + +In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. + +If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. + +If the Rancher server is installed in a single Docker container, you only need one instance. + +### 1. Optional Preparation + +- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.](../../../pages-for-subheaders/set-up-cloud-providers.md) +- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.](../../../pages-for-subheaders/installation-requirements.md#port-requirements) + +### 2. Provision Instances + +1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. +1. In the left panel, click **Instances**. +1. Click **Launch Instance**. +1. In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. 
Go to the Ubuntu AMI and click **Select**. +1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. +1. Click **Next: Configure Instance Details**. +1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. +1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. +1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group**. +1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements](../../../pages-for-subheaders/installation-requirements.md#port-requirements) for Rancher nodes. +1. Click **Review and Launch**. +1. Click **Launch**. +1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. +1. Click **Launch Instances**. + + +**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. + +:::note + +If the nodes are being used for an RKE Kubernetes cluster, install Docker on each node in the next step. For a K3s Kubernetes cluster, the nodes are now ready to install K3s. + +::: + +### 3. Install Docker and Create User for RKE Kubernetes Cluster Nodes + +1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. +1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect**. +1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: +``` +sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] +``` +1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: +``` +curl https://releases.rancher.com/install-docker/18.09.sh | sh +``` +1. When you are connected to the instance, run the following command on the instance to create a user: +``` +sudo usermod -aG docker ubuntu +``` +1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. + +:::tip + +To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. + +::: + +**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. + +### Next Steps for RKE Kubernetes Cluster Nodes + +If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. + +RKE will also need access to the private key to connect to each node. 
Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md new file mode 100644 index 0000000000..f275fe2ab5 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md @@ -0,0 +1,25 @@ +--- +title: About High-availability Installations +weight: 1 +--- + +We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. + +In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. + +Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. + +The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. + +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md) + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers
+![High-availability Kubernetes Installation of Rancher](/img/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md new file mode 100644 index 0000000000..3f1110dcac --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md @@ -0,0 +1,128 @@ +--- +title: Setting up a High-availability K3s Kubernetes Cluster for Rancher +shortTitle: Set up K3s for Rancher +weight: 2 +--- + +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) + +For systems without direct internet access, refer to the air gap installation instructions. + +:::tip Single-node Installation Tip: + +In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. + +To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. + +In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +::: + +# Prerequisites + +These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.](../infrastructure-setup/ha-k3s-kubernetes-cluster.md) + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. +# Installing Kubernetes + +### 1. Install Kubernetes and Set up the K3s Server + +When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. + +1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. +1. On the Linux node, run this command to start the K3s server and connect it to the external datastore: + ``` + curl -sfL https://get.k3s.io | sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: + ```sh + curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + + :::note + + The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. + + ::: + +1. Repeat the same command on your second K3s server node. + +### 2. 
Confirm that K3s is Running + +To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: +``` +sudo k3s kubectl get nodes +``` + +Then you should see two nodes with the master role: +``` +ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes +NAME STATUS ROLES AGE VERSION +ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 +ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 +``` + +Then test the health of the cluster pods: +``` +sudo k3s kubectl get pods --all-namespaces +``` + +**Result:** You have successfully set up a K3s Kubernetes cluster. + +### 3. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +```yml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### 4. Check the Health of Your Cluster Pods + +Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. + +Check that all the required pods and containers are healthy are ready to continue: + +``` +ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d +kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d +kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d +``` + +**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. 
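
If a server node does not reach the `Ready` state in the checks above, the K3s service logs on that node are usually the quickest way to spot a bad datastore endpoint or credential. This is a minimal sketch, assuming the standard install script was used (it registers a systemd unit named `k3s` on server nodes):

```
# Check that the K3s service is running on the node
sudo systemctl status k3s

# Tail the service logs for datastore or certificate errors
sudo journalctl -u k3s --no-pager | tail -n 50
```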
diff --git a/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md new file mode 100644 index 0000000000..4971baba20 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md @@ -0,0 +1,191 @@ +--- +title: Setting up a High-availability RKE Kubernetes Cluster +shortTitle: Set up RKE Kubernetes +weight: 3 +--- + + +This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. + +:::note + +Rancher can run on any Kubernetes cluster, included hosted Kubernetes solutions such as Amazon EKS. The below instructions represent only one possible way to install Kubernetes. + +::: + +For systems without direct internet access, refer to [Air Gap: Kubernetes install.](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) + +:::tip Single-node Installation Tip: + +In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. + +To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. + +In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +::: + +# Installing Kubernetes + +### Required CLI Tools + +Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. + +Also install [RKE,](https://rancher.com/docs/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. + +### 1. Create the cluster configuration file + +In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. + +Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. + +If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. + +RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. 
+ +```yaml +nodes: + - address: 165.227.114.63 + internal_address: 172.16.22.12 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.116.167 + internal_address: 172.16.32.37 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.127.226 + internal_address: 172.16.42.73 + user: ubuntu + role: [controlplane, worker, etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Required for external TLS termination with +# ingress-nginx v0.22+ +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + +
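
For example, if the key for one of the nodes above is not in the default location, that node's entry could set `ssh_key_path` explicitly. This is a minimal sketch; the key path shown is an illustrative placeholder, not a value from your environment:

```yaml
nodes:
  - address: 165.227.114.63
    internal_address: 172.16.22.12
    user: ubuntu
    role: [controlplane, worker, etcd]
    # This node's key is not at ~/.ssh/id_rsa, so point RKE at it explicitly
    ssh_key_path: /home/user/.ssh/id_rsa_rancher
```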
Common RKE Node Options
+ +| Option | Required | Description | +| ------------------ | -------- | -------------------------------------------------------------------------------------- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | + +:::note Advanced Configurations: + +RKE has many configuration options for customizing the install to suit your specific environment. + +Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. + +For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide](../../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md). + +For more information regarding Dockershim support, refer to [this page](../../../getting-started/installation-and-upgrade/installation-requirements/dockershim.md) + +::: + +### 2. Run RKE + +``` +rke up --config ./rancher-cluster.yml +``` + +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + +### 3. Test Your Cluster + +This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. + +Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. + +When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_cluster.yml`. This file has the credentials for `kubectl` and `helm`. + +:::note + +If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. + +::: + +Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_cluster.yml`: + +``` +export KUBECONFIG=$(pwd)/kube_config_cluster.yml +``` + +Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: + +``` +kubectl get nodes + +NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 +``` + +### 4. Check the Health of Your Cluster Pods + +Check that all the required pods and containers are healthy are ready to continue. + +- Pods are in `Running` or `Completed` state. +- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` +- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
+ +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s +kube-system canal-jp4hz 3/3 Running 0 30s +kube-system canal-z2hg8 3/3 Running 0 30s +kube-system canal-z6kpw 3/3 Running 0 30s +kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s +kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s +kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s +kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s +kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s +kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s +kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s +``` + +This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. + +### 5. Save Your Files + +:::note Important: + +The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +::: + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

_The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +:::note + +The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +::: + +### Issues or errors? + +See the [Troubleshooting](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + + +### [Next: Install Rancher](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + diff --git a/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md new file mode 100644 index 0000000000..67e44d88f6 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md @@ -0,0 +1,167 @@ +--- +title: Setting up a High-availability RKE2 Kubernetes Cluster for Rancher +shortTitle: Set up RKE2 for Rancher +weight: 2 +--- +_Tested on v2.5.6_ + +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) + +# Prerequisites + +These instructions assume you have set up three nodes, a load balancer, and a DNS record, as described in [this section.](../infrastructure-setup/ha-rke2-kubernetes-cluster.md) + +Note that in order for RKE2 to work correctly with the load balancer, you need to set up two listeners: one for the supervisor on port 9345, and one for the Kubernetes API on port 6443. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the RKE2 version, use the INSTALL_RKE2_VERSION environment variable when running the RKE2 installation script. +# Installing Kubernetes + +### 1. Install Kubernetes and Set up the RKE2 Server + +RKE2 server runs with embedded etcd so you will not need to set up an external datastore to run in HA mode. + +On the first node, you should set up the configuration file with your own pre-shared secret as the token. The token argument can be set on startup. + +If you do not specify a pre-shared secret, RKE2 will generate one and place it at /var/lib/rancher/rke2/server/node-token. + +To avoid certificate errors with the fixed registration address, you should launch the server with the tls-san parameter set. This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access via both the IP and the hostname. + +First, you must create the directory where the RKE2 config file is going to be placed: + +``` +mkdir -p /etc/rancher/rke2/ +``` + +Next, create the RKE2 config file at `/etc/rancher/rke2/config.yaml` using the following example: + +``` +token: my-shared-secret +tls-san: + - my-kubernetes-domain.com + - another-kubernetes-domain.com +``` +After that, you need to run the install command and enable and start rke2: + +``` +curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.20 sh - +systemctl enable rke2-server.service +systemctl start rke2-server.service +``` +1. 
To join the rest of the nodes, you need to configure each additional node with the same shared token or the one generated automatically. Here is an example of the configuration file: + + token: my-shared-secret + server: https://:9345 + tls-san: + - my-kubernetes-domain.com + - another-kubernetes-domain.com +After that, you need to run the installer and enable, then start, rke2: + + curl -sfL https://get.rke2.io | sh - + systemctl enable rke2-server.service + systemctl start rke2-server.service + + +1. Repeat the same command on your third RKE2 server node. + +### 2. Confirm that RKE2 is Running + +Once you've launched the rke2 server process on all server nodes, ensure that the cluster has come up properly with + +``` +/var/lib/rancher/rke2/bin/kubectl \ + --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes +You should see your server nodes in the Ready state. +``` + +Then test the health of the cluster pods: +``` +/var/lib/rancher/rke2/bin/kubectl \ + --kubeconfig /etc/rancher/rke2/rke2.yaml get pods --all-namespaces +``` + +**Result:** You have successfully set up a RKE2 Kubernetes cluster. + +### 3. Save and Start Using the kubeconfig File + +When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your control-plane load balancer, on port 6443. (The RKE2 Kubernetes API Server uses port 6443, while the Rancher server will be served via the NGINX Ingress on ports 80 and 443.) Here is an example `rke2.yaml`: + +```yml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your RKE2 cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### 4. Check the Health of Your Cluster Pods + +Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. 
+ +Check that all the required pods and containers are healthy are ready to continue: + +``` +/var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system cloud-controller-manager-rke2-server-1 1/1 Running 0 2m28s +kube-system cloud-controller-manager-rke2-server-2 1/1 Running 0 61s +kube-system cloud-controller-manager-rke2-server-3 1/1 Running 0 49s +kube-system etcd-rke2-server-1 1/1 Running 0 2m13s +kube-system etcd-rke2-server-2 1/1 Running 0 87s +kube-system etcd-rke2-server-3 1/1 Running 0 56s +kube-system helm-install-rke2-canal-hs6sx 0/1 Completed 0 2m17s +kube-system helm-install-rke2-coredns-xmzm8 0/1 Completed 0 2m17s +kube-system helm-install-rke2-ingress-nginx-flwnl 0/1 Completed 0 2m17s +kube-system helm-install-rke2-metrics-server-7sggn 0/1 Completed 0 2m17s +kube-system kube-apiserver-rke2-server-1 1/1 Running 0 116s +kube-system kube-apiserver-rke2-server-2 1/1 Running 0 66s +kube-system kube-apiserver-rke2-server-3 1/1 Running 0 48s +kube-system kube-controller-manager-rke2-server-1 1/1 Running 0 2m30s +kube-system kube-controller-manager-rke2-server-2 1/1 Running 0 57s +kube-system kube-controller-manager-rke2-server-3 1/1 Running 0 42s +kube-system kube-proxy-rke2-server-1 1/1 Running 0 2m25s +kube-system kube-proxy-rke2-server-2 1/1 Running 0 59s +kube-system kube-proxy-rke2-server-3 1/1 Running 0 85s +kube-system kube-scheduler-rke2-server-1 1/1 Running 0 2m30s +kube-system kube-scheduler-rke2-server-2 1/1 Running 0 57s +kube-system kube-scheduler-rke2-server-3 1/1 Running 0 42s +kube-system rke2-canal-b9lvm 2/2 Running 0 91s +kube-system rke2-canal-khwp2 2/2 Running 0 2m5s +kube-system rke2-canal-swfmq 2/2 Running 0 105s +kube-system rke2-coredns-rke2-coredns-547d5499cb-6tvwb 1/1 Running 0 92s +kube-system rke2-coredns-rke2-coredns-547d5499cb-rdttj 1/1 Running 0 2m8s +kube-system rke2-coredns-rke2-coredns-autoscaler-65c9bb465d-85sq5 1/1 Running 0 2m8s +kube-system rke2-ingress-nginx-controller-69qxc 1/1 Running 0 52s +kube-system rke2-ingress-nginx-controller-7hprp 1/1 Running 0 52s +kube-system rke2-ingress-nginx-controller-x658h 1/1 Running 0 52s +kube-system rke2-metrics-server-6564db4569-vdfkn 1/1 Running 0 66s +``` + +**Result:** You have confirmed that you can access the cluster with `kubectl` and the RKE2 cluster is running successfully. Now the Rancher management server can be installed on the cluster. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md new file mode 100644 index 0000000000..bbb9851ed5 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md @@ -0,0 +1,74 @@ +--- +title: Recommended Cluster Architecture +weight: 1 +--- + +There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. + +# Separating Worker Nodes from Nodes with Other Roles + +When designing your cluster(s), you have two options: + +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. 
It also strictly isolates network traffic between each of the roles according to the [port requirements](../node-requirements-for-rancher-managed-clusters.md#networking-requirements). +* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. + +In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. + +Therefore, each node should have one of the following role configurations: + + * `etcd` + * `controlplane` + * Both `etcd` and `controlplane` + * `worker` + +# Recommended Number of Nodes with Each Role + +The cluster should have: + +- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +- At least two nodes with the role `controlplane` for master component high availability. +- At least two nodes with the role `worker` for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](roles-for-nodes-in-kubernetes.md) + + +### Number of Controlplane Nodes + +Adding more than one node with the `controlplane` role makes every master component highly available. + +### Number of etcd Nodes + +The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. + +| Nodes with `etcd` role | Majority | Failure Tolerance | +|--------------|------------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | **1** | +| 4 | 3 | 1 | +| 5 | 3 | **2** | +| 6 | 4 | 2 | +| 7 | 4 | **3** | +| 8 | 5 | 3 | +| 9 | 5 | **4** | + +References: + +* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) +* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) + +### Number of Worker Nodes + +Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. + +### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications + +You may have noticed that our [Kubernetes Install](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. 
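
The failure-tolerance table above follows directly from the (n/2)+1 majority rule. As an illustrative sanity check, the majority and tolerance columns can be reproduced with a short shell loop:

```bash
# Derive majority and failure tolerance for n etcd members
for n in 1 2 3 4 5 6 7 8 9; do
  majority=$(( n / 2 + 1 ))       # integer division: floor(n/2) + 1
  tolerance=$(( n - majority ))
  echo "etcd members: $n  majority: $majority  failure tolerance: $tolerance"
done
```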
+ +# References + +* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md new file mode 100644 index 0000000000..f0f3b87703 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md @@ -0,0 +1,51 @@ +--- +title: Roles for Nodes in Kubernetes +weight: 1 +--- + +This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. + +This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +![Cluster diagram](/img/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely for visual aid + +# etcd + +Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. + +:::note + +Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +::: + +# controlplane + +Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. + +:::note + +Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +::: + +### kube-apiserver + +The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. + +### kube-controller-manager + +The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +### kube-scheduler + +The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +# worker + +Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. 
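
To observe the leader election described above for `kube-controller-manager` and `kube-scheduler`, you can read the leader annotation on their endpoints in the `kube-system` namespace. This is an illustrative sketch; depending on the Kubernetes version, the leader record may be kept on a `Lease` object instead of the `Endpoints` annotation:

```bash
# Show which controlplane node currently holds the kube-controller-manager leader lock
kubectl -n kube-system get endpoints kube-controller-manager \
  -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}'

# The scheduler records its leader the same way
kubectl -n kube-system get endpoints kube-scheduler \
  -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}'
```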
+ +# References + +* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md new file mode 100644 index 0000000000..57320cc1b2 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md @@ -0,0 +1,42 @@ +--- +title: Rancher Agents +weight: 2400 +--- + +There are two different agent resources deployed on Rancher managed clusters: + +- [cattle-cluster-agent](#cattle-cluster-agent) +- [cattle-node-agent](#cattle-node-agent) + +For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture](../../../../pages-for-subheaders/rancher-manager-architecture.md) + +### cattle-cluster-agent + +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. + +### cattle-node-agent + +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters when `cattle-cluster-agent` is unavailable. + +### Scheduling rules + +The `cattle-cluster-agent` uses a fixed fixed set of tolerations (listed below, if no controlplane nodes are visible in the cluster) or dynamically added tolerations based on taints applied to the controlplane nodes. This structure allows for [Taint based Evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions) to work properly for `cattle-cluster-agent`. The default tolerations are described below. If controlplane nodes are present the cluster, the tolerations will be replaced with tolerations matching the taints on the controlplane nodes. + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **Note:** These are the default tolerations, and will be replaced by tolerations matching taints applied to controlplane nodes.

`effect:NoSchedule`
`key:node-role.kubernetes.io/controlplane`
`value:true`

`effect:NoSchedule`
`key:node-role.kubernetes.io/control-plane`
`operator:Exists`

`effect:NoSchedule`
`key:node-role.kubernetes.io/master`
`operator:Exists` | +| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | + +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. When there are no controlplane nodes visible in the cluster (this is usually the case when using [Clusters from Hosted Kubernetes Providers](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)), you can add the label `cattle.io/cluster-agent=true` on a node to prefer scheduling the `cattle-cluster-agent` pod to that node. + +See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. + +The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: + +| Weight | Expression | +| ------ | ------------------------------------------------ | +| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | +| 100 | `node-role.kubernetes.io/control-plane:In:"true"` | +| 100 | `node-role.kubernetes.io/master:In:"true"` | +| 1 | `cattle.io/cluster-agent:In:"true"` | + diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/rke1-vs-rke2-differences.md diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md new file mode 100644 index 0000000000..19ad0650b4 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md @@ -0,0 +1,154 @@ +--- +title: Setting up the Amazon Cloud Provider +weight: 1 +--- + +When using the `Amazon` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. +- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. + +See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider. + +To set up the Amazon cloud provider, + +1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) +2. [Configure the ClusterID](#2-configure-the-clusterid) + +### 1. Create an IAM Role and attach to the instances + +All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. 
See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: + +* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. +* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. + +While creating an [Amazon EC2 cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. + +While creating a [Custom cluster](../../../../../../pages-for-subheaders/use-existing-nodes.md), you must manually attach the IAM role to the instance(s). + +IAM Policy for nodes with the `controlplane` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } +] +} +``` + +IAM policy for nodes with the `etcd` or `worker` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + 
"ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } +] +} +``` + +### 2. Configure the ClusterID + +The following resources need to tagged with a `ClusterID`: + +- **Nodes**: All hosts added in Rancher. +- **Subnet**: The subnet used for your cluster. +- **Security Group**: The security group used for your cluster. + +:::note + +Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). + +::: + +When you create an [Amazon EC2 Cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. + +Use the following tag: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` + +`CLUSTERID` can be any string you like, as long as it is equal across all tags set. + +Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. + +### Using Amazon Elastic Container Registry (ECR) + +The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md new file mode 100644 index 0000000000..e3825bf274 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md @@ -0,0 +1,112 @@ +--- +title: Setting up the Azure Cloud Provider +weight: 2 +--- + +When using the `Azure` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. + +- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. + +- **Network Storage:** Support Azure Files via CIFS mounts. + +The following account types are not supported for Azure Subscriptions: + +- Single tenant accounts (i.e. accounts with no subscriptions). +- Multi-subscription accounts. + +# Prerequisites for RKE and RKE2 + +To set up the Azure cloud provider for both RKE and RKE2, the following credentials need to be configured: + +1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) +2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) +3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) +4. 
[Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) + +### 1. Set up the Azure Tenant ID + +Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). + +If you want to use the Azure CLI, you can run the command `az account show` to get the information. + +### 2. Set up the Azure Client ID and Azure Client Secret + +Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). + +1. Select **Azure Active Directory**. +1. Select **App registrations**. +1. Select **New application registration**. +1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. +1. Select **Create**. + +In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. + +The next step is to generate the **Azure Client Secret**: + +1. Open your created App registration. +1. In the **Settings** view, open **Keys**. +1. Enter a **Key description**, select an expiration time and select **Save**. +1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. + +### 3. Configure App Registration Permissions + +The last thing you will need to do, is assign the appropriate permissions to your App registration. + +1. Go to **More services**, search for **Subscriptions** and open it. +1. Open **Access control (IAM)**. +1. Select **Add**. +1. For **Role**, select `Contributor`. +1. For **Select**, select your created App registration name. +1. Select **Save**. + +### 4. Set up Azure Network Security Group Name + +A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. + +If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. + +You should already assign custom hosts to this Network Security Group during provisioning. + +Only hosts expected to be load balancer back ends need to be in this group. + +# RKE2 Cluster Set-up in Rancher + +1. Choose "Azure" from the Cloud Provider drop-down in the Cluster Configuration section. + +1. * Supply the Cloud Provider Configuration. Note that Rancher will automatically create a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you will need to specify them before creating the cluster. + * You can click on "Show Advanced" to see more of these automatically generated names and update them if + necessary. Your Cloud Provider Configuration **must** match the fields in the Machine Pools section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group. + * An example is provided below. You will modify it as needed. + +
+ Example Cloud Provider Config + + ```yaml + { + "cloud":"AzurePublicCloud", + "tenantId": "YOUR TENANTID HERE", + "aadClientId": "YOUR AADCLIENTID HERE", + "aadClientSecret": "YOUR AADCLIENTSECRET HERE", + "subscriptionId": "YOUR SUBSCRIPTIONID HERE", + "resourceGroup": "docker-machine", + "location": "westus", + "subnetName": "docker-machine", + "securityGroupName": "rancher-managed-KA4jV9V2", + "securityGroupResourceGroup": "docker-machine", + "vnetName": "docker-machine-vnet", + "vnetResourceGroup": "docker-machine", + "primaryAvailabilitySetName": "docker-machine", + "routeTableResourceGroup": "docker-machine", + "cloudProviderBackoff": false, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true + } + ``` + +
+ +1. Under the **Cluster Configuration > Advanced** section, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false` + +1. Click the **Create** button to submit the form and create the cluster. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md new file mode 100644 index 0000000000..e6492ecb29 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md @@ -0,0 +1,58 @@ +--- +title: Setting up the Google Compute Engine Cloud Provider +weight: 3 +--- + +In this section, you'll learn how to enable the Google Compute Engine (GCE) cloud provider for custom clusters in Rancher. A custom cluster is one in which Rancher installs Kubernetes on existing nodes. + +The official Kubernetes documentation for the GCE cloud provider is [here.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#gce) + +:::note Prerequisites: + +The service account of `Identity and API` access on GCE needs the `Computer Admin` permission. + +::: + +If you are using Calico, + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the custom cluster and click **⋮ > Edit YAML.* Enter the following configuration: + + ``` + rancher_kubernetes_engine_config: + cloud_provider: + name: gce + customCloudProvider: |- + [Global] + project-id= + network-name= + subnetwork-name= + node-instance-prefix= + node-tags= + network: + options: + calico_cloud_provider: "gce" + plugin: "calico" + ``` + +If you are using Canal or Flannel, + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the custom cluster and click **⋮ > Edit YAML.* Enter the following configuration: + + ``` + rancher_kubernetes_engine_config: + cloud_provider: + name: gce + customCloudProvider: |- + [Global] + project-id= + network-name= + subnetwork-name= + node-instance-prefix= + node-tags= + services: + kube_controller: + extra_args: + configure-cloud-routes: true # we need to allow the cloud provider configure the routes for the hosts + ``` \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md new file mode 100644 index 0000000000..c96345bde8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md @@ -0,0 +1,21 @@ +--- +title: How to Configure In-tree vSphere Cloud Provider +shortTitle: In-tree Cloud Provider +weight: 10 +--- + +To set up the in-tree vSphere cloud provider, follow these steps while creating the vSphere cluster in Rancher: + +1. Set **Cloud Provider** option to `Custom` or `Custom (In-Tree)`. +1. Click on **Edit as YAML** +1. Insert the following structure to the pre-populated cluster YAML. 
This structure must be placed under `rancher_kubernetes_engine_config`. Note that the `name` *must* be set to `vsphere`. + + ```yaml + rancher_kubernetes_engine_config: + cloud_provider: + name: vsphere + vsphereCloudProvider: + [Insert provider configuration] + ``` + +Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree.md diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md new file mode 100644 index 0000000000..e0e7a74f1b --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md @@ -0,0 +1,103 @@ +--- +title: Creating a DigitalOcean Cluster +shortTitle: DigitalOcean +weight: 2215 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. + +First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. + +Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. 
[Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **DigitalOcean**. +1. Enter your Digital Ocean credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Add Template**. +1. Click **DigitalOcean**. +1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md) + +### 3. Create a cluster with node pools using the node template + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **DigitalOcean**. +1. Enter a **Cluster Name**. +1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. **In the Cluster Configuration** section, choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **DigitalOcean**. +1. Enter your Digital Ocean credentials. +1. Click **Create**. + +### 2. Create your cluster + +Use Rancher to create a Kubernetes cluster in DigitalOcean. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **DigitalOcean**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. 
For each machine pool, define the machine configuration. Refer to the [DigitalOcean machine configuration reference](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md new file mode 100644 index 0000000000..f6c8336e9d --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md @@ -0,0 +1,288 @@ +--- +title: Creating an Amazon EC2 Cluster +shortTitle: Amazon EC2 +description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher +weight: 2210 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. 
+ +First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. + +Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +### Prerequisites + +- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. +- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: + - [Example IAM Policy](#example-iam-policy) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](../../../../../pages-for-subheaders/set-up-cloud-providers.md) or want to pass an IAM Profile to an instance) + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) +- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. + +# Creating an EC2 Cluster + +The steps to create a cluster differ based on your Rancher version. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Amazon**. +1. Enter a name for the cloud credential. +1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key**. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials and information from EC2 + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates** +1. Click **Add Template**. +1. Fill out a node template for EC2. 
For help filling out the form, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) +1. Click **Create**. + + :::note + + If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements](https://rancher.com/docs/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. + + ::: + +### 3. Create a cluster with node pools using the node template + +Add one or more node pools to your cluster. For more information about node pools, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Amazon EC2**. +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers](../../../../../pages-for-subheaders/set-up-cloud-providers.md) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + + :::note + + If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements](https://rancher.com/docs/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. + + ::: + +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Amazon**. +1. Enter a name for the cloud credential. +1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key**. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create your cluster + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **Amazon EC2**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. For each machine pool, define the machine configuration. 
Refer to [the EC2 machine configuration reference](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
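As a quick sanity check once the cluster reaches **Active**, you can point `kubectl` at the kubeconfig file downloaded from the cluster detail page in the Rancher UI. The sketch below is only an example: the file path and cluster name are placeholders, not values generated by Rancher.

```bash
# Use the kubeconfig downloaded from the Rancher UI (example path only).
export KUBECONFIG=$HOME/Downloads/my-ec2-cluster.yaml

# Confirm that the nodes created from your node pools have registered and are Ready.
kubectl get nodes -o wide

# Confirm that the system workloads are running.
kubectl get pods --all-namespaces
```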
+ +# IAM Policies + +### Example IAM Policy + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` + +### Example IAM Policy with PassRole + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", + "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md 
b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md new file mode 100644 index 0000000000..7715520e95 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md @@ -0,0 +1,146 @@ +--- +title: Creating an Azure Cluster +shortTitle: Azure +weight: 2220 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Azure through Rancher. + +First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. + +Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +:::caution + +When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: + +- Terminate the SSL/TLS on the internal load balancer +- Use the L7 load balancer + +For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +::: + +For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +- [Preparation in Azure](#preparation-in-azure) +- [Creating an Azure Cluster](#creating-an-azure-cluster) + +# Preparation in Azure + +Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. + +To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. + +The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: + +``` +az ad sp create-for-rbac \ + --name="" \ + --role="Contributor" \ + --scopes="/subscriptions/" +``` + +The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, and *The client secret*. This information will be used when you create a node template for Azure. + +# Creating an Azure Cluster + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. 
[Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Azure**. +1. Enter your Azure credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Add Template**. +1. Click **Azure**. +1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in Azure. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Azure**. +1. Enter a **Cluster Name**. +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. In the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Azure**. +1. Enter your Azure credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create your cluster + +Use Rancher to create a Kubernetes cluster in Azure. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **Azure**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. For each machine pool, define the machine configuration. 
Refer to the [Azure machine configuration reference](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md new file mode 100644 index 0000000000..d817bb5809 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md @@ -0,0 +1,93 @@ +--- +title: Provisioning Kubernetes Clusters in Nutanix AOS +weight: 1 +--- + +To use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Nutanix AOS (AHV): + +1. Locate Rancher's built-in Nutanix [node driver and activate it](../../../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#activating-deactivating-node-drivers). + +1. 
Create a node template, which Rancher will use to provision nodes in Nutanix AOS. + +1. Create a Nutanix AOS cluster in Rancher. When configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +For details on configuring the Nutanix AOS node template, refer to the [Nutanix AOS node template configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md) + +For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +- [Preparation in Nutanix AOS](#preparation-in-nutanix-aos) +- [Creating a Nutanix AOS Cluster](#creating-a-nutanix-aos-cluster) + +# Preparation in Nutanix AOS + +The following sections describe the requirements for setting up Nutanix AOS so that Rancher can provision VMs and clusters. + +:::note + +The node templates are documented and tested with Nutanix AOS version 5.20.2 and 6.0.1. + +::: +### Create Credentials in Nutanix AOS + +Before proceeding to create a cluster, you must ensure that you have a [Nutanix Prism Central user account](https://portal.nutanix.com/page/documents/details?targetId=Nutanix-Security-Guide-v6_0:wc-user-create-wc-t.html) with admin permissions. When you set up a node template, the template will need to use these credentials. + +### Network Permissions + +You must ensure that the hosts running the Rancher server are able to establish the following network connections: + +- To the Nutanix Prism Central API (usually port 9440/TCP). +- To port 22/TCP and 2376/TCP on the created VMs + +See [Node Networking Requirements](../../../node-requirements-for-rancher-managed-clusters.md#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. + +### VM-VM Anti-Affinity Policies + +Setting up [VM-VM Anti-Affinity Policies](https://portal.nutanix.com/page/documents/details?targetId=AHV-Admin-Guide-v6_1:ahv-vm-anti-affinity-t.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate AHV hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. + +# Creating a Nutanix AOS Cluster + +1. [Create a node template ](#1-create-a-node-template) +2. [Create a cluster with node pools using the node template](#2-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create a node template + +Creating a [node template](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for Nutanix AOS will allow Rancher to provision new nodes in Nutanix AOS. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Create**. +1. Click **Add Template**. +1. Click **Nutanix**. +1. Fill out a node template for Nutanix AOS. 
For help filling out the form, refer to the Nutanix AOS node template [configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md). +1. Click **Create**. + +### 2. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in Nutanix AOS. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Nutanix**. +1. Enter a **Cluster Name**, then click **Continue**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users who can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used, and whether you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. + +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
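Before provisioning, it can save time to confirm the requirements described in [Network Permissions](#network-permissions) from the hosts running the Rancher server. The commands below are a minimal sketch; the Prism Central hostname and node IP are placeholders for your own environment.

```bash
# Prism Central API endpoint (replace with your Prism Central address).
nc -zv -w 5 prism-central.example.com 9440

# SSH and Docker daemon ports on a created VM (replace with a node IP).
nc -zv -w 5 10.0.0.10 22
nc -zv -w 5 10.0.0.10 2376
```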
\ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-a-vm-template.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-a-vm-template.md new file mode 100644 index 0000000000..d718f33253 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-a-vm-template.md @@ -0,0 +1,167 @@ +--- +title: Creating a vSphere Virtual Machine Template +weight: 4 +--- + +Creating virtual machines in a repeatable and reliable fashion can often be difficult. VMware vSphere offers the ability to build one VM that can then be converted to a template. The template can then be used to create identically configured VMs. Rancher leverages this capability within node pools to create identical RKE1 and RKE2 nodes. + +In order to leverage the template to create new VMs, Rancher has some [specific requirements](#requirements) that the VM must have pre-installed. After you configure the VM with these requirements, you will next need to [prepare the VM](#preparing-your-vm) before [creating the template](#creating-a-template). Finally, once preparation is complete, the VM can be [converted to a template](#converting-to-a-template) and [moved into a content library](#moving-to-a-content-library), ready for Rancher node pool usage. + +- [Requirements](#requirements) +- [Creating a Template](#creating-a-template) +- [Preparing Your VM](#preparing-your-vm) +- [Converting to a Template](#converting-to-a-template) +- [Moving to a content library](#moving-to-a-content-library) +- [Other Resources](#other-resources) + +# Requirements + +There is specific tooling required for both Linux and Windows VMs to be usable by the vSphere node driver. The most critical dependency is [cloud-init](https://cloud-init.io/) for Linux and [cloudbase-init](https://cloudbase.it/cloudbase-init/) for Windows. Both of these are used for provisioning the VMs by configuring the hostname and by setting up the SSH access and the default Rancher user. Users can add additional content to these as desired if other configuration is needed. In addition, other requirements are listed below for reference. + +:::note + +If you have any specific firewall rules or configuration, you will need to add this to the VM before creating a template. + +::: + +## Linux Dependencies + +The packages that need to be installed on the template are listed below. These will have slightly different names based on distribution; some distributions ship these by default, for example. 
+ +* curl +* wget +* git +* net-tools +* unzip +* apparmor-parser +* ca-certificates +* cloud-init +* cloud-guest-utils +* cloud-image-utils +* growpart +* cloud-initramfs-growroot +* open-iscsi +* openssh-server +* [open-vm-tools](https://docs.vmware.com/en/VMware-Tools/11.3.0/com.vmware.vsphere.vmwaretools.doc/GUID-8B6EA5B7-453B-48AA-92E5-DB7F061341D1.html) + +## Windows Dependencies + +The list of packages that need to be installed on the template is as follows: + +* Windows Container Feature +* [cloudbase-init](https://cloudbase.it/cloudbase-init/#download) +* [Docker EE](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server#install-docker) - RKE1 Only + +:::note About the configuration for Windows templates varies between RKE1 and RKE2: + +- RKE1 leverages Docker, so any RKE1 templates need to have Docker EE pre-installed as well +- RKE2 does not require Docker EE, and thus it does not need to be installed + +::: + +# Creating a Template + +You may either manually create your VM or you can utilize [other alternatives](#alternatives-to-manual-creation) to create your VM. + +## Manual Creation +1. Manually create your VM by following [these instructions](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-AE8AFBF1-75D1-4172-988C-378C35C9FAF2.html) from VMware. Once you have a VM running, you can manually install the dependencies listed above to configure the VM correctly for the vSphere node driver. +2. Customize as needed based on your specific environment and requirements. +3. Proceed with the final preparation before creating your template. + +## Alternatives to Manual Creation + +Other alternative options to create VMs are listed below: + +* [VMware PowerCLI](https://developer.vmware.com/powercli) +* [Packer](https://www.packer.io/) +* [SaltStack](https://saltproject.io/) +* [Ansible](https://www.ansible.com/) + +Packer is a frequently-used alternative. Refer to this [reference](https://github.com/vmware-samples/packer-examples-for-vsphere) for examples of its usage with vSphere. + +# Preparing Your VM + +After creating a VM with all the required dependencies (and any additional required items), you must perform the most critical step next: preparing the VM to be turned into a template. This preparation will reset critical data such as the VM hostname, IPs, etc., to prevent that information from being brought into a new VM. If you fail to perform this step, you could create a VM with the same hostname, IP address, etc. + +Note that these preparatory steps differ between Linux and Windows. + +## Linux Preparation + +The commands below will reset your VM in Linux: + +```Bash +# Cleaning logs. +if [ -f /var/log/audit/audit.log ]; then + cat /dev/null > /var/log/audit/audit.log +fi +if [ -f /var/log/wtmp ]; then + cat /dev/null > /var/log/wtmp +fi +if [ -f /var/log/lastlog ]; then + cat /dev/null > /var/log/lastlog +fi + +# Cleaning udev rules. 
+if [ -f /etc/udev/rules.d/70-persistent-net.rules ]; then + rm /etc/udev/rules.d/70-persistent-net.rules +fi + +# Cleaning the /tmp directories +rm -rf /tmp/* +rm -rf /var/tmp/* + +# Cleaning the SSH host keys +rm -f /etc/ssh/ssh_host_* + +# Cleaning the machine-id +truncate -s 0 /etc/machine-id +rm /var/lib/dbus/machine-id +ln -s /etc/machine-id /var/lib/dbus/machine-id + +# Cleaning the shell history +unset HISTFILE +history -cw +echo > ~/.bash_history +rm -fr /root/.bash_history + +# Truncating hostname, hosts, resolv.conf and setting hostname to localhost +truncate -s 0 /etc/{hostname,hosts,resolv.conf} +hostnamectl set-hostname localhost + +# Clean cloud-init +cloud-init clean -s -l +``` + +## Windows Preparation + +Windows has a utility called [sysprep](https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/sysprep--generalize--a-windows-installation) that is used to generalize an image and reset the same items listed above for Linux. The command is as follows: + +```PowerShell +sysprep.exe /generalize /shutdown /oobe +``` + +# Converting to a Template + +1. Shut down and stop the VM. +2. Right-click on the VM in the inventory list and select **Template**. +3. Click on **Convert to Template**. + +**Result:** Once the process has completed, a template will be available for use. + +For additional information on converting a VM to a template, see the [VMware guide](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-5B3737CC-28DB-4334-BD18-6E12011CDC9F.html). + +# Moving to a Content library + +Rancher has the ability to use templates provided by a content library. Content libraries store and manage content within vSphere, and they also offer the ability to publish and share that content. 
+ +Below are some helpful links on content libraries: + +* [Create a content library](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-2A0F1C13-7336-45CE-B211-610D39A6E1F4.html) +* [Clone the template to the content library](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-AC1545F0-F8BA-4CD2-96EB-21B3DFAA1DC1.html) + +# Other Resources + +Here is a list of additional resources that may be useful: + +* [Tutorial for creating a Linux template](https://docs.microsoft.com/en-us/azure/cloud-adoption-framework/manage/hybrid/server/best-practices/vmware-ubuntu-template) +* [Tutorial for creating a Windows template](https://docs.microsoft.com/en-us/azure/cloud-adoption-framework/manage/hybrid/server/best-practices/vmware-windows-template) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md new file mode 100644 index 0000000000..48faf82366 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md @@ -0,0 +1,43 @@ +--- +title: Creating Credentials in the vSphere Console +weight: 3 +--- + +This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. + +The following table lists the permissions required for the vSphere user account: + +| Privilege Group | Operations | +|:----------------------|:-----------------------------------------------------------------------| +| Datastore | AllocateSpace
<br/> Browse<br/> FileManagement (Low level file operations)<br/> UpdateVirtualMachineFiles<br/> UpdateVirtualMachineMetadata | +| Global | Set custom attribute | +| Network | Assign | +| Resource | AssignVMToPool | +| Virtual Machine | Config (All)<br/> GuestOperations (All)<br/> Interact (All)<br/> Inventory (All)<br/> Provisioning (All) | +| vSphere Tagging | Assign or Unassign vSphere Tag<br/>
Assign or Unassign vSphere Tag on Object | + +The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: + +1. From the **vSphere** console, go to the **Administration** page. + +2. Go to the **Roles** tab. + +3. Create a new role. Give it a name and select the privileges listed in the permissions table above. + + ![](/img/rancherroles1.png) + +4. Go to the **Users and Groups** tab. + +5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. + + ![](/img/rancheruser.png) + +6. Go to the **Global Permissions** tab. + +7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. + + ![](/img/globalpermissionuser.png) + + ![](/img/globalpermissionrole.png) + +**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md new file mode 100644 index 0000000000..fad3045398 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md @@ -0,0 +1,113 @@ +--- +title: Provisioning Kubernetes Clusters in vSphere +weight: 1 +--- + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. + +First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. + +Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md) + +For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) + + +- [Preparation in vSphere](#preparation-in-vsphere) +- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) + +# Preparation in vSphere + +This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. + +The node templates are documented and tested with the vSphere Web Services API version 6.5. + +### Create Credentials in vSphere + +Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. 
When you set up a node template, the template will need to use these vSphere credentials. + +Refer to this [how-to guide](create-credentials.md) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. + +### Network Permissions + +It must be ensured that the hosts running the Rancher server are able to establish the following network connections: + +- To the vSphere API on the vCenter server (usually port 443/TCP). +- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required when using the ISO creation method*). +- To port 22/TCP and 2376/TCP on the created VMs + +See [Node Networking Requirements](../../../node-requirements-for-rancher-managed-clusters.md#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. + +### Valid ESXi License for vSphere API Access + +The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. + +### VM-VM Affinity Rules for Clusters with DRS + +If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. + +# Creating a vSphere Cluster + +The a vSphere cluster is created in Rancher depends on the Rancher version. + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **VMware vSphere**. +1. Enter your vSphere credentials. For help, refer to **Account Access** in the [node template configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md) +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Create**. +1. Click **Add Template**. +1. Click **vSphere**. +1. Fill out a node template for vSphere. For help filling out the form, refer to the vSphere node template [configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md). +1. 
Click **Create**. + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in vSphere. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **VMware vSphere**. +1. Enter a **Cluster Name** and use your vSphere cloud credentials. Click **Continue**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../../../../../pages-for-subheaders/vsphere-cloud-provider.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
+- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../../../../../advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../../../pages-for-subheaders/vsphere-cloud-provider.md) \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md new file mode 100644 index 0000000000..770ee0d9e7 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md @@ -0,0 +1,41 @@ +--- +title: Configuration for Storage Classes in Azure +weight: 3 +--- + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. + +In order to have the Azure platform create the required storage resources, follow these steps: + +1. [Configure the Azure cloud provider.](../set-up-cloud-providers/other-cloud-providers/azure.md) +1. Configure `kubectl` to connect to your cluster. +1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: + + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:azure-cloud-provider + rules: + - apiGroups: [''] + resources: ['secrets'] + verbs: ['get','create'] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:azure-cloud-provider + roleRef: + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io + name: system:azure-cloud-provider + subjects: + - kind: ServiceAccount + name: persistent-volume-binder + namespace: kube-system + +1. Create these in your cluster using one of the follow command. 
+ + ``` + # kubectl create -f + ``` diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity.md diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md new file mode 100644 index 0000000000..fbd72e3cea --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md @@ -0,0 +1,134 @@ +--- +title: Node Requirements for Rancher Managed Clusters +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. + +:::note + +If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.](../../../pages-for-subheaders/installation-requirements.md) + +::: + +Make sure the nodes for the Rancher server fulfill the following requirements: + +- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) +- [Optional: Security Considerations](#optional-security-considerations) + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. 
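+
+As a quick sanity check before adding a node, you can confirm its CPU architecture, OS release, and Docker version from a shell on that node. This is only an illustrative sketch; the authoritative OS and Docker combinations are the ones listed in the support maintenance terms above.
+
+```
+# Architecture check: supported nodes should report x86_64
+uname -m
+
+# OS release that will be compared against the Rancher support matrix
+cat /etc/os-release
+
+# Docker version reported by the daemon (also confirms the daemon is running)
+docker version --format '{{.Server.Version}}'
+```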
+ +If you plan to use ARM64, see [Running on ARM64 (Experimental).](../../../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md) + +For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) + +### Oracle Linux and RHEL Derived Linux Nodes + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. + +:::note + +In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: + + ``` + systemctl disable nm-cloud-setup.service nm-cloud-setup.timer + reboot + ``` + +::: + +### SUSE Linux Nodes + +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. + +### Flatcar Container Linux Nodes + +When [Launching Kubernetes with Rancher](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#rke-cluster-config-file-reference) + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: canal + options: + canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: calico + options: + calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +It is also required to enable the Docker service, you can enable the Docker service using the following command: + +``` +systemctl enable docker.service +``` + +The Docker service is enabled automatically when using [Node Drivers](../../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers). + +### Windows Nodes + +Nodes with Windows Server must run Docker Enterprise Edition. + +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows](../../../pages-for-subheaders/use-windows-clusters.md) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. + +Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. 
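+
+Once a cluster is running, one rough way to confirm that the planes ended up on separate, appropriately sized machines is to inspect node roles and allocatable resources with kubectl. This is only a sketch, and `<NODE_NAME>` is a placeholder for one of your node names.
+
+```
+# The ROLES column shows which planes (etcd, controlplane, worker) run on each node
+kubectl get nodes -o wide
+
+# Allocatable CPU and memory for a specific node (<NODE_NAME> is a placeholder)
+kubectl describe node <NODE_NAME> | grep -A 5 'Allocatable'
+```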
+ +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.](https://rancher.com/docs/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes). + +# Optional: Security Considerations + +If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. + +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.](../../../pages-for-subheaders/rancher-security.md#rancher-hardening-guide) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md new file mode 100644 index 0000000000..9cd07255c6 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md @@ -0,0 +1,305 @@ +--- +title: Registering Existing Clusters +weight: 6 +--- + +The cluster registration feature replaced the feature to import clusters. + +The control that Rancher has to manage a registered cluster depends on the type of cluster. For details, see [Management Capabilities for Registered Clusters.](#management-capabilities-for-registered-clusters) + +- [Prerequisites](#prerequisites) +- [Registering a Cluster](#registering-a-cluster) +- [Management Capabilities for Registered Clusters](#management-capabilities-for-registered-clusters) +- [Configuring K3s Cluster Upgrades](#configuring-k3s-cluster-upgrades) +- [Debug Logging and Troubleshooting for Registered K3s Clusters](#debug-logging-and-troubleshooting-for-registered-k3s-clusters) +- [Authorized Cluster Endpoint Support for RKE2 and K3s Clusters](#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters) +- [Annotating Registered Clusters](#annotating-registered-clusters) + +# Prerequisites + +### Kubernetes Node Roles + +Registered RKE Kubernetes clusters must have all three node roles - etcd, controlplane and worker. A cluster with only controlplane components cannot be registered in Rancher. 
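+
+For example, before attempting registration you can list nodes by role label to confirm that each role is present. The `node-role.kubernetes.io/*=true` labels shown here are the ones RKE normally applies; adjust them if your cluster labels its nodes differently.
+
+```
+# Each command should return at least one node before the cluster is registered
+kubectl get nodes -l node-role.kubernetes.io/etcd=true
+kubectl get nodes -l node-role.kubernetes.io/controlplane=true
+kubectl get nodes -l node-role.kubernetes.io/worker=true
+```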
+ +For more information on RKE node roles, see the [best practices.](../../../pages-for-subheaders/checklist-for-production-ready-clusters.md#cluster-architecture) + +### Permissions + +If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. + +In order to apply the privilege, you need to run: + +```plain +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user [USER_ACCOUNT] +``` + +before running the `kubectl` command to register the cluster. + +By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). + +If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) + +### EKS Clusters + +EKS clusters must have at least one managed node group to be imported into Rancher or provisioned from Rancher successfully. + +# Registering a Cluster + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, **Import Existing**. +1. Choose the type of cluster. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. If you are importing a generic Kubernetes cluster in Rancher, perform the following steps for setup:
+ a. Click **Agent Environment Variables** under **Cluster Options** to set environment variables for the [Rancher cluster agent](launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables are set as key-value pairs. If the Rancher agent needs a proxy to communicate with the Rancher server, set the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` variables here.
+ b. Enable Project Network Isolation to ensure the cluster supports Kubernetes `NetworkPolicy` resources. Users can select the **Project Network Isolation** option under the **Advanced Options** dropdown to do so. +1. Click **Create**. +1. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. +1. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. +1. If you are using self-signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. +1. When you finish running the command(s) on your node, click **Done**. + +**Result:** + +- Your cluster is registered and assigned a state of **Pending**. Rancher is deploying resources to manage your cluster. +- You can access your cluster after its state is updated to **Active**. +- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). + + +:::note + +You can not re-register a cluster that is currently active in a Rancher setup. + +::: + +### Configuring a K3s Cluster to Enable Registration in Rancher + +The K3s server needs to be configured to allow writing to the kubeconfig file. + +This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: + +``` +$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 +``` + +The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: + +``` +$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - +``` + +### Configuring an Imported EKS Cluster with Terraform + +You should define **only** the minimum fields that Rancher requires when importing an EKS cluster with Terraform. This is important as Rancher will overwrite what was in the EKS cluster with any config that the user has provided. + +::caution + +Even a small difference between the current EKS cluster and a user-provided config could have unexpected results. + +::: + +The minimum config fields required by Rancher to import EKS clusters with Terraform using `eks_config_v2` are as follows: + +- cloud_credential_id +- name +- region +- imported (this field should always be set to `true` for imported clusters) + +Example YAML configuration for imported EKS clusters: + +``` +resource "rancher2_cluster" "my-eks-to-import" { + name = "my-eks-to-import" + description = "Terraform EKS Cluster" + eks_config_v2 { + cloud_credential_id = rancher2_cloud_credential.aws.id + name = var.aws_eks_name + region = var.aws_region + imported = true + } +} +``` + +# Management Capabilities for Registered Clusters + +The control that Rancher has to manage a registered cluster depends on the type of cluster. 
+ +- [Features for All Registered Clusters](#2-5-8-features-for-all-registered-clusters) +- [Additional Features for Registered K3s Clusters](#2-5-8-additional-features-for-registered-k3s-clusters) +- [Additional Features for Registered EKS and GKE Clusters](#additional-features-for-registered-eks-and-gke-clusters) + +### Features for All Registered Clusters + +After registering a cluster, the cluster owner can: + +- [Manage cluster access](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) through role-based access control +- Enable [monitoring, alerts and notifiers](../../../pages-for-subheaders/monitoring-and-alerting.md) +- Enable [logging](../../../pages-for-subheaders/logging.md) +- Enable [Istio](../../../pages-for-subheaders/istio.md) +- Use [pipelines](../../advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- Manage projects and workloads + +### Additional Features for Registered K3s Clusters + +[K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. + +When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: + +- The ability to [upgrade the K3s version](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- The ability to configure the maximum number of nodes that will be upgraded concurrently +- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster + +### Additional Features for Registered EKS and GKE Clusters + +Registering an Amazon EKS cluster or GKE cluster allows Rancher to treat it as though it were created in Rancher. + +Amazon EKS clusters and GKE clusters can now be registered in Rancher. For the most part, these registered clusters are treated the same way as clusters created in the Rancher UI, except for deletion. + +When you delete an EKS cluster or GKE cluster that was created in Rancher, the cluster is destroyed. When you delete a cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +The capabilities for registered clusters are listed in the table on [this page.](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) + +# Configuring K3s Cluster Upgrades + +:::tip + +It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. + +::: + +The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. 
+ +- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes +- **Worker concurrency:** The maximum number worker nodes to upgrade at the same time; also the maximum unavailable worker nodes + +In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. + +Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. + +# Debug Logging and Troubleshooting for Registered K3s Clusters + +Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. + +To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. + +Logs created by the `system-upgrade-controller` can be viewed by running this command: + +``` +kubectl logs -n cattle-system system-upgrade-controller +``` + +The current status of the plans can be viewed with this command: + +``` +kubectl get plans -A -o yaml +``` + +If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. + +To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. + +# Authorized Cluster Endpoint Support for RKE2 and K3s Clusters + +_Available as of v2.6.3_ + +Authorized Cluster Endpoint (ACE) support has been added for registered RKE2 and K3s clusters. This support includes manual steps you will perform on the downstream cluster to enable the ACE. For additional information on the authorized cluster endpoint, click [here](../../advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md). + +:::note Notes: + +- These steps only need to be performed on the control plane nodes of the downstream cluster. You must configure each control plane node individually. + +- The following steps will work on both RKE2 and K3s clusters registered in v2.6.x as well as those registered (or imported) from a previous version of Rancher with an upgrade to v2.6.x. + +- These steps will alter the configuration of the downstream RKE2 and K3s clusters and deploy the `kube-api-authn-webhook`. If a future implementation of the ACE requires an update to the `kube-api-authn-webhook`, then this would also have to be done manually. For more information on this webhook, click [here](../../advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md#about-the-kube-api-auth-authentication-webhook). + +::: + +###### **Manual steps to be taken on the control plane of each downstream cluster to enable ACE:** + +1. 
Create a file at `/var/lib/rancher/{rke2,k3s}/kube-api-authn-webhook.yaml` with the following contents: + + apiVersion: v1 + kind: Config + clusters: + - name: Default + cluster: + insecure-skip-tls-verify: true + server: http://127.0.0.1:6440/v1/authenticate + users: + - name: Default + user: + insecure-skip-tls-verify: true + current-context: webhook + contexts: + - name: webhook + context: + user: Default + cluster: Default + +1. Add the following to the config file (or create one if it doesn’t exist); note that the default location is `/etc/rancher/{rke2,k3s}/config.yaml`: + + kube-apiserver-arg: + - authentication-token-webhook-config-file=/var/lib/rancher/{rke2,k3s}/kube-api-authn-webhook.yaml + +1. Run the following commands: + + sudo systemctl stop {rke2,k3s}-server + sudo systemctl start {rke2,k3s}-server + +1. Finally, you **must** go back to the Rancher UI and edit the imported cluster there to complete the ACE enablement. Click on **⋮ > Edit Config**, then click the **Networking** tab under Cluster Configuration. Finally, click the **Enabled** button for **Authorized Endpoint**. Once the ACE is enabled, you then have the option of entering a fully qualified domain name (FQDN) and certificate information. + + :::note + + The FQDN field is optional, and if one is entered, it should point to the downstream cluster. Certificate information is only needed if there is a load balancer in front of the downstream cluster that is using an untrusted certificate. If you have a valid certificate, then nothing needs to be added to the CA Certificates field. + + ::: + +# Annotating Registered Clusters + +For all types of registered Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. + +Therefore, when Rancher registers a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the registered cluster. + +However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. + +By annotating a registered cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. + +This example annotation indicates that a pod security policy is enabled: + +``` +"capabilities.cattle.io/pspEnabled": "true" +``` + +The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. + +``` +"capabilities.cattle.io/ingressCapabilities": "[ + { + "customDefaultBackend":true, + "ingressProvider":"asdf" + } +]" +``` + +These capabilities can be annotated for the cluster: + +- `ingressCapabilities` +- `loadBalancerCapabilities` +- `nodePoolScalingSupported` +- `nodePortRange` +- `pspEnabled` +- `taintSupport` + +All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. + +To annotate a registered cluster, + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the custom cluster you want to annotate and click **⋮ > Edit Config**. +1. 
Expand the **Labels & Annotations** section. +1. Click **Add Annotation**. +1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. +1. Click **Save**. + +**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md new file mode 100644 index 0000000000..158193312b --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md @@ -0,0 +1,165 @@ +--- +title: Creating an AKS Cluster +shortTitle: Azure Kubernetes Service +weight: 2115 +--- + +You can use Rancher to create a cluster hosted in Microsoft Azure Kubernetes Service (AKS). + +- [Prerequisites in Microsoft Azure](#prerequisites-in-microsoft-azure) +- [Setting Up the Service Principal with the Azure Command Line Tool](#setting-up-the-service-principal-with-the-azure-command-line-tool) + - [Setting Up the Service Principal from the Azure Portal](#setting-up-the-service-principal-from-the-azure-portal) +- [1. Create the AKS Cloud Credentials](#1-create-the-aks-cloud-credentials) +- [2. Create the AKS Cluster](#2-create-the-aks-cluster) +- [Role-based Access Control](#role-based-access-control) +- [AKS Cluster Configuration Reference](#aks-cluster-configuration-reference) +- [Private Clusters](#private-clusters) +- [Syncing](#syncing) +- [Programmatically Creating AKS Clusters](#programmatically-creating-aks-clusters) + +# Prerequisites in Microsoft Azure + +:::caution + +Deploying to AKS will incur charges. + +::: + +To interact with Azure APIs, an AKS cluster requires an Azure Active Directory (AD) service principal. The service principal is needed to dynamically create and manage other Azure resources, and it provides credentials for your cluster to communicate with AKS. For more information about the service principal, refer to the [AKS documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal). + +Before creating the service principal, you need to obtain the following information from the [Microsoft Azure Portal](https://portal.azure.com): + +- Subscription ID +- Client ID +- Client secret + +The below sections describe how to set up these prerequisites using either the Azure command line tool or the Azure portal. + +### Setting Up the Service Principal with the Azure Command Line Tool + +You can create the service principal by running this command: + +``` +az ad sp create-for-rbac --skip-assignment +``` + +The result should show information about the new service principal: +``` +{ + "appId": "xxxx--xxx", + "displayName": "", + "name": "http://", + "password": "", + "tenant": "" +} +``` + +You also need to add roles to the service principal so that it has privileges for communication with the AKS API. It also needs access to create and list virtual networks. + +Below is an example command for assigning the Contributor role to a service principal. 
Contributors can manage anything on AKS but cannot give access to others: + +``` +az role assignment create \ + --assignee $appId \ + --scope /subscriptions/$/resourceGroups/$ \ + --role Contributor +``` + +You can also create the service principal and give it Contributor privileges by combining the two commands into one. In this command, the scope needs to provide a full path to an Azure resource: + +``` +az ad sp create-for-rbac \ + --scope /subscriptions/$/resourceGroups/$ \ + --role Contributor +``` + +### Setting Up the Service Principal from the Azure Portal + +You can also follow these instructions to set up a service principal and give it role-based access from the Azure Portal. + +1. Go to the Microsoft Azure Portal [home page](https://portal.azure.com). + +1. Click **Azure Active Directory**. +1. Click **App registrations**. +1. Click **New registration**. +1. Enter a name. This will be the name of your service principal. +1. Optional: Choose which accounts can use the service principal. +1. Click **Register**. +1. You should now see the name of your service principal under **Azure Active Directory > App registrations**. +1. Click the name of your service principal. Take note of the application ID (also called app ID or client ID) so that you can use it when provisioning your AKS cluster. Then click **Certificates & secrets**. +1. Click **New client secret**. +1. Enter a short description, pick an expiration time, and click **Add**. Take note of the client secret so that you can use it when provisioning the AKS cluster. + +**Result:** You have created a service principal and you should be able to see it listed in the **Azure Active Directory** section under **App registrations**. You still need to give the service principal access to AKS. + +To give role-based access to your service principal, + +1. Click **All Services** in the left navigation bar. Then click **Subscriptions**. +1. Click the name of the subscription that you want to associate with your Kubernetes cluster. Take note of the subscription ID so that you can use it when provisioning your AKS cluster. +1. Click **Access Control (IAM)**. +1. In the **Add role assignment** section, click **Add**. +1. In the **Role** field, select a role that will have access to AKS. For example, you can use the **Contributor** role, which has permission to manage everything except for giving access to other users. +1. In the **Assign access to** field, select **Azure AD user, group, or service principal**. +1. In the **Select** field, select the name of your service principal and click **Save**. + +**Result:** Your service principal now has access to AKS. + +# 1. Create the AKS Cloud Credentials + +1. In the Rancher UI, click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Azure**. +1. Fill out the form. For help with filling out the form, see the [configuration reference.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md#cloud-credentials) +1. Click **Create**. + +# 2. Create the AKS Cluster + +Use Rancher to set up and configure your Kubernetes cluster. + +1. Click **☰ > Cluster Management**. +1. In the **Clusters** section, click **Create**. +1. Click **Azure AKS**. +1. Fill out the form. For help with filling out the form, see the [configuration reference.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md) +1. Click **Create**. 
+ +**Result:** Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +# Role-based Access Control +When provisioning an AKS cluster in the Rancher UI, RBAC is not configurable because it is required to be enabled. + +RBAC is required for AKS clusters that are registered or imported into Rancher. + +# AKS Cluster Configuration Reference + +For more information about how to configure AKS clusters from the Rancher UI, see the [configuration reference.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md) + +# Private Clusters + +Typically, AKS worker nodes do not get public IPs, regardless of whether the cluster is private. In a private cluster, the control plane does not have a public endpoint. + +Rancher can connect to a private AKS cluster in one of two ways. + +The first way to ensure that Rancher is running on the same [NAT](https://docs.microsoft.com/en-us/azure/virtual-network/nat-overview) as the AKS nodes. + +The second way is to run a command to register the cluster with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster’s Kubernetes API. This command is displayed in a pop-up when you provision an AKS cluster with a private API endpoint enabled. + +:::note + +Please be aware that when registering an existing AKS cluster, the cluster might take some time, possibly hours, to appear in the `Cluster To register` dropdown list. This outcome will be based on region. + +::: + +For more information about connecting to an AKS private cluster, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/private-clusters#options-for-connecting-to-the-private-cluster) + +# Syncing + +The AKS provisioner can synchronize the state of an AKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md) + +For information on configuring the refresh interval, see [this section.](../../../../pages-for-subheaders/gke-cluster-configuration.md#configuring-the-refresh-interval) + +# Programmatically Creating AKS Clusters + +The most common way to programmatically deploy AKS clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md new file mode 100644 index 0000000000..1207351297 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md @@ -0,0 +1,61 @@ +--- +title: Creating an Aliyun ACK Cluster +shortTitle: Alibaba Cloud Container Service for Kubernetes +weight: 2120 +--- + +You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). 
Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. + +# Prerequisites Outside of Rancher + +:::caution + +Deploying to ACK will incur charges. + +::: + +1. In Aliyun, activate the following services in their respective consoles. + + - [Container Service](https://cs.console.aliyun.com) + - [Resource Orchestration Service](https://ros.console.aliyun.com) + - [RAM](https://ram.console.aliyun.com) + +2. Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Referring to the official Alibaba Cloud documentation about [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. + +3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). + +4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. + +# Prerequisite in Rancher + +You will need to enable the Alibaba ACK cluster driver: + +1. Click **☰ > Cluster Management**. +1. Click **Drivers**. +1. In the **Cluster Drivers** tab, go to the **Alibaba ACK** cluster driver and click **⋮ > Activate**. + +When the cluster driver is finished downloading, you will be able to create Alibaba ACK clusters in Rancher. + +# Create an ACK Cluster + +1. Click **☰ > Cluster Management**. +1. From the **Clusters** page, click **Create**. +1. Choose **Alibaba ACK**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. +1. Click **Next: Configure Cluster**, then choose cluster type, the version of Kubernetes and the availability zone. +1. If you choose **Kubernetes** as the cluster type, Click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. +1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. 
+ +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md new file mode 100644 index 0000000000..8538353055 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md @@ -0,0 +1,110 @@ +--- +title: Managing GKE Clusters +shortTitle: Google Kubernetes Engine +weight: 2105 +--- + +- [Prerequisites](#prerequisites) +- [Provisioning a GKE Cluster](#provisioning-a-gke-cluster) +- [Private Clusters](#private-clusters) +- [Configuration Reference](#configuration-reference) +- [Updating Kubernetes Version](#updating-kubernetes-version) +- [Syncing](#syncing) +- [Programmatically Creating GKE Clusters](#programmatically-creating-gke-clusters) + +# Prerequisites + +Some setup in Google Kubernetes Engine is required. + +### Service Account Token + +Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. + +The service account requires the following roles: + +- **Compute Viewer:** `roles/compute.viewer` +- **Project Viewer:** `roles/viewer` +- **Kubernetes Engine Admin:** `roles/container.admin` +- **Service Account User:** `roles/iam.serviceAccountUser` + +[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + +For help obtaining a private key for your service account, refer to the Google cloud documentation [here.](https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys) You will need to save the key in JSON format. + +### Google Project ID + +Your cluster will need to be part of a Google Project. + +To create a new project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) + +To get the project ID of an existing project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) + +# Provisioning a GKE Cluster + +:::caution + +Deploying to GKE will incur charges. + +::: + +### 1. Create a Cloud Credential + +1. Click **☰ > Cluster Management**. +1. In the left navigation bar, click **Cloud Credentials**. +1. Click **Create**. +1. Enter a name for your Google cloud credentials. +1. In the **Service Account** text box, paste your service account private key JSON, or upload the JSON file. +1. Click **Create**. + +**Result:** You have created credentials that Rancher will use to provision the new GKE cluster. + +### 2. Create the GKE Cluster +Use Rancher to set up and configure your Kubernetes cluster. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. 
Click **Google GKE**. +1. Enter a **Cluster Name**. +1. Optional: Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Optional: Add Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to the cluster. +1. Enter your Google project ID and your Google cloud credentials. +1. Fill out the rest of the form. For help, refer to the [GKE cluster configuration reference.](../../../../pages-for-subheaders/gke-cluster-configuration.md) +1. Click **Create**. + +**Result:** You have successfully deployed a GKE cluster. + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# Private Clusters + +Private GKE clusters are supported. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md) + +# Configuration Reference + +For details on configuring GKE clusters in Rancher, see [this page.](../../../../pages-for-subheaders/gke-cluster-configuration.md) +# Updating Kubernetes Version + +The Kubernetes version of a cluster can be upgraded to any version available in the region or zone fo the GKE cluster. Upgrading the master Kubernetes version does not automatically upgrade worker nodes. Nodes can be upgraded independently. + +:::note + +GKE has removed basic authentication in 1.19+. In order to upgrade a cluster to 1.19+, basic authentication must be disabled in the Google Cloud. Otherwise, an error will appear in Rancher when an upgrade to 1.19+ is attempted. You can follow the [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication#disabling_authentication_with_a_static_password). After this, the Kubernetes version can be updated to 1.19+ via Rancher. + +::: + +# Syncing + +The GKE provisioner can synchronize the state of a GKE cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md) + +For information on configuring the refresh interval, see [this section.](../../../../pages-for-subheaders/gke-cluster-configuration.md#configuring-the-refresh-interval) + +# Programmatically Creating GKE Clusters + +The most common way to programmatically deploy GKE clusters through Rancher is by using the Rancher2 Terraform provider. 
The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md new file mode 100644 index 0000000000..bc3be862e7 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md @@ -0,0 +1,102 @@ +--- +title: Creating a Huawei CCE Cluster +shortTitle: Huawei Cloud Kubernetes Service +weight: 2130 +--- + +You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. + +# Prerequisites in Huawei + +:::caution + +Deploying to CCE will incur charges. + +::: + +1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). + +2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). + +# Prerequisite in Rancher + +You will need to enable the Huawei CCE cluster driver: + +1. Click **☰ > Cluster Management**. +1. Click **Drivers**. +1. In the **Cluster Drivers** tab, go to the **Huawei CCE** cluster driver and click **⋮ > Activate**. + +When the cluster driver is finished downloading, you will be able to create Huawei CCE clusters in Rancher. + +# Limitations + +Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. + +# Create the CCE Cluster + +1. From the **Clusters** page, click **Create**. +1. Click **Huawei CCE**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key **Secret Key**. Then Click **Next: Configure cluster**. Fill in the cluster configuration. For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) +1. Fill the following node configuration of the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) +1. Click **Create** to create the CCE cluster. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. 
+ +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# Huawei CCE Configuration + +|Settings|Description| +|---|---| +| Cluster Type | The type of node you want to include in the cluster, `VirtualMachine` or `BareMetal`. | +| Description | The description of the cluster. | +| Master Version | The Kubernetes version. | +| Management Scale Count | The maximum node count of the cluster. The options are 50, 200, and 1000. The larger the scale count, the higher the cost. | +| High Availability | Enable master node high availability. A cluster with high availability enabled costs more. | +| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` are supported in the `VirtualMachine` type, and `underlay_ipvlan` is supported in the `BareMetal` type. | +| Container Network CIDR | Network CIDR for the cluster. | +| VPC Name | The name of the VPC that the cluster will be deployed into. Rancher will create one if it is blank. | +| Subnet Name | The name of the subnet that the cluster will be deployed into. Rancher will create one if it is blank. | +| External Server | This option is reserved for a future release in which CCE cluster public access can be enabled via the API. For now, it is always disabled. | +| Cluster Label | The labels for the cluster. | +| Highway Subnet | This option is only supported in the `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | + +:::note + +If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher) + +::: + +# Node Configuration + +|Settings|Description| +|---|---| +| Zone | The availability zone where the cluster node(s) are deployed. | +| Billing Mode | The billing mode for the cluster node(s). In the `VirtualMachine` type, only `Pay-per-use` is supported. In the `BareMetal` type, you can choose `Pay-per-use` or `Yearly/Monthly`. | +| Validity Period | This option only shows in `Yearly/Monthly` billing mode. It sets how long you want to pay for the cluster node(s). | +| Auto Renew | This option only shows in `Yearly/Monthly` billing mode. It determines whether the cluster node(s) renew the `Yearly/Monthly` payment automatically. | +| Data Volume Type | Data volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | +| Data Volume Size | Data volume size for the cluster node(s). | +| Root Volume Type | Root volume type for the cluster node(s). The options are `SATA`, `SSD`, or `SAS`. | +| Root Volume Size | Root volume size for the cluster node(s). | +| Node Flavor | The node flavor of the cluster node(s). The flavor list in the Rancher UI is fetched from Huawei Cloud. It includes all the supported node flavors. | +| Node Count | The node count of the cluster. | +| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | +| SSH Key Name | The SSH key for the cluster node(s). | +| EIP | The public IP options for the cluster node(s).
`Disabled` means that the cluster node(s) are not going to bind a public IP. `Create EIP` means that the cluster node(s) will bind one or more newly created EIPs after provisioning, and more options will be shown in the UI to set the parameters of the EIPs to create. `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | +| EIP Count | This option will only be shown when `Create EIP` is selected. It sets how many EIPs you want to create for the node(s). | +| EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | +| EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | +| EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | +| EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The bandwidth of the EIPs. | +| Authentication Mode | Choose whether to enable `RBAC` only or to also enable `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate used for the authenticating proxy is also required. | +| Node Label | The labels for the cluster node(s). Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) | \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md new file mode 100644 index 0000000000..61945215fc --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md @@ -0,0 +1,101 @@ +--- +title: Creating a Tencent TKE Cluster +shortTitle: Tencent Kubernetes Engine +weight: 2125 +--- + +You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. + +# Prerequisites in Tencent + +:::caution + +Deploying to TKE will incur charges. + +::: + +1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. + +2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). + +3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region where you want to deploy your Kubernetes cluster. + +4.
Create an [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. + +# Prerequisite in Rancher + +You will need to enable the Tencent TKE cluster driver: + +1. Click **☰ > Cluster Management**. +1. Click **Drivers**. +1. In the **Cluster Drivers** tab, go to the **Tencent TKE** cluster driver and click **⋮ > Activate**. + +When the cluster driver is finished downloading, you will be able to create Tencent TKE clusters in Rancher. + +# Create a TKE Cluster + +1. From the **Clusters** page, click **Create**. + +2. Choose **Tencent TKE**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). + +    | Option | Description | +    | ---------- | -------------------------------------------------------------------------------------------------------------------- | +    | Region | From the drop-down, choose the geographical region in which to build your cluster. | +    | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | +    | Secret Key | Enter the Secret Key that you obtained from the Tencent Cloud Console. | + +6. Click `Next: Configure Cluster` to set your TKE cluster configurations. + +    | Option | Description | +    | ---------- | -------------------------------------------------------------------------------------------------------------------- | +    | Kubernetes Version | TKE currently supports only Kubernetes version 1.10.5. | +    | Node Count | Enter the number of worker nodes you want to purchase for your Kubernetes cluster, up to 100. | +    | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | +    | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster. You may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Defaults to 172.16.0.0/16. | + +    :::note + +    If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher) + +    ::: + +7. Click `Next: Select Instance Type` to choose the instance type that will be used for your TKE cluster. + +    | Option | Description | +    | ---------- | -------------------------------------------------------------------------------------------------------------------- | +    | Availability Zone | Choose the availability zone of the VPC region. | +    | Subnet | Select the subnet that you have created within the VPC, or add a new one if you don't have one in the chosen availability zone. | +    | Instance Type | From the drop-down, choose the VM instance type that you want to use for the TKE cluster. Defaults to S2.MEDIUM4 (CPU 2, Memory 4 GiB). | + +8. Click `Next: Configure Instance` to configure the VM instances that will be used for your TKE cluster.
+ + Option | Description + -------|------------ + Operating System | The name of the operating system. Currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64. + Security Group | Security group ID. By default, no security groups are bound. + Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). + Root Disk Size | System disk size. The adjustment range for Linux systems is 20 - 50G, with a step size of 1. + Data Disk Type | Data disk type. Defaults to the SSD cloud drive. + Data Disk Size | Data disk size (GB). The step size is 10. + Band Width Type | Type of bandwidth: PayByTraffic or PayByHour. + Band Width | Public network bandwidth (Mbps). + Key Pair | Key ID. After it is associated, the key can be used to log in to the VM node. + +9. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md new file mode 100644 index 0000000000..a79e40f2f3 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md @@ -0,0 +1,36 @@ +--- +title: ConfigMaps +weight: 3061 +--- + +While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following an update (unlike most secret types, which require manual updates and a container restart to take effect). + +ConfigMaps accept key-value pairs in common string formats, like config files or JSON blobs. After you upload a ConfigMap, any workload can reference it as either an environment variable or a volume mount. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that has the workload that should reference a ConfigMap and click **Explore**. +1. In the left navigation bar, click **More Resources > Core > ConfigMaps**. +1. Click **Create**. +1. Enter a **Name** for the ConfigMap. + +    :::note + +    Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. + +    ::: + +1. Select the **Namespace** you want to add the ConfigMap to. + +1. On the **Data** tab, add a key-value pair to your ConfigMap. Add as many values as you need. You can add multiple key-value pairs to the ConfigMap by copying and pasting. Alternatively, use **Read from File** to add the data (a minimal equivalent manifest is sketched after these steps). Note: If you need to store sensitive data, [use a secret](secrets.md), not a ConfigMap. +1. Click **Create**.
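If you prefer to create the same object with `kubectl`, a ConfigMap manifest equivalent to the steps above looks roughly like the following sketch. The name, namespace, and keys shown here are illustrative placeholders, not values required by Rancher.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config          # hypothetical name entered in the Name field
  namespace: default        # the namespace selected for the ConfigMap
data:
  # Plain key-value pairs, equivalent to the entries added on the Data tab
  log_level: "info"
  app.properties: |
    greeting=hello
    retries=3
```

Applying a file like this with `kubectl apply -f <file>` produces the same result as the UI steps, and any workload in that namespace can then reference the ConfigMap as an environment variable source or a volume mount.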
+ +**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. + +## What's Next? + +Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for your application to consume, such as: + +- Application environment variables. +- Parameters for a volume mounted to the workload. + +For more information on adding ConfigMaps to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). diff --git a/content/rancher/v2.6/en/k8s-in-rancher/service-discovery/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/service-discovery/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md new file mode 100644 index 0000000000..09a679d184 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md @@ -0,0 +1,39 @@ +--- +title: Encrypting HTTP Communication +description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate +weight: 3060 +--- + +When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by adding the certificate to the ingress deployment. + +:::note Prerequisite: + +You must have a TLS private key and certificate available to upload. + +::: + +### 1. Create a Secret + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to deploy your ingress and click **More Resources > Core > Secrets**. +1. Click **Create**. +1. Click **TLS Certificate**. +1. Enter a name for the secret. Note: Your secret must have a unique name among the other certificates, registries, and secrets within your project/workspace. +1. In the **Private Key** field, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce the likelihood of error. Note: Private key files end with an extension of `.key`. +1. In the **Certificate** field, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce the likelihood of error. Note: Certificate files end with an extension of `.crt`. +1. Click **Create**. + +### 2. Add the Secret to an Ingress + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to deploy your ingress and click **Service Discovery > Ingresses**. +1. Click **Create**. +1. Select the **Namespace** of the ingress. +1. Enter a **Name** for the ingress. +1. In the **Certificates** tab, select the secret containing your certificate and private key. +1. Click **Create**. + +## What's Next?
+ +Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress](load-balancer-and-ingress-controller/add-ingresses.md). diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md new file mode 100644 index 0000000000..66c67de9ef --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md @@ -0,0 +1,166 @@ +--- +title: Kubernetes Registry and Docker Registry +description: Learn about the Docker registry and Kubernetes registry, their use cases and how to use a private registry with the Rancher UI +weight: 3063 +--- +Registries are Kubernetes secrets containing credentials used to authenticate with [private Docker registries](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). + +The word "registry" can mean two things, depending on whether it is used to refer to a Docker or Kubernetes registry: + +- A **Docker registry** contains Docker images that you can pull in order to use them in your deployment. The registry is a stateless, scalable server side application that stores and lets you distribute Docker images. +- The **Kubernetes registry** is an image pull secret that your deployment uses to authenticate with a Docker registry. + +Deployments use the Kubernetes registry secret to authenticate with a private Docker registry and then pull a Docker image hosted on it. 
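Under the hood, the "Kubernetes registry" described above is a secret of type `kubernetes.io/dockerconfigjson`. A minimal sketch of such a secret is shown below; the secret name, namespace, and credential value are placeholders, and the Rancher UI steps in the following sections create an equivalent object for you.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-registry            # hypothetical secret name
  namespace: default           # namespace the workload will run in
type: kubernetes.io/dockerconfigjson
data:
  # Base64-encoded Docker config JSON holding the registry credentials,
  # for example the output of: base64 -w0 ~/.docker/config.json
  .dockerconfigjson: <base64-encoded Docker config JSON>
```

A workload then references the secret by name under `imagePullSecrets`, as shown in the `pod.yml` example later on this page.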
+ +Currently, deployments pull the private registry credentials automatically only if the workload is created in the Rancher UI and not when it is created via kubectl. + +# Creating a Registry in Namespaces + +:::note Prerequisite: + +You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. + +::: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a registry and click **Explore**. +1. In the left navigation, click either **Storage > Secrets** or **More Resources > Core > Secrets**. +1. Click **Create**. +1. Click **Registry**. +1. Enter a **Name** for the registry. + + :::note + + Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your registry must have a unique name among all secrets within your workspace. + + ::: + +1. Select a namespace for the registry. +1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password. +1. Click **Save**. + +**Result:** + +- Your secret is added to the namespace you chose. +- You can view the secret in the Rancher UI by clicking either **Storage > Secrets** or **More Resources > Core > Secrets**. +- Any workload that you create in the Rancher UI will have the credentials to access the registry if the workload is within the registry's scope. + +# Creating a Registry in Projects + +:::note Prerequisites: + +You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. + +::: + +Before v2.6, secrets were required to be in a project scope. Projects are no longer required, and you may use the namespace scope instead. As a result, the Rancher UI was updated to reflect this new functionality. However, you may still create a project-scoped registry if desired. Use the following steps to do so: + +1. In the upper left corner, click **☰ > Global Settings** in the dropdown. +1. Click **Feature Flags**. +1. Go to the `legacy` feature flag and click **Activate**. +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a registry and click **Explore**. +1. In the left navigation, click either **Storage > Secrets** or **More Resources > Core > Secrets**. +1. Click **Create**. +1. Click **Registry**. +1. In the top navigation bar, filter to see only one project. +1. Enter a **Name** for the registry. + + :::note + + Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your registry must have a unique name among all secrets within your workspace. + + ::: + +1. Select a namespace for the registry. +1. Select the website that hosts your private registry. Then enter credentials that authenticate with the registry. For example, if you use DockerHub, provide your DockerHub username and password. +1. Click **Save**. + +**Result:** + +- Your secret is added to the individual project you chose. 
+- You can view the secret in the Rancher UI by clicking either **Storage > Secrets** or **More Resources > Core > Secrets**. +- Any workload that you create in the Rancher UI will have the credentials to access the registry if the workload is within the registry's scope. + +:::note + +Project-scoped registries on the local cluster are only visible when a single project is selected. + +::: + +# Using a Private Registry + +You can deploy a workload with an image from a private registry through the Rancher UI, or with `kubectl`. + +### Using the Private Registry with the Rancher UI + +To deploy a workload with an image from your private registry, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to deploy a workload and click **Explore**. +1. Click **Workload**. +1. Click **Create**. +1. Select the type of workload you want to create. +1. Enter a unique name for the workload and choose a namespace. +1. In the **Container Image** field, enter the URL of the path to the image in your private registry. For example, if your private registry is on Quay.io, you could use `quay.io//`. +1. Click **Create**. + +**Result:** Your deployment should launch, authenticate using the private registry credentials you added in the Rancher UI, and pull the Docker image that you specified. + +### Using the Private Registry with kubectl + +When you create the workload using `kubectl`, you need to configure the pod so that its YAML has the path to the image in the private registry. You also have to create and reference the registry secret because the pod only automatically gets access to the private registry credentials if it is created in the Rancher UI. + +The secret has to be created in the same namespace where the workload gets deployed. + +Below is an example `pod.yml` for a workload that uses an image from a private registry. In this example, the pod uses an image from Quay.io, and the .yml specifies the path to the image. The pod authenticates with the registry using credentials stored in a Kubernetes secret called `testquay`, which is specified in `spec.imagePullSecrets` in the `name` field: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: private-reg +spec: + containers: + - name: private-reg-container + image: quay.io// + imagePullSecrets: + - name: testquay +``` + +In this example, the secret named `testquay` is in the default namespace. + +You can use `kubectl` to create the secret with the private registry credentials. 
This command creates the secret named `testquay`: + +``` +kubectl create secret docker-registry testquay \ + --docker-server=quay.io \ + --docker-username= \ + --docker-password= +``` + +To see how the secret is stored in Kubernetes, you can use this command: + +``` +kubectl get secret testquay --output="jsonpath={.data.\.dockerconfigjson}" | base64 --decode +``` + +The result looks like this: + +``` +{"auths":{"quay.io":{"username":"","password":"","auth":"c291bXlhbGo6dGVzdGFiYzEyMw=="}}} +``` + +After the workload is deployed, you can check if the image was pulled successfully: + +``` +kubectl get events +``` +The result should look like this: +``` +14s Normal Scheduled Pod Successfully assigned default/private-reg2 to minikube +11s Normal Pulling Pod pulling image "quay.io//" +10s Normal Pulled Pod Successfully pulled image "quay.io//" +``` + +For more information, refer to the Kubernetes documentation on [creating a pod that uses your secret.](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) diff --git a/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/ingress/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md new file mode 100644 index 0000000000..68af8c466c --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration.md @@ -0,0 +1,51 @@ +--- +title: Ingress Configuration +description: Ingress configuration +weight: 9999 +--- + +- [NGINX Ingress controller changes in Kubernetes v1.21](#nginx-ingress-controller-changes-in-Kubernetes-v1-21) +- [Automatically generate a xip.io hostname](#automatically-generate-a-xip-io-hostname) +- [Specify a hostname to use](#specify-a-hostname-to-use) +- [Use as the default backend](#use-as-the-default-backend) +- [Certificates](#certificates) +- [Labels and Annotations](#labels-and-annotations) + +### NGINX Ingress controller changes in Kubernetes v1.21 + +For Kubernetes v1.21 and up, the NGINX Ingress controller no longer runs in hostNetwork but uses hostPorts for port 80 and port 443. This was done so the admission webhook can be configured to be accessed using ClusterIP so it can only be reached inside the cluster. + +# Ingress Rule Configuration + +- [Specify a hostname to use](#specify-a-hostname-to-use) +- [Use as the default backend](#use-as-the-default-backend) +- [Certificates](#certificates) +- [Labels and Annotations](#labels-and-annotations) + +### Specify a hostname to use + +If you use this option, ingress routes requests for a hostname to the service or workload that you specify. + +1. Enter the **Request Host** that your ingress will handle request forwarding for. For example, `www.mysite.com`. +1. Add a **Target Service**. +1. 
**Optional:** If you want to specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. +1. Enter the **Port** number that each target operates on. +### Certificates + +:::note + +You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information, see [Adding SSL Certificates](../encrypt-http-communication.md). + +::: + +1. When creating an ingress, click the **Certificates** tab. +1. Click **Add Certificate**. +1. Select a **Certificate - Secret Name** from the drop-down list. +1. Enter the host that uses encrypted communication. +1. To add additional hosts that use the certificate, click **Add Hosts**. + +### Labels and Annotations + +Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. + +For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md new file mode 100644 index 0000000000..097dfc1232 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -0,0 +1,68 @@ +--- +title: "Layer 4 and Layer 7 Load Balancing" +description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" +weight: 3041 +--- +Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. + +## Layer-4 Load Balancer + +A Layer-4 load balancer (or external load balancer) forwards traffic to NodePorts. A Layer-4 load balancer allows you to forward both HTTP and TCP traffic. + +Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancers are not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. + +:::note + +It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. + +::: + +### Support for Layer-4 Load Balancing + +Support for layer-4 load balancers varies based on the underlying cloud provider.
+ +Cluster Deployment | Layer-4 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GCE cloud provider +Azure AKS | Supported by Azure cloud provider +RKE on EC2 | Supported by AWS cloud provider +RKE on DigitalOcean | Limited NGINX or third-party Ingress* +RKE on vSphere | Limited NGINX or third-party Ingress* +RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) + +## Layer-7 Load Balancer + +A Layer-7 load balancer (or ingress controller) supports host- and path-based load balancing and SSL termination. A Layer-7 load balancer only forwards HTTP and HTTPS traffic and therefore listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller. + +### Support for Layer-7 Load Balancing + +Support for layer-7 load balancers varies based on the underlying cloud provider. + +Cluster Deployment | Layer-7 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GKE cloud provider +Azure AKS | Not Supported +RKE on EC2 | Nginx Ingress Controller +RKE on DigitalOcean | Nginx Ingress Controller +RKE on vSphere | Nginx Ingress Controller +RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller + +### Host Names in Layer-7 Load Balancer + +Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. + +Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: + +1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposed by the Layer-7 load balancer. +2. Ask Rancher to generate an xip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say `a.b.c.d`, and generate a host name `..a.b.c.d.xip.io`. + +The benefit of using xip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. + +## Related Links + +- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md new file mode 100644 index 0000000000..afcf18d971 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md @@ -0,0 +1,77 @@ +--- +title: Secrets +weight: 3062 +--- + +[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key-value pairs. + +:::note + +This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.](kubernetes-and-docker-registries.md) + +::: + +When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + +# Creating Secrets in Namespaces + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a secret and click **Explore**. +1. To navigate to secrets, you may click either **Storage > Secrets** or **More Resources > Core > Secrets**. +1. Click **Create**. +1. Select the type of secret you want to create. +1. Select a **Namespace** for the secret. +1. Enter a **Name** for the secret. + +    :::note + +    Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a namespace can have duplicate names. Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. + +    ::: + +1.
From **Data**, click **Add** to add a key-value pair. Add as many values as you need. + + :::tip + + You can add multiple key value pairs to the secret by copying and pasting. + + ::: + + ![](/img/bulk-key-values.gif) + +1. Click **Save**. + +**Result:** Your secret is added to the namespace you chose. You can view the secret in the Rancher UI by clicking either **Storage > Secrets** or **More Resources > Core > Secrets**. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + + +# Creating Secrets in Projects + +Before v2.6, secrets were required to be in a project scope. Projects are no longer required, and you may use the namespace scope instead. As a result, the Rancher UI was updated to reflect this new functionality. However, you may still create project-scoped secrets if desired. Note that you have to first enable the `legacy` feature flag and look at a single project to do so. Use the following steps to set up your project-level secret: + +1. In the upper left corner, click **☰ > Global Settings** in the dropdown. +1. Click **Feature Flags**. +1. Go to the `legacy` feature flag and click **Activate**. +1. In the upper left corner, click **☰ > Cluster Management** in the dropdown. +1. Go to the cluster that you created and click **Explore.** +1. Click **Legacy > Projects**. +1. In the top navigation bar, filter to see only one project. +1. In the left navigation bar, click **Secrets**. +1. Click **Add Secret**. + +**Result:** Your secret is added to the individual project you chose. You can view the secret in the Rancher UI by clicking either **Storage > Secrets** or **More Resources > Core > Secrets**. + +:::note + +Project-scoped secrets on the local cluster are only visible when a single project is selected. + +::: + +# What's Next? + +Now that you have a secret added to a namespace, you can add it to a workload that you deploy. + +For more information on adding secret to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md new file mode 100644 index 0000000000..5086662fd3 --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md @@ -0,0 +1,35 @@ +--- +title: Adding a Sidecar +weight: 3029 +--- +A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a sidecar and click **Explore**. +1. In the left navigation bar, click **Workload**. + +1. Find the workload that you want to extend. Select **⋮ > + Add Sidecar**. + +1. Enter a **Name** for the sidecar. + +1. In the **General** section, select a sidecar type. This option determines if the sidecar container is deployed before or after the main container is deployed. + + - **Standard Container:** + + The sidecar container is deployed after the main container. 
+ + - **Init Container:** + + The sidecar container is deployed before the main container. + +1. From the **Container Image** field, enter the name of the container image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. + +1. Set the remaining options. You can read about them in [Deploying Workloads](deploy-workloads.md). + +1. Click **Launch**. + +**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. + +## Related Links + +- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md new file mode 100644 index 0000000000..ce6bb5d29f --- /dev/null +++ b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md @@ -0,0 +1,58 @@ +--- +title: Deploying Workloads +description: Read this step-by-step guide for deploying workloads. Deploy a workload to run an application in one or more containers. +weight: 3026 +--- + +Deploy a workload to run an application in one or more containers. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to deploy a workload and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Click **Create**. +1. Choose the type of workload. +1. Select the namespace where the workload will be deployed. +1. Enter a **Name** for the workload. + +1. From the **Container Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. + +1. Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. + +1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster. For more information, see [Services](../../../../pages-for-subheaders/workloads-and-pods.md#services). + +1. Configure the remaining options: + + - **Environment Variables** + + Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap](../configmaps.md). + + - **Node Scheduling** + - **Health Check** + - **Volumes** + + Use this section to add storage for your workload.
You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap](../configmaps.md). + + When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. + + - **Scaling/Upgrade Policy** + + :::note Amazon Note for Volumes: + + To mount an Amazon EBS volume: + + - In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. + + - The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster](../../kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) or [Creating a Custom Cluster](../../../../pages-for-subheaders/use-existing-nodes.md). + + ::: + +1. Click **Show Advanced Options** and configure: + + - **Command** + - **Networking** + - **Labels & Annotations** + - **Security and Host Config** + +1. Click **Launch**. + +**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. diff --git a/content/rancher/v2.6/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/workloads/rollback-workloads/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md rename to docs/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md diff --git a/docs/k8s-in-rancher/horizontal-pod-autoscaler/horizontal-pod-autoscaler.md b/docs/k8s-in-rancher/horizontal-pod-autoscaler/horizontal-pod-autoscaler.md new file mode 100644 index 0000000000..b62c5fbf10 --- /dev/null +++ b/docs/k8s-in-rancher/horizontal-pod-autoscaler/horizontal-pod-autoscaler.md @@ -0,0 +1,28 @@ +--- +title: The Horizontal Pod Autoscaler +description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment +weight: 3026 +--- + +The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. + +Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. + +You can create, manage, and delete HPAs using the Rancher UI. It only supports HPA in the `autoscaling/v2beta2` API. 
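For reference, a minimal HPA object in the `autoscaling/v2beta2` API, the version the Rancher UI manages, looks roughly like the sketch below. The workload name and the 50% CPU target are illustrative placeholders, not required values.

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world            # hypothetical HPA name
spec:
  scaleTargetRef:              # the workload this HPA scales
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50   # scale up when average CPU use exceeds 50%
```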
+ +## Managing HPAs + +The way that you manage HPAs is different based on your version of the Kubernetes API: + +- **For Kubernetes API version autoscaling/v2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. +- **For Kubernetes API version autoscaling/v2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. + +You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. + +## Testing HPAs with a Service Deployment + +You can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA**. For more information, refer to [Get HPA Metrics and Status](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui/). + +You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl](k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa/). diff --git a/docs/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background/hpa-background.md b/docs/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background/hpa-background.md new file mode 100644 index 0000000000..96f9cb5438 --- /dev/null +++ b/docs/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background/hpa-background.md @@ -0,0 +1,40 @@ +--- +title: Background Information on HPAs +weight: 3027 +--- + +The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides an explanation of how HPA works with Kubernetes. + +## Why Use Horizontal Pod Autoscaler? + +Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: + +- A minimum and maximum number of pods allowed to run, as defined by the user. +- Observed CPU/memory use, as reported in resource metrics. +- Custom metrics provided by third-party metrics applications like Prometheus, Datadog, etc. + +HPA improves your services by: + +- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. +- Increasing or decreasing performance as needed to meet service level agreements.
+ +## How HPA Works + +![HPA Schema](/img/horizontal-pod-autoscaler.jpg) + +HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: + +Flag | Default | Description | +---------|----------|----------| + `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. + `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operation. + `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. + + +For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +## Horizontal Pod Autoscaler API Objects + +HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. + +For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). diff --git a/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md b/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md new file mode 100644 index 0000000000..ddb2901c0a --- /dev/null +++ b/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md @@ -0,0 +1,205 @@ +--- +title: Managing HPAs with kubectl +weight: 3029 +--- + +This section describes HPA management with `kubectl`. This document has instructions for how to: + +- Create an HPA +- Get information on HPAs +- Delete an HPA +- Configure your HPAs to scale with CPU or memory utilization +- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics + + +You can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on metrics other than CPU or memory, you still need `kubectl`. + +##### Basic kubectl Command for Managing HPAs + +If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: + +- Creating HPA + + - With manifest: `kubectl create -f ` + + - Without a manifest (only supports CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` + +- Getting HPA info + + - Basic: `kubectl get hpa hello-world` + + - Detailed description: `kubectl describe hpa hello-world` + +- Deleting HPA + + - `kubectl delete hpa hello-world` + +##### HPA Manifest Definition Example + +The HPA manifest is the config file used for managing an HPA with `kubectl`. + +The following snippet demonstrates the use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive.
+ +```yml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: hello-world +spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi +``` + + +Directive | Description +---------|----------| + `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | + `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-world` deployment. | + `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. | + `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. + `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. + `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory. +
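To make the `targetAverageUtilization` directive concrete, the HPA control loop picks a replica count using the scaling rule documented in the upstream Kubernetes docs; the numbers in the worked example below are illustrative only.

```latex
% Replica count chosen by the HPA control loop:
\[
\text{desiredReplicas} \;=\;
\left\lceil \text{currentReplicas} \times
\frac{\text{currentMetricValue}}{\text{desiredMetricValue}} \right\rceil
\]
% Worked example for targetAverageUtilization: 50,
% with 4 replicas averaging 100% of their requested CPU:
% ceil(4 * 100 / 50) = 8 replicas
```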
+ +##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) + +Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use the Horizontal Pod Autoscaler. + +Run the following commands to check if metrics are available in your installation: + +``` +$ kubectl top nodes +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +node-controlplane 196m 9% 1623Mi 42% +node-etcd 80m 4% 1090Mi 28% +node-worker 64m 3% 1146Mi 29% +$ kubectl -n kube-system top pods +NAME CPU(cores) MEMORY(bytes) +canal-pgldr 18m 46Mi +canal-vhkgr 20m 45Mi +canal-x5q5v 17m 37Mi +canal-xknnz 20m 37Mi +kube-dns-7588d5b5f5-298j2 0m 22Mi +kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi +metrics-server-97bc649d5-jxrlt 0m 12Mi +$ kubectl -n kube-system logs -l k8s-app=metrics-server +I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true +I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 +I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version +I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 +I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink +I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) +I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ +I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 +``` + + +##### Configuring HPA to Scale Using Custom Metrics with Prometheus + +You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling with third-party software is based on application-level metrics (e.g., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. + +For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: + +- Prometheus is deployed in the cluster. +- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. +- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` + +Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from the Rancher catalog if it isn't already running in your cluster. + +For HPA to use custom metrics from Prometheus, the [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) package is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we use the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). + +1. Initialize Helm in your cluster.
+ ``` + # kubectl -n kube-system create serviceaccount tiller + kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + helm init --service-account tiller + ``` + +1. Clone the `banzai-charts` repo from GitHub: + ``` + # git clone https://github.com/banzaicloud/banzai-charts + ``` + +1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. + ``` + # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system + ``` + +1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check that the service pod is `Running`. Enter the following command. + ``` + # kubectl get pods -n kube-system + ``` + From the resulting output, look for a status of `Running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h + ... + ``` + 1. Check the service logs to make sure the service is running correctly by entering the command that follows. + ``` + # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system + ``` + Then review the log output to confirm the service is running. + +
+ Prometheus Adaptor Logs + + ... + I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds + I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: + I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT + I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json + I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 + I0724 10:18:45.696766 1 request.go:836] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} + I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.xip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK + I0724 
10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} + I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] + I0724 10:18:51.727845 1 request.go:836] Request Body: {"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} + ... +
+ + + +1. Check that the metrics API is accessible from kubectl. + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
+ API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"Metri
cValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
+ + - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to the API path. + ``` + # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
+ API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"Metri
cValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
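Once the custom metrics API responds as shown above, an HPA can consume adapter-provided pod metrics directly. The following is a minimal, illustrative sketch only (it assumes the adapter exposes a pods metric named `cpu_system`, the same metric used in the testing examples later in this section):

```yaml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
  # Pods-type metric served by the custom metrics adapter via custom.metrics.k8s.io.
  - type: Pods
    pods:
      metricName: cpu_system
      targetAverageValue: 20m
```

Resource metrics (CPU and memory) can be listed alongside the Pods-type metric, as shown in the testing examples that follow.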
diff --git a/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md b/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md new file mode 100644 index 0000000000..00563e231e --- /dev/null +++ b/docs/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md @@ -0,0 +1,52 @@ +--- +title: Managing HPAs with the Rancher UI +weight: 3028 +--- + +The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. + +If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +## Creating an HPA + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster you want to create an HPA in and click **Explore**. +1. In the left navigation bar, click **Service Discovery > HorizontalPodAutoscalers**. +1. Click **Create**. +1. Select a **Namespace** for the HPA. +1. Enter a **Name** for the HPA. +1. Select a **Target Reference** as scale target for the HPA. +1. Specify the **Minimum Replicas** and **Maximum Replicas** for the HPA. +1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl/#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +1. Click **Create** to create the HPA. + +:::note Result: + +The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. + +::: + +## Get HPA Metrics and Status + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that has the HPA and click **Explore**. +1. In the left navigation bar, click **Service Discovery > HorizontalPodAutoscalers**. The **HorizontalPodAutoscalers** page shows the number of current replicas. + +For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. + + +## Deleting an HPA + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that has the HPA you want to delete and click **Explore**. +1. In the left navigation bar, click **Service Discovery > HorizontalPodAutoscalers**. +1. Click **Resources > HPA**. +1. Find the HPA which you would like to delete and click **⋮ > Delete**. +1. Click **Delete** to confirm. + +:::note Result: + +The HPA is deleted from the current cluster. + +::: \ No newline at end of file diff --git a/docs/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa/testing-hpa.md b/docs/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa/testing-hpa.md new file mode 100644 index 0000000000..5de7277201 --- /dev/null +++ b/docs/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa/testing-hpa.md @@ -0,0 +1,531 @@ +--- +title: Testing HPAs with kubectl +weight: 3031 +--- + +This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. 
For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI](k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui/). + +For HPA to work correctly, service deployments should have resource request definitions for containers. Follow this hello-world example to test if HPA is working correctly. + +1. Configure `kubectl` to connect to your Kubernetes cluster. + +1. Copy the `hello-world` deployment manifest below. + +
+ Hello World Manifest + + ``` + apiVersion: apps/v1beta2 + kind: Deployment + metadata: + labels: + app: hello-world + name: hello-world + namespace: default + spec: + replicas: 1 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + resources: + requests: + cpu: 500m + memory: 64Mi + ports: + - containerPort: 80 + protocol: TCP + restartPolicy: Always + --- + apiVersion: v1 + kind: Service + metadata: + name: hello-world + namespace: default + spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: hello-world + ``` + +
+ +1. Deploy it to your cluster. + + ``` + # kubectl create -f + ``` + +1. Copy one of the HPAs below based on the metric type you're using: + +
+ Hello World HPA: Resource Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 1000Mi + ``` + +
+
+ Hello World HPA: Custom Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi + - type: Pods + pods: + metricName: cpu_system + targetAverageValue: 20m + ``` + +
+ +1. View the HPA info and description. Confirm that metric data is shown. + +
+ Resource Metrics + + 1. Enter the following commands. + ``` + # kubectl get hpa + NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE + hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m + # kubectl describe hpa + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 1253376 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
+
+ Custom Metrics + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive the output that follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 3514368 / 100Mi + "cpu_system" on pods: 0 / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
+ +1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). + +1. Test that pod autoscaling works as intended.

+ **To Test Autoscaling Using Resource Metrics:** + +
+ Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to two pods based on CPU Usage. + + 1. View your HPA. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10928128 / 100Mi + resource cpu on pods (as a percentage of request): 56% (280m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm you've scaled to two pods. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
+
+ Upscale to 3 pods: CPU Usage Up to Target + + Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 9424896 / 100Mi + resource cpu on pods (as a percentage of request): 66% (333m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + ``` + 2. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-f46kh 0/1 Running 0 1m + hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
+
+ Downscale to 1 Pod: All Metrics Below Target + + Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10070016 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + +
+ + **To Test Autoscaling Using Custom Metrics:** + +
+ Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale two pods based on CPU usage. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8159232 / 100Mi + "cpu_system" on pods: 7m / 20m + resource cpu on pods (as a percentage of request): 64% (321m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm two pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
+
+ Upscale to 3 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows: + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + ``` + 1. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + # kubectl get pods + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
+
+ Upscale to 4 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm four pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m + hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
+
+ Downscale to 1 Pod: All Metrics Below Target + + Use your load testing tool to scale down to one pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay`. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8101888 / 100Mi + "cpu_system" on pods: 8m / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + 1. Enter the following command to confirm a single pod is running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
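The load used in the steps above can come from any HTTP load generator. As a purely illustrative sketch (not part of the original walkthrough), `hey` could also be run in-cluster as a Kubernetes Job against the `hello-world` service; the container image name below is a placeholder for any image that bundles the `hey` binary:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: hello-world-load
  namespace: default
spec:
  backoffLimit: 0
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: hey
        # Placeholder image; substitute any image that contains the hey binary.
        image: example.org/tools/hey:latest
        args:
        - "-z"
        - "5m"   # generate load for 5 minutes
        - "-c"
        - "50"   # 50 concurrent workers
        - "http://hello-world.default.svc.cluster.local"
```

Deleting the Job stops the load, after which the HPA should scale the deployment back down once the downscale delay has passed.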
diff --git a/docs/pages-for-subheaders/about-authentication.md b/docs/pages-for-subheaders/about-authentication.md new file mode 100644 index 0000000000..305c9075ef --- /dev/null +++ b/docs/pages-for-subheaders/about-authentication.md @@ -0,0 +1,97 @@ +--- +title: Authentication +weight: 10 +--- + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. + +This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. + +## External vs. Local Authentication + +The Rancher authentication proxy integrates with the following external authentication services. + +| Auth Service | +| ------------------------------------------------------------------------------------------------ | +| [Microsoft Active Directory](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md) | +| [GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md) | +| [Microsoft Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md) | +| [FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md) | +| [OpenLDAP](configure-openldap.md) | +| [Microsoft AD FS](configure-microsoft-ad-federation-service-saml.md) | +| [PingIdentity](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md) | +| [Keycloak (OIDC)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc.md) | +| [Keycloak (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml.md) | +| [Okta](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md) | +| [Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md) | +| [Shibboleth](configure-shibboleth-saml.md) | + +
+However, Rancher also provides [local authentication](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md). + +In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. + +## Users and Groups + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control](manage-role-based-access-control-rbac.md). + +:::note + +Local authentication does not support creating or managing groups. + +::: + +For more information, see [Users and Groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md) + +## Scope of Rancher Authorization + +After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: + +| Access Level | Description | +|----------------------------------------------|-------------| +| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | +| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | +| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | + +To set the Rancher access level for users in the authorization service, follow these steps: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Auth Provider**. +1. After setting up the configuration details for an auth provider, use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. +1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. +1. Click **Save**. + +**Result:** The Rancher access configuration settings are applied. + +{{< saml_caveats >}} + +## External Authentication Configuration and Principal Users + +Configuration of external authentication requires: + +- A local user assigned the administrator role, called hereafter the _local principal_. 
+- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. + +Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. + +1. Sign into Rancher as the local principal and complete configuration of external authentication. + + ![Sign In](/img/sign-in.png) + +2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. + + ![Principal ID Sharing](/img/principal-ID.png) + +3. After you complete configuration, Rancher automatically signs out the local principal. + + ![Sign Out Local Principal](/img/sign-out-local.png) + +4. Then, Rancher automatically signs you back in as the external principal. + + ![Sign In External Principal](/img/sign-in-external.png) + +5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. + + ![Sign In External Principal](/img/users-page.png) + +6. The external principal and the local principal share the same access rights. diff --git a/docs/pages-for-subheaders/about-provisioning-drivers.md b/docs/pages-for-subheaders/about-provisioning-drivers.md new file mode 100644 index 0000000000..264c9d5ee8 --- /dev/null +++ b/docs/pages-for-subheaders/about-provisioning-drivers.md @@ -0,0 +1,48 @@ +--- +title: Provisioning Drivers +weight: 70 +--- + +Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +### Rancher Drivers + +With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. + +There are two types of drivers within Rancher: + +* [Cluster Drivers](#cluster-drivers) +* [Node Drivers](#node-drivers) + +### Cluster Drivers + +Cluster drivers are used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. 
+ +By default, Rancher has activated several hosted Kubernetes cloud providers including: + +* [Amazon EKS](amazon-eks-permissions.md) +* [Google GKE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +* [Azure AKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) + +There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: + +* [Alibaba ACK](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +* [Huawei CCE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) +* [Tencent](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) + +### Node Drivers + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. + +Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: + +* [Amazon EC2](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) +* [Azure](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md) +* [Digital Ocean](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md) +* [vSphere](vsphere.md) + +There are several other node drivers that are disabled by default, but are packaged in Rancher: + +* [Harvester](../explanations/integrations-in-rancher/harvester.md#harvester-node-driver/), available in Rancher v2.6.1 diff --git a/docs/pages-for-subheaders/about-rke1-templates.md b/docs/pages-for-subheaders/about-rke1-templates.md new file mode 100644 index 0000000000..57b0c31bed --- /dev/null +++ b/docs/pages-for-subheaders/about-rke1-templates.md @@ -0,0 +1,127 @@ +--- +title: RKE Templates +weight: 80 +--- + +RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. + +RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. + +With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. 
Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. + +RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. + +Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. + +If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. + +You can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. + +The core features of RKE templates allow DevOps and security teams to: + +- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices +- Prevent less technical users from making uninformed choices when provisioning clusters +- Share different templates with different sets of users and groups +- Delegate ownership of templates to users who are trusted to make changes to them +- Control which users can create templates +- Require users to create clusters from a template + +# Configurable Settings + +RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: + +- Cloud provider options +- Pod security options +- Network providers +- Ingress controllers +- Network security configuration +- Network plugins +- Private registry URL and credentials +- Add-ons +- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services + +The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. + +# Scope of RKE Templates + +RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. + +RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
+ +The settings of an existing cluster can be [saved as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#updating-a-template), and the cluster is upgraded to [use a newer version of the template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. + + +# Example Scenarios +When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. + +These [example scenarios](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md) describe how an organization could use templates to standardize cluster creation. + +Some of the example scenarios include the following: + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) of the template. +- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#allowing-other-users-to-control-and-share-a-template) + +# Template Management + +When you create an RKE template, it is available in the Rancher UI from the **Cluster Management** view under **RKE Templates**. 
When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share an RKE template with specific users or groups, and you can also make it public. + +Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. + +RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. + +In an RKE template, settings can be restricted to what the template owner chooses, or they can be left open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. + +The end user cannot directly edit settings that cannot be overridden. For a user to get different options for these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. + +The documents in this section explain the details of RKE template management: + +- [Getting permission to create templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md) +- [Creating and revising templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md) +- [Enforcing template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) +- [Overriding template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md) +- [Sharing templates with cluster creators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) +- [Sharing ownership of a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-ownership-of-templates) + +An [example YAML configuration file for a template](../reference-guides/rke1-template-example-yaml.md) is provided for reference. + +# Applying Templates + +You can [create a cluster from a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md) + +If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#updating-a-cluster-created-with-an-rke-template) + +RKE templates can be created from scratch to pre-define cluster configuration.
They can be applied to launch new clusters, or templates can also be exported from existing running clusters. + +You can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +# Standardizing Hardware + +RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, one option is to use RKE templates [in conjunction with other tools](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +Another option is to use [cluster templates,](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates.md) which include node pool configuration options, but don't provide configuration enforcement. + +# YAML Customization + +If you define an RKE template as a YAML file, you can modify this [example RKE template YAML](../reference-guides/rke1-template-example-yaml.md). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. + +The RKE documentation also has [annotated](https://rancher.com/docs/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. + +For guidance on available options, refer to the RKE documentation on [cluster configuration.](https://rancher.com/docs/rke/latest/en/config-options/) + +### Add-ons + +The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file](https://rancher.com/docs/rke/latest/en/config-options/add-ons/). + +The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. + +Some things you could do with add-ons include: + +- Install applications on the Kubernetes cluster after it starts +- Install plugins on nodes that are deployed with a Kubernetes daemonset +- Automatically set up namespaces, service accounts, or role binding + +The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. To set add-ons, when creating the template, you will click **Edit as YAML**. Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. 
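As a rough illustration, the nesting described above might look like the following snippet of RKE template YAML. The inline manifest and the manifest URL are placeholder examples, not values from the Rancher documentation:

```yaml
# Sketch of the add-on directives nested under rancher_kubernetes_engine_config.
# The inline Namespace manifest and the URL below are illustrative placeholders.
rancher_kubernetes_engine_config:
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: example-apps
  addons_include:
    # Additional YAML files (local paths or URLs) applied after the cluster starts
    - https://example.com/manifests/example-daemonset.yaml
```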
For more information on custom add-ons, refer to the [user-defined add-ons documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/docs/pages-for-subheaders/about-the-api.md b/docs/pages-for-subheaders/about-the-api.md new file mode 100644 index 0000000000..23cdc01002 --- /dev/null +++ b/docs/pages-for-subheaders/about-the-api.md @@ -0,0 +1,84 @@ +--- +title: API +weight: 24 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## How to use the API + +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: + + + + +1. Click on your user avatar in the upper right corner. +1. Click **Account & API Keys**. +1. Under the **API Keys** section, find the **API Endpoint** field and click the link. The link will look something like `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. + + + + +Go to the URL endpoint at `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. + + + + +## Authentication + +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys](../reference-guides/user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](../reference-guides/about-the-api/api-tokens.md). + +## Making requests + +The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). + +- Every type has a Schema which describes: + - The URL to get to the collection of this type of resources + - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. + - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). + - Every field that filtering is allowed on + - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. + + +- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. + +- In practice, you will probably just want to construct URL strings. 
We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. + +- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. + +- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. + +- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. + +- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. + +- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). + +## Filtering + +Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. + +## Sorting + +Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. + +## Pagination + +API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. + +## Capturing Rancher API Calls + +You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster: + +1. In the Rancher UI, go to **Cluster Management** and click **Create.** +1. Click one of the cluster types. This example uses Digital Ocean. +1. Fill out the form with a cluster name and node template, but don't click **Create**. +1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.** +1. In the developer tools, click the **Network** tab. +1. On the **Network** tab, make sure **Fetch/XHR** is selected. +1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`. +1. 
Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.** +1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials. diff --git a/docs/pages-for-subheaders/access-clusters.md b/docs/pages-for-subheaders/access-clusters.md new file mode 100644 index 0000000000..7c6eecda01 --- /dev/null +++ b/docs/pages-for-subheaders/access-clusters.md @@ -0,0 +1,32 @@ +--- +title: Cluster Access +weight: 1 +--- + +This section is about what tools can be used to access clusters managed by Rancher. + +For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +For more information on roles-based access control, see [this section.](manage-role-based-access-control-rbac.md) + +For information on how to set up an authentication system, see [this section.](about-authentication.md) + + +### Rancher UI + +Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. + +### kubectl + +You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: + +- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). +- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). + +### Rancher CLI + +You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. + +### Rancher API + +Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. 
\ No newline at end of file diff --git a/docs/pages-for-subheaders/advanced-configuration.md b/docs/pages-for-subheaders/advanced-configuration.md new file mode 100644 index 0000000000..e2386f978a --- /dev/null +++ b/docs/pages-for-subheaders/advanced-configuration.md @@ -0,0 +1,16 @@ +--- +title: Advanced Configuration +weight: 500 +--- + +### Alertmanager + +For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Prometheus + +For information on configuring the Prometheus custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) + +### PrometheusRules + +For information on configuring the PrometheusRules custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/_index.md b/docs/pages-for-subheaders/advanced-options.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/_index.md rename to docs/pages-for-subheaders/advanced-options.md diff --git a/docs/pages-for-subheaders/advanced-user-guides.md b/docs/pages-for-subheaders/advanced-user-guides.md new file mode 100644 index 0000000000..e4b8c16e1e --- /dev/null +++ b/docs/pages-for-subheaders/advanced-user-guides.md @@ -0,0 +1,7 @@ +--- +title: Advanced User Guides +--- + +Advanced user guides are "problem-oriented" docs in which users learn how to answer questions or solve problems. The major difference between these and the new user guides is that these guides are geared toward more experienced or advanced users who have more technical needs from their documentation. These users already have an understanding of Rancher and its functions. They know what they need to accomplish; they just need additional guidance to complete some more complex task they have encountered while working. + +It should be noted that neither new user guides nor advanced user guides provide detailed explanations or discussions (these kinds of docs belong elsewhere). How-to guides focus on the action of guiding users through repeatable, effective steps to learn new skills, master some task, or overcome some problem. \ No newline at end of file diff --git a/docs/pages-for-subheaders/air-gapped-helm-cli-install.md b/docs/pages-for-subheaders/air-gapped-helm-cli-install.md new file mode 100644 index 0000000000..717880520d --- /dev/null +++ b/docs/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -0,0 +1,31 @@ +--- +title: Air-Gapped Helm CLI Install +weight: 1 +--- + +This section is about using the Helm CLI to install the Rancher server in an air-gapped environment. An air-gapped environment could be one where the Rancher server will be installed offline, behind a firewall, or behind a proxy. + +The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. + +For more information on each installation option, refer to [this page.](installation-and-upgrade.md) + +Throughout the installation instructions, there will be _tabs_ for each installation option.
+ +:::note Important: + +If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. + +::: + +# Installation Outline + +1. [Set up infrastructure and private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +2. [Collect and publish images to your private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +3. [Set up a Kubernetes cluster (Skip this step for Docker installations)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +4. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +# Upgrades + +To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md) + +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/docs/pages-for-subheaders/amazon-eks-permissions.md b/docs/pages-for-subheaders/amazon-eks-permissions.md new file mode 100644 index 0000000000..d78200c71c --- /dev/null +++ b/docs/pages-for-subheaders/amazon-eks-permissions.md @@ -0,0 +1,123 @@ +--- +title: Creating an EKS Cluster +shortTitle: Amazon EKS +weight: 2110 +--- +Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). + +- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) + - [Amazon VPC](#amazon-vpc) + - [IAM Policies](#iam-policies) +- [Create the EKS Cluster](#create-the-eks-cluster) +- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) +- [Architecture](#architecture) +- [AWS Service Events](#aws-service-events) +- [Security and Compliance](#security-and-compliance) +- [Tutorial](#tutorial) +- [Minimum EKS Permissions](#minimum-eks-permissions) +- [Syncing](#syncing) +- [Troubleshooting](#troubleshooting) +- [Programmatically Creating EKS Clusters](#programmatically-creating-eks-clusters) +# Prerequisites in Amazon Web Services + +:::caution + +Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). + +::: + +To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). 
+ +### Amazon VPC + +An Amazon VPC is required to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. You can set one up yourself and provide it during cluster creation in Rancher. If you do not provide one during creation, Rancher will create one. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). + +### IAM Policies + +Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. + +1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + +2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. + +3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. + +:::note Important: + +It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. + +::: + +For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). + + +# Create the EKS Cluster + +Use Rancher to set up and configure your Kubernetes cluster. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Choose **Amazon EKS**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) +1. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# EKS Cluster Configuration Reference + +For the full list of EKS cluster configuration options, see [this page.](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md) + +# Architecture + +The figure below illustrates the high-level architecture of Rancher 2.x. 
The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. + +
Managing Kubernetes Clusters through Rancher's Authentication Proxy
+ +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +# AWS Service Events + +To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). + +# Security and Compliance + +By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not register in Rancher unless the credentials used to register the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). + +For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). + +# Tutorial + +This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. + +# Minimum EKS Permissions + +See [this page](../reference-guides/amazon-eks-permissions/minimum-eks-permissions.md) for the minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. + +# Syncing + +The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md) + +For information on configuring the refresh interval, refer to [this section.](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md#configuring-the-refresh-interval) + +# Troubleshooting + +If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) + +If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) + +For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). + +# Programmatically Creating EKS Clusters + +The most common way to programmatically deploy EKS clusters through Rancher is by using the Rancher2 Terraform provider. 
The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) \ No newline at end of file diff --git a/docs/pages-for-subheaders/authentication-config.md b/docs/pages-for-subheaders/authentication-config.md new file mode 100644 index 0000000000..62f8443fbf --- /dev/null +++ b/docs/pages-for-subheaders/authentication-config.md @@ -0,0 +1,5 @@ +--- +title: Authentication Config +--- + +In the following tutorials, you will learn how to [manage users and group](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md), [create local users](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md), [configure Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md), [configure Active Directory (AD)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md), [configure FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md), [configure Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md), [configure GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md), [configure Keycloak (OIDC)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc.md), [configure Keycloak (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml.md), [configure PingIdentity (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md), and how to [configure Okta (SAML)](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md). \ No newline at end of file diff --git a/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md new file mode 100644 index 0000000000..ca88fdadaa --- /dev/null +++ b/docs/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -0,0 +1,54 @@ +--- +title: Authentication, Permissions and Global Configuration +weight: 6 +--- + +After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. 
+ +## First Log In + +After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**. You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front of a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single-node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. + +:::danger + +After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. + +::: + +## Authentication + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows you to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's users and groups. + +For more information on how authentication works and how to configure each provider, see [Authentication](about-authentication.md). + +## Authorization + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. + +For more information on how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)](manage-role-based-access-control-rbac.md). + +## Pod Security Policies + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of the pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. + +For more information on how to create and use PSPs, see [Pod Security Policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +## Provisioning Drivers + +Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +For more information, see [Provisioning Drivers](about-provisioning-drivers.md). + +## Adding Kubernetes Versions into Rancher + +With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but it is not intended for upgrading Kubernetes minor versions (i.e. `v1.X.0`), as Kubernetes tends to deprecate or add APIs between minor versions. + +The information that Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md) is now located in the Rancher Kubernetes Metadata.
For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md) + +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md). + +For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md). + +## Enabling Experimental Features + +Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.](enable-experimental-features.md) diff --git a/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md new file mode 100644 index 0000000000..a9c918a747 --- /dev/null +++ b/docs/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -0,0 +1,107 @@ +--- +title: Backups and Disaster Recovery +weight: 5 +--- + +In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. + +The `rancher-backup` operator is used to backup and restore Rancher on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. + +- [Backup and Restore for Rancher installed with Docker](#backup-and-restore-for-rancher-installed-with-docker) +- [How Backups and Restores Work](#how-backups-and-restores-work) +- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) + - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) + - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) + - [RBAC](#rbac) +- [Backing up Rancher](#backing-up-rancher) +- [Restoring Rancher](#restoring-rancher) +- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) +- [Default Storage Location Configuration](#default-storage-location-configuration) + - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) + +# Backup and Restore for Rancher installed with Docker + +For Rancher installed with Docker, refer to [this page](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) to perform backups and [this page](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) to perform restores. + +# How Backups and Restores Work + +The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. 
The following cluster-scoped custom resource definitions are added to the cluster: + +- `backups.resources.cattle.io` +- `resourcesets.resources.cattle.io` +- `restores.resources.cattle.io` + +The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. + +When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. + +The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. + +When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. + +The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. + +:::note + +Refer [here](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. + +::: + +# Installing the rancher-backup Operator + +The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin only feature and available only for the **local** cluster. (*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) + +:::note + +There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here](./fleet-gitops-at-scale.md#troubleshooting) for a workaround. + +::: + +### Installing rancher-backup with the Rancher UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. +1. In the left navigation bar, **Apps & Marketplace > Charts**. +1. Click **Rancher Backups**. +1. Click **Install**. +1. Optional: Configure the default storage location. For help, refer to the [configuration section.](../reference-guides/backup-restore-configuration/storage-configuration.md) +1. Click **Install**. + +**Result:** The `rancher-backup` operator is installed. + +From the **Cluster Dashboard,** you can see the `rancher-backup` operator listed under **Deployments**. + +To configure the backup app in Rancher, go to the left navigation menu and click **Rancher Backups**. + +### RBAC + +Only the rancher admins and the local cluster’s cluster-owner can: + +* Install the Chart +* See the navigation links for Backup and Restore CRDs +* Perform a backup or restore by creating a Backup CR and Restore CR respectively +* List backups/restores performed so far + +# Backing up Rancher + +A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) + +# Restoring Rancher + +A restore is performed by creating a Restore custom resource. 
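For orientation, a minimal sketch of what a Backup and a Restore custom resource might look like when applied with `kubectl` is shown below. The names, schedule, retention, and backup file name are illustrative placeholders; see the backup and restore configuration reference for the authoritative fields.

```yaml
# Illustrative Backup and Restore custom resources for the rancher-backup operator.
# Names, schedule, retention, and the backup file name are placeholders.
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: nightly-rancher-backup
spec:
  resourceSetName: rancher-resource-set   # the predefined ResourceSet described above
  schedule: "0 2 * * *"                   # optional: take a recurring backup every night
  retentionCount: 7                       # keep the last seven backup files
---
apiVersion: resources.cattle.io/v1
kind: Restore
metadata:
  name: restore-rancher
spec:
  backupFilename: nightly-rancher-backup-2024-01-01T02-00-00Z.tar.gz
```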
For a tutorial, refer to [this page.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md) + +# Migrating Rancher to a New Cluster + +A migration is performed by following [these steps.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +# Default Storage Location Configuration + +Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. + +For information on configuring these options, refer to [this page.](../reference-guides/backup-restore-configuration/storage-configuration.md) + +### Example values.yaml for the rancher-backup Helm Chart + +The example [values.yaml file](../reference-guides/backup-restore-configuration/storage-configuration.md#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. diff --git a/docs/pages-for-subheaders/backup-restore-configuration.md b/docs/pages-for-subheaders/backup-restore-configuration.md new file mode 100644 index 0000000000..2d4c62fa20 --- /dev/null +++ b/docs/pages-for-subheaders/backup-restore-configuration.md @@ -0,0 +1,10 @@ +--- +title: Rancher Backup Configuration Reference +shortTitle: Configuration +weight: 4 +--- + +- [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) +- [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) +- [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) +- [Example Backup and Restore Custom Resources](../reference-guides/backup-restore-configuration/examples.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/best-practices.md b/docs/pages-for-subheaders/best-practices.md new file mode 100644 index 0000000000..68e26b48b4 --- /dev/null +++ b/docs/pages-for-subheaders/best-practices.md @@ -0,0 +1,18 @@ +--- +title: Best Practices Guide +weight: 4 +--- + +The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. + +If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. + +Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. 
+ +For more guidance on best practices, you can consult these resources: + +- [Security](rancher-security.md) +- [Rancher Blog](https://www.suse.com/c/rancherblog/) +- [Rancher Forum](https://forums.rancher.com/) +- [Rancher Users Slack](https://slack.rancher.io/) +- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md b/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md new file mode 100644 index 0000000000..66ef455ff5 --- /dev/null +++ b/docs/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -0,0 +1,50 @@ +--- +title: Checklist for Production-Ready Clusters +weight: 2 +--- + +In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. + +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) + +This is a shortlist of best practices that we strongly recommend for all production clusters. + +For a full list of all the best practices that we recommend, refer to the [best practices section.](best-practices.md) + +### Node Requirements + +* Make sure your nodes fulfill all of the [node requirements,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) including the port requirements. + +### Back up etcd + +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure recurring snapshots of etcd for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. + +### Cluster Architecture + +* Nodes should have one of the following role configurations: + * `etcd` + * `controlplane` + * `etcd` and `controlplane` + * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) +* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +* Assign two or more nodes the `controlplane` role for master component high availability. +* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md) + +For more information about the +number of nodes for each Kubernetes role, refer to the section on [recommended architecture.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### Logging and Monitoring + +* Configure alerts/notifiers for Kubernetes components (System Service). +* Configure logging for cluster analysis and post-mortems. 
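As a companion to the "Back up etcd" recommendation above, the following is a rough sketch of how recurring, externally stored etcd snapshots might be configured in an RKE `cluster.yml`. The bucket, endpoint, region, interval, and credential values are placeholders:

```yaml
# Sketch of recurring etcd snapshots stored off-node in S3-compatible storage (RKE cluster.yml).
# Bucket, endpoint, region, interval, and credentials are illustrative placeholders.
services:
  etcd:
    backup_config:
      enabled: true
      interval_hours: 6        # take a snapshot every six hours
      retention: 24            # keep the most recent 24 snapshots
      s3backupconfig:
        access_key: "<ACCESS_KEY>"
        secret_key: "<SECRET_KEY>"
        bucket_name: etcd-snapshots
        endpoint: s3.us-west-2.amazonaws.com
        region: us-west-2
```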
+ +### Reliability + +* Perform load tests on your cluster to verify that its hardware can support your workloads. + +### Networking + +* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider](set-up-cloud-providers.md) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). diff --git a/docs/pages-for-subheaders/cis-scan-guides.md b/docs/pages-for-subheaders/cis-scan-guides.md new file mode 100644 index 0000000000..c4ffa2e080 --- /dev/null +++ b/docs/pages-for-subheaders/cis-scan-guides.md @@ -0,0 +1,114 @@ +--- +title: CIS Scan Guides +--- + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The CIS scans can run on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. + +The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. + +- [About the CIS Benchmark](#about-the-cis-benchmark) +- [About the Generated Report](#about-the-generated-report) +- [Test Profiles](#test-profiles) +- [About Skipped and Not Applicable Tests](#about-skipped-and-not-applicable-tests) +- [Roles-based Access Control](../explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md) +- [Configuration](../explanations/integrations-in-rancher/cis-scans/configuration-reference.md) + +# About the CIS Benchmark + +The Center for Internet Security is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. + +CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. + +The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is +here. + +# About the Generated Report + +Each scan generates a report can be viewed in the Rancher UI and can be downloaded in CSV format. + +By default, the CIS Benchmark v1.6 is used. + +The Benchmark version is included in the generated report. + +The Benchmark provides recommendations of two types: Automated and Manual. Recommendations marked as Manual in the Benchmark are not included in the generated report. + +Some tests are designated as "Not Applicable." 
These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated as not applicable, refer to Rancher's [self-assessment guide](./rancher-security.md#the-cis-benchmark-and-self-assessment) for the corresponding Kubernetes version. + +The report contains the following information: + +| Column in Report | Description | +|------------------|-------------| +| `id` | The ID number of the CIS Benchmark. | +| `description` | The description of the CIS Benchmark test. | +| `remediation` | What needs to be fixed in order to pass the test. | +| `state` | Indicates if the test passed, failed, was skipped, or was not applicable. | +| `node_type` | The node role, which affects which tests are run on the node. Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | +| `audit` | This is the audit check that `kube-bench` runs for this test. | +| `audit_config` | Any configuration applicable to the audit script. | +| `test_info` | Test-related info as reported by `kube-bench`, if any. | +| `commands` | Test-related commands as reported by `kube-bench`, if any. | +| `config_commands` | Test-related configuration data as reported by `kube-bench`, if any. | +| `actual_value` | The test's actual value, present if reported by `kube-bench`. | +| `expected_result` | The test's expected result, present if reported by `kube-bench`. | + +Refer to the table in the cluster hardening guide for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. + +# Test Profiles + +The following profiles are available: + +- Generic CIS 1.5 +- Generic CIS 1.6 +- RKE permissive 1.5 +- RKE hardened 1.5 +- RKE permissive 1.6 +- RKE hardened 1.6 +- RKE2 permissive 1.5 +- RKE2 hardened 1.5 +- RKE2 permissive 1.6 +- RKE2 hardened 1.6 +- AKS +- EKS +- GKE + +You also have the ability to customize a profile by saving a set of tests to skip. + +All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how an RKE cluster manages Kubernetes. + +There are two types of RKE cluster scan profiles: + +- **Permissive:** This profile has a set of tests that will be skipped because these tests fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. +- **Hardened:** This profile will not skip any tests, except for the not applicable tests. + +The EKS and GKE cluster scan profiles are based on CIS Benchmark versions that are specific to those types of clusters. + +In order to pass the "Hardened" profile, you will need to follow the steps on the [hardening guide](./rancher-security.md#rancher-hardening-guide) and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. + +The default profile and the supported CIS Benchmark version depend on the type of cluster that will be scanned: + +The `rancher-cis-benchmark` app supports the CIS 1.6 Benchmark version. + +- For RKE Kubernetes clusters, the RKE Permissive 1.6 profile is the default. +- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters.
+- For RKE2 Kubernetes clusters, the RKE2 Permissive 1.6 profile is the default. +- For cluster types other than RKE, RKE2, EKS and GKE, the Generic CIS 1.5 profile will be used by default. + +# About Skipped and Not Applicable Tests + +For a list of skipped and not applicable tests, refer to [this page](../explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md). + +For now, only user-defined skipped tests are marked as skipped in the generated report. + +Any skipped tests that are defined as being skipped by one of the default profiles are marked as not applicable. + +# Roles-based Access Control + +For information about permissions, refer to [this page](../explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md). + +# Configuration + +For more information about configuring the custom resources for the scans, profiles, and benchmark versions, refer to [this page](../explanations/integrations-in-rancher/cis-scans/configuration-reference.md). + +_**Tutorials:**_ + +Refer to the following tutorials to learn how to [install `rancher-cis-benchmark`](../how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md), [uninstall `rancher-cis-benchmark`](../how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md), [run a scan](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md), [run a scan periodically on a schedule](../how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md), [skip tests](../how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md), [view reports](../how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md), [enable alerting for `rancher-cis-benchmark`](../how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md), [configure alerts for a periodic scan on a schedule](../how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md), and how to [create a custom benchmark version to run a cluster scan](../how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md). diff --git a/docs/pages-for-subheaders/cis-scans.md b/docs/pages-for-subheaders/cis-scans.md new file mode 100644 index 0000000000..159b7bc78d --- /dev/null +++ b/docs/pages-for-subheaders/cis-scans.md @@ -0,0 +1,111 @@ +--- +title: CIS Scans +weight: 17 +--- + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. The CIS scans can run on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. + +The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. 
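For reference, the same app can also be installed from the command line instead of through **Apps & Marketplace**. The sketch below is only an illustration: the chart repository URL, release names, and the `cis-operator-system` namespace follow common Rancher chart conventions and are assumptions, not taken from this page.

```
# Add the Rancher charts repository (assumed URL), install the CRD chart first,
# then install the rancher-cis-benchmark chart itself.
helm repo add rancher-charts https://charts.rancher.io
helm repo update
helm install rancher-cis-benchmark-crd rancher-charts/rancher-cis-benchmark-crd \
  --namespace cis-operator-system --create-namespace
helm install rancher-cis-benchmark rancher-charts/rancher-cis-benchmark \
  --namespace cis-operator-system
```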
+ +- [About the CIS Benchmark](#about-the-cis-benchmark) +- [About the Generated Report](#about-the-generated-report) +- [Test Profiles](#test-profiles) +- [About Skipped and Not Applicable Tests](#about-skipped-and-not-applicable-tests) +- [Roles-based Access Control](../explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md) +- [Configuration](../explanations/integrations-in-rancher/cis-scans/configuration-reference.md) + +# About the CIS Benchmark + +The Center for Internet Security is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. + +CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. + +The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is +here. + +# About the Generated Report + +Each scan generates a report can be viewed in the Rancher UI and can be downloaded in CSV format. + +By default, the CIS Benchmark v1.6 is used. + +The Benchmark version is included in the generated report. + +The Benchmark provides recommendations of two types: Automated and Manual. Recommendations marked as Manual in the Benchmark are not included in the generated report. + +Some tests are designated as "Not Applicable." These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated to be not applicable, refer to Rancher's self-assessment guide for the corresponding Kubernetes version. + +The report contains the following information: + +| Column in Report | Description | +|------------------|-------------| +| `id` | The ID number of the CIS Benchmark. | +| `description` | The description of the CIS Benchmark test. | +| `remediation` | What needs to be fixed in order to pass the test. | +| `state` | Indicates if the test passed, failed, was skipped, or was not applicable. | +| `node_type` | The node role, which affects which tests are run on the node. Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | +| `audit` | This is the audit check that `kube-bench` runs for this test. | +| `audit_config` | Any configuration applicable to the audit script. | +| `test_info` | Test-related info as reported by `kube-bench`, if any. | +| `commands` | Test-related commands as reported by `kube-bench`, if any. | +| `config_commands` | Test-related configuration data as reported by `kube-bench`, if any. | +| `actual_value` | The test's actual value, present if reported by `kube-bench`. | +| `expected_result` | The test's expected result, present if reported by `kube-bench`. | + +Refer to the table in the cluster hardening guide for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. 
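As a rough sketch of what happens behind the UI: the app represents each scan as a `ClusterScan` custom resource in the `cis.cattle.io` API group. The field and profile names below reflect the app's CRDs as we understand them and should be treated as assumptions; list the profiles that exist in your cluster before picking one.

```
# List the scan profiles installed by rancher-cis-benchmark.
kubectl get clusterscanprofiles

# Create a scan against one of them (the profile name shown is illustrative).
cat <<EOF | kubectl apply -f -
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: manual-scan
spec:
  scanProfileName: rke-profile-permissive-1.6
EOF

# Watch the scan run; the summary and report are surfaced in the Rancher UI.
kubectl get clusterscans -w
```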
+ +# Test Profiles + +The following profiles are available: + +- Generic CIS 1.5 +- Generic CIS 1.6 +- RKE permissive 1.5 +- RKE hardened 1.5 +- RKE permissive 1.6 +- RKE hardened 1.6 +- RKE2 permissive 1.5 +- RKE2 hardened 1.5 +- RKE2 permissive 1.6 +- RKE2 hardened 1.6 +- AKS +- EKS +- GKE + +You also have the ability to customize a profile by saving a set of tests to skip. + +All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how a RKE cluster manages Kubernetes. + +There are two types of RKE cluster scan profiles: + +- **Permissive:** This profile has a set of tests that have been will be skipped as these tests will fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. +- **Hardened:** This profile will not skip any tests, except for the non-applicable tests. + +The EKS and GKE cluster scan profiles are based on CIS Benchmark versions that are specific to those types of clusters. + +In order to pass the "Hardened" profile, you will need to follow the steps on the hardening guide and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. + +The default profile and the supported CIS benchmark version depends on the type of cluster that will be scanned: + +The `rancher-cis-benchmark` supports the CIS 1.6 Benchmark version. + +- For RKE Kubernetes clusters, the RKE Permissive 1.6 profile is the default. +- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. +- For RKE2 Kubernetes clusters, the RKE2 Permissive 1.6 profile is the default. +- For cluster types other than RKE, RKE2, EKS and GKE, the Generic CIS 1.5 profile will be used by default. + +# About Skipped and Not Applicable Tests + +For a list of skipped and not applicable tests, refer to this page. + +For now, only user-defined skipped tests are marked as skipped in the generated report. + +Any skipped tests that are defined as being skipped by one of the default profiles are marked as not applicable. + +# Roles-based Access Control + +For information about permissions, refer to this page. + +# Configuration + +For more information about configuring the custom resources for the scans, profiles, and benchmark versions, refer to this page. \ No newline at end of file diff --git a/docs/pages-for-subheaders/cli-with-rancher.md b/docs/pages-for-subheaders/cli-with-rancher.md new file mode 100644 index 0000000000..07e11cd7ab --- /dev/null +++ b/docs/pages-for-subheaders/cli-with-rancher.md @@ -0,0 +1,5 @@ +--- +title: CLI with Rancher +--- + +Interact with Rancher using command line interface (CLI) tools from your workstation. The following docs will describe the [Rancher CLI](../reference-guides/cli-with-rancher/rancher-cli.md) and [kubectl Utility](../reference-guides/cli-with-rancher/kubectl-utility). \ No newline at end of file diff --git a/docs/pages-for-subheaders/cluster-configuration.md b/docs/pages-for-subheaders/cluster-configuration.md new file mode 100644 index 0000000000..f08618880b --- /dev/null +++ b/docs/pages-for-subheaders/cluster-configuration.md @@ -0,0 +1,30 @@ +--- +title: Cluster Configuration +weight: 2025 +--- + +After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. 
+ +For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +### Cluster Configuration References + +The cluster configuration options depend on the type of Kubernetes cluster: + +- [RKE Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +- [RKE2 Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md) +- [K3s Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md) +- [EKS Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md) +- [GKE Cluster Configuration](gke-cluster-configuration.md) +- [AKS Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md) + +### Cluster Management Capabilities by Cluster Type + +The options and settings available for an existing cluster change based on the method that you used to provision it. + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + diff --git a/docs/pages-for-subheaders/configuration-options.md b/docs/pages-for-subheaders/configuration-options.md new file mode 100644 index 0000000000..fb5004c75c --- /dev/null +++ b/docs/pages-for-subheaders/configuration-options.md @@ -0,0 +1,48 @@ +--- +title: Configuration Options +weight: 3 +--- + +- [Egress Support](#egress-support) +- [Enabling Automatic Sidecar Injection](#enabling-automatic-sidecar-injection) +- [Overlay File](#overlay-file) +- [Selectors and Scrape Configs](#selectors-and-scrape-configs) +- [Enable Istio with Pod Security Policies](#enable-istio-with-pod-security-policies) +- [Additional Steps for Installing Istio on an RKE2 Cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) +- [Additional Steps for Project Network Isolation](#additional-steps-for-project-network-isolation) + +### Egress Support + +By default, the Egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). + +### Enabling Automatic Sidecar Injection + +Automatic sidecar injection is disabled by default. To enable this, set `sidecarInjectorWebhook.enableNamespacesByDefault=true` in the values.yaml on install or upgrade. This automatically enables Istio sidecar injection into all new namespaces that are deployed. + +### Overlay File + +An Overlay File is designed to support extensive configuration of your Istio installation. It allows you to make changes to any values available in the [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/). This ensures you can customize the default installation to fit any scenario. + +The Overlay File adds configuration on top of the default installation that is provided by the Istio chart installation. This means you do not need to redefine the components that are already defined for installation.
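As a brief sketch (not taken from this page), an overlay file is simply an `IstioOperator` spec fragment containing only the fields you want to change. The example below enables the egress gateway mentioned under [Egress Support](#egress-support); the filename is arbitrary, and the contents would be supplied through the Overlay File option on install or upgrade.

```
# Write a minimal IstioOperator overlay that enables the egress gateway.
# Everything not listed here keeps the chart's defaults.
cat > istio-overlay.yaml <<EOF
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    egressGateways:
      - name: istio-egressgateway
        enabled: true
EOF
```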
+ +For more information on Overlay Files, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings) + +### Selectors and Scrape Configs + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which enables monitoring across all namespaces by default. This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. + +If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. + +For details, refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) + +### Enable Istio with Pod Security Policies + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md) + +### Additional Steps for Installing Istio on an RKE2 Cluster + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md) + +### Additional Steps for Project Network Isolation + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md new file mode 100644 index 0000000000..f06cc7b47d --- /dev/null +++ b/docs/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -0,0 +1,30 @@ +--- +title: Configuring Microsoft Active Directory Federation Service (SAML) +weight: 1205 +--- + +If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. + +## Prerequisites + +You must have Rancher installed. + +- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. +- You must have a global administrator account on your Rancher installation. + +You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. + +- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. +- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. + +## Setup Outline + +Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. + +- [1. Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) +- [2. 
Configuring Rancher for Microsoft AD FS](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md) + +{{< saml_caveats >}} + + +### [Next: Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) diff --git a/docs/pages-for-subheaders/configure-openldap.md b/docs/pages-for-subheaders/configure-openldap.md new file mode 100644 index 0000000000..59b2658a91 --- /dev/null +++ b/docs/pages-for-subheaders/configure-openldap.md @@ -0,0 +1,53 @@ +--- +title: Configuring OpenLDAP +weight: 1113 +--- + +If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. + +## Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +## Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **OpenLDAP**. Fill out the **Configure an OpenLDAP server** form. +1. Click **Enable**. + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. + +:::note + +The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. + +::: + +1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. +2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. + +**Result:** + +- OpenLDAP authentication is configured. 
+- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. + +:::note + +You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. + +::: + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/docs/pages-for-subheaders/configure-shibboleth-saml.md b/docs/pages-for-subheaders/configure-shibboleth-saml.md new file mode 100644 index 0000000000..3e2c420680 --- /dev/null +++ b/docs/pages-for-subheaders/configure-shibboleth-saml.md @@ -0,0 +1,112 @@ +--- +title: Configuring Shibboleth (SAML) +weight: 1210 +--- + +If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. + +In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. + +> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md) + +This section covers the following topics: + +- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) + - [Shibboleth Prerequisites](#shibboleth-prerequisites) + - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) + - [SAML Provider Caveats](#saml-provider-caveats) +- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) + - [OpenLDAP Prerequisites](#openldap-prerequisites) + - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) + - [Troubleshooting](#troubleshooting) + +# Setting up Shibboleth in Rancher + +### Shibboleth Prerequisites +> +>- You must have a Shibboleth IdP Server configured. +>- Following are the Rancher Service Provider URLs needed for configuration: +Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` +Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` +>- Export a `metadata.xml` file from your IdP Server. For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) + +### Configure Shibboleth in Rancher + +If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Shibboleth**. +1. 
Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. + + 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). + + 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). + + 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). + + 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). + + 1. **Rancher API Host**: Enter the URL for your Rancher Server. + + 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. + + You can generate one using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. + + +1. After you complete the **Configure Shibboleth Account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. + + :::note + + You may have to disable your popup blocker to see the IdP login page. + + ::: + +**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. + +### SAML Provider Caveats + +If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. + +- There is no validation on users or groups when assigning permissions to them in Rancher. +- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. +- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. +- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. + +To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. + +# Setting up OpenLDAP in Rancher + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. + +### OpenLDAP Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). 
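Before wiring the account into Rancher, it can help to confirm from a shell that the bind account can actually read the users and groups it will be asked to search. The command below is a hedged sketch: the hostname, DNs, and filter are placeholders and must be replaced with your own values.

```
# Bind as the dedicated service account and list user entries under the search base.
# ldaps://ldap.example.com and all DNs are placeholders.
ldapsearch -x -H ldaps://ldap.example.com \
  -D "cn=rancher-bind,ou=service-accounts,dc=example,dc=com" -W \
  -b "ou=users,dc=example,dc=com" "(objectClass=inetOrgPerson)" dn uid
```

If this returns the expected entries without an authorization error, the account has the read access Rancher needs.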
+ +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +### Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) Note that nested group membership is not available for Shibboleth. + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Log into the Rancher UI using the initial local `admin` account. +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. + +# Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md new file mode 100644 index 0000000000..6d607713d1 --- /dev/null +++ b/docs/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -0,0 +1,74 @@ +--- +title: "Kubernetes Persistent Storage: Volumes and Storage Classes" +description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" +weight: 2031 +--- +When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. + +The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +### Prerequisites + +To set up persistent storage, the `Manage Volumes` [role](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. + +If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +For provisioning new storage with Rancher, the cloud provider must be enabled. 
For details on enabling cloud providers, refer to [this page.](set-up-cloud-providers.md) + +For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. + +### Setting up Existing Storage + +The overall workflow for setting up existing storage is as follows: + +1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. +2. Add a persistent volume (PV) that refers to the persistent storage. +3. Add a persistent volume claim (PVC) that refers to the PV. +4. Mount the PVC as a volume in your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md) + +### Dynamically Provisioning New Storage in Rancher + +The overall workflow for provisioning new storage is as follows: + +1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. +2. Add a persistent volume claim (PVC) that refers to the storage class. +3. Mount the PVC as a volume for your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) + +### Longhorn Storage + +[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. + +Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. + +If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/latest/what-is-longhorn/) + +Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. For more information, see [this page.](../explanations/integrations-in-rancher/longhorn.md) + +### Provisioning Storage Examples + +We provide examples of how to provision storage with [NFS,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) [vSphere,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) and [Amazon's EBS.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +### GlusterFS Volumes + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. 
For details on preventing this from happening, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md) + +### iSCSI Volumes + +In [Rancher Launched Kubernetes clusters](launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md) + +### hostPath Volumes +Before you create a hostPath volume, you need to set up an [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. + +### Migrating vSphere Cloud Provider from In-tree to Out-of-tree + +Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. + +For instructions on how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md) + +### Related Links + +- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/docs/pages-for-subheaders/custom-resource-configuration.md b/docs/pages-for-subheaders/custom-resource-configuration.md new file mode 100644 index 0000000000..bb051c785d --- /dev/null +++ b/docs/pages-for-subheaders/custom-resource-configuration.md @@ -0,0 +1,9 @@ +--- +title: Custom Resource Configuration +weight: 5 +--- + +The following Custom Resource Definitions are used to configure logging: + +- [Flow and ClusterFlow](../explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) +- [Output and ClusterOutput](../explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/deploy-apps-across-clusters.md b/docs/pages-for-subheaders/deploy-apps-across-clusters.md new file mode 100644 index 0000000000..bc0ebb83e3 --- /dev/null +++ b/docs/pages-for-subheaders/deploy-apps-across-clusters.md @@ -0,0 +1,15 @@ +--- +title: Deploying Applications across Clusters +weight: 12 +--- +### Fleet + +Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. + +Fleet is GitOps at scale. For more information, refer to the [Fleet section.](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) + +### Multi-cluster Apps + +In Rancher before v2.5, the multi-cluster apps feature was used to deploy applications across clusters. The multi-cluster apps feature is deprecated, but still available in Rancher v2.5. 
+ +Refer to the documentation [here.](../how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/deploy-rancher-manager.md b/docs/pages-for-subheaders/deploy-rancher-manager.md new file mode 100644 index 0000000000..05be04756a --- /dev/null +++ b/docs/pages-for-subheaders/deploy-rancher-manager.md @@ -0,0 +1,21 @@ +--- +title: Deploying Rancher Server +weight: 100 +--- + +Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. + +- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) +- [AWS Marketplace](../getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace.md) (uses Amazon EKS) +- [Azure](../getting-started/quick-start-guides/deploy-rancher-manager/azure.md) (uses Terraform) +- [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) +- [GCP](../getting-started/quick-start-guides/deploy-rancher-manager/gcp.md) (uses Terraform) +- [Hetzner Cloud](../getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud.md) (uses Terraform) +- [Vagrant](../getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md) +- [Equinix Metal](../getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal.md) +- [Outscale](../getting-started/quick-start-guides/deploy-rancher-manager/outscale-qs.md) (uses Terraform) + + +If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. + +- [Manual Install](../getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md) diff --git a/docs/pages-for-subheaders/deploy-rancher-workloads.md b/docs/pages-for-subheaders/deploy-rancher-workloads.md new file mode 100644 index 0000000000..cf89f3341a --- /dev/null +++ b/docs/pages-for-subheaders/deploy-rancher-workloads.md @@ -0,0 +1,9 @@ +--- +title: Deploying Workloads +weight: 200 +--- + +These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. + +- [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) +- [Workload with NodePort](../getting-started/quick-start-guides/deploy-workloads/nodeports.md) diff --git a/docs/pages-for-subheaders/downstream-cluster-configuration.md b/docs/pages-for-subheaders/downstream-cluster-configuration.md new file mode 100644 index 0000000000..2d15adaa3b --- /dev/null +++ b/docs/pages-for-subheaders/downstream-cluster-configuration.md @@ -0,0 +1,5 @@ +--- +title: Downstream Cluster Configuration +--- + +Users can easily configure downstream clusters with Rancher. The following docs will discuss [node template configuration](./node-template-configuration.md) and [machine configuration](./machine-configuration.md). \ No newline at end of file diff --git a/docs/pages-for-subheaders/enable-experimental-features.md b/docs/pages-for-subheaders/enable-experimental-features.md new file mode 100644 index 0000000000..d24aff21ac --- /dev/null +++ b/docs/pages-for-subheaders/enable-experimental-features.md @@ -0,0 +1,123 @@ +--- +title: Enabling Experimental Features +weight: 17 +--- +Rancher includes some features that are experimental and disabled by default. 
You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. + +The features can be enabled in three ways: + +- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. +- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) by going to the **Settings** page. +- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. + +Each feature has two values: + +- A default value, which can be configured with a flag or environment variable from the command line +- A set value, which can be configured with the Rancher API or UI + +If no value has been set, Rancher uses the default value. + +Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. + +For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. See the [feature flags page](../reference-guides/installation-references/feature-flags.md) for more information. + +# Enabling Features when Starting Rancher + +When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. + +### Enabling Features for Kubernetes Installs + +:::note + +Values set from the Rancher API will override the value passed in through the command line. + +::: + +When installing Rancher with a Helm chart, use the `--set` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + +:::note + +If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +::: + +### Rendering the Helm Chart for Air Gap Installations + +For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. + +The Helm command is as follows: + +``` +helm template rancher ./rancher-.tgz --output-dir . 
\ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + +### Enabling Features for Docker Installs + +When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +docker run -d -p 80:80 -p 443:443 \ + --restart=unless-stopped \ + rancher/rancher:rancher-latest \ + --features==true,=true +``` + + +# Enabling Features with the Rancher UI + +1. In the upper left corner, click **☰ > Global Settings**. +1. Click **Feature Flags**. +1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate**. + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher UI + +1. In the upper left corner, click **☰ > Global Settings**. +1. Click **Feature Flags**. You will see a list of experimental features. +1. To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate**. + +**Result:** The feature is disabled. + +# Enabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit**. +1. In the **Value** drop-down menu, click **True**. +1. Click **Show Request**. +1. Click **Send Request**. +1. Click **Close**. + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit**. +1. In the **Value** drop-down menu, click **False**. +1. Click **Show Request**. +1. Click **Send Request**. +1. Click **Close**. + +**Result:** The feature is disabled. diff --git a/docs/pages-for-subheaders/fleet-gitops-at-scale.md b/docs/pages-for-subheaders/fleet-gitops-at-scale.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/fleet-gitops-at-scale.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/gke-cluster-configuration.md b/docs/pages-for-subheaders/gke-cluster-configuration.md new file mode 100644 index 0000000000..bdb8150ddf --- /dev/null +++ b/docs/pages-for-subheaders/gke-cluster-configuration.md @@ -0,0 +1,323 @@ +--- +title: GKE Cluster Configuration Reference +shortTitle: GKE Cluster Configuration +weight: 3 +--- + +# Changes in Rancher v2.6 + +- Support for additional configuration options: + - Project network isolation + - Network tags + +# Cluster Location + +| Value | Description | +|--------|--------------| +| Location Type | Zonal or Regional. With GKE, you can create a cluster tailored to the availability requirements of your workload and your budget. 
By default, a cluster's nodes run in a single compute zone. When multiple zones are selected, the cluster's nodes will span multiple compute zones, while the controlplane is located in a single zone. Regional clusters increase the availability of the controlplane as well. For help choosing the type of cluster availability, refer to [these docs.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#choosing_a_regional_or_zonal_control_plane) | +| Zone | Each region in Compute Engine contains a number of zones. For more information about available regions and zones, refer to [these docs.](https://cloud.google.com/compute/docs/regions-zones#available) | +| Additional Zones | For zonal clusters, you can select additional zones to create a [multi-zone cluster.](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#multi-zonal_clusters) | +| Region | For [regional clusters,](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#regional_clusters) you can select a region. For more information about available regions and zones, refer to [this section](https://cloud.google.com/compute/docs/regions-zones#available). The first part of each zone name is the name of the region. | + +# Cluster Options + +### Kubernetes Version + +_Mutable: yes_ + +For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) + +### Container Address Range + +_Mutable: no_ + +The IP address range for pods in the cluster. Must be a valid CIDR range, e.g. 10.42.0.0/16. If not specified, a random range is automatically chosen from 10.0.0.0/8 and will exclude ranges already allocated to VMs, other clusters, or routes. Automatically chosen ranges may conflict with reserved IP addresses, dynamic routes, or routes within VPCs peering with the cluster. + +### Network + +_Mutable: no_ + +The Compute Engine Network that the cluster connects to. Routes and firewalls will be created using this network. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC networks that are shared to your project will be available to select in this field. For more information, refer to [this page](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets). + +### Node Subnet / Subnet + +_Mutable: no_ + +The Compute Engine subnetwork that the cluster connects to. This subnetwork must belong to the network specified in the **Network** field. Select an existing subnetwork, or select "Auto Create Subnetwork" to have one automatically created. If not using an existing network, **Subnetwork Name** is required to generate one. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC subnets that are shared to your project will appear here. If using a Shared VPC network, you cannot select "Auto Create Subnetwork". For more information, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) + +### Subnetwork Name + +_Mutable: no_ + +Automatically create a subnetwork with the provided name.
Required if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. For more information on subnetworks, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) + +### Ip Aliases + +_Mutable: no_ + +Enable [alias IPs](https://cloud.google.com/vpc/docs/alias-ip). This enables VPC-native traffic routing. Required if using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc). + +### Network Policy + +_Mutable: yes_ + +Enable network policy enforcement on the cluster. A network policy defines the level of communication that can occur between pods and services in the cluster. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy) + +### Project Network Isolation + +_Mutable: yes_ + +choose whether to enable or disable inter-project communication. Note that enabling Project Network Isolation will automatically enable Network Policy and Network Policy Config, but not vice versa. + +### Node Ipv4 CIDR Block + +_Mutable: no_ + +The IP address range of the instance IPs in this cluster. Can be set if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. Must be a valid CIDR range, e.g. 10.96.0.0/14. For more information on how to determine the IP address range, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) + +### Cluster Secondary Range Name + +_Mutable: no_ + +The name of an existing secondary range for Pod IP addresses. If selected, **Cluster Pod Address Range** will automatically be populated. Required if using a Shared VPC network. + +### Cluster Pod Address Range + +_Mutable: no_ + +The IP address range assigned to pods in the cluster. Must be a valid CIDR range, e.g. 10.96.0.0/11. If not provided, will be created automatically. Must be provided if using a Shared VPC network. For more information on how to determine the IP address range for your pods, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_pods) + +### Services Secondary Range Name + +_Mutable: no_ + +The name of an existing secondary range for service IP addresses. If selected, **Service Address Range** will be automatically populated. Required if using a Shared VPC network. + +### Service Address Range + +_Mutable: no_ + +The address range assigned to the services in the cluster. Must be a valid CIDR range, e.g. 10.94.0.0/18. If not provided, will be created automatically. Must be provided if using a Shared VPC network. For more information on how to determine the IP address range for your services, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_svcs) + +### Private Cluster + +_Mutable: no_ + +:::caution + +Private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide](../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md). + +::: + +Assign nodes only internal IP addresses. Private cluster nodes cannot access the public internet unless additional networking steps are taken in GCP. 
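As a side note (outside the Rancher workflow described here), you can verify how GKE recorded the private-cluster settings after provisioning with a `gcloud` query; the cluster name and region below are placeholders.

```
# Inspect the private cluster configuration for an existing GKE cluster.
# CLUSTER_NAME and REGION are placeholders.
gcloud container clusters describe CLUSTER_NAME \
  --region REGION \
  --format="yaml(privateClusterConfig)"
```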
+ +### Enable Private Endpoint + +:::caution + +Private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide](../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md). + +::: + +_Mutable: no_ + +Locks down external access to the control plane endpoint. Only available if **Private Cluster** is also selected. If selected, and if Rancher does not have direct access to the Virtual Private Cloud network the cluster is running in, Rancher will provide a registration command to run on the cluster to enable Rancher to connect to it. + +### Master IPV4 CIDR Block + +_Mutable: no_ + +The IP range for the control plane VPC. + +### Master Authorized Network + +_Mutable: yes_ + +Enable control plane authorized networks to block untrusted non-GCP source IPs from accessing the Kubernetes master through HTTPS. If selected, additional authorized networks may be added. If the cluster is created with a public endpoint, this option is useful for locking down access to the public endpoint to only certain networks, such as the network where your Rancher service is running. If the cluster only has a private endpoint, this setting is required. + +# Additional Options + +### Cluster Addons + +Additional Kubernetes cluster components. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.AddonsConfig) + +#### Horizontal Pod Autoscaling + +_Mutable: yes_ + +The Horizontal Pod Autoscaler changes the shape of your Kubernetes workload by automatically increasing or decreasing the number of Pods in response to the workload's CPU or memory consumption, or in response to custom metrics reported from within Kubernetes or external metrics from sources outside of your cluster. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler) + +#### HTTP (L7) Load Balancing + +_Mutable: yes_ + +HTTP (L7) Load Balancing distributes HTTP and HTTPS traffic to backends hosted on GKE. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer) + +#### Network Policy Config (master only) + +_Mutable: yes_ + +Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the master, it does not track whether network policy is enabled for the nodes. + +### Cluster Features (Alpha Features) + +_Mutable: no_ + +Turns on all Kubernetes alpha API groups and features for the cluster. When enabled, the cluster cannot be upgraded and will be deleted automatically after 30 days. Alpha clusters are not recommended for production use as they are not covered by the GKE SLA. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alpha-clusters) + +### Logging Service + +_Mutable: yes_ + +The logging service the cluster uses to write logs. Use either [Cloud Logging](https://cloud.google.com/logging) or no logging service in which case no logs are exported from the cluster. + +### Monitoring Service + +_Mutable: yes_ + +The monitoring service the cluster uses to write metrics. 
Use either [Cloud Monitoring](https://cloud.google.com/monitoring) or no monitoring service, in which case no metrics are exported from the cluster. + + +### Maintenance Window + +_Mutable: yes_ + +Set the start time for a 4-hour maintenance window. The time is specified in the UTC time zone using the HH:MM format. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) + +# Node Pools + +In this section, enter details describing the configuration of each node in the node pool. + +### Kubernetes Version + +_Mutable: yes_ + +The Kubernetes version for each node in the node pool. For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) + +### Image Type + +_Mutable: yes_ + +The node operating system image. For more information on the node image options that GKE offers for each OS, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/node-images#available_node_images) + +:::note + +The default option is "Container-Optimized OS with Docker". The read-only filesystem on GCP's Container-Optimized OS is not compatible with the [legacy logging](https://rancher.com/docs/rancher/v2.0-v2.4/en/cluster-admin/tools/logging/) implementation in Rancher. If you need to use the legacy logging feature, select "Ubuntu with Docker" or "Ubuntu with Containerd". The [current logging feature](logging.md) is compatible with the Container-Optimized OS image. + +::: + +:::note + +If selecting "Windows Long Term Service Channel" or "Windows Semi-Annual Channel" for the node pool image type, you must also add at least one Container-Optimized OS or Ubuntu node pool. + +::: + +### Machine Type + +_Mutable: no_ + +The virtualized hardware resources available to node instances. For more information on Google Cloud machine types, refer to [this page.](https://cloud.google.com/compute/docs/machine-types#machine_types) + +### Root Disk Type + +_Mutable: no_ + +Standard persistent disks are backed by standard hard disk drives (HDD), while SSD persistent disks are backed by solid state drives (SSD). For more information, refer to [this section.](https://cloud.google.com/compute/docs/disks) + +### Local SSD Disks + +_Mutable: no_ + +Configure each node's local SSD disk storage in GB. Local SSDs are physically attached to the server that hosts your VM instance. Local SSDs have higher throughput and lower latency than standard persistent disks or SSD persistent disks. The data that you store on a local SSD persists only until the instance is stopped or deleted. For more information, see [this section.](https://cloud.google.com/compute/docs/disks#localssds) + +### Preemptible nodes (beta) + +_Mutable: no_ + +Preemptible nodes, also called preemptible VMs, are Compute Engine VM instances that last a maximum of 24 hours in general, and provide no availability guarantees. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms) + +### Taints + +_Mutable: no_ + +When you apply a taint to a node, only Pods that tolerate the taint are allowed to run on the node. 
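For background, a Pod opts in to running on tainted nodes by declaring a matching toleration in its spec. A minimal, hypothetical sketch (the key, value, and effect are placeholders and must match the taint you configure on the node pool):

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: toleration-example
spec:
  containers:
    - name: app
      image: nginx              # example image
  tolerations:
    - key: dedicated            # matches the taint key on the node pool
      operator: Equal
      value: example-team       # matches the taint value
      effect: NoSchedule        # matches the taint effect
EOF
```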
In a GKE cluster, you can apply a taint to a node pool, which applies the taint to all nodes in the pool. + +### Node Labels + +_Mutable: no_ + +You can apply labels to the node pool, which applies the labels to all nodes in the pool. + +Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +### Network Tags + +_Mutable: no_ + +You can add network tags to the node pool to make firewall rules and routes between subnets. Tags will apply to all nodes in the pool. + +For details on tag syntax and requirements, see the [Kubernetes documentation](https://cloud.google.com/vpc/docs/add-remove-network-tags). + +# Group Details + +In this section, enter details describing the node pool. + +### Name + +_Mutable: no_ + +Enter a name for the node pool. + +### Initial Node Count + +_Mutable: yes_ + +Integer for the starting number of nodes in the node pool. + +### Max Pod Per Node + +_Mutable: no_ + +GKE has a hard limit of 110 Pods per node. For more information on the Kubernetes limits, see [this section.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#dimension_limits) + +### Autoscaling + +_Mutable: yes_ + +Node pool autoscaling dynamically creates or deletes nodes based on the demands of your workload. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler) + +### Auto Repair + +_Mutable: yes_ + +GKE's node auto-repair feature helps you keep the nodes in your cluster in a healthy, running state. When enabled, GKE makes periodic checks on the health state of each node in your cluster. If a node fails consecutive health checks over an extended time period, GKE initiates a repair process for that node. For more information, see the section on [auto-repairing nodes.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair) + +### Auto Upgrade + +_Mutable: yes_ + +When enabled, the auto-upgrade feature keeps the nodes in your cluster up-to-date with the cluster control plane (master) version when your control plane is [updated on your behalf.](https://cloud.google.com/kubernetes-engine/upgrades#automatic_cp_upgrades) For more information about auto-upgrading nodes, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades) + +### Access Scopes + +_Mutable: no_ + +Access scopes are the legacy method of specifying permissions for your nodes. + +- **Allow default access:** The default access for new clusters is the [Compute Engine default service account.](https://cloud.google.com/compute/docs/access/service-accounts?hl=en_US#default_service_account) +- **Allow full access to all Cloud APIs:** Generally, you can just set the cloud-platform access scope to allow full access to all Cloud APIs, then grant the service account only relevant IAM roles. The combination of access scopes granted to the virtual machine instance and the IAM roles granted to the service account determines the amount of access the service account has for that instance. 
+- **Set access for each API:** Alternatively, you can choose to set specific scopes that permit access to the particular API methods that the service will call. + +For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + + +### Configuring the Refresh Interval + +The refresh interval can be configured through the setting "gke-refresh", which is an integer representing seconds. + +The default value is 300 seconds. + +The syncing interval can be changed by running `kubectl edit setting gke-refresh`. + +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for GCP APIs. + diff --git a/docs/pages-for-subheaders/helm-charts-in-rancher.md b/docs/pages-for-subheaders/helm-charts-in-rancher.md new file mode 100644 index 0000000000..c085be082b --- /dev/null +++ b/docs/pages-for-subheaders/helm-charts-in-rancher.md @@ -0,0 +1,151 @@ +--- +title: Helm Charts in Rancher +weight: 11 +--- + +In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. Helm chart repositories are managed using **Apps & Marketplace**. It uses a catalog-like system to import bundles of charts from repositories and then uses those charts to either deploy custom Helm applications or Rancher's tools such as Monitoring or Istio. Rancher tools come as pre-loaded repositories which deploy as standalone Helm charts. Any additional repositories are only added to the current cluster. + +### Changes in Rancher v2.6 + +Starting in Rancher v2.6.0, a new versioning scheme for Rancher feature charts was implemented. The changes are centered around the major version of the charts and the +up annotation for upstream charts, where applicable. + +**Major Version:** The major version of the charts is tied to Rancher minor versions. When you upgrade to a new Rancher minor version, you should ensure that all of your **Apps & Marketplace** charts are also upgraded to the correct release line for the chart. + +:::note + +Any major versions that are less than the ones mentioned in the table below are meant for 2.5 and below only. For example, you are advised to not use <100.x.x versions of Monitoring in 2.6.x+. + +::: + +**Feature Charts:** + +| **Name** | **Supported Minimum Version** | **Supported Maximum Version** | +| ---------------- | ------------ | ------------ | +| external-ip-webhook | 100.0.0+up1.0.0 | 100.0.1+up1.0.1 | +| harvester-cloud-provider | 100.0.2+up0.1.12 | 100.0.2+up0.1.12 | +| harvester-csi-driver | 100.0.2+up0.1.11 | 100.0.2+up0.1.11 | +| rancher-alerting-drivers | 100.0.0 | 100.0.2 | +| rancher-backup | 2.0.1 | 2.1.2 | +| rancher-cis-benchmark | 2.0.1 | 2.0.4 | +| rancher-gatekeeper | 100.0.0+up3.6.0 | 100.1.0+up3.7.1 | +| rancher-istio | 100.0.0+up1.10.4 | 100.2.0+up1.12.6 | +| rancher-logging | 100.0.0+up3.12.0 | 100.1.2+up3.17.4 | +| rancher-longhorn | 100.0.0+up1.1.2 | 100.1.1+up1.2.3 | +| rancher-monitoring | 100.0.0+up16.6.0 | 100.1.0+up19.0.3 +| rancher-sriov (experimental) | 100.0.0+up0.1.0 | 100.0.3+up0.1.0 | +| rancher-vsphere-cpi | 100.3.0+up1.2.1 | 100.3.0+up1.2.1 | +| rancher-vsphere-csi | 100.3.0+up2.5.1-rancher1 | 100.3.0+up2.5.1-rancher1 | +| rancher-wins-upgrader | 0.0.100 | 100.0.0+up0.0.1 | +| neuvector | 100.0.0+up2.2.0 | 100.0.0+up2.2.0 | + +
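Because **Apps & Marketplace** installs charts as regular Helm releases in the downstream cluster, one way to review which chart versions you are currently running before a Rancher upgrade is the Helm CLI (a sketch; it assumes your kubeconfig points at the cluster in question):

```
# Lists every Helm release with its chart version in the CHART column,
# e.g. rancher-monitoring-100.1.0+up19.0.3
helm ls --all-namespaces
```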
+**Charts based on upstream:** For charts that are based on upstreams, the +up annotation should inform you of what upstream version the Rancher chart is tracking. Check the upstream version compatibility with Rancher during upgrades also. + +- As an example, `100.x.x+up16.6.0` for Monitoring tracks upstream kube-prometheus-stack `16.6.0` with some Rancher patches added to it. + +- On upgrades, ensure that you are not downgrading the version of the chart that you are using. For example, if you are using a version of Monitoring > `16.6.0` in Rancher 2.5, you should not upgrade to `100.x.x+up16.6.0`. Instead, you should upgrade to the appropriate version in the next release. + + +### Charts + +From the top-left menu select _"Apps & Marketplace"_ and you will be taken to the Charts page. + +The charts page contains all Rancher, Partner, and Custom Charts. + +* Rancher tools such as Logging or Monitoring are included under the Rancher label +* Partner charts reside under the Partners label +* Custom charts will show up under the name of the repository + +All three types are deployed and managed in the same way. + +:::note + +Apps managed by the Cluster Manager (the global view in the legacy Rancher UI) should continue to be managed only by the Cluster Manager, and apps managed with Apps & Marketplace in the new UI must be managed only by Apps & Marketplace. + +::: + +### Repositories + +From the left sidebar select _"Repositories"_. + +These items represent helm repositories, and can be either traditional helm endpoints which have an index.yaml, or git repositories which will be cloned and can point to a specific branch. In order to use custom charts, simply add your repository here and they will become available in the Charts tab under the name of the repository. + +To add a private CA for Helm Chart repositories: + +- **HTTP-based chart repositories**: You must add a base64 encoded copy of the CA certificate in DER format to the spec.caBundle field of the chart repo, such as `openssl x509 -outform der -in ca.pem | base64 -w0`. Click **Edit YAML** for the chart repo and set, as in the following example:
+ ``` + [...] + spec: + caBundle: + MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT + ... + nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4= + [...] + ``` + + +- **Git-based chart repositories**: You must add a base64 encoded copy of the CA certificate in DER format to the spec.caBundle field of the chart repo, such as `openssl x509 -outform der -in ca.pem | base64 -w0`. Click **Edit YAML** for the chart repo and set, as in the following example:
+ ``` + [...] + spec: + caBundle: + MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT + ... + nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4= + [...] + ``` + + +:::note Helm chart repositories with authentication + +As of Rancher v2.6.3, a new value `disableSameOriginCheck` has been added to the Repo.Spec. This allows users to bypass the same origin checks, sending the repository Authentication information as a Basic Auth Header with all API calls. This is not recommended but can be used as a temporary solution in cases of non-standard Helm chart repositories such as those that have redirects to a different origin URL. + +To use this feature for an existing Helm chart repository, click ⋮ > Edit YAML. On the `spec` portion of the YAML file, add `disableSameOriginCheck` and set it to `true`. + +```yaml +[...] +spec: + disableSameOriginCheck: true +[...] +``` + +::: + +### Helm Compatibility + +Only Helm 3 compatible charts are supported. + + +### Deployment and Upgrades + +From the _"Charts"_ tab select a Chart to install. Rancher and Partner charts may have extra configurations available through custom pages or questions.yaml files, but all chart installations can modify the values.yaml and other basic settings. Once you click install, a Helm operation job is deployed, and the console for the job is displayed. + +To view all recent changes, go to the _"Recent Operations"_ tab. From there you can view the call that was made, conditions, events, and logs. + +After installing a chart, you can find it in the _"Installed Apps"_ tab. In this section you can upgrade or delete the installation, and see further details. When choosing to upgrade, the form and values presented will be the same as installation. + +Most Rancher tools have additional pages located in the toolbar below the _"Apps & Marketplace"_ section to help manage and use the features. These pages include links to dashboards, forms to easily add Custom Resources, and additional information. + +:::caution + +If you are upgrading your chart using _"Customize Helm options before upgrade"_ , please be aware that using the _"--force"_ option may result in errors if your chart has immutable fields. This is because some objects in Kubernetes cannot be changed once they are created. To ensure you do not get this error you can: + + * use the default upgrade option ( i.e do not use _"--force"_ option ) + * uninstall the existing chart and install the upgraded chart + * delete the resources with immutable fields from the cluster before performing the _"--force"_ upgrade + +::: + +#### Changes in Rancher v2.6.3 + +The upgrade button has been removed for legacy apps from the **Apps & Marketplace > Installed Apps** page. 
+ +If you have a legacy app installed and want to upgrade it: + +- The legacy [feature flag](enable-experimental-features.md) must be turned on (if it's not turned on automatically because of having a legacy app before upgrading) +- You can upgrade the app from cluster explorer, from the left nav section **Legacy > Project > Apps** +- For multi-cluster apps, you can go to **≡ > Multi-cluster Apps** and upgrade the app from there + +### Limitations + +[Dashboard apps or Rancher feature charts](helm-charts-in-rancher.md) **cannot** be installed using the Rancher CLI. diff --git a/docs/pages-for-subheaders/horizontal-pod-autoscaler.md b/docs/pages-for-subheaders/horizontal-pod-autoscaler.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/infrastructure-setup.md b/docs/pages-for-subheaders/infrastructure-setup.md new file mode 100644 index 0000000000..081207021e --- /dev/null +++ b/docs/pages-for-subheaders/infrastructure-setup.md @@ -0,0 +1,10 @@ +--- +title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. +shortTitle: Infrastructure Tutorials +weight: 5 +--- + +To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) + + +To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) diff --git a/docs/pages-for-subheaders/install-cluster-autoscaler.md b/docs/pages-for-subheaders/install-cluster-autoscaler.md new file mode 100644 index 0000000000..40dafa4b01 --- /dev/null +++ b/docs/pages-for-subheaders/install-cluster-autoscaler.md @@ -0,0 +1,25 @@ +--- +title: Cluster Autoscaler +weight: 1 +--- + +In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. + +To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. + +Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. + +It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. + +# Cloud Providers + +Cluster Autoscaler provides support to distinct cloud providers. 
For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) + +### Setting up Cluster Autoscaler on Amazon Cloud Provider + +For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md) diff --git a/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md new file mode 100644 index 0000000000..50ebb1b036 --- /dev/null +++ b/docs/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -0,0 +1,332 @@ +--- +title: Install/Upgrade Rancher on a Kubernetes Cluster +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 2 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. + +- [Prerequisites](#prerequisites) +- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) + +# Prerequisites + +- [Kubernetes Cluster](#kubernetes-cluster) +- [Ingress Controller](#ingress-controller) +- [CLI Tools](#cli-tools) + +### Kubernetes Cluster + +Set up the Rancher server's local Kubernetes cluster. + +Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. + +For help setting up a Kubernetes cluster, we provide these tutorials: + +- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) +- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) +- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md) +- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md) +- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md) +- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how 
to install an Ingress controller so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md) + +### Ingress Controller + +The Rancher UI and API are exposed through an Ingress. This means the Kubernetes cluster that you install Rancher in must contain an Ingress controller. + +For RKE, RKE2, and K3s installations, you don't have to install the Ingress controller manually because one is installed by default. + +For distributions that do not include an Ingress Controller by default, like a hosted Kubernetes cluster such as EKS, GKE, or AKS, you have to deploy an Ingress controller first. Note that the Rancher Helm chart does not set an `ingressClassName` on the ingress by default. Because of this, you have to configure the Ingress controller to also watch ingresses without an `ingressClassName`. + +Examples are included in the **Amazon EKS**, **AKS**, and **GKE** tutorials above. + +### CLI Tools + +The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. + +# Install the Rancher Helm Chart + +Rancher is installed using the [Helm](https://helm.sh/) package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm, we can create configurable deployments instead of just using static files. + +For systems without direct internet access, see [Air Gap: Kubernetes install](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md). + +To choose a Rancher version to install, refer to [Choosing a Rancher Version.](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) + +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md) + +:::note + +The installation instructions assume you are using Helm 3. + +::: + +To set up Rancher, + +1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) +2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) +3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) +4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) +5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) +6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) +7. [Save your options](#7-save-your-options) + +### 1. 
Add the Helm Chart Repository + +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### 2. Create a Namespace for Rancher + +We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: + +``` +kubectl create namespace cattle-system +``` + +### 3. Choose your SSL Configuration + +The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. + +:::note + +If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +::: + +There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: + +- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. +- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. +- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. + + +| Configuration | Helm Chart Option | Requires cert-manager | +| ------------------------------ | ----------------------- | ------------------------------------- | +| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#4-install-cert-manager) | +| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#4-install-cert-manager) | +| Certificates from Files | `ingress.tls.source=secret` | no | + +### 4. Install cert-manager + +:::note + +New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +::: + +> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). 
+ +This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). + +
+ Click to Expand + +:::note Important: + +Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation](../getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md). + +::: + +These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). + +``` +# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart: +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml + +# Add the Jetstack Helm repository +helm repo add jetstack https://charts.jetstack.io + +# Update your local Helm chart repository cache +helm repo update + +# Install the cert-manager Helm chart +helm install cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.7.1 +``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +
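If the pods are running but you installed the CRDs manually rather than with `--set installCRDs=true`, a quick additional sanity check is to confirm that the cert-manager CustomResourceDefinitions exist (a sketch):

```
# cert-manager CRDs belong to the cert-manager.io and acme.cert-manager.io API groups
kubectl get crds | grep cert-manager.io
```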
+ +### 5. Install Rancher with Helm and Your Chosen Certificate Option + +The exact command to install Rancher differs depending on the certificate configuration. + +However, irrespective of the certificate configuration, the name of the Rancher installation in the `cattle-system` namespace should always be `rancher`. + +:::tip Testing and Development: + +This final command to install Rancher requires a domain name that forwards traffic to Rancher. If you are using the Helm CLI to set up a proof-of-concept, you can use a fake domain name when passing the `hostname` option. An example of a fake domain name would be `.sslip.io`, which would expose Rancher on an IP where it is running. Production installs would require a real domain name. + +::: + + + + +The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. + +Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set the `hostname` to the DNS name you pointed at your load balancer. +- Set the `bootstrapPassword` to something unique for the `admin` user. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. + +:::note + +You need to have port 80 open as the HTTP-01 challenge can only be done on port 80. + +::: + +In the following command, + +- `hostname` is set to the public DNS record, +- Set the `bootstrapPassword` to something unique for the `admin` user. +- `ingress.tls.source` is set to `letsEncrypt` +- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) +- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org \ + --set letsEncrypt.ingress.class=nginx +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + +In this option, Kubernetes secrets are created from your own certificates for Rancher to use. + +When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. 
+ +Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. + +:::note + +If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +::: + +- Set the `hostname`. +- Set the `bootstrapPassword` to something unique for the `admin` user. +- Set `ingress.tls.source` to `secret`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=secret +``` + +If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=secret \ + --set privateCA=true +``` + +Now that Rancher is deployed, see [Adding TLS Secrets](../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to publish the certificate files so Rancher and the Ingress controller can use them. + + + + +The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. + +- [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md#http-proxy) +- [Private container image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +See the [Chart Options](../reference-guides/installation-references/helm-chart-options.md) for the full list of options. + + +### 6. Verify that the Rancher Server is Successfully Deployed + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. + +### 7. Save Your Options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it. You should have a functional Rancher server. + +In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. + +Doesn't work? 
Take a look at the [Troubleshooting](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) Page diff --git a/docs/pages-for-subheaders/installation-and-upgrade.md b/docs/pages-for-subheaders/installation-and-upgrade.md new file mode 100644 index 0000000000..04a232fe3b --- /dev/null +++ b/docs/pages-for-subheaders/installation-and-upgrade.md @@ -0,0 +1,94 @@ +--- +title: Installing/Upgrading Rancher +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 3 +--- + +This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. + +# Terminology + +In this section, + +- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. The Rancher management server can be installed on any Kubernetes cluster, including hosted clusters, such as Amazon EKS clusters. +- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. +- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. +- **RKE2** is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. + +Note the `restrictedAdmin` Helm chart option available for **the Rancher Server**. When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#restricted-admin) + +# Overview of Installation Options + +Rancher can be installed on these main architectures: + +### High-availability Kubernetes Install with the Helm CLI + +We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. + +### Automated Quickstart to Deploy Rancher on Amazon EKS + +Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS Kubernetes cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) + +### Single-node Kubernetes Install + +Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. + +However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. + +### Docker Install + +For test and demonstration purposes, Rancher can be installed with Docker on a single node. A local Kubernetes cluster is installed in the single Docker container, and Rancher is installed on the local cluster. 
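For orientation, a single-node Docker install of this kind is typically started with a command along the following lines; treat it as a sketch and check the Docker install documentation referenced below for the currently supported image tags and flags for your Rancher version:

```
# Runs Rancher and its embedded Kubernetes cluster in a single container.
# The --privileged flag is required for Rancher v2.5 and later Docker installs.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest
```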
+ +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +### Other Options + +There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: + +| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | +| ---------------------------------- | ------------------------------ | ---------- | +| With direct access to the Internet | [Docs](install-upgrade-on-a-kubernetes-cluster.md) | [Docs](rancher-on-a-single-node-with-docker.md) | +| Behind an HTTP proxy | [Docs](rancher-behind-an-http-proxy.md) | These [docs,](rancher-on-a-single-node-with-docker.md) plus this [configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) | +| In an air gap environment | [Docs](air-gapped-helm-cli-install.md) | [Docs](air-gapped-helm-cli-install.md) | + +We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. + +For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. + +For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. + +Our [instructions for installing Rancher on Kubernetes](install-upgrade-on-a-kubernetes-cluster.md) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. + +When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,](installation-requirements.md) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. + +For a longer discussion of Rancher architecture, refer to the [architecture overview,](rancher-manager-architecture.md) [recommendations for production-grade architecture,](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) or our [best practices guide.](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) + +# Prerequisites +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.](installation-requirements.md) + +# Architecture Tip + +For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. 
+ +For more architecture recommendations, refer to [this page.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### More Options for Installations on a Kubernetes Cluster + +Refer to the [Helm chart options](../reference-guides/installation-references/helm-chart-options.md) for details on installing Rancher on a Kubernetes cluster with other configurations, including: + +- With [API auditing to record all transactions](../reference-guides/installation-references/helm-chart-options.md#api-audit-log) +- With [TLS termination on a load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) +- With a [custom Ingress](../reference-guides/installation-references/helm-chart-options.md#customizing-your-ingress) + +In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. For the full list of their capabilities, refer to their documentation: + +- [RKE configuration options](https://rancher.com/docs/rke/latest/en/config-options/) +- [K3s configuration options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +### More Options for Installations with Docker + +Refer to the [docs about options for Docker installs](rancher-on-a-single-node-with-docker.md) for details about other configurations including: + +- With [API auditing to record all transactions](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) +- With an [external load balancer](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md) +- With a [persistent data store](../reference-guides/single-node-rancher-in-docker/advanced-options.md#persistent-data) diff --git a/docs/pages-for-subheaders/installation-references.md b/docs/pages-for-subheaders/installation-references.md new file mode 100644 index 0000000000..0eeae0f6c4 --- /dev/null +++ b/docs/pages-for-subheaders/installation-references.md @@ -0,0 +1,5 @@ +--- +title: Installation References +--- + +Please see the following reference guides for other installation resources: [Rancher Helm chart options](../reference-guides/installation-references/helm-chart-options.md), [TLS settings](../reference-guides/installation-references/tls-settings.md), and [feature flags](../reference-guides/installation-references/feature-flags.md). \ No newline at end of file diff --git a/docs/pages-for-subheaders/installation-requirements.md b/docs/pages-for-subheaders/installation-requirements.md new file mode 100644 index 0000000000..14e28c814e --- /dev/null +++ b/docs/pages-for-subheaders/installation-requirements.md @@ -0,0 +1,186 @@ +--- +title: Installation Requirements +description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup +weight: 1 +--- + +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. 
+ +:::note Important: + +If you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) which will run your apps and services. + +::: + +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) + - [RKE Specific Requirements](#rke-specific-requirements) + - [K3s Specific Requirements](#k3s-specific-requirements) + - [RKE2 Specific Requirements](#rke2-specific-requirements) + - [Installing Docker](#installing-docker) +- [Hardware Requirements](#hardware-requirements) +- [CPU and Memory](#cpu-and-memory) + - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) + - [K3s Kubernetes](#k3s-kubernetes) + - [RKE2 Kubernetes](#rke2-kubernetes) + - [Docker](#docker) +- [Ingress](#ingress) +- [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) +- [Dockershim Support](#dockershim-support) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) + +The Rancher UI works best in Firefox or Chrome. + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution. + +Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for Kubernetes installs. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. + +Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes v1.19, v1.20 and v1.21, firewalld must be turned off. + +If you don't feel comfortable doing so you might check suggestions in the [respective issue](https://github.com/rancher/rancher/issues/28840). Some users were successful [creating a separate firewalld zone with a policy of ACCEPT for the Pod CIDR](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822). + +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md) + +### RKE Specific Requirements + +For the container runtime, RKE should work with any modern Docker version. 
+ +Note that the following sysctl setting must be applied: + +``` +net.bridge.bridge-nf-call-iptables=1 +``` + +### K3s Specific Requirements + +For the container runtime, K3s should work with any modern version of Docker or containerd. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. + +If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. + + + +### RKE2 Specific Requirements + +For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +Docker is not required for RKE2 installs. + +### Installing Docker + +Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts](../getting-started/installation-and-upgrade/installation-requirements/install-docker.md) to install Docker with one command. + +# Hardware Requirements + +The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. + +# CPU and Memory + +Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. + +### RKE and Hosted Kubernetes + +These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. + +These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | ---------- | ------------ | -------| ------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. 
+ +### K3s Kubernetes + +These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | +| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + + +### RKE2 Kubernetes + +These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 2 | 5 GB | +| Medium | Up to 15 | Up to 200 | 3 | 9 GB | + +### Docker + +These CPU and memory requirements apply to a host with a [single-node](rancher-on-a-single-node-with-docker.md) installation of Rancher. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + +# Ingress + +Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. + +The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. + +For RKE and K3s installations, you don't have to install the Ingress manually because it is installed by default. + +For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the ingress. + +- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md) +- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md) +- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md) + +# Disks + +Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. 
+ +:::caution + +If a server containing Rancher has the `X-Frame-Options=DENY` header, some pages in the new Rancher UI will not be able to render after upgrading from the legacy UI. This is because some legacy pages are embedded as iFrames in the new UI. + +::: + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +### Port Requirements + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements](../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. + +# Dockershim Support + +For more information on Dockershim support, refer to [this page](../getting-started/installation-and-upgrade/installation-requirements/dockershim.md). diff --git a/docs/pages-for-subheaders/integrations-in-rancher.md b/docs/pages-for-subheaders/integrations-in-rancher.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/integrations-in-rancher.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/introduction.md b/docs/pages-for-subheaders/introduction.md new file mode 100644 index 0000000000..d11bed52a2 --- /dev/null +++ b/docs/pages-for-subheaders/introduction.md @@ -0,0 +1,5 @@ +--- +title: Introduction +--- + +The [overview](../getting-started/introduction/overview.md) will discuss Rancher's features, capabilities, and how it makes running Kubernetes easy. The guide to the [new Rancher Manager docs structure, Divio,](../getting-started/introduction/what-are-divio-docs?.md) will explain more about the updated look and function of our docs. \ No newline at end of file diff --git a/docs/pages-for-subheaders/istio-setup-guide.md b/docs/pages-for-subheaders/istio-setup-guide.md new file mode 100644 index 0000000000..9426d0949b --- /dev/null +++ b/docs/pages-for-subheaders/istio-setup-guide.md @@ -0,0 +1,30 @@ +--- +title: Setup Guide +weight: 2 +--- + +This section describes how to enable Istio and start using it in your projects. + +If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. + +# Prerequisites + +This guide assumes you have already [installed Rancher,](installation-and-upgrade.md) and you have already [provisioned a separate Kubernetes cluster](kubernetes-clusters-in-rancher-setup.md) on which you will install Istio. 
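+
+Before continuing, it can help to confirm that `kubectl` is pointed at that downstream cluster and that its nodes are visible. This is only a sketch; it assumes `kubectl` is configured with the cluster's kubeconfig and that metrics-server is installed for `kubectl top`:
+
+```bash
+# Confirm you are targeting the cluster that will run Istio
+kubectl config current-context
+kubectl get nodes -o wide
+
+# Optional: check available CPU and memory against the requirements below
+kubectl top nodes
+```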
+ +The nodes in your cluster must meet the [CPU and memory requirements.](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) + +The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) + + +# Install + +:::tip Quick Setup Tip: If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) and [setting up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) + +::: + +1. [Enable Istio in the cluster.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md) +1. [Enable Istio in all the namespaces where you want to use it.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) +1. [Add deployments and services that have the Istio sidecar injected.](../how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md) +1. [Set up the Istio gateway. ](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) +1. [Set up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) +1. [Generate traffic and see Istio in action.](../how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md) diff --git a/docs/pages-for-subheaders/istio.md b/docs/pages-for-subheaders/istio.md new file mode 100644 index 0000000000..4e202f8a2e --- /dev/null +++ b/docs/pages-for-subheaders/istio.md @@ -0,0 +1,134 @@ +--- +title: Istio +weight: 14 +--- + +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. + +As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. + +Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to a team of developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +This core service mesh provides features that include but are not limited to the following: + +- **Traffic Management** such as ingress and egress routing, circuit breaking, mirroring. +- **Security** with resources to authenticate and authorize traffic and users, mTLS included. +- **Observability** of logs, metrics, and distributed traffic flows. + +After [setting up istio](istio-setup-guide.md) you can leverage Istio's control plane functionality through the Rancher UI, `kubectl`, or `istioctl`. + +Istio needs to be set up by a `cluster-admin` before it can be used in a project. 
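+
+For example, once Istio is installed, a `cluster-admin` can verify the control plane from a shell as well as from the UI. This is only a sketch; the `istio-system` namespace is the upstream default, and `istioctl` must be installed locally:
+
+```bash
+# Check that the Istio control plane (istiod) and ingress gateway pods are running
+kubectl get pods -n istio-system
+
+# Compare client and control plane versions, and lint the mesh configuration
+istioctl version
+istioctl analyze --all-namespaces
+```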
+ +- [What's New in Rancher v2.5](#what-s-new-in-rancher-v2-5) +- [Tools Bundled with Istio](#tools-bundled-with-istio) +- [Prerequisites](#prerequisites) +- [Setup Guide](#setup-guide) +- [Remove Istio](#remove-istio) +- [Migrate from Previous Istio Version](#migrate-from-previous-istio-version) +- [Accessing Visualizations](#accessing-visualizations) +- [Architecture](#architecture) +- [Additional steps for installing Istio on an RKE2 cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) + +# What's New in Rancher v2.5 + +The overall architecture of Istio has been simplified. A single component, Istiod, has been created by combining Pilot, Citadel, Galley and the sidecar injector. Node Agent functionality has also been merged into istio-agent. + +Addons that were previously installed by Istio (cert-manager, Grafana, Jaeger, Kiali, Prometheus, Zipkin) will now need to be installed separately. Istio will support installation of integrations that are from the Istio Project and will maintain compatibility with those that are not. + +A Prometheus integration will still be available through an installation of [Rancher Monitoring](monitoring-and-alerting.md), or by installing your own Prometheus operator. Rancher's Istio chart will also install Kiali by default to ensure you can get a full picture of your microservices out of the box. + +Istio has migrated away from Helm as a way to install Istio and now provides installation through the istioctl binary or Istio Operator. To ensure the easiest interaction with Istio, Rancher's Istio will maintain a Helm chart that utilizes the istioctl binary to manage your Istio installation. + +This Helm chart will be available via the Apps and Marketplace in the UI. A user that has access to the Rancher Chart's catalog will need to set up Istio before it can be used in the project. + +# Tools Bundled with Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy Helm chart, including an overlay file option to allow complex customization. + +It also includes the following: + +### Kiali + +Kiali is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them. + +You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. + +### Jaeger + +Our Istio installer includes a quick-start, all-in-one installation of [Jaeger,](https://www.jaegertracing.io/) a tool used for tracing distributed systems. + +Note that this is not a production-qualified deployment of Jaeger. This deployment uses an in-memory storage component, while a persistent storage component is recommended for production. For more information on which deployment strategy you may need, refer to the [Jaeger documentation.](https://www.jaegertracing.io/docs/latest/operator/#production-strategy) + +# Prerequisites + +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) to run all of the components of Istio. + +If you are installing Istio on RKE2 cluster, some additional steps are required. 
For details, see [this section.](#additional-steps-for-installing-istio-on-an-rke2-cluster)
+
+Note that Istio v2 (upstream Istio v1.7+) cannot be upgraded in an air-gapped environment.
+
+# Setup Guide
+
+Refer to the [setup guide](istio-setup-guide.md) for instructions on how to set up Istio and use it in a project.
+
+# Remove Istio
+
+To remove Istio components from a cluster, namespace, or workload, refer to the section on [uninstalling Istio.](../explanations/integrations-in-rancher/istio/disable-istio.md)
+
+# Migrate From Previous Istio Version
+
+There is no upgrade path for Istio versions less than 1.7.x. To successfully install Istio through **Apps & Marketplace,** you will need to disable your existing Istio from the global view in the legacy Rancher UI.
+
+If you have a significant number of additional Istio CRDs, you might consider manually migrating the CRDs that are supported in both versions of Istio. You can do this by running `kubectl get -n istio-system -o yaml` against the resources you want to keep, saving the output YAML, and re-applying it in the new version.
+
+Another option is to manually uninstall Istio resources one at a time, but leave the resources that are supported in both versions of Istio and that will not be installed by the newest version. This method is more likely to result in issues installing the new version, but could be a good option depending on your situation.
+
+# Accessing Visualizations
+
+> By default, only cluster-admins have access to Kiali. For instructions on how to allow the admin, edit, or view roles to access it, see [this section.](../explanations/integrations-in-rancher/istio/rbac-for-istio.md)
+
+After Istio is set up in a cluster, Grafana, Prometheus, and Kiali are available in the Rancher UI.
+
+To access the Grafana and Prometheus visualizations,
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**.
+1. In the left navigation bar, click **Monitoring**.
+1. Click **Grafana** or any of the other dashboards.
+
+To access the Kiali visualization,
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to see Kiali and click **Explore**.
+1. In the left navigation bar, click **Istio**.
+1. Click **Kiali**. From here you can access the **Traffic Graph** tab or the **Traffic Metrics** tab to see network visualizations and metrics.
+
+By default, all namespaces are picked up by Prometheus, which makes the data available for Kiali graphs. Refer to [selector/scrape config setup](../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) if you would like to use a different configuration for Prometheus data scraping.
+
+Your access to the visualizations depends on your role. Grafana and Prometheus are only available for `cluster-admin` roles. The Kiali UI is available only to `cluster-admin` by default, but `cluster-admin` can allow other roles to access it by editing the Istio values.yaml.
+
+# Architecture
+
+Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster.
+
+Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio.
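+
+A quick way to see whether a workload actually has the sidecar is to look at its pods: injected pods run an extra `istio-proxy` container. A minimal sketch, where the namespace name is hypothetical and the commands reflect a default upstream Istio setup:
+
+```bash
+# Check whether the namespace is labeled for automatic sidecar injection
+kubectl get namespace my-namespace --show-labels
+
+# Injected pods show an additional istio-proxy container (e.g. READY 2/2)
+kubectl get pods -n my-namespace
+kubectl get pod <pod-name> -n my-namespace -o jsonpath='{.spec.containers[*].name}'
+```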
+
+When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads.
+
+For more information on the Istio sidecar, refer to the [Istio sidecar injection docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/), and for more information on Istio's architecture, refer to the [Istio Architecture docs](https://istio.io/latest/docs/ops/deployment/architecture/).
+
+### Multiple Ingresses
+
+By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. Istio also installs an ingress gateway by default into the `istio-system` namespace. The result is that your cluster will have two ingresses.
+
+![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg)
+
+Additional Istio Ingress gateways can be enabled via the [overlay file](configuration-options.md#overlay-file).
+
+### Egress Support
+
+By default, the Egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file](configuration-options.md#overlay-file).
+
+# Additional Steps for Installing Istio on an RKE2 Cluster
+
+To install Istio on an RKE2 cluster, follow the steps in [this section.](../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md)
diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/_index.md b/docs/pages-for-subheaders/kubernetes-cluster-setup.md
similarity index 100%
rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/_index.md
rename to docs/pages-for-subheaders/kubernetes-cluster-setup.md
diff --git a/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md
new file mode 100644
index 0000000000..309a6fbacd
--- /dev/null
+++ b/docs/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md
@@ -0,0 +1,89 @@
+---
+title: Setting up Kubernetes Clusters in Rancher
+description: Provisioning Kubernetes Clusters
+weight: 7
+---
+
+Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case.
+
+This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page.
+
+For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture](rancher-manager-architecture.md) page.
+ +This section covers the following topics: + + + +- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) +- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) +- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) + - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) + - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) +- [Registering Existing Clusters](#registering-existing-clusters) +- [Programmatically Creating Clusters](#programmatically-creating-clusters) + + + +### Cluster Management Capabilities by Cluster Type + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +# Setting up Clusters in a Hosted Kubernetes Provider + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +For more information, refer to the section on [hosted Kubernetes clusters.](set-up-clusters-from-hosted-kubernetes-providers.md) + +# Launching Kubernetes with Rancher + +Rancher uses the [Rancher Kubernetes Engine (RKE)](https://rancher.com/docs/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. + +In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. + +These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. + +If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. + +For more information, refer to the section on [RKE clusters.](launch-kubernetes-with-rancher.md) + +### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider + +Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This template defines the parameters used to launch nodes in your cloud providers. + +One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. + +The cloud providers available for creating a node template are decided based on the [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers) active in the Rancher UI. 
+ +For more information, refer to the section on [nodes hosted by an infrastructure provider](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,](use-existing-nodes.md) which creates a custom cluster. + +You can bring any nodes you want to Rancher and use them to create a cluster. + +These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. + +# Registering Existing Clusters + +The cluster registration feature replaces the feature to import clusters. + +Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. + +When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +For more information, see [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) + +# Programmatically Creating Clusters + +The most common way to programmatically deploy Kubernetes clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) + +EKS, GKE, AKS clusters and RKE clusters can be created or imported with Terraform. diff --git a/docs/pages-for-subheaders/kubernetes-components.md b/docs/pages-for-subheaders/kubernetes-components.md new file mode 100644 index 0000000000..4fc8354908 --- /dev/null +++ b/docs/pages-for-subheaders/kubernetes-components.md @@ -0,0 +1,18 @@ +--- +title: Kubernetes Components +weight: 100 +--- + +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. + +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes](../troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md) +- [Troubleshooting Controlplane Nodes](../troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md) +- [Troubleshooting nginx-proxy Nodes](../troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md) +- [Troubleshooting Worker Nodes and Generic Components](../troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md) + +# Kubernetes Component Diagram + +![Cluster diagram](/img/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely as a visual aid.
\ No newline at end of file
diff --git a/docs/pages-for-subheaders/kubernetes-resources-setup.md b/docs/pages-for-subheaders/kubernetes-resources-setup.md
new file mode 100644
index 0000000000..3778a8dbca
--- /dev/null
+++ b/docs/pages-for-subheaders/kubernetes-resources-setup.md
@@ -0,0 +1,70 @@
+---
+title: Kubernetes Resources
+weight: 18
+---
+
+You can view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI.
+
+## Workloads
+
+Deploy applications to your cluster nodes using [workloads](workloads-and-pods.md), which are objects that contain pods that run your apps, along with metadata that sets rules for the deployment's behavior. Workloads can be deployed within the scope of the entire cluster or within a namespace.
+
+When deploying a workload, you can deploy from any image. There are a variety of [workload types](workloads-and-pods.md#workload-types) to choose from, which determine how your application should run.
+
+Following a workload deployment, you can continue working with it. You can:
+
+- [Upgrade](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) the workload to a newer version of the application it's running.
+- [Roll back](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) a workload to a previous version, if an issue occurs during upgrade.
+- [Add a sidecar](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md), which is a workload that supports a primary workload.
+
+## Load Balancing and Ingress
+
+### Load Balancers
+
+After you launch an application, it's only available within the cluster. It can't be reached externally.
+
+If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number.
+
+Rancher supports two types of load balancers:
+
+- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer)
+- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer)
+
+For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md).
+
+#### Ingress
+
+Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiple load balancers can be expensive. You can get around this issue by using an ingress.
+
+Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured.
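+
+As a rough illustration of those path rules, an Ingress resource like the following routes two paths on one hostname to two different services. This is only a sketch with made-up names; in Rancher you would typically create the equivalent ingress through the UI:
+
+```bash
+# Hypothetical example: route /web and /api on one host to separate services
+cat <<'EOF' | kubectl apply -f -
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-ingress
+  namespace: default
+spec:
+  rules:
+  - host: app.example.com
+    http:
+      paths:
+      - path: /web
+        pathType: Prefix
+        backend:
+          service:
+            name: web
+            port:
+              number: 80
+      - path: /api
+        pathType: Prefix
+        backend:
+          service:
+            name: api
+            port:
+              number: 80
+EOF
+```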
+ +For more information, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). + +When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. + +## Service Discovery + +After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. + +For more information, see [Service Discovery](../how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md). + +## Pipelines + +After your project has been [configured to a version control provider](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. + +For more information, see [Pipelines](pipelines.md). + +## Applications + +Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. + +## Kubernetes Resources + +Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. + +Resources include: + +- [Certificates](../how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md): Files used to encrypt/decrypt data entering or leaving the cluster. +- [ConfigMaps](../how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md): Files that store general configuration information, such as a group of config files. +- [Secrets](../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md): Files that store sensitive data like passwords, tokens, or keys. +- [Registries](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md): Files that carry credentials used to authenticate with private registries. diff --git a/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md b/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md new file mode 100644 index 0000000000..b071ea0c0b --- /dev/null +++ b/docs/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -0,0 +1,81 @@ +--- +title: Launching Kubernetes with Rancher +weight: 4 +--- + +You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, you can choose between [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) or [RKE2](https://docs.rke2.io) distributions. 
Rancher can launch Kubernetes on any computers, including: + +- Bare-metal servers +- On-premise virtual machines +- Virtual machines hosted by an infrastructure provider + +Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. + +Rancher can also create pools of nodes. One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +## RKE + +### Requirements + +If you use RKE to set up a cluster, your nodes must meet the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) for nodes in downstream user clusters. + +### Launching Kubernetes on New Nodes in an Infrastructure Provider + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +For more information, refer to the section on [launching Kubernetes on new nodes.](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. + +If you want to reuse a node from a previous custom cluster, [clean the node](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +For more information, refer to the section on [custom nodes.](use-existing-nodes.md) + +# Programmatically Creating RKE Clusters + +The most common way to programmatically deploy RKE clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) + +## RKE2 + +Rancher v2.6 introduced provisioning for [RKE2](https://docs.rke2.io/) clusters directly from the Rancher UI. RKE2, also known as RKE Government, is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. In Rancher v.2.6.4 and earlier, RKE2 provisioning was in tech preview. + +Note that in Rancher v2.6.5, RKE2 provisioning became GA. + +### Requirements + +If you use RKE2 to set up a cluster, your nodes must meet the [requirements](https://docs.rke2.io/install/requirements/) for nodes in downstream user clusters. 
+ +### Launching Kubernetes on New Nodes in an Infrastructure Provider + +RKE2 provisioning is built on top of a new provisioning framework that leverages the upstream [Cluster API](https://github.com/kubernetes-sigs/cluster-api) project. With this new provisioning framework, you can: + +- Provision RKE2 clusters onto any provider for which Rancher has a node driver +- Fully configure RKE2 clusters within Rancher +- Choose CNI options Calico, Cilium, and Multus in addition to Canal + +RKE2 provisioning also includes installing RKE2 on clusters with Windows nodes. + +Windows features for RKE2 include: + +- Windows supports the vSphere node driver +- Calico CNI for Windows RKE2 custom clusters +- Project Network Isolation (PNI) for Calico +- Windows Containers with RKE2 powered by containerd +- Provisioning of Windows RKE2 clusters through Terraform +- Provisioning of Windows RKE2 custom clusters directly from the Rancher UI + +Windows Support for RKE2 Custom Clusters requires choosing Calico as the CNI. + +### Launching Kubernetes on Existing Custom Nodes + +RKE2 provisioning also allows you to install custom clusters on pre-provisioned VMs or bare-metal nodes. + +If you want to reuse a node from a previous custom cluster, clean the node before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +# Programmatically Creating RKE2 Clusters + +The most common way to programmatically deploy RKE2 clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster_v2) \ No newline at end of file diff --git a/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md b/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md new file mode 100644 index 0000000000..793605dcf9 --- /dev/null +++ b/docs/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -0,0 +1,63 @@ +--- +title: Set Up Load Balancer and Ingress Controller within Rancher +description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers +weight: 3040 +--- + +Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. + +## Load Balancers + +After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. + +If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. + +Rancher supports two types of load balancers: + +- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) + +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). 
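+
+As a simple illustration of the Layer-4 case, you can expose an existing deployment with a `LoadBalancer` service; the cloud provider (or another load balancer controller) then provisions the external endpoint. A sketch with hypothetical names:
+
+```bash
+# Expose a deployment named "web" on port 80 through a Layer-4 load balancer
+kubectl expose deployment web --name=web-lb --type=LoadBalancer --port=80 --target-port=8080
+
+# The EXTERNAL-IP column is populated once the load balancer is provisioned
+kubectl get service web-lb
+```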
+ +### Load Balancer Limitations + +Load Balancers have a couple of limitations you should be aware of: + +- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. + +- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: + + + - [Support for Layer-4 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-4-load-balancing) + + - [Support for Layer-7 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-7-load-balancing) + +## Ingress + +As mentioned in the limitations above, the disadvantages of using a load balancer are: + +- Load Balancers can only handle one IP address per service. +- If you run multiple services in your cluster, you must have a load balancer for each service. +- It can be expensive to have a load balancer for every service. + +In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. + +Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. + +Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. + +Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. + +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). + +Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. + +:::note Using Rancher in a High Availability Configuration? + +Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. + +::: + +- For more information on how to set up ingress in Rancher, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). 
+- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. diff --git a/docs/pages-for-subheaders/logging.md b/docs/pages-for-subheaders/logging.md new file mode 100644 index 0000000000..c65116b1ae --- /dev/null +++ b/docs/pages-for-subheaders/logging.md @@ -0,0 +1,132 @@ +--- +title: Rancher Integration with Logging Services +shortTitle: Logging +description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. +metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." +weight: 15 +--- + +The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. + +For an overview of the changes in v2.5, see [this section.](../explanations/integrations-in-rancher/logging/logging-architecture.md#changes-in-rancher-v2-5) For information about migrating from Logging V1, see [this page.](../explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md) + +- [Enabling Logging](#enabling-logging) +- [Uninstall Logging](#uninstall-logging) +- [Architecture](#architecture) +- [Role-based Access Control](#role-based-access-control) +- [Configuring the Logging Custom Resources](#configuring-the-logging-custom-resources) + - [Flows and ClusterFlows](#flows-and-clusterflows) + - [Outputs and ClusterOutputs](#outputs-and-clusteroutputs) +- [Configuring the Logging Helm Chart](#configuring-the-logging-helm-chart) + - [Windows Support](#windows-support) + - [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) + - [Working with Taints and Tolerations](#working-with-taints-and-tolerations) + - [Logging V2 with SELinux](#logging-v2-with-selinux) + - [Additional Logging Sources](#additional-logging-sources) +- [Troubleshooting](#troubleshooting) + +# Enabling Logging + +You can enable the logging for a Rancher managed cluster by going to the Apps page and installing the logging app. + +1. Go to the cluster where you want to install logging and click **Apps & Marketplace**. +1. Click the **Logging** app. +1. Scroll to the bottom of the Helm chart README and click **Install**. + +**Result:** The logging app is deployed in the `cattle-logging-system` namespace. + +# Uninstall Logging + +1. Go to the cluster where you want to install logging and click **Apps & Marketplace**. +1. Click **Installed Apps**. +1. Go to the `cattle-logging-system` namespace and check the boxes for `rancher-logging` and `rancher-logging-crd`. +1. Click **Delete**. +1. Confirm **Delete**. + +**Result** `rancher-logging` is uninstalled. + +# Architecture + +For more information about how the logging application works, see [this section.](../explanations/integrations-in-rancher/logging/logging-architecture.md) + + + +# Role-based Access Control + +Rancher logging has two roles, `logging-admin` and `logging-view`. 
For more information on how and when to use these roles, see [this page.](../explanations/integrations-in-rancher/logging/rbac-for-logging.md)
+
+# Configuring Logging Custom Resources
+
+To manage `Flows`, `ClusterFlows`, `Outputs`, and `ClusterOutputs`,
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to configure logging custom resources and click **Explore**.
+1. In the left navigation bar, click **Logging**.
+
+### Flows and ClusterFlows
+
+For help with configuring `Flows` and `ClusterFlows`, see [this page.](../explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md)
+
+### Outputs and ClusterOutputs
+
+For help with configuring `Outputs` and `ClusterOutputs`, see [this page.](../explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md)
+
+# Configuring the Logging Helm Chart
+
+For a list of options that can be configured when the logging application is installed or upgraded, see [this page.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md)
+
+### Windows Support
+
+Logging support for Windows clusters is available, and logs can be collected from Windows nodes.
+
+For details on how to enable or disable Windows node logging, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#enable-disable-windows-node-logging)
+
+### Working with a Custom Docker Root Directory
+
+For details on using a custom Docker root directory, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#working-with-a-custom-docker-root-directory)
+
+### Working with Taints and Tolerations
+
+For information on how to use taints and tolerations with the logging application, see [this page.](../explanations/integrations-in-rancher/logging/taints-and-tolerations.md)
+
+### Logging V2 with SELinux
+
+For information on enabling the logging application for SELinux-enabled nodes, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#enabling-the-logging-application-to-work-with-selinux)
+
+### Additional Logging Sources
+
+By default, Rancher collects logs for control plane components and node components for all cluster types. In some cases, additional logs can be collected. For details, see the [Helm chart options page.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md)
+
+# Troubleshooting
+
+### The `cattle-logging` Namespace Being Recreated
+
+If your cluster previously deployed logging from the global view in the legacy Rancher UI, you may encounter an issue where its `cattle-logging` namespace is continually being recreated.
+
+The solution is to delete all `clusterloggings.management.cattle.io` and `projectloggings.management.cattle.io` custom resources from the cluster-specific namespace in the management cluster.
+The existence of these custom resources causes Rancher to create the `cattle-logging` namespace in the downstream cluster if it does not exist.
+
+The cluster namespace matches the cluster ID, so we need to find the cluster ID for each cluster.
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster you want to get the ID of and click **Explore**.
+2. Copy the `` portion from one of the URLs below. The `` portion is the cluster namespace name.
+ +```bash +# Cluster Management UI +https:///c// + +# Cluster Dashboard +https:///dashboard/c// +``` + +Now that we have the `` namespace, we can delete the CRs that cause `cattle-logging` to be continually recreated. +*Warning:* ensure that logging, the version installed from the global view in the legacy Rancher UI, is not currently in use. + +```bash +kubectl delete clusterloggings.management.cattle.io -n +kubectl delete projectloggings.management.cattle.io -n +``` diff --git a/docs/pages-for-subheaders/machine-configuration.md b/docs/pages-for-subheaders/machine-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/machine-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/manage-clusters.md b/docs/pages-for-subheaders/manage-clusters.md new file mode 100644 index 0000000000..24f9b6f365 --- /dev/null +++ b/docs/pages-for-subheaders/manage-clusters.md @@ -0,0 +1,39 @@ +--- +title: Cluster Administration +weight: 8 +--- + +After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. + +This page covers the following topics: + +- [Switching between clusters](#switching-between-clusters) +- [Managing clusters in Rancher](#managing-clusters-in-rancher) +- [Configuring tools](#configuring-tools) + +:::note + +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +::: + +## Managing Clusters in Rancher + +After clusters have been [provisioned into Rancher](kubernetes-clusters-in-rancher-setup.md), [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +## Configuring Tools + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + +- Alerts +- Notifiers +- Logging +- Monitoring +- Istio Service Mesh +- OPA Gatekeeper + +Tools can be installed through **Apps & Marketplace.** diff --git a/docs/pages-for-subheaders/manage-persistent-storage.md b/docs/pages-for-subheaders/manage-persistent-storage.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/manage-persistent-storage.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/manage-project-resource-quotas.md b/docs/pages-for-subheaders/manage-project-resource-quotas.md new file mode 100644 index 0000000000..5492d92781 --- /dev/null +++ b/docs/pages-for-subheaders/manage-project-resource-quotas.md @@ -0,0 +1,41 @@ +--- +title: Project Resource Quotas +weight: 2515 +--- + +In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. 
To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace.
+
+This page is a how-to guide for creating resource quotas in existing projects.
+
+Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects)
+
+Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md)
+
+### Applying Resource Quotas to Existing Projects
+
+Edit resource quotas when:
+
+- You want to limit the resources that a project and its namespaces can use.
+- You want to scale the resources available to a project up or down when a resource quota is already in effect.
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to apply a resource quota and click **Explore**.
+1. Click **Cluster > Projects/Namespaces**.
+1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit Config**.
+
+1. Expand **Resource Quotas** and click **Add Resource**. Alternatively, you can edit existing quotas.
+
+1. Select a Resource Type. For more information on types, see the [quota type reference.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md)
+
+1. Enter values for the **Project Limit** and the **Namespace Default Limit**.
+
+    | Field | Description |
+    | ----------------------- | -------------------------------------------------------------------------------------------------------- |
+    | Project Limit | The overall resource limit for the project. |
+    | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. |
+
+1. **Optional:** Add more quotas.
+
+1. Click **Create**.
+
+**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, you may still create namespaces, but they will be given a resource quota of 0. Subsequently, Rancher will not allow you to create any resources restricted by this quota.
diff --git a/docs/pages-for-subheaders/manage-projects.md b/docs/pages-for-subheaders/manage-projects.md
new file mode 100644
index 0000000000..9ac4f58a7c
--- /dev/null
+++ b/docs/pages-for-subheaders/manage-projects.md
@@ -0,0 +1,39 @@
+---
+title: Project Administration
+weight: 9
+---
+
+_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications.
+ +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! + +Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. + +You can use projects to perform actions like: + +- [Assign users access to a group of namespaces](../how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md) +- Assign users [specific roles in a project](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). A role can be owner, member, read-only, or [custom](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md) +- [Set resource quotas](manage-project-resource-quotas.md) +- [Manage namespaces](../how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md) +- [Configure tools](../reference-guides/rancher-project-tools.md) +- [Set up pipelines for continuous integration and deployment](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- [Configure pod security policies](../how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md) + +### Authorization + +Non-administrative users are only authorized for project access after an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owner or member](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) adds them to the project's **Members** tab. + +Whoever creates the project automatically becomes a [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). + +## Switching between Projects + +To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to switch projects and click **Explore**. +1. In the top navigation bar, select the project that you want to open. 
diff --git a/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md b/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md new file mode 100644 index 0000000000..42f1f049cc --- /dev/null +++ b/docs/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -0,0 +1,26 @@ +--- +title: Role-Based Access Control (RBAC) +weight: 20 +--- + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. + +After you configure external authentication, the users that display on the **Users** page changes. + +- If you are logged in as a local user, only local users display. + +- If you are logged in as an external user, both external and local users display. + +## Users and Roles + +Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. + +- [Global Permissions](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md): + + Define user authorization outside the scope of any particular cluster. + +- [Cluster and Project Roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md): + + Define user authorization inside the specific cluster or project where they are assigned the role. + +Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/docs/pages-for-subheaders/monitoring-alerting-guides.md b/docs/pages-for-subheaders/monitoring-alerting-guides.md new file mode 100644 index 0000000000..3aa725064b --- /dev/null +++ b/docs/pages-for-subheaders/monitoring-alerting-guides.md @@ -0,0 +1,13 @@ +--- +title: Monitoring Guides +shortTitle: Guides +weight: 4 +--- + +- [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) +- [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) +- [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) +- [Customizing Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) +- [Persistent Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) +- [Debugging high memory usage](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) +- [Migrating from Monitoring V1 to V2](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/monitoring-and-alerting.md b/docs/pages-for-subheaders/monitoring-and-alerting.md new file mode 100644 index 0000000000..c0cc71541d --- /dev/null +++ b/docs/pages-for-subheaders/monitoring-and-alerting.md @@ -0,0 +1,105 @@ +--- +title: Monitoring and Alerting +shortTitle: Monitoring/Alerting +description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. 
Learn about the scope of monitoring and how to enable cluster monitoring +weight: 13 +--- + +Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. + +- [Features](#features) +- [How Monitoring Works](#how-monitoring-works) +- [Default Components and Deployments](#default-components-and-deployments) +- [Role-based Access Control](#role-based-access-control) +- [Guides](#guides) +- [Windows Cluster Support](#windows-cluster-support) +- [Known Issues](#known-issues) + +### Features + +Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. + +By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, or restore crashed servers. + +The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) + +The monitoring application allows you to: + +- Monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments +- Define alerts based on metrics collected via Prometheus +- Create custom Grafana dashboards +- Configure alert-based notifications via Email, Slack, PagerDuty, etc. using Prometheus Alertmanager +- Define precomputed, frequently needed, or computationally expensive expressions as new time series based on metrics collected via Prometheus +- Expose collected metrics from Prometheus to the Kubernetes Custom Metrics API via Prometheus Adapter for use in HPA + +# How Monitoring Works + +For an explanation of how the monitoring components work together, see [this page.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +# Default Components and Deployments + +### Built-in Dashboards + +By default, the monitoring application deploys Grafana dashboards (curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project) onto a cluster. + +It also deploys an Alertmanager UI and a Prometheus UI. For more information about these tools, see [Built-in Dashboards.](../explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md) + +### Default Metrics Exporters + +By default, Rancher Monitoring deploys exporters (such as [node-exporter](https://github.com/prometheus/node_exporter) and [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)).
+ +These default exporters automatically scrape metrics for CPU and memory from all components of your Kubernetes cluster, including your workloads. + +### Default Alerts + +The monitoring application deploys some alerts by default. To see the default alerts, go to the [Alertmanager UI](../explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md#alertmanager-ui) and click **Expand all groups.** + +### Components Exposed in the Rancher UI + +For a list of monitoring components exposed in the Rancher UI, along with common use cases for editing them, see [this section.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#components-exposed-in-the-rancher-ui) + +# Role-based Access Control + +For information on configuring access to monitoring, see [this page.](../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md) + +# Guides + +- [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) +- [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) +- [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) +- [Customizing Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) +- [Persistent Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) +- [Debugging high memory usage](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) +- [Migrating from Monitoring V1 to V2](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md) + +# Configuration + +### Configuring Monitoring Resources in Rancher + +> The configuration reference assumes familiarity with how monitoring components work together. For more information, see [How Monitoring Works.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +- [ServiceMonitor and PodMonitor](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md) +- [Receiver](../reference-guides/monitoring-v2-configuration/receivers.md) +- [Route](../reference-guides/monitoring-v2-configuration/routes.md) +- [PrometheusRule](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) +- [Prometheus](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) +- [Alertmanager](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Configuring Helm Chart Options + +For more information on `rancher-monitoring` chart options, including options to set resource limits and requests, see [this page.](../reference-guides/monitoring-v2-configuration/helm-chart-options.md) + +# Windows Cluster Support + +When deployed onto an RKE1 Windows cluster, Monitoring V2 will now automatically deploy a [windows-exporter](https://github.com/prometheus-community/windows_exporter) DaemonSet and set up a ServiceMonitor to collect metrics from each of the deployed Pods. This will populate Prometheus with `windows_` metrics that are akin to the `node_` metrics exported by [node_exporter](https://github.com/prometheus/node_exporter) for Linux hosts.
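+ +As a rough sketch of how you might compare the two metric families (the namespace, service name, and metric names below are typical defaults used for illustration only; verify them in your cluster), you could port-forward to Prometheus and query a Linux metric alongside its Windows counterpart: + +```bash +# Assumes the rancher-monitoring chart exposes Prometheus through this service and namespace. +kubectl -n cattle-monitoring-system port-forward svc/rancher-monitoring-prometheus 9090 & + +# CPU time on Linux hosts, exported by node_exporter. +curl -s http://localhost:9090/api/v1/query --data-urlencode 'query=node_cpu_seconds_total' + +# The analogous CPU metric on Windows hosts, exported by windows_exporter. +curl -s http://localhost:9090/api/v1/query --data-urlencode 'query=windows_cpu_time_total' +```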
+ +To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts must have a minimum [wins](https://github.com/rancher/wins) version of v0.1.0. + +For more details on how to upgrade wins on existing Windows hosts, refer to the section on [Windows cluster support for Monitoring V2.](../explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md) + + + +# Known Issues + +There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more default memory. If you are enabling monitoring on a K3s cluster, we recommend setting `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. + +For tips on debugging high memory usage, see [this page.](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) diff --git a/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md b/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md new file mode 100644 index 0000000000..5f2ee6872b --- /dev/null +++ b/docs/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -0,0 +1,52 @@ +--- +title: Configuration +weight: 5 +--- + +This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. + +For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can also help you set up RBAC, Thanos, or custom configuration. + +# Setting Resource Limits and Requests + +The resource requests and limits for the monitoring application can be configured when installing `rancher-monitoring`. For more information about the default limits, see [this page.](../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) + +:::note + +On an idle cluster, Monitoring V2 has significantly higher CPU usage (up to 70%) as compared to Monitoring V1. To improve performance and achieve similar results as in Monitoring V1, turn off the Prometheus adapter. + +::: + +# Prometheus Configuration + +It is usually not necessary to directly edit the Prometheus custom resource. + +Instead, to configure Prometheus to scrape custom metrics, you only need to create a new ServiceMonitor or PodMonitor. + + +### ServiceMonitor and PodMonitor Configuration + +For details, see [this page.](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md) + +### Advanced Prometheus Configuration + +For more information about directly editing the Prometheus custom resource, which may be helpful in advanced use cases, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) + +# Alertmanager Configuration + +The Alertmanager custom resource usually doesn't need to be edited directly. For most common use cases, you can manage alerts by updating Routes and Receivers.
+ +Routes and receivers are part of the configuration of the alertmanager custom resource. In the Rancher UI, Routes and Receivers are not true custom resources, but pseudo-custom resources that the Prometheus Operator uses to synchronize your configuration with the Alertmanager custom resource. When routes and receivers are updated, the monitoring application will automatically update Alertmanager to reflect those changes. + +For some advanced use cases, you may want to configure alertmanager directly. For more information, refer to [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Receivers + +Receivers are used to set up notifications. For details on how to configure receivers, see [this page.](../reference-guides/monitoring-v2-configuration/receivers.md) +### Routes + +Routes filter notifications before they reach receivers. Each route needs to refer to a receiver that has already been configured. For details on how to configure routes, see [this page.](../reference-guides/monitoring-v2-configuration/routes.md) + +### Advanced + +For more information about directly editing the Alertmanager custom resource, which may be helpful in advanced use cases, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) \ No newline at end of file diff --git a/docs/pages-for-subheaders/monitoring-v2-configuration.md b/docs/pages-for-subheaders/monitoring-v2-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/monitoring-v2-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/new-user-guides.md b/docs/pages-for-subheaders/new-user-guides.md new file mode 100644 index 0000000000..e2e667395c --- /dev/null +++ b/docs/pages-for-subheaders/new-user-guides.md @@ -0,0 +1,9 @@ +--- +title: New User Guides +--- + +New user guides, also known as **tutorials**, describe practical steps for users to follow in order to complete some concrete action. These docs are known as "learning-oriented" docs in which users learn by "doing". + +The new user guides are designed to guide beginners, or the everyday users of Rancher, through a series of steps to learn how to do something. The goal is that the user will be able to learn how to complete tasks by using easy-to-follow, meaningful, and repeatable directions. These guides will assist users to do work to then get the promised results immediately. + +The average Rancher user has a level of technical skill that is above the level of "beginner"; however, the new user guides are designed to help new, or beginner, users as well as the seasoned Rancher customer equally. This is accomplished by using a combination of high-level and technical language to introduce topics and guide the user through general tasks that are essential for every Rancher user to know. 
\ No newline at end of file diff --git a/docs/pages-for-subheaders/node-template-configuration.md b/docs/pages-for-subheaders/node-template-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/node-template-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/nutanix.md b/docs/pages-for-subheaders/nutanix.md new file mode 100644 index 0000000000..4b52bbc9e8 --- /dev/null +++ b/docs/pages-for-subheaders/nutanix.md @@ -0,0 +1,20 @@ +--- +title: Creating a Nutanix AOS Cluster +shortTitle: Nutanix +description: Use Rancher to create a Nutanix AOS (AHV) cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +metaDescription: Use Rancher to create a Nutanix AOS (AHV) cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +weight: 2225 +--- + +[Nutanix Acropolis Operating System](https://www.nutanix.com/products/acropolis) (Nutanix AOS) is an operating system for the Nutanix hyper-converged infrastructure platform. AOS comes with a built-in hypervisor called [Acropolis Hypervisor](https://www.nutanix.com/products/ahv), or AHV. By using Rancher with Nutanix AOS (AHV), you can bring cloud operations on-premises. + +Rancher can provision nodes in AOS (AHV) and install Kubernetes on them. When creating a Kubernetes cluster in AOS, Rancher first provisions the specified number of virtual machines by communicating with the Prism Central API. Then it installs Kubernetes on top of the VMs. + +A Nutanix cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. + +- [Creating a Nutanix Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md#creating-a-nutanix-aos-cluster) +- [Provisioning Storage](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos) + +# Creating a Nutanix Cluster + +In [this section,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Nutanix AOS. 
\ No newline at end of file diff --git a/docs/pages-for-subheaders/other-cloud-providers.md b/docs/pages-for-subheaders/other-cloud-providers.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/other-cloud-providers.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/other-installation-methods.md b/docs/pages-for-subheaders/other-installation-methods.md new file mode 100644 index 0000000000..939553398a --- /dev/null +++ b/docs/pages-for-subheaders/other-installation-methods.md @@ -0,0 +1,20 @@ +--- +title: Other Installation Methods +weight: 3 +--- + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +The Docker installation is for development and testing environments only. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) diff --git a/docs/pages-for-subheaders/other-troubleshooting-tips.md b/docs/pages-for-subheaders/other-troubleshooting-tips.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/other-troubleshooting-tips.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/pipelines.md b/docs/pages-for-subheaders/pipelines.md new file mode 100644 index 0000000000..d69ac8193e --- /dev/null +++ b/docs/pages-for-subheaders/pipelines.md @@ -0,0 +1,299 @@ +--- +title: Pipelines +weight: 10 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +:::note Notes + +- As of Rancher v2.5, Git-based deployment pipelines are now deprecated. We recommend handling pipelines with Rancher Continuous Delivery powered by [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md). To get to Fleet in Rancher, click ☰ > Continuous Delivery. + +- Pipelines in Kubernetes 1.21+ are no longer supported. + +- Fleet does not replace Rancher pipelines; the distinction is that Rancher pipelines are now powered by Fleet. + +::: + +Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. + +Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. 
+ +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +:::note + +Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of, and is not a replacement for, enterprise-grade Jenkins or other CI tools your team uses. + +::: + +This section covers the following topics: + +- [Concepts](#concepts) +- [How Pipelines Work](#how-pipelines-work) +- [Role-based Access Control for Pipelines](#role-based-access-control-for-pipelines) +- [Setting up Pipelines](#setting-up-pipelines) + - [Configure version control providers](#1-configure-version-control-providers) + - [Configure repositories](#2-configure-repositories) + - [Configure the pipeline](#3-configure-the-pipeline) +- [Pipeline Configuration Reference](#pipeline-configuration-reference) +- [Running your Pipelines](#running-your-pipelines) +- [Triggering a Pipeline](#triggering-a-pipeline) + - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) + +# Concepts + +For an explanation of concepts and terminology used in this section, refer to [this page.](../reference-guides/pipelines/concepts.md) + +# How Pipelines Work + +After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. + +A pipeline is configured from a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. + +Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories](../reference-guides/pipelines/example-repositories.md) to view some common pipeline deployments. + +When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: + + - **Jenkins:** + + The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. + + :::note + + There is no option to use existing Jenkins deployments as the pipeline engine. + + ::: + + - **Docker Registry:** + + Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can configure it to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. + + - **Minio:** + + Minio storage is used to store the logs for pipeline executions. + + :::note + + The managed Jenkins instance works statelessly, so you don't need to worry about its data persistence. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases.
If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components](../reference-guides/pipelines/configure-persistent-data.md). + + ::: + +# Role-based Access Control for Pipelines + +If you can access a project, you can enable repositories to start building pipelines. + +Only [administrators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure version control providers and manage global pipeline execution settings. + +Project members can only configure repositories and pipelines. + +# Setting up Pipelines + +### Prerequisite + +:::note Legacy Feature Flag: + +Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported. + +1. In the upper left corner, click **☰ > Global Settings**. +1. Click **Feature Flags**. +1. Go to the `legacy` feature flag and click **⋮ > Activate**. + +::: + +1. [Configure version control providers](#1-configure-version-control-providers) +2. [Configure repositories](#2-configure-repositories) +3. [Configure the pipeline](#3-configure-the-pipeline) + +### 1. Configure Version Control Providers + +Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider: + +- GitHub +- GitLab +- Bitbucket + +Select your provider's tab below and follow the directions. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to set up an OAuth App in Github. +1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. +1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. +1. Click **Authenticate**. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **GitLab**. +1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. +1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. +1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. +1. 
Click **Authenticate**. + +:::note Notes: + +1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. +2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. + +::: + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **Bitbucket** and leave **Use Bitbucket Cloud** selected by default. +1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. +1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. +1. Click **Authenticate**. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **Bitbucket** and choose the **Use private Bitbucket Server setup** option. +1. Follow the directions displayed to **Setup a Bitbucket Server application**. +1. Enter the host address of your Bitbucket server installation. +1. Click **Authenticate**. + +:::note + +Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: + +1. Setup Rancher server with a certificate from a trusted CA. +1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). + +::: + + + + +**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. + +### 2. Configure Repositories + +After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click on **Configure Repositories**. + +1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. + +1. For each repository that you want to set up a pipeline, click on **Enable**. + +1. 
When you're done enabling all your repositories, click on **Done**. + +**Results:** You have a list of repositories that you can start configuring pipelines for. + +### 3. Configure the Pipeline + +Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Find the repository that you want to set up a pipeline for. +1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.](../reference-guides/pipelines/pipeline-configuration.md) + + * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. + * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. + +1. Select which `branch` to use from the list of branches. + +1. Optional: Set up notifications. + +1. Set up the trigger rules for the pipeline. + +1. Enter a **Timeout** for the pipeline. + +1. When all the stages and steps are configured, click **Done**. + +**Results:** Your pipeline is now configured and ready to be run. + + +# Pipeline Configuration Reference + +Refer to [this page](../reference-guides/pipelines/pipeline-configuration.md) for details on how to configure a pipeline to: + +- Run a script +- Build and publish images +- Publish catalog templates +- Deploy YAML +- Deploy a catalog app + +The configuration reference also covers how to configure: + +- Notifications +- Timeouts +- The rules that trigger a pipeline +- Environment variables +- Secrets + + +# Running your Pipelines + +Run your pipeline for the first time. Find your pipeline and select the vertical **⋮ > Run**. + +During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: + +- `docker-registry` +- `jenkins` +- `minio` + +This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. + +# Triggering a Pipeline + +When a repository is enabled, a webhook is automatically set in the version control provider. 
By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. + +Available Events: + +* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. +* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. +* **Tag**: When a tag is created in the repository, the pipeline is triggered. + +:::note + +This option doesn't exist for Rancher's [example repositories](../reference-guides/pipelines/example-repositories.md). + +::: + +### Modifying the Event Triggers for the Repository + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Find the repository where you want to modify the event triggers. Select the vertical **⋮ > Setting**. +1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. +1. Click **Save**. diff --git a/docs/pages-for-subheaders/prometheus-federator-guides.md b/docs/pages-for-subheaders/prometheus-federator-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/prometheus-federator-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/prometheus-federator.md b/docs/pages-for-subheaders/prometheus-federator.md new file mode 100644 index 0000000000..c55e1096b8 --- /dev/null +++ b/docs/pages-for-subheaders/prometheus-federator.md @@ -0,0 +1,105 @@ +--- +title: Prometheus Federator +weight: 7 +--- + +Prometheus Federator, also referred to as Project Monitoring v2, deploys a Helm Project Operator (based on the [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator)), an operator that manages deploying Helm charts each containing a Project Monitoring Stack, where each stack contains: + +- [Prometheus](https://prometheus.io/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (managed externally by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) +- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) (deployed via an embedded Helm chart) +- Default PrometheusRules and Grafana dashboards based on the collection of community-curated resources from [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) +- Default ServiceMonitors that watch the deployed resources + +:::note Important: + +Prometheus Federator is designed to be deployed alongside an existing Prometheus Operator deployment in a cluster that has already installed the Prometheus Operator CRDs. + +::: + +## How does the operator work? + +1. 
On deploying this chart, users can create ProjectHelmChart CRs with `spec.helmApiVersion` set to `monitoring.cattle.io/v1alpha1` (also known as "Project Monitors" in the Rancher UI) in a **Project Registration Namespace (`cattle-project-`)**. +2. On seeing each ProjectHelmChart CR, the operator will automatically deploy a Project Prometheus stack on the Project Owner's behalf in the **Project Release Namespace (`cattle-project--monitoring`)** based on a HelmChart CR and a HelmRelease CR automatically created by the ProjectHelmChart controller in the **Operator / System Namespace**. +3. RBAC will automatically be assigned in the Project Release Namespace to allow users to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack deployed; this will be based on RBAC defined on the Project Registration Namespace against the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). For more information, see the section on [configuring RBAC](../reference-guides/prometheus-federator/rbac.md). + +### What is a Project? + +In Prometheus Federator, a Project is a group of namespaces that can be identified by a `metav1.LabelSelector`. By default, the label used to identify projects is `field.cattle.io/projectId`, the label used to identify namespaces that are contained within a given Rancher Project. + +### Configuring the Helm release created by a ProjectHelmChart + +The `spec.values` of this ProjectHelmChart's resources will correspond to the `values.yaml` override to be supplied to the underlying Helm chart deployed by the operator on the user's behalf; to see the underlying chart's `values.yaml` spec, either: + +- View the chart's definition located at [`rancher/prometheus-federator` under `charts/rancher-project-monitoring`](https://github.com/rancher/prometheus-federator/blob/main/charts/rancher-project-monitoring) (where the chart version will be tied to the version of this operator). +- Look for the ConfigMap named `monitoring.cattle.io.v1alpha1` that is automatically created in each Project Registration Namespace, which will contain both the `values.yaml` and `questions.yaml` that were used to configure the chart (which was embedded directly into the `prometheus-federator` binary). + +### Namespaces + +As a Project Operator based on [rancher/helm-project-operator](https://github.com/rancher/helm-project-operator), Prometheus Federator has three different classifications of namespaces that the operator looks out for: + +1. **Operator / System Namespace**: The namespace that the operator is deployed into (e.g., `cattle-monitoring-system`). This namespace will contain all HelmCharts and HelmReleases for all ProjectHelmCharts watched by this operator. **Only Cluster Admins should have access to this namespace.** + +2. **Project Registration Namespace (`cattle-project-`)**: The set of namespaces that the operator watches for ProjectHelmCharts within. The RoleBindings and ClusterRoleBindings that apply to this namespace will also be the source of truth for the auto-assigned RBAC created in the Project Release Namespace. For details, refer to the [RBAC page](../reference-guides/prometheus-federator/rbac.md).
**Project Owners (admin), Project Members (edit), and Read-Only Members (view) should have access to this namespace.** + + :::note Notes: + + - Project Registration Namespaces will be auto-generated by the operator and imported into the Project it is tied to if `.Values.global.cattle.projectLabel` is provided, which is set to `field.cattle.io/projectId` by default. This indicates that a Project Registration Namespace should be created by the operator if at least one namespace is observed with that label. The operator will not let these namespaces be deleted unless either all namespaces with that label are gone (e.g., this is the last namespace in that project, in which case the namespace will be marked with the label `"helm.cattle.io/helm-project-operator-orphaned": "true"`, which signals that it can be deleted), or it is no longer watching that project because the project ID was provided under `.Values.helmProjectOperator.otherSystemProjectLabelValues`, which serves as a denylist for Projects. These namespaces will also never be auto-deleted to avoid destroying user data; it is recommended that users clean up these namespaces manually if desired on creating or deleting a project. + + - If `.Values.global.cattle.projectLabel` is not provided, the Operator / System Namespace will also be the Project Registration Namespace. + + ::: + +3. **Project Release Namespace (`cattle-project--monitoring`):** The set of namespaces that the operator deploys Project Monitoring Stacks within on behalf of a ProjectHelmChart; the operator will also automatically assign RBAC to Roles created in this namespace by the Project Monitoring Stack based on bindings found in the Project Registration Namespace. **Only Cluster Admins should have access to this namespace; Project Owners (admin), Project Members (edit), and Read-Only Members (view) will be assigned limited access to this namespace by the deployed Helm Chart and Prometheus Federator.** + + :::note Notes: + + - Project Release Namespaces are automatically deployed and imported into the project whose ID is specified under `.Values.helmProjectOperator.projectReleaseNamespaces.labelValue`, which defaults to the value of `.Values.global.cattle.systemProjectId` if not specified, whenever a ProjectHelmChart is specified in a Project Registration Namespace. + + - Project Release Namespaces follow the same orphaning conventions as Project Registration Namespaces (see note above). + + - If `.Values.projectReleaseNamespaces.enabled` is false, the Project Release Namespace will be the same as the Project Registration Namespace. + + ::: + +### Helm Resources (HelmChart, HelmRelease) + +On deploying a ProjectHelmChart, the Prometheus Federator will automatically create and manage two child custom resources that manage the underlying Helm resources in turn: + +- A HelmChart CR (managed via an embedded [k3s-io/helm-contoller](https://github.com/k3s-io/helm-controller) in the operator): This custom resource automatically creates a Job in the same namespace that triggers a `helm install`, `helm upgrade`, or `helm uninstall` depending on the change applied to the HelmChart CR. This CR is automatically updated on changes to the ProjectHelmChart (e.g., modifying the values.yaml) or changes to the underlying Project definition (e.g., adding or removing namespaces from a project). 
+ +:::note Important: + +If a ProjectHelmChart is not deploying or updating the underlying Project Monitoring Stack for some reason, the Job created by this resource in the Operator / System namespace should be the first place you check to see if there's something wrong with the Helm operation. However, this is generally only accessible by a **Cluster Admin.** + +::: + +- A HelmRelease CR (managed via an embedded [rancher/helm-locker](https://github.com/rancher/helm-locker) in the operator): This custom resource automatically locks a deployed Helm release in place and automatically overwrites updates to underlying resources unless the change happens via a Helm operation (`helm install`, `helm upgrade`, or `helm uninstall` performed by the HelmChart CR). + +:::note + +HelmRelease CRs emit Kubernetes Events when they detect that an underlying Helm release has been modified and lock it back in place. To view these events, you can use `kubectl describe helmrelease -n `; you can also view this operator's logs to see when changes are detected and which resource modifications were attempted. + +::: + +Both of these resources are created for all Helm charts in the Operator / System namespaces to avoid escalation of privileges to underprivileged users. + +### Advanced Helm Project Operator Configuration + +For more information on advanced configurations, refer to [this page](https://github.com/rancher/prometheus-federator/blob/main/charts/prometheus-federator/0.0.1/README.md#advanced-helm-project-operator-configuration). + + + +### Prometheus Federator on the Local Cluster + +Prometheus Federator is a resource-intensive application. Installing it to the local cluster is possible, but **not recommended**. \ No newline at end of file diff --git a/docs/pages-for-subheaders/provisioning-storage-examples.md b/docs/pages-for-subheaders/provisioning-storage-examples.md new file mode 100644 index 0000000000..9732249038 --- /dev/null +++ b/docs/pages-for-subheaders/provisioning-storage-examples.md @@ -0,0 +1,12 @@ +--- +title: Provisioning Storage Examples +weight: 3053 +--- + +Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether it's a cloud-based solution from a service provider or an on-prem solution that you manage yourself. + +For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: + +- [NFS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) +- [vSphere](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) +- [EBS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) diff --git a/docs/pages-for-subheaders/quick-start-guides.md b/docs/pages-for-subheaders/quick-start-guides.md new file mode 100644 index 0000000000..e19a1729d3 --- /dev/null +++ b/docs/pages-for-subheaders/quick-start-guides.md @@ -0,0 +1,21 @@ +--- +title: Rancher Deployment Quick Start Guides +metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases.
+short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +weight: 2 +aliases: + - /rancher/v2.x/en/quick-start-guide/ +--- +:::caution + +The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). + +::: + +Use this section of the docs to jump start your deployment and testing of Rancher 2.x. It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. + +We have Quick Start Guides for: + +- [Deploying Rancher Server](deploy-rancher-manager.md): Get started running Rancher using the method most convenient for you. + +- [Deploying Workloads](deploy-rancher-workloads.md): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. diff --git a/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md b/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md new file mode 100644 index 0000000000..a2dc3b1eda --- /dev/null +++ b/docs/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -0,0 +1,14 @@ +--- +title: Installing Rancher behind an HTTP Proxy +weight: 4 +--- + +In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. + +Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). + +# Installation Outline + +1. [Set up infrastructure](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md) +2. [Set up a Kubernetes cluster](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md) +3. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md) diff --git a/docs/pages-for-subheaders/rancher-managed-clusters.md b/docs/pages-for-subheaders/rancher-managed-clusters.md new file mode 100644 index 0000000000..82d5004251 --- /dev/null +++ b/docs/pages-for-subheaders/rancher-managed-clusters.md @@ -0,0 +1,21 @@ +--- +title: Best Practices for Rancher Managed Clusters +shortTitle: Rancher Managed Clusters +weight: 2 +--- + +### Logging + +Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging. + +### Monitoring + +Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. Refer to this [guide](../reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md) for our recommendations. + +### Tips for Setting Up Containers + +Running well-built containers can greatly impact the overall performance and security of your environment. 
Refer to this [guide](../reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md) for tips. + +### Best Practices for Rancher Managed vSphere Clusters + +This [guide](../reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md) outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. diff --git a/docs/pages-for-subheaders/rancher-manager-architecture.md b/docs/pages-for-subheaders/rancher-manager-architecture.md new file mode 100644 index 0000000000..dcc38870cf --- /dev/null +++ b/docs/pages-for-subheaders/rancher-manager-architecture.md @@ -0,0 +1,199 @@ +--- +title: Architecture +weight: 1 +--- + +This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. + +For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) + +For a list of main features of the Rancher API server, refer to the [overview section.](../getting-started/introduction/overview.md#features-of-the-rancher-api-server) + +For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +:::note + +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +::: + +This section covers the following topics: + +- [Rancher server architecture](#rancher-server-architecture) +- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) + - [The authentication proxy](#1-the-authentication-proxy) + - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) + - [Node agents](#3-node-agents) + - [Authorized cluster endpoint (ACE)](#4-authorized-cluster-endpoint-ace) +- [Important files](#important-files) +- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) +- [Rancher server components and source code](#rancher-server-components-and-source-code) + +# Rancher Server Architecture + +The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. + +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). + +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +The diagram below shows how users can manipulate both [Rancher-launched Kubernetes](launch-kubernetes-with-rancher.md) clusters and [hosted Kubernetes](set-up-clusters-from-hosted-kubernetes-providers.md) clusters through Rancher's authentication proxy: + +
Managing Kubernetes Clusters through Rancher's Authentication Proxy
+ +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +You can install Rancher on a single node, or on a high-availability Kubernetes cluster. + +A high-availability Kubernetes installation is recommended for production. + +A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version. + +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md). + +The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. + +# Communicating with Downstream User Clusters + +This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. + +The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. + +
Communicating with Downstream Clusters
+ +![Rancher Components](/img/rancher-architecture-cluster-controller.svg) + +The following descriptions correspond to the numbers in the diagram above: + +1. [The Authentication Proxy](#1-the-authentication-proxy) +2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) +3. [Node Agents](#3-node-agents) +4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) + +### 1. The Authentication Proxy + +In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see +the pods. Bob is authenticated through Rancher's authentication proxy. + +The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. + +Rancher communicates with Kubernetes clusters using a [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/), which provides an identity for processes that run in a pod. + +By default, Rancher generates a [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_cluster.yml`) contains full access to the cluster. + +### 2. Cluster Controllers and Cluster Agents + +Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. + +There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: + +- Watches for resource changes in the downstream cluster +- Brings the current state of the downstream cluster to the desired state +- Configures access control policies to clusters and projects +- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE + +By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. + +The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: + +- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters +- Manages workloads, pod creation and deployment within each cluster +- Applies the roles and bindings defined in each cluster's global policies +- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health + +### 3. Node Agents + +If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. + +The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. 
The node agent is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots.

### 4. Authorized Cluster Endpoint

An authorized cluster endpoint (ACE) allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy.

:::note

- The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](launch-kubernetes-with-rancher.md) to provision the cluster. The ACE is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS.

- The [ACE is available for registered RKE2 and K3s clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters) as of Rancher v2.6.3.

:::

There are two main reasons why a user might need the authorized cluster endpoint:

- To access a downstream user cluster while Rancher is down
- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance

The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook.

Like the authorized cluster endpoint, the `kube-api-auth` authentication service is only available for Rancher-launched Kubernetes clusters.

:::note Example scenario:

Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint.

:::

With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`.

If Rancher goes down, you will need to use a context defined in this kubeconfig file to access the cluster. Therefore, we recommend exporting the kubeconfig file so that you can still use its credentials when Rancher is unavailable. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md)

# Important Files

The following files are needed to maintain, troubleshoot, and upgrade your cluster:

- `rancher-cluster.yml`: The RKE cluster configuration file.
- `kube_config_cluster.yml`: The kubeconfig file for the cluster. This file contains credentials for full access to the cluster. You can use it to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down.
- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. It is only created when using RKE v0.2.0 or higher.
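If Rancher goes down, you can still reach the cluster directly with the kubeconfig file. A minimal sketch, assuming the default file name produced by RKE:

```bash
# Query the downstream cluster directly, bypassing the Rancher authentication proxy
kubectl --kubeconfig kube_config_cluster.yml get nodes
```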
:::note

The "rancher-cluster" parts of the two latter file names depend on how you name the RKE cluster configuration file.

:::

For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) documentation.

# Tools for Provisioning Kubernetes Clusters

The tools that Rancher uses to provision downstream user clusters depend on the type of cluster that is being provisioned.

### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider

Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them.

Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine)

### Rancher Launched Kubernetes for Custom Nodes

When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster.

Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke)

### Hosted Kubernetes Providers

When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service.

Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine)

### Registered Kubernetes Clusters

In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster.

# Rancher Server Components and Source Code

This diagram shows each component that the Rancher server is composed of:

![Rancher Components](/img/rancher-architecture-rancher-components.svg)

The GitHub repositories for Rancher can be found at the following links:

- [Main Rancher server repository](https://github.com/rancher/rancher)
- [Rancher UI](https://github.com/rancher/ui)
- [Rancher API UI](https://github.com/rancher/api-ui)
- [Norman,](https://github.com/rancher/norman) Rancher's API framework
- [Types](https://github.com/rancher/types)
- [Rancher CLI](https://github.com/rancher/cli)
- [Catalog applications](https://github.com/rancher/helm)

This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository.
diff --git a/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md
new file mode 100644
index 0000000000..e9399ac288
--- /dev/null
+++ b/docs/pages-for-subheaders/rancher-on-a-single-node-with-docker.md
@@ -0,0 +1,208 @@
---
title: Installing Rancher on a Single Node Using Docker
description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container.
weight: 2
---

Rancher can be installed by running a single Docker container.

In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container.

:::note Want to use an external load balancer?

See [Docker Install with an External Load Balancer](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md) instead.

:::

A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version.

The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md)

### Privileged Access for Rancher

When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option.

# Requirements for OS, Docker, Hardware, and Networking

Make sure that your node fulfills the general [installation requirements.](installation-requirements.md)

# 1. Provision Linux Host

Provision a single Linux host according to our [Requirements](installation-requirements.md) to launch your Rancher server.

# 2. Choose an SSL Option and Install Rancher

For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, such as when you log in or interact with a cluster.

:::tip Do you want to...

- Use a proxy? See [HTTP Proxy Configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)
- Configure a custom CA root certificate to access your services? See [Custom CA root certificate](../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate)
- Complete an Air Gap Installation? See [Air Gap: Docker Install](air-gapped-helm-cli-install.md)
- Record all transactions with the Rancher API?
See [API Auditing](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log)

:::

Choose from the following options:

- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate)
- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed)
- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca)
- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate)
- [Option E: Localhost tunneling, no Certificate](#option-e-localhost-tunneling-no-certificate)

### Option A: Default Rancher-generated Self-signed Certificate

If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option spares you the hassle of generating a certificate yourself.

Log into your host, and run the command below:

```bash
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest
```

### Option B: Bring Your Own Certificate, Self-signed

In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher.

:::note Prerequisites:

Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice.

- The certificate files must be in PEM format.
- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)

:::
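If you don't already have a certificate, one way to generate a self-signed certificate and key is with OpenSSL. This is only a sketch: the hostname and file names are placeholders, and the `-addext` flag assumes OpenSSL 1.1.1 or newer:

```bash
# Generate a private key and a self-signed certificate valid for one year
# (replace rancher.example.com with the hostname you will use for Rancher)
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout key.pem -out cert.pem \
  -subj "/CN=rancher.example.com" \
  -addext "subjectAltName=DNS:rancher.example.com"
```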
After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container.

| Placeholder | Description |
| ------------------- | --------------------- |
| `<CERT_DIRECTORY>` | The path to the directory containing your certificate files. |
| `<FULL_CHAIN.pem>` | The path to your full certificate chain. |
| `<PRIVATE_KEY.pem>` | The path to the private key for your certificate. |
| `<CA_CERTS.pem>` | The path to the certificate authority's certificate. |

Log into your host, and run the command below:

```bash
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
  -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
  -v /<CERT_DIRECTORY>/<CA_CERTS.pem>:/etc/rancher/ssl/cacerts.pem \
  --privileged \
  rancher/rancher:latest
```

### Option C: Bring Your Own Certificate, Signed by a Recognized CA

In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings.

The Docker install is not recommended for production. These instructions are provided for testing and development purposes only.

:::note Prerequisites:

- The certificate files must be in PEM format.
- In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md)

:::

After obtaining your certificate, run the Docker command below.

- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary.
- Pass `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher.

| Placeholder | Description |
| ------------------- | ----------------------------- |
| `<CERT_DIRECTORY>` | The path to the directory containing your certificate files. |
| `<FULL_CHAIN.pem>` | The path to your full certificate chain. |
| `<PRIVATE_KEY.pem>` | The path to the private key for your certificate. |

Log into your host, and run the command below:

```bash
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
  -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
  --privileged \
  rancher/rancher:latest \
  --no-cacerts
```

### Option D: Let's Encrypt Certificate

:::caution

Let's Encrypt imposes rate limits on requests for new certificates. Therefore, limit how often you create or destroy the container. For more information, see the [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/).

:::

For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS.

The Docker install is not recommended for production. These instructions are provided for testing and development purposes only.

:::note Prerequisites:

- Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air-gapped network.
- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example).
- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses.

:::

After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command.

| Placeholder | Description |
| ----------------- | ------------------- |
| `<YOUR.DNS.NAME>` | Your domain address |

Log into your host, and run the command below:

```bash
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:latest \
  --acme-domain <YOUR.DNS.NAME>
```

### Option E: Localhost tunneling, no Certificate

If you are installing Rancher in a development or testing environment where you have a localhost tunneling solution running, such as [ngrok](https://ngrok.com/), you can skip generating a certificate. This installation option doesn't require a certificate.

- Pass `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher.
+ +Log into your host, and run the command below: + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:latest \ + --no-cacerts +``` + +## Advanced Options + +When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: + +- Custom CA Certificate +- API Audit Log +- TLS Settings +- Air Gap +- Persistent Data +- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +Refer to [this page](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for details. + +## Troubleshooting + +Refer to [this page](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) for frequently asked questions and troubleshooting tips. + +## What's Next? + +- **Recommended:** Review Single Node [Backup](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) and [Restore](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md). diff --git a/docs/pages-for-subheaders/rancher-security.md b/docs/pages-for-subheaders/rancher-security.md new file mode 100644 index 0000000000..a2e153af49 --- /dev/null +++ b/docs/pages-for-subheaders/rancher-security.md @@ -0,0 +1,99 @@ +--- +title: Security +weight: 20 +aliases: + - /rancher/v2.x/en/security/ +--- + + + + + + + +
| Security policy | Reporting process | Announcements |
| --------------- | ----------------- | ------------- |
| Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame. | Please submit possible security issues by emailing security-rancher@suse.com. | Subscribe to the Rancher announcements forum for release updates. |
Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise-grade [RBAC capability](manage-role-based-access-control-rbac.md), Rancher makes your Kubernetes clusters even more secure.

On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters:

- [NeuVector Integration with Rancher](#neuvector-integration-with-rancher)
- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster)
- [SELinux RPM](#selinux-rpm)
- [Guide to hardening Rancher installations](#rancher-hardening-guide)
- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment)
- [Third-party penetration test reports](#third-party-penetration-test-reports)
- [Rancher Security Advisories and CVEs](#rancher-security-advisories-and-cves)
- [Kubernetes Security Best Practices](#kubernetes-security-best-practices)

### NeuVector Integration with Rancher

_New in v2.6.5_

NeuVector is an open-source, container-focused security application that is now integrated into Rancher. NeuVector provides production security, DevOps vulnerability protection, and a container firewall, among other features. Please see the [Rancher docs](../explanations/integrations-in-rancher/neuvector.md) and the [NeuVector docs](https://open-docs.neuvector.com/) for more information.

### Running a CIS Security Scan on a Kubernetes Cluster

Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan that checks whether Kubernetes is deployed according to security best practices as defined in the [CIS](https://www.cisecurity.org/cis-benchmarks/) (Center for Internet Security) Kubernetes Benchmark.

The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes.

The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace".

CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team.

The Benchmark provides recommendations of two types: Automated and Manual. We run tests related only to Automated recommendations.

When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped, and failed tests. The report also includes remediation steps for any failed tests.

For details, refer to the section on [security scans](cis-scan-guides.md).

### SELinux RPM

[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. Historically used by government agencies, SELinux is now an industry standard and is enabled by default on CentOS 7 and 8.
We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: `rancher-selinux` and `rke2-selinux`. For details, see [this page](selinux-rpm.md).

### Rancher Hardening Guide

The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security.

The hardening guides provide prescriptive guidance for hardening a production installation of Rancher. See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-assessment) for the full list of security controls.

> The hardening guides describe how to secure the nodes in your cluster. It is recommended to follow a hardening guide before installing Kubernetes.

Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher.

### The CIS Benchmark and Self-Assessment

The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster.

Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide walks through the various controls and provides updated example commands to audit compliance in Rancher-created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/).

Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark.

### Third-party Penetration Test Reports

Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher-provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above.

Results:

- [Cure53 Pen Test - July 2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf)
- [Untamed Theory Pen Test - March 2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf)

### Rancher Security Advisories and CVEs

Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](../reference-guides/rancher-security/security-advisories-and-cves.md)

### Kubernetes Security Best Practices

For recommendations on securing your Kubernetes cluster, refer to the [Kubernetes Security Best Practices](../reference-guides/rancher-security/kubernetes-security-best-practices.md) guide.
diff --git a/docs/pages-for-subheaders/rancher-server-configuration.md b/docs/pages-for-subheaders/rancher-server-configuration.md
new file mode 100644
index 0000000000..fa42a3bae8
--- /dev/null
+++ b/docs/pages-for-subheaders/rancher-server-configuration.md
@@ -0,0 +1 @@
\ No newline at end of file
diff --git a/docs/pages-for-subheaders/rancher-server.md b/docs/pages-for-subheaders/rancher-server.md
new file mode 100644
index 0000000000..62678aabce
--- /dev/null
+++ b/docs/pages-for-subheaders/rancher-server.md
@@ -0,0 +1,19 @@
---
title: Best Practices for the Rancher Server
shortTitle: Rancher Server
weight: 1
---

This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters.

### Recommended Architecture and Infrastructure

Refer to this [guide](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) for our general advice for setting up the Rancher server on a high-availability Kubernetes cluster.

### Deployment Strategies

This [guide](../reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md) is designed to help you choose whether a regional deployment strategy or a hub-and-spoke deployment strategy is better for a Rancher server that manages downstream Kubernetes clusters.

### Installing Rancher in a vSphere Environment

This [guide](../reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md) outlines a reference architecture for installing Rancher in a vSphere environment, in addition to standard vSphere best practices as documented by VMware.
\ No newline at end of file
diff --git a/docs/pages-for-subheaders/rancher-v2.6-hardening-guides.md b/docs/pages-for-subheaders/rancher-v2.6-hardening-guides.md
new file mode 100644
index 0000000000..49e6bb8958
--- /dev/null
+++ b/docs/pages-for-subheaders/rancher-v2.6-hardening-guides.md
@@ -0,0 +1,65 @@
---
title: Self-Assessment and Hardening Guides for Rancher v2.6
shortTitle: Rancher v2.6 Hardening Guides
weight: 1
aliases:
  - /rancher/v2.6/en/security/rancher-2.5/
  - /rancher/v2.6/en/security/rancher-2.5/1.5-hardening-2.5/
  - /rancher/v2.6/en/security/rancher-2.5/1.5-benchmark-2.5/
  - /rancher/v2.6/en/security/rancher-2.5/1.6-hardening-2.5/
  - /rancher/v2.6/en/security/rancher-2.5/1.6-benchmark-2.5/
---

Rancher provides specific security hardening guides for each supported Rancher Kubernetes distribution.

- [Rancher Kubernetes Distributions](#rancher-kubernetes-distributions)
- [Hardening Guides and Benchmark Versions](#hardening-guides-and-benchmark-versions)
  - [RKE Guides](#rke-guides)
  - [RKE2 Guides](#rke2-guides)
  - [K3s Guides](#k3s-guides)
- [Rancher with SELinux](#rancher-with-selinux)

# Rancher Kubernetes Distributions

Rancher uses the following Kubernetes distributions:

- [**RKE**](https://rancher.com/docs/rke/latest/en/), Rancher Kubernetes Engine, is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers.
- [**RKE2**](https://docs.rke2.io/) is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector.
- [**K3s**](https://rancher.com/docs/k3s/latest/en/) is a fully conformant, lightweight Kubernetes distribution.
It is easy to install, with half the memory of upstream Kubernetes, all in a binary of less than 100 MB. + +To harden a Kubernetes cluster outside of Rancher's distributions, refer to your Kubernetes provider docs. + +# Hardening Guides and Benchmark Versions + +These guides have been tested along with the Rancher v2.6 release. Each self-assessment guide is accompanied with a hardening guide and tested on a specific Kubernetes version and CIS benchmark version. If a CIS benchmark has not been validated for your Kubernetes version, you can choose to use the existing guides until a newer version is added. + +### RKE Guides + +| Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides | +| ------------------ | --------------------- | --------------------- | ---------------- | +| Kubernetes v1.18 up to v1.23 | CIS v1.6 | [Link](../reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-self-assessment-guide-with-cis-v1.6-benchmark.md) | [Link](../reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-hardening-guide-with-cis-v1.6-benchmark.md) | + +:::note + +- CIS v1.20 benchmark version for Kubernetes v1.19 and v1.20 is not yet released as a profile in Rancher's CIS Benchmark chart. + +::: + +### RKE2 Guides + +| Type | Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides | +| ---- | ------------------ | --------------------- | --------------------- | ---------------- | +| Rancher provisioned RKE2 cluster | Kubernetes v1.21 up to v1.23 | CIS v1.6 | [Link](../reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-self-assessment-guide-with-cis-v1.6-benchmark.md) | [Link](../reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-hardening-guide-with-cis-v1.6-benchmark.md) | +| Standalone RKE2 | Kubernetes v1.21 up to v1.23 | CIS v1.6 | [Link](https://docs.rke2.io/security/cis_self_assessment16/) | [Link](https://docs.rke2.io/security/hardening_guide/) | + +### K3s Guides + +| Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides | +| ------------------ | --------------------- | --------------------- | ---------------- | +| Kubernetes v1.21 and v1.22 | CIS v1.6 | [Link](https://rancher.com/docs/k3s/latest/en/security/self_assessment/) | [Link](https://rancher.com/docs/k3s/latest/en/security/hardening_guide/) | + +# Rancher with SELinux + +[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on RHEL and CentOS. + +To use Rancher with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.](selinux-rpm.md#installing-the-rancher-selinux-rpm) diff --git a/docs/pages-for-subheaders/resources.md b/docs/pages-for-subheaders/resources.md new file mode 100644 index 0000000000..a351d87400 --- /dev/null +++ b/docs/pages-for-subheaders/resources.md @@ -0,0 +1,26 @@ +--- +title: Resources +weight: 5 +--- + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. 
Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command.

Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server.

### Air-Gapped Installations

Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air-gapped environment.

An air-gapped environment is one where the Rancher server is installed offline, behind a firewall, or behind a proxy.

### Advanced Options

When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options:

- [Custom CA Certificate](../getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md)
- [API Audit Log](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md)
- [TLS Settings](../reference-guides/installation-references/tls-settings.md)
- [etcd configuration](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md)
- [Local System Charts for Air Gap Installations](../getting-started/installation-and-upgrade/resources/local-system-charts.md)

diff --git a/docs/pages-for-subheaders/selinux-rpm.md b/docs/pages-for-subheaders/selinux-rpm.md
new file mode 100644
index 0000000000..9f1fc5c026
--- /dev/null
+++ b/docs/pages-for-subheaders/selinux-rpm.md
@@ -0,0 +1,17 @@
---
title: SELinux RPM
weight: 4
---

[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux.

Developed by Red Hat, it is an implementation of mandatory access controls (MAC) on Linux. Mandatory access controls allow an administrator of a system to define how applications and users can access different resources such as files, devices, networks, and inter-process communication. SELinux also enhances security by making an OS restrictive by default.

Historically used by government agencies, SELinux is now an industry standard and is enabled by default on CentOS 7 and 8. To check whether SELinux is enabled and enforcing on your system, use `getenforce`:

```
# getenforce
Enforcing
```

We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: [`rancher-selinux`](../reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md) and [`rke2-selinux`](../reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md).
\ No newline at end of file
diff --git a/docs/pages-for-subheaders/set-up-cloud-providers.md b/docs/pages-for-subheaders/set-up-cloud-providers.md
new file mode 100644
index 0000000000..97d6ceb6a2
--- /dev/null
+++ b/docs/pages-for-subheaders/set-up-cloud-providers.md
@@ -0,0 +1,43 @@
---
title: Setting up Cloud Providers
weight: 2300
---

A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes.

When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers, or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation.
Your cluster will not provision correctly if you configure a cloud provider for a cluster of nodes that do not meet the prerequisites.

By default, the **Cloud Provider** option is set to `None`.

The following cloud providers can be enabled:

* Amazon
* Azure
* GCE (Google Compute Engine)
* vSphere

### Setting up the Amazon Cloud Provider

For details on enabling the Amazon cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md)

### Setting up the Azure Cloud Provider

For details on enabling the Azure cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md)

### Setting up the GCE Cloud Provider

For details on enabling the Google Compute Engine cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md)

### Setting up the vSphere Cloud Provider

For details on enabling the vSphere cloud provider, refer to [this page.](vsphere-cloud-provider.md)

### Setting up a Custom Cloud Provider

The `Custom` cloud provider is available if you want to configure any Kubernetes cloud provider.

For the custom cloud provider option, you can refer to the [RKE docs](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to edit the YAML file for your specific cloud provider. There are specific cloud providers that have more detailed configuration:

* [vSphere](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/)
* [OpenStack](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/)

diff --git a/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
new file mode 100644
index 0000000000..9fbcc53013
--- /dev/null
+++ b/docs/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md
@@ -0,0 +1,30 @@
---
title: Setting up Clusters from Hosted Kubernetes Providers
weight: 3
---

In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service.

If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI.

In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider.
+ +Rancher supports the following Kubernetes providers: + +- [Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) +- [Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) +- [Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) +- [Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) +- [Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) +- [Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) + +## Hosted Kubernetes Provider Authentication + +When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: + +- [Creating a GKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +- [Creating an EKS Cluster](amazon-eks-permissions.md) +- [Creating an AKS Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) +- [Creating an ACK Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +- [Creating a TKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) +- [Creating a CCE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) diff --git a/docs/pages-for-subheaders/single-node-rancher-in-docker.md b/docs/pages-for-subheaders/single-node-rancher-in-docker.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/pages-for-subheaders/single-node-rancher-in-docker.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/pages-for-subheaders/use-existing-nodes.md b/docs/pages-for-subheaders/use-existing-nodes.md new file mode 100644 index 0000000000..cf3c02f89b --- /dev/null +++ b/docs/pages-for-subheaders/use-existing-nodes.md @@ -0,0 +1,147 @@ +--- +title: Launching Kubernetes on Existing Custom Nodes +description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements +metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" +weight: 2225 +--- + +When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. + +To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which includes some hardware specifications and Docker. 
After you install Docker on each server, you will also run the command provided in the Rancher UI on each one to turn it into a Kubernetes node.

This section describes how to set up a custom cluster.

# Creating a Cluster with Custom Nodes

:::note Want to use Windows hosts as Kubernetes workers?

See [Configuring Custom Clusters for Windows](use-windows-clusters.md) before you start.

:::

- [1. Provision a Linux Host](#1-provision-a-linux-host)
- [2. Create the Custom Cluster](#2-create-the-custom-cluster)
- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources)

### 1. Provision a Linux Host

Begin creation of a custom cluster by provisioning a Linux host. Your host can be:

- A cloud-hosted virtual machine (VM)
- An on-prem VM
- A bare-metal server

If you want to reuse a node from a previous custom cluster, [clean the node](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail.

Provision the host according to the [installation requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) and the [checklist for production-ready clusters.](checklist-for-production-ready-clusters.md)

If you're using Amazon EC2 as your host and want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements](https://rancher.com/docs/rke/latest/en/config-options/dual-stack#requirements) when provisioning the host.

### 2. Create the Custom Cluster

1. Click **☰ > Cluster Management**.
1. On the **Clusters** page, click **Create**.
1. Click **Custom**.
1. Enter a **Cluster Name**.
1. Use the **Cluster Configuration** section to choose the Kubernetes version, the network provider, and whether to enable project network isolation. To see more cluster options, click **Show advanced options**.

   :::note Using Windows nodes as Kubernetes workers?

   - See [Enable the Windows Support Option](use-windows-clusters.md).
   - The only Network Provider available for clusters with Windows support is Flannel.

   :::

   :::note Dual-stack on Amazon EC2:

   If you're using Amazon EC2 as your host and want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements](https://rancher.com/docs/rke/latest/en/config-options/dual-stack#requirements) when configuring RKE.

   :::

6. Click **Next**.

7. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user.

8. From **Node Role**, choose the roles that you want filled by a cluster node. You must provision at least one node for each role: `etcd`, `worker`, and `control plane`. All three roles are required for a custom cluster to finish provisioning. For more information on roles, see [this section.](../reference-guides/kubernetes-concepts.md#roles-for-nodes-in-kubernetes-clusters)

:::note Notes:

- Using Windows nodes as Kubernetes workers? See [this section](use-windows-clusters.md).
- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers).

:::

9. **Optional**: Click **[Show advanced options](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md)** to specify IP address(es) to use when registering the node, override the hostname of the node, or add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node.

10. Copy the command displayed on screen to your clipboard.

11. Log in to your Linux host using your preferred shell, such as PuTTY or a remote Terminal connection. Run the command copied to your clipboard.

:::note

Repeat steps 8-11 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed.

:::

12. When you finish running the command(s) on your Linux host(s), click **Done**.

**Result:**

Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster.

You can access your cluster after its state is updated to **Active**.

**Active** clusters are assigned two Projects:

- `Default`, containing the `default` namespace
- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces

### 3. Amazon Only: Tag Resources

If you have configured your cluster to use Amazon as the **Cloud Provider**, tag your AWS resources with a cluster ID.

[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)

:::note

You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/)

:::

The following resources need to be tagged with a `ClusterID`:

- **Nodes**: All hosts added in Rancher.
- **Subnet**: The subnet used for your cluster.
- **Security Group**: The security group used for your cluster.

:::note

Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer.

:::

The tag that should be used is:

```
Key=kubernetes.io/cluster/<CLUSTERID>, Value=owned
```

`<CLUSTERID>` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `<CLUSTERID>` are owned and managed by this cluster.

If you share resources between clusters, you can change the tag to:

```
Key=kubernetes.io/cluster/<CLUSTERID>, Value=shared
```
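If you prefer to apply these tags from the command line rather than the EC2 console, the AWS CLI can tag several resources in one call. This is only a sketch; the resource IDs and cluster ID below are placeholders:

```bash
# Tag the nodes, subnet, and security group used by the cluster
CLUSTER_ID="my-cluster"
aws ec2 create-tags \
  --resources i-0abc123def4567890 subnet-0abc123def4567890 sg-0abc123def4567890 \
  --tags "Key=kubernetes.io/cluster/${CLUSTER_ID},Value=owned"
```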
# Optional Next Steps

After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster:

- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI.
- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method so that if you can’t connect to Rancher, you can still access the cluster.

diff --git a/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
new file mode 100644
index 0000000000..90a485ebc7
--- /dev/null
+++ b/docs/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md
@@ -0,0 +1,169 @@
---
title: Launching Kubernetes on New Nodes in an Infrastructure Provider
weight: 2205
---

This section covers the following topics:

- [RKE Clusters](#rke-clusters)
  - [Node templates](#node-templates)
  - [Node labels](#node-labels)
  - [Node taints](#node-taints)
  - [Administrator control of node templates](#administrator-control-of-node-templates)
  - [Node pools](#node-pools)
  - [Node pool taints](#node-pool-taints)
  - [About node auto-replace](#about-node-auto-replace)
  - [Enabling node auto-replace](#enabling-node-auto-replace)
  - [Disabling node auto-replace](#disabling-node-auto-replace)
  - [Cloud credentials](#cloud-credentials)
  - [Node drivers](#node-drivers)
- [RKE2 Clusters](#rke2-clusters)
  - [Node roles in RKE2](#node-roles-in-rke2)

When you create an RKE or RKE2 cluster using a node template in Rancher, each resulting node pool is shown in a new **Machine Pools** tab. You can see the machine pools by doing the following:

1. Click **☰ > Cluster Management**.
1. Click the name of the RKE or RKE2 cluster.

## RKE Clusters

Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers.

One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected.

The available cloud providers to create a node template are decided based on active [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers).

### Node Templates

A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes.
The available cloud providers to create node templates are based on the active node drivers in Rancher.

After you create a node template in Rancher, it's saved so that you can use it again to create node pools. Node templates are bound to your login. After you add a template, you can remove it from your user profile.

#### Node Labels

You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them.

Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set)

#### Node Taints

You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them.

Since taints can be added at both the node template and the node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effects, the taints from the node pool will override the taints from the node template.

#### Administrator Control of Node Templates

Administrators can control and maintain all node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained.

To access all node templates, an administrator will need to do the following:

1. Click **☰ > Cluster Management**.
1. Click **RKE1 Configuration > Node Templates**.

**Result:** All node templates are listed. The templates can be edited or cloned by clicking the **⋮**.

### Node Pools

Using Rancher, you can create pools of nodes based on a [node template](#node-templates).

A node template defines the configuration of a node, such as the operating system to use, the number of CPUs, and the amount of memory.

The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected.

Each node pool must have one or more node roles assigned.

Each node role (i.e. etcd, controlplane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters.

The recommended setup is to have:

- a node pool with the etcd node role and a count of three
- a node pool with the controlplane node role and a count of at least two
- a node pool with the worker node role and a count of at least two

**RKE1 downstream cluster nodes in an air-gapped environment:**

By default, Rancher tries to run the Docker install script when provisioning RKE1 downstream cluster nodes, such as in vSphere. However, the Rancher Docker installation script would fail in air-gapped environments. To work around this issue, you may choose to skip installing Docker when creating a Node Template where Docker is pre-installed onto a VM image.
You can accomplish this by selecting **None** in the dropdown list for `Docker Install URL` under **Engine Options** in the Rancher UI. + +
**Engine Options Dropdown:**
![Engine Options Dropdown](/img/node-template-engine-options-rke1.png)

#### Node Pool Taints

If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. Adding taints at the node pool level rather than at the node template level is beneficial because you can swap out node templates without worrying about whether the taint is on the node template.

Each taint will automatically be added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to the existing nodes in the node pool, but any new node added into the node pool will get the taint.

When there are taints on both the node pool and the node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effects, the taints from the node pool will override the taints from the node template.

#### About Node Auto-replace

If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes.

:::caution

Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications.

:::

Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node (a quick way to inspect these taints is shown after the steps below). When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool.

#### Enabling Node Auto-replace

When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node.

1. In the form for creating or editing a cluster, go to the **Node Pools** section.
1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node.
1. Fill out the rest of the form for creating or editing the cluster.

**Result:** Node auto-replace is enabled for the node pool.

#### Disabling Node Auto-replace

You can disable node auto-replace from the Rancher UI with the following steps:

1. Click **☰ > Cluster Management**.
1. On the **Clusters** page, go to the cluster where you want to disable node auto-replace and click **⋮ > Edit Config**.
1. In the **Node Pools** section, go to the node pool where you want to disable node auto-replace. In the **Recreate Unreachable After** field, enter 0.
1. Click **Save**.

**Result:** Node auto-replace is disabled for the node pool.
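To see the taints that the node controller (or Rancher) has applied during this process, you can query the nodes directly. A quick sketch, assuming `kubectl` access to the downstream cluster; the `node.kubernetes.io/unreachable` taint key comes from upstream Kubernetes:

```bash
# Show each node together with its current taints;
# an unreachable node typically carries node.kubernetes.io/unreachable
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
```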
+ +### Cloud Credentials + +Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: + +- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. + +- After the cloud credential is created, it can be re-used to create additional node templates. + +- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. + +After cloud credentials are created, the user can start [managing the cloud credentials that they created](../reference-guides/user-settings/manage-cloud-credentials.md). + +### Node Drivers + +If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#activating-deactivating-node-drivers), or you can [add your own custom node driver](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#adding-custom-node-drivers). + +## RKE2 Clusters + +Rancher v2.6 introduces provisioning for [RKE2](https://docs.rke2.io/) clusters directly from the Rancher UI. RKE2, also known as RKE Government, is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. + +:::note + +For RKE2 cluster templates, please refer to [this page](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates.md#rke2-cluster-template) for additional information. + +::: + +### Node Roles + +The RKE2 CLI exposes two roles, `server` and `agent`, which represent the Kubernetes node-roles `etcd` + `controlplane` and `worker` respectively. With RKE2 integration in Rancher v2.6, RKE2 node pools can represent more fine-grained role assignments such that `etcd` and `controlplane` roles can be represented. + +The same functionality of using `etcd`, `controlplane` and `worker` nodes is possible in the RKE2 CLI by using flags and node tainting to control where workloads and the Kubernetes master were scheduled. The reason those roles were not implemented as first-class roles in the RKE2 CLI is that RKE2 is conceptualized as a set of raw building blocks that are best leveraged through an orchestration system such as Rancher. + +The implementation of the three node roles in Rancher means that Rancher managed RKE2 clusters are able to easily leverage all of the same architectural best practices that are recommended for RKE clusters. 
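+For comparison, when the RKE2 CLI is used on its own, role separation is expressed through flags or the configuration file rather than through first-class roles. The sketch below assumes the standard RKE2 configuration file path; the token, taint, and label values are illustrative only:
+
+```yaml
+# /etc/rancher/rke2/config.yaml on a server (etcd + control plane) node -- sketch only
+token: my-shared-secret                   # hypothetical cluster join token
+node-taint:
+  - "CriticalAddonsOnly=true:NoExecute"   # example taint to keep ordinary workloads off this node
+node-label:
+  - "node-role.example.com/server=true"   # hypothetical label for visibility
+```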
+ +In our [recommended cluster architecture](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md), we outline how many nodes of each role clusters should have: + +- At least three nodes with the role etcd to survive losing one node +- At least two nodes with the role controlplane for master component high availability +- At least two nodes with the role worker for workload rescheduling upon node failure \ No newline at end of file diff --git a/docs/pages-for-subheaders/use-windows-clusters.md b/docs/pages-for-subheaders/use-windows-clusters.md new file mode 100644 index 0000000000..507cf8cb88 --- /dev/null +++ b/docs/pages-for-subheaders/use-windows-clusters.md @@ -0,0 +1,297 @@ +--- +title: Launching Kubernetes on Windows Clusters +weight: 2240 +--- + +When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. + +In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. + +Some other requirements for Windows clusters include: + +- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. +- Kubernetes 1.15+ is required. +- The Flannel network provider must be used. +- Windows nodes must have 50 GB of disk space. + +For the full list of requirements, see [this section.](#requirements-for-windows-clusters) + +For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). + +This guide covers the following topics: + + + +- [Changes in Rancher v2.6](#changes-in-rancher-v2-6) +- [Requirements](#requirements-for-windows-clusters) +- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) +- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) + + +# Changes in Rancher v2.6 + +Rancher v2.6 introduces provisioning for [RKE2](https://docs.rke2.io/) clusters directly from the Rancher UI. RKE2, also known as RKE Government, is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. + +As of Rancher v2.6.5, provisioning for RKE2 is GA. + +The RKE2 provisioning feature also includes installing RKE2 on Windows clusters. Windows features for RKE2 include: + +- Windows Containers with RKE2 powered by containerd +- Added provisioning of Windows RKE2 custom clusters directly from the Rancher UI +- Calico CNI for Windows RKE2 custom clusters +- SAC releases of Windows Server (2004 and 20H2) are included in the technical preview + +Windows Support for RKE2 Custom Clusters requires choosing Calico as the CNI. 
+ +:::note + +Rancher will allow Windows workload pods to deploy on both Windows and Linux worker nodes by default. When creating mixed clusters in RKE2, you must edit the `nodeSelector` in the chart to direct the pods to be placed onto a compatible Windows node. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) for more information on how to use `nodeSelector` to assign pods to nodes. + +::: + +# Requirements for Windows Clusters + +The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation](installation-requirements.md). + +### OS and Docker Requirements + +Our support for Windows Server and Windows containers match the Microsoft official lifecycle for LTSC (Long-Term Servicing Channel) and SAC (Semi-Annual Channel). + +For the support lifecycle dates for Windows Server, see the [Microsoft Documentation.](https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info) + +### Kubernetes Version + +Kubernetes v1.15+ is required. + +If you are using Kubernetes v1.21 with Windows Server 20H2 Standard Core, the patch "2019-08 Servicing Stack Update for Windows Server" must be installed on the node. + +### Node Requirements + +The hosts in the cluster need to have at least: + +- 2 core CPUs +- 5 GB memory +- 50 GB disk space + +Rancher will not provision the node if the node does not meet these requirements. + +### Networking Requirements + +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation](installation-and-upgrade.md) before proceeding with this guide. + +Rancher only supports Windows using Flannel as the network provider. + +There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. + +For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. 
+ +If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) + +:::note + +Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name. + +::: + +### Rancher on vSphere with ESXi 6.7u2 and above + +If you are using Rancher on VMware vSphere with ESXi 6.7u2 or later with Red Hat Enterprise Linux 8.3, CentOS 8.3, or SUSE Enterprise Linux 15 SP2 or later, it is necessary to disable the `vmxnet3` virtual network adapter hardware offloading feature. Failure to do so will result in all network connections between pods on different cluster nodes failing with timeout errors. All connections from Windows pods to critical services running on Linux nodes, such as CoreDNS, will fail as well. It is also possible that external connections may fail. This issue is the result of Linux distributions enabling the hardware offloading feature in `vmxnet3` and a bug in the `vmxnet3` hardware offloading feature that results in the discarding of packets for guest overlay traffic. To address this issue, it is necessary to disable the `vmxnet3` hardware offloading feature. This setting does not survive a reboot, so it must be disabled on every boot. The recommended course of action is to create a systemd unit file at `/etc/systemd/system/disable_hw_offloading.service`, which disables the `vmxnet3` hardware offloading feature on boot. A sample systemd unit file is shown below. Note that `<interface>` must be customized to the host `vmxnet3` network interface, e.g., `ens192`: + +``` +[Unit] +Description=Disable vmxnet3 hardware offloading feature + +[Service] +Type=oneshot +ExecStart=ethtool -K <interface> tx-udp_tnl-segmentation off +ExecStart=ethtool -K <interface> tx-udp_tnl-csum-segmentation off +StandardOutput=journal + +[Install] +WantedBy=multi-user.target +``` +Then set the appropriate permissions on the systemd unit file: +``` +chmod 0644 /etc/systemd/system/disable_hw_offloading.service +``` +Finally, enable the systemd service: +``` +systemctl enable disable_hw_offloading.service +``` + +### Architecture Requirements + +The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. + +The `worker` nodes, where your workloads are deployed, will typically be Windows nodes, but there must be at least one `worker` node that runs on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress-related containers. 
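+In practice, workloads are steered toward the Windows workers (and away from the Linux worker that hosts those cluster components) with the built-in `kubernetes.io/os` node label. A minimal sketch, using an example IIS image:
+
+```yaml
+# Hypothetical deployment pinned to Windows worker nodes via the standard OS node label
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: win-webserver
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: win-webserver
+  template:
+    metadata:
+      labels:
+        app: win-webserver
+    spec:
+      nodeSelector:
+        kubernetes.io/os: windows   # use "linux" instead to target the Linux worker
+      containers:
+        - name: web
+          image: mcr.microsoft.com/windows/servercore/iis   # example Windows container image
+```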
+ +We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: + + + +| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | +| ------ | ------------------ | ------------------ | ------------------ | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | Control plane, etcd, worker | Manage the Kubernetes cluster | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | Worker | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | +| Node 3 | Windows (Windows Server core version 1809 or above) | Worker | Run your Windows containers | + +### Container Requirements + +Windows requires that containers be built on the same Windows Server version that they are deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. + +### Cloud Provider Specific Requirements + +If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page](set-up-cloud-providers.md) for details on how to configure a cloud provider on a cluster of nodes that meet the prerequisites. + +If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: + +- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) +- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. + +# Tutorial: How to Create a Cluster with Windows Support + +This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) + +When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. + +To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below. + + + +1. [Provision Hosts](#1-provision-hosts) +1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes) +1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) +1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files) + + +# 1. Provision Hosts + +To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts. 
+ +Your hosts can be: + +- Cloud-hosted VMs +- VMs from virtualization clusters +- Bare-metal servers + +You will provision three nodes: + +- One Linux node, which manages the Kubernetes control plane and stores your `etcd` +- A second Linux node, which will be another worker node +- The Windows node, which will run your Windows containers as a worker node + +| Node | Operating System | +| ------ | ------------------------------------------------------------ | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | +| Node 3 | Windows (Windows Server core version 1809 or above required) | + +If your nodes are hosted by a **Cloud Provider** and you want automation support such as loadbalancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.](set-up-cloud-providers.md) + +# 2. Create the Cluster on Existing Nodes + +The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster](use-existing-nodes.md) with some Windows-specific requirements. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Custom**. +1. Enter a name for your cluster in the **Cluster Name** field. +1. In the **Kubernetes Version** dropdown menu, select v1.19 or above. +1. In the **Network Provider** field, select **Flannel**. +1. In the **Windows Support** section, click **Enabled**. +1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. +1. Click **Next**. + +:::note Important: + +For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +::: + +# 3. Add Nodes to the Cluster + +This section describes how to register your Linux and Worker nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node. + +### Add Linux Master Node + +In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. + +The first node in your cluster should be a Linux host has both the **Control Plane** and **etcd** roles. 
At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. + +1. In the **Node Operating System** section, click **Linux**. +1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. +1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +1. Copy the command displayed on the screen to your clipboard. +1. SSH into your Linux host and run the command that you copied to your clipboard. +1. When you are finished provisioning your Linux node(s), select **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +It may take a few minutes for the node to be registered in your cluster. + +### Add Linux Worker Node + +In this section, we run a command to register the Linux worker node to the cluster. + +After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **⋮ > Edit Config**. +1. Scroll down to **Node Operating System**. Choose **Linux**. +1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. +1. Copy the command displayed on screen to your clipboard. +1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. +1. From **Rancher**, click **Save**. + +**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. + +:::note + +Taints on Linux Worker Nodes + +For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the Windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. + +| Taint Key | Taint Value | Taint Effect | +| -------------- | ----------- | ------------ | +| `cattle.io/os` | `linux` | `NoSchedule` | + +::: + +### Add a Windows Worker Node + +In this section, we run a command to register the Windows worker node to the cluster. + +You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **⋮ > Edit Config**. +1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. +1. 
Copy the command displayed on screen to your clipboard. +1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. +1. From Rancher, click **Save**. +1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. + +**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# Configuration for Storage Classes in Azure + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. For details, refer to [this section.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md) diff --git a/docs/pages-for-subheaders/user-settings.md b/docs/pages-for-subheaders/user-settings.md new file mode 100644 index 0000000000..bab26d837b --- /dev/null +++ b/docs/pages-for-subheaders/user-settings.md @@ -0,0 +1,16 @@ +--- +title: User Settings +weight: 23 +--- + +Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. + +![User Settings Menu](/img/user-settings.png) + +The available user settings are: + +- [API & Keys](../reference-guides/user-settings/api-keys.md): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. +- [Cloud Credentials](../reference-guides/user-settings/manage-cloud-credentials.md): Manage cloud credentials [used by node templates](use-new-nodes-in-an-infra-provider.md#node-templates) to [provision nodes for clusters](launch-kubernetes-with-rancher.md). 
+- [Node Templates](../reference-guides/user-settings/manage-node-templates.md): Manage templates [used by Rancher to provision nodes for clusters](launch-kubernetes-with-rancher.md). +- [Preferences](../reference-guides/user-settings/user-preferences.md): Sets superficial preferences for the Rancher UI. +- Log Out: Ends your user session. diff --git a/docs/pages-for-subheaders/vsphere-cloud-provider.md b/docs/pages-for-subheaders/vsphere-cloud-provider.md new file mode 100644 index 0000000000..01f1d0ee24 --- /dev/null +++ b/docs/pages-for-subheaders/vsphere-cloud-provider.md @@ -0,0 +1,14 @@ +--- +title: Setting up the vSphere Cloud Provider +weight: 4 +--- + +In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. + +# In-tree Cloud Provider + +To use the in-tree vSphere cloud provider, you will need to use an RKE configuration option. For details, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md) + +# Out-of-tree Cloud Provider + +To set up the out-of-tree vSphere cloud provider, you will need to install Helm charts from the Rancher marketplace. For details, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md) diff --git a/docs/pages-for-subheaders/vsphere.md b/docs/pages-for-subheaders/vsphere.md new file mode 100644 index 0000000000..aa5931abd8 --- /dev/null +++ b/docs/pages-for-subheaders/vsphere.md @@ -0,0 +1,62 @@ +--- +title: Creating a vSphere Cluster +shortTitle: vSphere +description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +weight: 2225 +--- + +By using Rancher with vSphere, you can bring cloud operations on-premises. + +Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. + +A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. + +- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3) +- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) +- [Provisioning Storage](#provisioning-storage) +- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider) + +# vSphere Enhancements in Rancher v2.3 + +The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: + +### Self-healing Node Pools + +One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,](use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) in your on-premises clusters. 
Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. + +:::caution + +It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +::: + +### Dynamically Populated Options for Instances and Scheduling + +Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. + +For the fields to be populated, your setup needs to fulfill the [prerequisites.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md#prerequisites) + +### More Supported Operating Systems + +You can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) + +### Video Walkthrough of v2.3.3 Node Template Features + +In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. + +{{< youtube id="dPIwg6x1AlU">}} + +# Creating a vSphere Cluster + +In [this section,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. + +# Provisioning Storage + +For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](vsphere-cloud-provider.md) + +# Enabling the vSphere Cloud Provider + +When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. + +For details, refer to the section on [enabling the vSphere cloud provider.](vsphere-cloud-provider.md) diff --git a/docs/pages-for-subheaders/workloads-and-pods.md b/docs/pages-for-subheaders/workloads-and-pods.md new file mode 100644 index 0000000000..ea909365f7 --- /dev/null +++ b/docs/pages-for-subheaders/workloads-and-pods.md @@ -0,0 +1,79 @@ +--- +title: "Kubernetes Workloads and Pods" +description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" +weight: 3025 +--- + +You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. 
+ +### Pods + +[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. + +### Workloads + +_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. +Workloads let you define the rules for application scheduling, scaling, and upgrade. + +#### Workload Types + +Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: + +- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) + + _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server. + +- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) + + _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An application would be something like Zookeeper—an application that requires a database for storage. + +- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) + + _Daemonsets_ ensures that every node in the cluster runs a copy of pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best. + +- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) + + _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state. + +- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) + + _CronJobs_ are similar to jobs. CronJobs, however, runs to completion on a cron-based schedule. + +### Services + +In many use cases, a workload has to be either: + +- Accessed by other workloads in the cluster. +- Exposed to the outside world. + +You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. + +#### Service Types + +There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). 
+ +- **ClusterIP** + + >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. + +- **NodePort** + + >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `:`. + +- **LoadBalancer** + + >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. + +## Workload Options + +This section of the documentation contains instructions for deploying workloads and using workload options. + +- [Deploy Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md) +- [Upgrade Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) +- [Rollback Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) + +## Related Links + +### External Links + +- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/docs/rancher-manager.md b/docs/rancher-manager.md new file mode 100644 index 0000000000..346e997c01 --- /dev/null +++ b/docs/rancher-manager.md @@ -0,0 +1,24 @@ +--- +slug: / +weight: 1 +title: "Rancher 2.6" +shortTitle: "Rancher 2.6 (Latest)" +description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." +metaTitle: "Rancher 2.6 Docs: What is New?" +metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." +insertOneSix: false +ctaBanner: 0 +aliases: + - /rancher/v2.x/en/ +--- +Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher 2 exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. + +Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. + +One Rancher server installation can manage thousands of Kubernetes clusters and thousands of nodes from the same user interface. + +Rancher adds significant value on top of Kubernetes, first by centralizing authentication and role-based access control (RBAC) for all of the clusters, giving global admins the ability to control cluster access from one location. + +It then enables detailed monitoring and alerting for clusters and their resources, ships logs to external providers, and integrates directly with Helm via the Application Catalog. If you have an external CI/CD system, you can plug it into Rancher, but if you don't, Rancher even includes Fleet to help you automatically deploy and upgrade workloads. + +Rancher is a _complete_ container management platform for Kubernetes, giving you the tools to successfully run Kubernetes anywhere. 
diff --git a/docs/reference-guides.md b/docs/reference-guides.md new file mode 100644 index 0000000000..d2e85baa0b --- /dev/null +++ b/docs/reference-guides.md @@ -0,0 +1,11 @@ +--- +title: Reference Guides +--- + +**Reference guides** are technical descriptions of processes or products that users can study. Reference guides are designed to be "information-oriented" and their primary function is to describe. + +These docs may also include some usage steps in the course of description; however, their purpose is not to explain concepts nor to outline steps to achieve tasks. + +The users who utilize reference guides are knowledgeable with the Rancher product as well as how to use it. They will benefit from detailed descriptions of something to be used when needing to refer to specifics of usage. + +Good examples of Rancher reference guides would be the [Rancher Manager architecture](./pages-for-subheaders/rancher-manager-architecture.md) and [cluster configuration guides](./pages-for-subheaders/cluster-configuration.md). diff --git a/docs/reference-guides/about-the-api/api-tokens.md b/docs/reference-guides/about-the-api/api-tokens.md new file mode 100644 index 0000000000..756831cf4d --- /dev/null +++ b/docs/reference-guides/about-the-api/api-tokens.md @@ -0,0 +1,66 @@ +--- +title: API Tokens +weight: 1 +--- + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. Tokens are not invalidated by changing a password. + +You can deactivate API tokens by deleting them or by deactivating the user account. + +### Deleting tokens +To delete a token, + +1. Go to the list of all tokens in the Rancher API view at `https:///v3/tokens`. + +1. Access the token you want to delete by its ID. For example, `https:///v3/tokens/kubectl-shell-user-vqkqt` + +1. Click **Delete**. + +Here is the complete list of tokens that are generated with `ttl=0`: + +| Token | Description | +|-------|-------------| +| `kubeconfig-*` | Kubeconfig token | +| `kubectl-shell-*` | Access to `kubectl` shell in the browser | +| `agent-*` | Token for agent deployment | +| `compose-token-*` | Token for compose | +| `helm-token-*` | Token for Helm chart deployment | +| `*-pipeline*` | Pipeline token for project | +| `telemetry-*` | Telemetry token | +| `drain-node-*` | Token for drain (we use `kubectl` for drain because there is no native Kubernetes API) | + + +### Setting TTL on Kubeconfig Tokens + +Admins can set a global TTL on Kubeconfig tokens. Once the token expires the kubectl command will require the user to authenticate to Rancher. + +Go to the global settings and: + +1. Set the `kubeconfig-generate-token` setting to `false`. This setting instructs Rancher to no longer automatically generate a token when a user clicks on download a kubeconfig file. The kubeconfig file will now provide a command to login to Rancher. + +:::note + +Once this setting is deactivated, a generated kubeconfig will reference the [Rancher CLI](../../pages-for-subheaders/cli-with-rancher.md) to retrieve a short lived token for the cluster. When you use this kubeconfig in a client, such as `kubectl`, the Rancher CLI needs to be installed as well. + +::: + +2. Set the `kubeconfig-token-ttl-minutes` setting to the desired duration in minutes. By default, `kubeconfig-token-ttl-minutes` is 960 (16 hours). 
+ +:::note + +This value cannot exceed max-ttl of API tokens.(`https:// + secretKey: +``` + +### IAM Permissions for EC2 Nodes to Access S3 + +There are two ways to set up the `rancher-backup` operator to use S3 as the backup storage location. + +One way is to configure the `credentialSecretName` in the Backup custom resource, which refers to AWS credentials that have access to S3. + +If the cluster nodes are in Amazon EC2, the S3 access can also be set up by assigning IAM permissions to the EC2 nodes so that they can access S3. + +To allow a node to access S3, follow the instructions in the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-instance-access-s3-bucket/) to create an IAM role for EC2. When you add a custom policy to the role, add the following permissions, and replace the `Resource` with your bucket name: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::rancher-backups" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObjectAcl" + ], + "Resource": [ + "arn:aws:s3:::rancher-backups/*" + ] + } + ] +} +``` + +After the role is created, and you have attached the corresponding instance profile to your EC2 instance(s), the `credentialSecretName` directive can be left empty in the Backup custom resource. + +# Examples + +For example Backup custom resources, refer to [this page.](examples.md#backup) diff --git a/docs/reference-guides/backup-restore-configuration/examples.md b/docs/reference-guides/backup-restore-configuration/examples.md new file mode 100644 index 0000000000..dfab5caa5f --- /dev/null +++ b/docs/reference-guides/backup-restore-configuration/examples.md @@ -0,0 +1,308 @@ +--- +title: Examples +weight: 5 +--- + +This section contains examples of Backup and Restore custom resources. + +The default backup storage location is configured when the `rancher-backup` operator is installed or upgraded. + +Encrypted backups can only be restored if the Restore custom resource uses the same encryption configuration secret that was used to create the backup. 
+ +- [Backup](#backup) + - [Backup in the default location with encryption](#backup-in-the-default-location-with-encryption) + - [Recurring backup in the default location](#recurring-backup-in-the-default-location) + - [Encrypted recurring backup in the default location](#encrypted-recurring-backup-in-the-default-location) + - [Encrypted backup in Minio](#encrypted-backup-in-minio) + - [Backup in S3 using AWS credential secret](#backup-in-s3-using-aws-credential-secret) + - [Recurring backup in S3 using AWS credential secret](#recurring-backup-in-s3-using-aws-credential-secret) + - [Backup from EC2 nodes with IAM permission to access S3](#backup-from-ec2-nodes-with-iam-permission-to-access-s3) +- [Restore](#restore) + - [Restore using the default backup file location](#restore-using-the-default-backup-file-location) + - [Restore for Rancher migration](#restore-for-rancher-migration) + - [Restore from encrypted backup](#restore-from-encrypted-backup) + - [Restore an encrypted backup from Minio](#restore-an-encrypted-backup-from-minio) + - [Restore from backup using an AWS credential secret to access S3](#restore-from-backup-using-an-aws-credential-secret-to-access-s3) + - [Restore from EC2 nodes with IAM permissions to access S3](#restore-from-ec2-nodes-with-iam-permissions-to-access-s3) +- [Example Credential Secret for Storing Backups in S3](#example-credential-secret-for-storing-backups-in-s3) +- [Example EncryptionConfiguration](#example-encryptionconfiguration) + +# Backup + +This section contains example Backup custom resources. + +### Backup in the Default Location with Encryption + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-location-encrypted-backup +spec: + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Recurring Backup in the Default Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-location-recurring-backup +spec: + resourceSetName: rancher-resource-set + schedule: "@every 1h" + retentionCount: 10 +``` + +### Encrypted Recurring Backup in the Default Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-enc-recurring-backup +spec: + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 3 +``` + +### Encrypted Backup in Minio + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: minio-backup +spec: + storageLocation: + s3: + credentialSecretName: minio-creds + credentialSecretNamespace: default + bucketName: rancherbackups + endpoint: minio.xip.io + endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Backup in S3 Using AWS Credential Secret + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-backup +spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Recurring Backup in S3 Using AWS Credential Secret + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-recurring-backup +spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: 
rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 10 +``` + +### Backup from EC2 Nodes with IAM Permission to Access S3 + +This example shows that the AWS credential secret does not have to be provided to create a backup if the nodes running `rancher-backup` have [these permissions for access to S3.](backup-configuration.md#iam-permissions-for-ec2-nodes-to-access-s3) + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-iam-backup +spec: + storageLocation: + s3: + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +# Restore + +This section contains example Restore custom resources. + +### Restore Using the Default Backup File Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-default +spec: + backupFilename: default-location-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-29-54-07-00.tar.gz +# encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore for Rancher Migration +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-migration +spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + prune: false + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com +``` + +### Restore from Encrypted Backup + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-encrypted +spec: + backupFilename: default-test-s3-def-backup-c583d8f2-6daf-4648-8ead-ed826c591471-2020-08-24T20-47-05Z.tar.gz + encryptionConfigSecretName: encryptionconfig +``` + +### Restore an Encrypted Backup from Minio + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-minio +spec: + backupFilename: default-minio-backup-demo-aa5c04b7-4dba-4c48-9ac4-ab7916812eaa-2020-08-30T13-18-17-07-00.tar.gz + storageLocation: + s3: + credentialSecretName: minio-creds + credentialSecretNamespace: default + bucketName: rancherbackups + endpoint: minio.xip.io + endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t + encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore from Backup Using an AWS Credential Secret to Access S3 + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-s3-demo +spec: + backupFilename: test-s3-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-49-34-07-00.tar.gz.enc + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore from EC2 Nodes with IAM Permissions to Access S3 + +This example shows that the AWS credential secret does not have to be provided to restore from backup if the nodes running `rancher-backup` have [these permissions for access to S3.](backup-configuration.md#iam-permissions-for-ec2-nodes-to-access-s3) + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-s3-demo +spec: + backupFilename: 
default-test-s3-recurring-backup-84bf8dd8-0ef3-4240-8ad1-fc7ec308e216-2020-08-24T10#52#44-07#00.tar.gz + storageLocation: + s3: + bucketName: rajashree-backup-test + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + encryptionConfigSecretName: test-encryptionconfig +``` + +# Example Credential Secret for Storing Backups in S3 + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: creds +type: Opaque +data: + accessKey: + secretKey: +``` + +# Example EncryptionConfiguration + +The snippet below demonstrates two different types of secrets and their relevance with respect to Backup and Restore of custom resources. + +The first example is that of a secret that is used to encrypt the backup files. The backup operator, in this case, will not be able to read the secrets encryption file. It only uses the contents of the secret. + +The second example is that of a Kubernetes secrets encryption config file that is used to encrypt secrets when stored in etcd. **When backing up the etcd datastore, be sure to also back up the EncryptionConfiguration.** Failure to do so will result in an inability to use the restored data if secrets encryption was in use at the time the data was backed up. + + +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - secretbox: + keys: + - name: key1 + secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= +``` + + + diff --git a/docs/reference-guides/backup-restore-configuration/restore-configuration.md b/docs/reference-guides/backup-restore-configuration/restore-configuration.md new file mode 100644 index 0000000000..36b2679f01 --- /dev/null +++ b/docs/reference-guides/backup-restore-configuration/restore-configuration.md @@ -0,0 +1,90 @@ +--- +title: Restore Configuration +shortTitle: Restore +weight: 2 +--- + +The Restore Create page lets you provide details of the backup to restore from + +![](/img/backup_restore/restore/restore.png) + +- [Backup Source](#backup-source) + - [An Existing Backup Config](#an-existing-backup-config) + - [The default storage target](#the-default-storage-target) + - [An S3-compatible object store](#an-s3-compatible-object-store) +- [Encryption](#encryption) +- [Prune during restore](#prune-during-restore) +- [Getting the Backup Filename from S3](#getting-the-backup-filename-from-s3) + +# Backup Source +Provide details of the backup file and its storage location, which the operator will then use to perform the restore. Select from the following options to provide these details + + + + +### An existing backup config + +Selecting this option will populate the **Target Backup** dropdown with the Backups available in this cluster. Select the Backup from the dropdown, and that will fill out the **Backup Filename** field for you, and will also pass the backup source information from the selected Backup to the operator. + +![](/img/backup_restore/restore/existing.png) + +If the Backup custom resource does not exist in the cluster, you need to get the exact filename and provide the backup source details with the default storage target or an S3-compatible object store. 
+ + +### The default storage target + +Select this option if you are restoring from a backup file that exists in the default storage location configured at the operator-level. The operator-level configuration is the storage location that was configured when the `rancher-backup` operator was installed or upgraded. Provide the exact filename in the **Backup Filename** field. + +![](/img/backup_restore/restore/default.png) + +### An S3-compatible object store + +Select this option if no default storage location is configured at the operator-level, OR if the backup file exists in a different S3 bucket than the one configured as the default storage location. Provide the exact filename in the **Backup Filename** field. Refer to [this section](#getting-the-backup-filename-from-s3) for exact steps on getting the backup filename from s3. Fill in all the details for the S3 compatible object store. Its fields are exactly same as ones for the `backup.StorageLocation` configuration in the [Backup custom resource.](backup-configuration.md#storage-location) + +![](/img/backup_restore/restore/s3store.png) + +# Encryption + +If the backup was created with encryption enabled, its file will have `.enc` suffix. Choosing such a Backup, or providing a backup filename with `.enc` suffix will display another dropdown named **Encryption Config Secret**. + +![](/img/backup_restore/restore/encryption.png) + +The Secret selected from this dropdown must have the same contents as the one used for the Backup custom resource while performing the backup. If the encryption configuration doesn't match, the restore will fail + +The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key + +| YAML Directive Name | Description | +| ---------------- | ---------------- | +| `encryptionConfigSecretName` | Provide the name of the Secret from `cattle-resources-system` namespace, that contains the encryption config file. | + +:::note Important: + +This field should only be set if the backup was created with encryption enabled. Providing the incorrect encryption config will cause the restore to fail. + +::: + +# Prune During Restore + +* **Prune**: In order to fully restore Rancher from a backup, and to go back to the exact state it was at when the backup was performed, we need to delete any additional resources that were created by Rancher after the backup was taken. The operator does so if the **Prune** flag is enabled. Prune is enabled by default and it is recommended to keep it enabled. +* **Delete Timeout**: This is the amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. + +| YAML Directive Name | Description | +| ---------------- | ---------------- | +| `prune` | Delete the resources managed by Rancher that are not present in the backup (Recommended). | +| `deleteTimeoutSeconds` | Amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. | + +# Getting the Backup Filename from S3 + +This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. + +To obtain this file name from S3, go to your S3 bucket (and folder if it was specified while performing backup). + +Copy the filename and store it in your Restore custom resource. 
So assuming the name of your backup file is `backupfile`, + +- If your bucket name is `s3bucket` and no folder was specified, then the `backupFilename` to use will be `backupfile`. +- If your bucket name is `s3bucket` and the base folder is`s3folder`, the `backupFilename` to use is only `backupfile` . +- If there is a subfolder inside `s3Folder` called `s3sub`, and that has your backup file, then the `backupFilename` to use is `s3sub/backupfile`. + +| YAML Directive Name | Description | +| ---------------- | ---------------- | +| `backupFilename` | This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. | diff --git a/docs/reference-guides/backup-restore-configuration/storage-configuration.md b/docs/reference-guides/backup-restore-configuration/storage-configuration.md new file mode 100644 index 0000000000..5df67541fd --- /dev/null +++ b/docs/reference-guides/backup-restore-configuration/storage-configuration.md @@ -0,0 +1,65 @@ +--- +title: Backup Storage Location Configuration +shortTitle: Storage +weight: 3 +--- + +Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible object store. + +Only one storage location can be configured at the operator level. + +- [Storage Location Configuration](#storage-location-configuration) + - [No Default Storage Location](#no-default-storage-location) + - [S3-compatible Object Store](#s3-compatible-object-store) + - [Use an existing StorageClass](#existing-storageclass) + - [Use an existing PersistentVolume](#existing-persistent-volume) +- [Encryption](#encryption) +- [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) + +# Storage Location Configuration + +### No Default Storage Location + +You can choose to not have any operator-level storage location configured. If you select this option, you must configure an S3-compatible object store as the storage location for each individual backup. + +### S3-compatible Object Store + +| Parameter | Description | +| -------------- | -------------- | +| Credential Secret | Choose the credentials for S3 from your secrets in Rancher. [Example](examples.md#example-credential-secret-for-storing-backups-in-s3). | +| Bucket Name | Enter the name of the [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html) where the backups will be stored. Default: `rancherbackups`. | +| Region | The [AWS region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | +| Folder | The [folder in the S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html) where the backups will be stored. | +| Endpoint | The [S3 endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) For example, `s3.us-west-2.amazonaws.com`. | +| Endpoint CA | The CA cert used to for the S3 endpoint. Default: base64 encoded CA cert | +| insecureTLSSkipVerify | Set to true if you are not using TLS. | + +### Existing StorageClass + +Installing the `rancher-backup` chart by selecting the StorageClass option will create a Persistent Volume Claim (PVC), and Kubernetes will in turn dynamically provision a Persistent Volume (PV) where all the backups will be saved by default. 
+ +For information about creating storage classes refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) + +::: note Important: + +It is highly recommended to use a StorageClass with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. +If no such StorageClass is available, after the PV is provisioned, make sure to edit its reclaim policy and set it to "Retain" before storing backups in it. + +::: + +### Existing Persistent Volume + +Select an existing Persistent Volume (PV) that will be used to store your backups. For information about creating PersistentVolumes in Rancher, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) + +:::note Important: + +It is highly recommended to use a Persistent Volume with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. + +::: + +# Example values.yaml for the rancher-backup Helm Chart + +The documented `values.yaml` file that can be used to configure `rancher-backup` operator when the Helm CLI is used can be found in the [backup-restore-operator repository.](https://github.com/rancher/backup-restore-operator/blob/master/charts/rancher-backup/values.yaml) + +For more information about `values.yaml` files and configuring Helm charts during installation, refer to the [Helm documentation.](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing) + diff --git a/content/rancher/v2.6/en/best-practices/rancher-managed/logging/_index.md b/docs/reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-managed/logging/_index.md rename to docs/reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md diff --git a/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md new file mode 100644 index 0000000000..166eb9442d --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md @@ -0,0 +1,120 @@ +--- +title: Monitoring Best Practices +weight: 2 +--- + +Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. This is not different when using Kubernetes and Rancher. Fortunately the integrated monitoring and alerting functionality makes this whole process a lot easier. + +The [Rancher monitoring documentation](../../../pages-for-subheaders/monitoring-and-alerting.md) describes how you can set up a complete Prometheus and Grafana stack. Out of the box this will scrape monitoring data from all system and Kubernetes components in your cluster and provide sensible dashboards and alerts for them to get started. 
But for a reliable setup, you also need to monitor your own workloads and adapt Prometheus and Grafana to your own specific use cases and cluster sizes. This document aims to give you best practices for this. + +- [What to Monitor](#what-to-monitor) +- [Configuring Prometheus Resource Usage](#configuring-prometheus-resource-usage) +- [Scraping Custom Workloads](#scraping-custom-workloads) +- [Monitoring in a (Micro)Service Architecture](#monitoring-in-a-micro-service-architecture) +- [Real User Monitoring](#real-user-monitoring) +- [Security Monitoring](#security-monitoring) +- [Setting up Alerts](#setting-up-alerts) + +# What to Monitor + +Kubernetes itself, as well as applications running inside of it, form a distributed system where different components interact with each other. For the whole system and each individual component, you have to ensure performance, availability, reliability and scalability. A good resource with more details and information is Google's free [Site Reliability Engineering Book](https://landing.google.com/sre/sre-book/), especially the chapter about [Monitoring distributed systems](https://landing.google.com/sre/sre-book/chapters/monitoring-distributed-systems/). + +# Configuring Prometheus Resource Usage + +When installing the integrated monitoring stack, Rancher allows to configure several settings that are dependent on the size of your cluster and the workloads running in it. This chapter covers these in more detail. + +### Storage and Data Retention + +The amount of storage needed for Prometheus directly correlates to the amount of time series and labels that you store and the data retention you have configured. It is important to note that Prometheus is not meant to be used as a long-term metrics storage. Data retention time is usually only a couple of days and not weeks or months. The reason for this is that Prometheus does not perform any aggregation on its stored metrics. This is great because aggregation can dilute data, but it also means that the needed storage grows linearly over time without retention. + +One way to calculate the necessary storage is to look at the average size of a storage chunk in Prometheus with this query + +``` +rate(prometheus_tsdb_compaction_chunk_size_bytes_sum[1h]) / rate(prometheus_tsdb_compaction_chunk_samples_sum[1h]) +``` + +Next, find out your data ingestion rate per second: + +``` +rate(prometheus_tsdb_head_samples_appended_total[1h]) +``` + +and then multiply this with the retention time, adding a few percentage points as buffer: + +``` +average chunk size in bytes * ingestion rate per second * retention time in seconds * 1.1 = necessary storage in bytes +``` + +You can find more information about how to calculate the necessary storage in this [blog post](https://www.robustperception.io/how-much-disk-space-do-prometheus-blocks-use). + +You can read more about the Prometheus storage concept in the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/storage). + +### CPU and Memory Requests and Limits + +In larger Kubernetes clusters Prometheus can consume quite a bit of memory. The amount of memory Prometheus needs directly correlates to the amount of time series and amount of labels it stores and the scrape interval in which these are filled. 
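In practice, the retention and resource settings discussed above are usually tuned through the monitoring chart's values. The snippet below is only a sketch: the key names assume the upstream kube-prometheus-stack layout that the Rancher monitoring chart is based on (they may differ between chart versions), and the numbers are placeholders to replace with figures derived from your own measurements.

```yaml
# Hedged example values for the monitoring chart -- verify key names against
# the chart's own values.yaml before applying.
prometheus:
  prometheusSpec:
    retention: 72h        # keep only a few days of raw metrics
    retentionSize: 45GiB  # upper bound on on-disk TSDB size
    resources:
      requests:
        cpu: "1"
        memory: 3000Mi
      limits:
        memory: 5000Mi
```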
+ +You can find more information about how to calculate the necessary memory in this [blog post](https://www.robustperception.io/how-much-ram-does-prometheus-2-x-need-for-cardinality-and-ingestion). + +The amount of necessary CPUs correlate with the amount of queries you are performing. + +### Federation and Long-term Storage + +Prometheus is not meant to store metrics for a long amount of time, but should only be used for short term storage. + +In order to store some, or all metrics for a long time, you can leverage Prometheus' [remote read/write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) capabilities to connect it to storage systems like [Thanos](https://thanos.io/), [InfluxDB](https://www.influxdata.com/), [M3DB](https://www.m3db.io/), or others. You can find an example setup in this [blog post](https://rancher.com/blog/2020/prometheus-metric-federation). + +# Scraping Custom Workloads + +While the integrated Rancher Monitoring already scrapes system metrics from a cluster's nodes and system components, the custom workloads that you deploy on Kubernetes should also be scraped for data. For that you can configure Prometheus to do an HTTP request to an endpoint of your applications in a certain interval. These endpoints should then return their metrics in a Prometheus format. + +In general, you want to scrape data from all the workloads running in your cluster so that you can use them for alerts or debugging issues. Often, you recognize that you need some data only when you actually need the metrics during an incident. It is good, if it is already scraped and stored. Since Prometheus is only meant to be a short-term metrics storage, scraping and keeping lots of data is usually not that expensive. If you are using a long-term storage solution with Prometheus, you can then still decide which data you are actually persisting and keeping there. + +### About Prometheus Exporters + +A lot of 3rd party workloads like databases, queues or web-servers either already support exposing metrics in a Prometheus format, or there are so called exporters available that translate between the tool's metrics and the format that Prometheus understands. Usually you can add these exporters as additional sidecar containers to the workload's Pods. A lot of helm charts already include options to deploy the correct exporter. Additionally you can find a curated list of exports by SysDig on [promcat.io](https://promcat.io/) and on [ExporterHub](https://exporterhub.io/). + +### Prometheus support in Programming Languages and Frameworks + +To get your own custom application metrics into Prometheus, you have to collect and expose these metrics directly from your application's code. Fortunately, there are already libraries and integrations available to help with this for most popular programming languages and frameworks. One example for this is the Prometheus support in the [Spring Framework](https://docs.spring.io/spring-metrics/docs/current/public/prometheus). + +### ServiceMonitors and PodMonitors + +Once all your workloads expose metrics in a Prometheus format, you have to configure Prometheus to scrape it. 
Under the hood Rancher is using the [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator). This makes it easy to add additional scraping targets with ServiceMonitors and PodMonitors. A lot of helm charts already include an option to create these monitors directly. You can also find more information in the Rancher documentation. + +### Prometheus Push Gateway + +There are some workloads that are traditionally hard to scrape by Prometheus. Examples for these are short lived workloads like Jobs and CronJobs, or applications that do not allow sharing data between individual handled incoming requests, like PHP applications. + +To still get metrics for these use cases, you can set up [prometheus-pushgateways](https://github.com/prometheus/pushgateway). The CronJob or PHP application would push metric updates to the pushgateway. The pushgateway aggregates and exposes them through an HTTP endpoint, which then can be scraped by Prometheus. + +### Prometheus Blackbox Monitor + +Sometimes it is useful to monitor workloads from the outside. For this, you can use the [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter) which allows probing any kind of endpoint over HTTP, HTTPS, DNS, TCP and ICMP. + +# Monitoring in a (Micro)Service Architecture + +If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic to understand how all these workloads are communicating with each other and where a problem or bottleneck may be. + +Of course you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus. But this can quickly become quite work intensive. Service Meshes like Istio, which can be installed with [a click](https://rancher.com/docs/rancher/v2.6/en/istio/) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. + +# Real User Monitoring + +Monitoring the availability and performance of all your internal workloads is vitally important to run stable, reliable and fast applications. But these metrics only show you parts of the picture. To get a complete view it is also necessary to know how your end users are actually perceiving it. For this you can look into various [Real user monitoring solutions](https://en.wikipedia.org/wiki/Real_user_monitoring). + +# Security Monitoring + +In addition to monitoring workloads to detect performance, availability or scalability problems, the cluster and the workloads running into it should also be monitored for potential security problems. A good starting point is to frequently run and alert on [CIS Scans](../../../pages-for-subheaders/cis-scan-guides.md) which check if the cluster is configured according to security best practices. + +For the workloads, you can have a look at Kubernetes and Container security solutions like [Falco](https://falco.org/), [Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/), [SysDig](https://sysdig.com/). 
+ +# Setting up Alerts + +Getting all the metrics into a monitoring systems and visualizing them in dashboards is great, but you also want to be pro-actively alerted if something goes wrong. + +The integrated Rancher monitoring already configures a sensible set of alerts that make sense in any Kubernetes cluster. You should extend these to cover your specific workloads and use cases. + +When setting up alerts, configure them for all the workloads that are critical to the availability of your applications. But also make sure that they are not too noisy. Ideally every alert you are receiving should be because of a problem that needs your attention and needs to be fixed. If you have alerts that are firing all the time but are not that critical, there is a danger that you start ignoring your alerts all together and then miss the real important ones. Less may be more here. Start to focus on the real important metrics first, for example alert if your application is offline. Fix all the problems that start to pop up and then start to create more detailed alerts. + +If an alert starts firing, but there is nothing you can do about it at the moment, it's also fine to silence the alert for a certain amount of time, so that you can look at it later. + +You can find more information on how to set up alerts and notification channels in the [Rancher Documentation](../../../pages-for-subheaders/monitoring-and-alerting.md). diff --git a/docs/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md b/docs/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md new file mode 100644 index 0000000000..1f7f08da9c --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md @@ -0,0 +1,59 @@ +--- +title: Best Practices for Rancher Managed vSphere Clusters +shortTitle: Rancher Managed Clusters in vSphere +--- + +This guide outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. + +- [1. VM Considerations](#1-vm-considerations) +- [2. Network Considerations](#2-network-considerations) +- [3. Storage Considerations](#3-storage-considerations) +- [4. Backups and Disaster Recovery](#4-backups-and-disaster-recovery) + +
**Solution Overview**
+ +![Solution Overview](/img/solution_overview.drawio.svg) + +# 1. VM Considerations + +### Leverage VM Templates to Construct the Environment + +To facilitate consistency across the deployed Virtual Machines across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customisation options. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across ESXi Hosts + +Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across Datastores + +Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. + +### Configure VM's as Appropriate for Kubernetes + +It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. + +# 2. Network Considerations + +### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes + +Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. + +### Consistent IP Addressing for VM's + +Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +# 3. Storage Considerations + +### Leverage SSD Drives for ETCD Nodes + +ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. + +# 4. Backups and Disaster Recovery + +### Perform Regular Downstream Cluster Backups + +Kubernetes uses etcd to store all its data - from configuration, state and metadata. Backing this up is crucial in the event of disaster recovery. + +### Back up Downstream Node VMs + +Incorporate the Rancher downstream node VM's within a standard VM backup policy. \ No newline at end of file diff --git a/docs/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md b/docs/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md new file mode 100644 index 0000000000..323acdb8fd --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md @@ -0,0 +1,53 @@ +--- +title: Tips for Setting Up Containers +weight: 100 +--- + +Running well-built containers can greatly impact the overall performance and security of your environment. + +Below are a few tips for setting up your containers. + +For a more detailed discussion of security for containers, you can also refer to Rancher's [Guide to Container Security.](https://rancher.com/complete-guide-container-security) + +### Use a Common Container OS + +When possible, you should try to standardize on a common container base OS. + +Smaller distributions such as Alpine and BusyBox reduce container image size and generally have a smaller attack/vulnerability surface. + +Popular distributions such as Ubuntu, Fedora, and CentOS are more field-tested and offer more functionality. 
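As a small illustration of this trade-off, here is a hedged Dockerfile sketch that standardizes on a small base image; the package and binary names are placeholders, not a recommendation:

```Dockerfile
# A small, common base image keeps the image size and attack surface down
FROM alpine:3.18

# Install only what the service really needs (placeholder package)
RUN apk add --no-cache ca-certificates

# Placeholder binary built elsewhere
COPY my-service /usr/local/bin/my-service
ENTRYPOINT ["/usr/local/bin/my-service"]
```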
+ +### Start with a FROM scratch container +If your microservice is a standalone static binary, you should use a FROM scratch container. + +The FROM scratch container is an [official Docker image](https://hub.docker.com/_/scratch) that is empty so that you can use it to design minimal images. + +This will have the smallest attack surface and smallest image size. + +### Run Container Processes as Unprivileged +When possible, use a non-privileged user when running processes within your container. While container runtimes provide isolation, vulnerabilities and attacks are still possible. Inadvertent or accidental host mounts can also be impacted if the container is running as root. For details on configuring a security context for a pod or container, refer to the [Kubernetes docs](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + +### Define Resource Limits +Apply CPU and memory limits to your pods. This can help manage the resources on your worker nodes and avoid a malfunctioning microservice from impacting other microservices. + +In standard Kubernetes, you can set resource limits on the namespace level. In Rancher, you can set resource limits on the project level and they will propagate to all the namespaces within the project. For details, refer to the Rancher docs. + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project or namespace, all containers will require a respective CPU or Memory field set during creation. To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. + +The Kubernetes docs have more information on how resource limits can be set at the [container level](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container) and the namespace level. + +### Define Resource Requirements +You should apply CPU and memory requirements to your pods. This is crucial for informing the scheduler which type of compute node your pod needs to be placed on, and ensuring it does not over-provision that node. In Kubernetes, you can set a resource requirement by defining `resources.requests` in the resource requests field in a pod's container spec. For details, refer to the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). + +:::note + +If you set a resource limit for the namespace that the pod is deployed in, and the container doesn't have a specific resource request, the pod will not be allowed to start. To avoid setting these fields on each and every container during workload creation, a default container resource limit can be specified on the namespace. + +::: + +It is recommended to define resource requirements on the container level because otherwise, the scheduler makes assumptions that will likely not be helpful to your application when the cluster experiences load. + +### Liveness and Readiness Probes +Set up liveness and readiness probes for your container. Unless your container completely crashes, Kubernetes will not know it's unhealthy unless you create an endpoint or mechanism that can report container status. 
Alternatively, make sure your container halts and crashes if unhealthy. + +The Kubernetes docs show how to [configure liveness and readiness probes for containers.](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) diff --git a/docs/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md b/docs/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md new file mode 100644 index 0000000000..3a0f0ca344 --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md @@ -0,0 +1,91 @@ +--- +title: Installing Rancher in a vSphere Environment +shortTitle: On-Premises Rancher in vSphere +weight: 3 +--- + +This guide outlines a reference architecture for installing Rancher on an RKE Kubernetes cluster in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. + +- [1. Load Balancer Considerations](#1-load-balancer-considerations) +- [2. VM Considerations](#2-vm-considerations) +- [3. Network Considerations](#3-network-considerations) +- [4. Storage Considerations](#4-storage-considerations) +- [5. Backups and Disaster Recovery](#5-backups-and-disaster-recovery) + +
**Solution Overview**
+ +![Solution Overview](/img/rancher-on-prem-vsphere.svg) + +# 1. Load Balancer Considerations + +A load balancer is required to direct traffic to the Rancher workloads residing on the RKE nodes. + +### Leverage Fault Tolerance and High Availability + +Leverage the use of an external (hardware or software) load balancer that has inherit high-availability functionality (F5, NSX-T, Keepalived, etc). + +### Back Up Load Balancer Configuration + +In the event of a Disaster Recovery activity, availability of the Load balancer configuration will expedite the recovery process. + +### Configure Health Checks + +Configure the Load balancer to automatically mark nodes as unavailable if a health check is failed. For example, NGINX can facilitate this with: + +`max_fails=3 fail_timeout=5s` + +### Leverage an External Load Balancer + +Avoid implementing a software load balancer within the management cluster. + +### Secure Access to Rancher + +Configure appropriate Firewall / ACL rules to only expose access to Rancher + +# 2. VM Considerations + +### Size the VM's According to Rancher Documentation + +https://rancher.com/docs/rancher/v2.6/en/installation/requirements/ + +### Leverage VM Templates to Construct the Environment + +To facilitate the consistency of Virtual Machines deployed across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customization options. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across ESXi Hosts + +Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across Datastores + +Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. + +### Configure VM's as Appropriate for Kubernetes + +It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. + +# 3. Network Considerations + +### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes + +Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. + +### Consistent IP Addressing for VM's + +Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +# 4. Storage Considerations + +### Leverage SSD Drives for ETCD Nodes + +ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. + +# 5. Backups and Disaster Recovery + +### Perform Regular Management Cluster Backups + +Rancher stores its data in the ETCD datastore of the Kubernetes cluster it resides on. Like with any Kubernetes cluster, perform frequent, tested backups of this cluster. + +### Back up Rancher Cluster Node VMs + +Incorporate the Rancher management node VM's within a standard VM backup policy. 
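For the management cluster backups mentioned above, an RKE-provisioned cluster can be snapshotted from the machine that holds its `cluster.yml`. This is a hedged example; the snapshot name is illustrative, and recurring snapshots can also be configured directly in `cluster.yml`:

```bash
# One-off etcd snapshot of the RKE cluster that runs Rancher
rke etcd snapshot-save --config cluster.yml --name rancher-mgmt-$(date +%Y%m%d)
```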
diff --git a/docs/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md b/docs/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md new file mode 100644 index 0000000000..c31fdec2fd --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md @@ -0,0 +1,45 @@ +--- +title: Rancher Deployment Strategy +weight: 100 +--- + +There are two recommended deployment strategies for a Rancher server that manages downstream Kubernetes clusters. Each one has its own pros and cons. Read more about which one would fit best for your use case: + +* [Hub and Spoke](#hub-and-spoke-strategy) +* [Regional](#regional-strategy) + +# Hub & Spoke Strategy +--- + +In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. + +![](/img/bpg/hub-and-spoke.png) + +### Pros + +* Environments could have nodes and network connectivity across regions. +* Single control plane interface to view/see all regions and environments. +* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. + +### Cons + +* Subject to network latencies. +* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. + +# Regional Strategy +--- +In the regional deployment model a control plane is deployed in close proximity to the compute nodes. + +![](/img/bpg/regional.png) + +### Pros + +* Rancher functionality in regions stay operational if a control plane in another region goes down. +* Network latency is greatly reduced, improving the performance of functionality in Rancher. +* Upgrades of the Rancher control plane can be done independently per region. + +### Cons + +* Overhead of managing multiple Rancher installations. +* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. +* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md new file mode 100644 index 0000000000..a4c0d82803 --- /dev/null +++ b/docs/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md @@ -0,0 +1,37 @@ +--- +title: Tips for Running Rancher +weight: 100 +--- + +This guide is geared toward use cases where Rancher is used to manage downstream Kubernetes clusters. The high-availability setup is intended to prevent losing access to downstream clusters if the Rancher server is not available. + +A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. 
+ +If you are installing Rancher in a vSphere environment, refer to the best practices documented [here.](on-premises-rancher-in-vsphere.md) + +When you set up your high-availability Rancher installation, consider the following: + +### Run Rancher on a Separate Cluster +Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. + +### Make sure nodes are configured correctly for Kubernetes ### +It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://etcd.io/docs/v3.4/op-guide/performance/). + +### When using RKE: Back up the Statefile +RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. + +### Run All Nodes in the Cluster in the Same Datacenter +For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. + +### Development and Production Environments Should be Similar +It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. + +### Monitor Your Clusters to Plan Capacity +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements](../../../pages-for-subheaders/installation-requirements.md) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. + +However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. + +After you [enable monitoring](../../../pages-for-subheaders/monitoring-and-alerting.md) in the cluster, you can set up alerts to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. 
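As a hedged example of the capacity alerts described above, an expression of this shape could be used to warn when cluster nodes run consistently close to their memory capacity. It assumes the node-exporter metrics shipped with the integrated monitoring; adjust the threshold to your own baseline:

```
# Average node memory utilisation across the cluster above 80%
avg(1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) > 0.8
```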
+ diff --git a/docs/reference-guides/cli-with-rancher/kubectl-utility.md b/docs/reference-guides/cli-with-rancher/kubectl-utility.md new file mode 100644 index 0000000000..09159533e4 --- /dev/null +++ b/docs/reference-guides/cli-with-rancher/kubectl-utility.md @@ -0,0 +1,38 @@ +--- +title: kubectl Utility +--- + +- [kubectl](#kubectl) + - [kubectl Utility](#kubectl-utility) + - [Authentication with kubectl and kubeconfig Tokens with TTL](#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) + +# kubectl + +Interact with Rancher using kubectl. + +### kubectl Utility + +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +Configure kubectl by visiting your cluster in the Rancher Web UI, clicking on `Kubeconfig`, copying contents, and putting them into your `~/.kube/config` file. + +Run `kubectl cluster-info` or `kubectl get pods` successfully. + +### Authentication with kubectl and kubeconfig Tokens with TTL + +_Requirements_ + +If admins have [enforced TTL on kubeconfig tokens](../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher CLI](cli-with-rancher.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like: +`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. + +This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: + +1. Local +2. Active Directory (LDAP only) +3. FreeIPA +4. OpenLDAP +5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth + +When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. +The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../reference-guides/about-the-api/api-tokens.md#deleting-tokens). +Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. \ No newline at end of file diff --git a/docs/reference-guides/cli-with-rancher/rancher-cli.md b/docs/reference-guides/cli-with-rancher/rancher-cli.md new file mode 100644 index 0000000000..ced6909d25 --- /dev/null +++ b/docs/reference-guides/cli-with-rancher/rancher-cli.md @@ -0,0 +1,97 @@ +--- +title: Rancher CLI +description: Interact with Rancher using command line interface (CLI) tools from your workstation. +weight: 21 +--- + +- [Rancher CLI](#rancher-cli) + - [Download Rancher CLI](#download-rancher-cli) + - [Requirements](#requirements) + - [CLI Authentication](#cli-authentication) + - [Project Selection](#project-selection) + - [Commands](#commands) + - [Rancher CLI Help](#rancher-cli-help) + - [Limitations](#limitations) + +The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. + +### Download Rancher CLI + +The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. 
You can also check the [releases page for our CLI](https://github.com/ranchcli/releases) for direct downloads of the binary. + +1. In the upper left corner, click **☰**. +1. At the bottom, click **v2.6.x**, where **v2.6.x** is a hyperlinked text indicating the installed Rancher version. +1. Under the **CLI Downloads section**, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/ranchcli/releases) for direct downloads of the binary. + +### Requirements + +After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: + +- Your Rancher Server URL, which is used to connect to Rancher Server. +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../reference-guides/user-settings/api-keys.md). + +### CLI Authentication + +Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `` and `` with your information): + +```bash +$ ./rancher login https:// --token +``` + +If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. + +### Project Selection + +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. + +**Example: `./rancher context switch` Output** +``` +User:rancher-cli-directory user$ ./rancher context switch +NUMBER CLUSTER NAME PROJECT ID PROJECT NAME +1 cluster-2 c-7q96s:p-h4tmb project-2 +2 cluster-2 c-7q96s:project-j6z6d Default +3 cluster-1 c-lchzv:p-xbpdt project-1 +4 cluster-1 c-lchzv:project-s2mch Default +Select a Project: +``` + +After you enter a number, the console displays a message that you've changed projects. + +``` +INFO[0005] Setting new context to project project-1 +INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json +``` + +Ensure you can run `rancher kubectl get pods` successfully. + +### Commands + +The following commands are available for use in Rancher CLI. + +| Command | Result | +|---|---| +| `apps, [app]` | Performs operations on catalog applications (i.e., individual [Helm charts](https://docs.helm.sh/developing_charts/)) or Rancher charts. | +| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.6/helm-charts). | +| `clusters, [cluster]` | Performs operations on your [clusters](kubernetes-clusters-in-rancher-setup.md). | +| `context` | Switches between Rancher [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) and [workloads](workloads-and-pods.md)). Specify resources by name or ID. 
| +| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | +| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | +| `namespaces, [namespace]` |Performs operations on namespaces. | +| `nodes, [node]` |Performs operations on nodes. | +| `projects, [project]` | Performs operations on [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). | +| `ps` | Displays [workloads](workloads-and-pods.md) in a project. | +| `settings, [setting]` | Shows the current settings for your Rancher Server. | +| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | +| `help, [h]` | Shows a list of commands or help for one command. | + + +### Rancher CLI Help + +Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. + +All commands accept the `--help` flag, which documents each command's usage. + +### Limitations + +The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](helm-charts-in-rancher.md). \ No newline at end of file diff --git a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md new file mode 100644 index 0000000000..4b00f3e1bf --- /dev/null +++ b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2.md @@ -0,0 +1,79 @@ +--- +title: EC2 Machine Configuration Reference +weight: 2 +--- + +For more details about EC2 nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). + +### Region + +The geographical [region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) in which to build your cluster. + +### Zone + +The [zone](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones), an isolated location within a region to build your cluster + +### Instance Type + +The [instance type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html), which determines the hardware characteristics, used to provision your cluster. + +### Root Disk Size + +Configure the size (in GB) for your [root device](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/RootDeviceStorage.html). + +### VPC/Subnet + +The [VPC](https://docs.aws.amazon.com/vpc/latest/userguide/configure-your-vpc.html) or specific [subnet](https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), an IP range in your VPC, to add your resources to. + +### IAM Instance Profile Name + +The name of the [instance profile] used to pass an IAM role to an EC2 instance. + +## Advanced Options + +### AMI ID + +The [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) used for the nodes in your cluster. + +### SSH Username for AMI + +The username for connecting to your launched instances. 
Refer to [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connection-prereqs.html) for the default usernames to selected AMIs. For AMIs not listed, check with the AMI provider. + +### Security Group + +Choose the default security group or configure a security group. + +Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. + +### EBS Root Volume Type + +The [EBS volume type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) to use for the root device. + +### Encrypt EBS Volume + +Enable [Amazon EBS Encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html). + +### Request Spot Instance + +Enable option to [request spot instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) and specify the maximum instance price per hour you're willing to pay. + +### Use only private address + +Enable option on use only [private addresses](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html). + +### EBS-Optimized Instance + +Use an [EBS-optimized instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html). + +### Allow access to EC2 metadata + +Enable access to [EC2 metadata](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + +### Use tokens for metadata + +Use [Instance Metadata Service Version 2 (IMDSv2)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html), a token-based method to access metadata. + +### Add Tag + +Add metadata using [tags](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) to categorize resources. 
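When a cluster is provisioned this way, the values above end up in an EC2 machine configuration object. The sketch below is hypothetical: the field names mirror the EC2 machine driver options described on this page, but you should confirm them against the `amazonec2configs.rke-machine-config.cattle.io` resources in your own installation, as they can change between Rancher versions.

```yaml
# Hypothetical machine configuration -- verify field names against your cluster's CRD.
apiVersion: rke-machine-config.cattle.io/v1
kind: Amazonec2Config
metadata:
  name: ec2-workers-example   # placeholder name
  namespace: fleet-default
region: us-west-2
zone: a
instanceType: t3a.xlarge
ami: ami-0123456789abcdef0    # placeholder AMI ID
sshUser: ubuntu
vpcId: vpc-0123456789abcdef0
subnetId: subnet-0123456789abcdef0
iamInstanceProfile: rancher-node-profile
```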
+ diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/_index.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/_index.md rename to docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/_index.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/_index.md rename to docs/reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean.md diff --git a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md new file mode 100644 index 0000000000..8a72e8be2b --- /dev/null +++ b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md @@ -0,0 +1,50 @@ +--- +title: EC2 Node Template Configuration +weight: 1 +--- + +For more details about EC2, nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). +### Region + +In the **Region** field, select the same region that you used when creating your cloud credentials. + +### Cloud Credentials + +Your AWS account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) + +See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. + +See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
+ +See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM policy to a user. + +See our three example JSON policies: + +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](../../../../pages-for-subheaders/set-up-cloud-providers.md) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) + +### Authenticate & Configure Nodes + +Choose an availability zone and network settings for your cluster. + +### Security Group + +Choose the default security group or configure a security group. + +Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. + +--- +**_New in v2.6.4_** + +If you provide your own security group for an EC2 instance, please note that Rancher will not modify it. As such, you will be responsible for ensuring that your security group is set to allow the [necessary ports for Rancher to provision the instance](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#ports-for-rancher-server-nodes-on-rke). For more information on controlling inbound and outbound traffic to EC2 instances with security groups, refer [here](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#WorkingWithSecurityGroups). + +### Instance Options + +Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. It is possible that a selected region does not support the default instance type. In this scenario you must select an instance type that does exist, otherwise an error will occur stating the requested configuration is not supported. + +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](../../../../pages-for-subheaders/set-up-cloud-providers.md), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + +### Engine Options + +In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the Docker version or a Docker registry mirror.
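The registry mirror and version settings in this section correspond to standard Docker Engine daemon options. Purely for reference, here is a hedged example of the equivalent daemon configuration on a node (the mirror URL is a placeholder):

```json
{
  "registry-mirrors": ["https://registry-mirror.example.internal"]
}
```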
diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md rename to docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md rename to docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix-node-template-config/_index.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix-node-template-config/_index.md rename to docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix.md diff --git a/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md new file mode 100644 index 0000000000..81c66fd841 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md @@ -0,0 +1,93 @@ +--- +title: VSphere Node Template Configuration +weight: 2 +--- + +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Networks](#networks) +- [Node tags and custom attributes](#node-tags-and-custom-attributes) +- [cloud-init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) | + +Your cloud credential has these fields: + +| Credential Field | Description | +|-----------------|--------------| +| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | Optional: configure configure the port of the vCenter or ESXi server. | +| Username and password | Enter your vSphere login username and password. | + +# Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. + +The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. + +| Field | Required | Explanation | +|---------|---------------|-----------| +| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. 
| +| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | +| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. | +| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +| Parameter | Required | Description | +|:----------------|:--------:|:-----------| +| CPUs | * | Number of vCPUs to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | +| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | +| Networks | | Name(s) of the network to attach the VM to. | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo](https://rancher.com/docs/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + + +### About VM Creation Methods + +In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). + +The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html).
+ +Choose the way that the VM will be created: + +- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. +- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates**. +- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. +- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). Note that this URL must be accessible from the nodes running your Rancher server installation. + +# Networks + +The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. + +# Node Tags and Custom Attributes + +Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +For tags, all your vSphere tags will show up as options to select from in your node template. + +In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. + +:::note + +Custom attributes are a legacy feature that will eventually be removed from vSphere. + +::: + +# cloud-init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. + +Note that cloud-init is not supported when using the ISO creation method. diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md new file mode 100644 index 0000000000..d27ddf7f62 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration.md @@ -0,0 +1,226 @@ +--- +shortTitle: AKS Cluster Configuration +title: AKS Cluster Configuration Reference +weight: 4 +--- + +# Changes in Rancher v2.6 + +- Support for adding more than one node pool +- Support for private clusters +- Enabled autoscaling node pools +- The AKS permissions are now configured in cloud credentials + +# Role-based Access Control + +When provisioning an AKS cluster in the Rancher UI, RBAC cannot be disabled. If role-based access control is disabled for the cluster in AKS, the cluster cannot be registered or imported into Rancher. + +Rancher can configure member roles for AKS clusters in the same way as any other cluster. 
For more information, see the section on [role-based access control.](../../../pages-for-subheaders/manage-role-based-access-control-rbac.md) + +# Cloud Credentials + +:::note + +The configuration information in this section assumes you have already set up a service principal for Rancher. For step-by-step instructions for how to set up the service principal, see [this section.](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md#prerequisites-in-microsoft-azure) + +::: + +### Subscription ID + +To get the subscription ID, click **All Services** in the left navigation bar. Then click **Subscriptions**. Go to the name of the subscription that you want to associate with your Kubernetes cluster and copy the **Subscription ID**. + +### Client ID + +To get the client ID, go to the Azure Portal, then click **Azure Active Directory**, then click **App registrations,** then click the name of the service principal. The client ID is listed on the app registration detail page as **Application (client) ID**. + +### Client Secret + +You can't retrieve the client secret value after it is created, so if you don't already have a client secret value, you will need to create a new client secret. + +To get a new client secret, go to the Azure Portal, then click **Azure Active Directory**, then click **App registrations,** then click the name of the service principal. + +Then click **Certificates & secrets** and click **New client secret**. Click **Add**. Then copy the **Value** of the new client secret. + +### Environment + +Microsoft provides multiple [clouds](https://docs.microsoft.com/en-us/cli/azure/cloud?view=azure-cli-latest) for compliance with regional laws, which are available for your use: + +- AzurePublicCloud +- AzureGermanCloud +- AzureChinaCloud +- AzureUSGovernmentCloud + +# Account Access + +In this section you will need to select an existing Azure cloud credential or create a new one. + +For help configuring your Azure cloud credential, see [this section.](#cloud-credentials) + +# Cluster Location + +Configure the cluster and node location. For more information on availability zones for AKS, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/availability-zones) + +The high availability locations include multiple availability zones. + +# Cluster Options + +### Kubernetes Version + +The available Kubernetes versions are dynamically fetched from the Azure API. + +### Cluster Resource Group + +A resource group is a container that holds related resources for an Azure solution. The resource group can include all the resources for the solution, or only those resources that you want to manage as a group. You decide how you want to allocate resources to resource groups based on what makes the most sense for your organization. Generally, add resources that share the same lifecycle to the same resource group so you can easily deploy, update, and delete them as a group. + +Use an existing resource group or enter a resource group name and one will be created for you. + +Using a resource group containing an existing AKS cluster will create a new resource group. Azure AKS only allows one AKS cluster per resource group. 
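If you prefer to create and manage the resource group yourself before selecting it in Rancher, the Azure CLI can create an empty one; the name and location below are placeholders:

```bash
# Create an empty resource group to select later in the Rancher UI (illustrative values)
az group create --name rancher-aks-rg --location eastus
```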
+ +For information on managing resource groups, see the [Azure documentation.](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) + +### Linux Admin Username + +The username used to create an SSH connection to the Linux nodes. + +The default username for AKS nodes is `azureuser`. + +### SSH Public Key + +The key used to create an SSH connection to the Linux nodes. + +### Tags + +Cluster tags can be useful if your organization uses tags as a way to organize resources across multiple Azure services. These tags don't apply to resources within the cluster. + +# Networking Options + +### LoadBalancer SKU + +Azure load balancers support both standard and basic SKUs (stock keeping units). + +For a comparison of standard and basic load balancers, see the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/load-balancer/skus#skus) Microsoft recommends the Standard load balancer. + +The Standard load balancer is required if you have selected one or more availability zones, or if you have more than one node pool. + +### Network Policy + +All pods in an AKS cluster can send and receive traffic without limitations, by default. To improve security, you can define rules that control the flow of traffic. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster. + +Azure provides two ways to implement network policy. You choose a network policy option when you create an AKS cluster. The policy option can't be changed after the cluster is created: + +- Azure's own implementation, called Azure Network Policies. The Azure network policy requires the Azure CNI. +- Calico Network Policies, an open-source network and network security solution founded by [Tigera](https://www.tigera.io/). + +You can also choose to have no network policy. + +For more information about the differences between Azure and Calico network policies and their capabilities, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/use-network-policies#differences-between-azure-and-calico-policies-and-their-capabilities) + +### DNS Prefix +Enter a unique DNS prefix for your cluster's Kubernetes API server FQDN. + +### Network Plugin +There are two network plugins: kubenet and Azure CNI. + +The [kubenet](https://kubernetes.io/docs/concepts/cluster-administration/network-plugins/#kubenet) Kubernetes plugin is the default configuration for AKS cluster creation. When kubenet is used, each node in the cluster receives a routable IP address. The pods use NAT to communicate with other resources outside the AKS cluster. This approach reduces the number of IP addresses you need to reserve in your network space for pods to use. + +With the Azure CNI (advanced) networking plugin, pods get full virtual network connectivity and can be directly reached via their private IP address from connected networks. This plugin requires more IP address space. + +For more information on the differences between kubenet and Azure CNI, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/concepts-network#compare-network-models) + +### HTTP Application Routing + +When enabled, the HTTP application routing add-on makes it easier to access applications deployed to the AKS cluster. 
It deploys two components: a [Kubernetes Ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/) and an [External-DNS](https://github.com/kubernetes-incubator/external-dns) controller. + +For more information, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/http-application-routing) + +### Set Authorized IP Ranges + +You can secure access to the Kubernetes API server using [authorized IP address ranges.](https://docs.microsoft.com/en-us/azure/aks/api-server-authorized-ip-ranges#overview-of-api-server-authorized-ip-ranges) + +The Kubernetes API server exposes the Kubernetes API. This component provides the interaction for management tools, such as kubectl. AKS provides a single-tenant cluster control plane with a dedicated API server. By default, the API server is assigned a public IP address, and you should control access to it using Kubernetes-based or Azure-based RBAC. + +To secure access to the otherwise publicly accessible AKS control plane and API server, you can enable and use authorized IP ranges. These authorized IP ranges only allow defined IP address ranges to communicate with the API server. + +However, even if you use authorized IP address ranges, you should still use Kubernetes RBAC or Azure RBAC to authorize users and the actions they request. + +### Container Monitoring + +Container monitoring gives you performance visibility by collecting memory and processor metrics from controllers, nodes, and containers that are available in Kubernetes through the Metrics API. Container logs are also collected. After you enable monitoring, metrics and logs are automatically collected for you through a containerized version of the Log Analytics agent for Linux. Metrics are written to the metrics store and log data is written to the logs store associated with your [Log Analytics](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/log-query-overview) workspace. + +### Log Analytics Workspace Resource Group + +The [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups) containing the Log Analytics Workspace. You must create at least one workspace to use Azure Monitor Logs. + +### Log Analytics Workspace Name + +Data collected by Azure Monitor Logs is stored in one or more [Log Analytics workspaces.](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/design-logs-deployment) The workspace defines the geographic location of the data, access rights defining which users can access data, and configuration settings such as the pricing tier and data retention. + +You must create at least one workspace to use Azure Monitor Logs. A single workspace may be sufficient for all of your monitoring data, or you may choose to create multiple workspaces depending on your requirements. For example, you might have one workspace for your production data and another for testing. + +For more information about Azure Monitor Logs, see the [Azure documentation.](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/data-platform-logs) + +### Support Private Kubernetes Service + +Typically, AKS worker nodes do not get public IPs, regardless of whether the cluster is private.
In a private cluster, the control plane does not have a public endpoint. + +Rancher can connect to a private AKS cluster in one of two ways. + +The first way is to ensure that Rancher is running on the same [NAT](https://docs.microsoft.com/en-us/azure/virtual-network/nat-overview) as the AKS nodes. + +The second way is to run a command to register the cluster with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster’s Kubernetes API. This command is displayed in a pop-up when you provision an AKS cluster with a private API endpoint enabled. + +:::note + +Please be aware that when registering an existing AKS cluster, the cluster might take some time, possibly hours, to appear in the `Cluster To register` dropdown list. The delay varies by region. + +::: + +For more information about connecting to an AKS private cluster, see the [AKS documentation.](https://docs.microsoft.com/en-us/azure/aks/private-clusters#options-for-connecting-to-the-private-cluster) + +# Node Pools + +### Mode + +The Azure interface allows users to specify whether a Primary Node Pool relies on either `system` (normally used for control planes) or `user` (what is most typically needed for Rancher). + +For Primary Node Pools, you can specify Mode, OS, Count, and Size. + +System node pools always require running nodes, so they cannot be scaled below one node. At least one system node pool is required. + +For subsequent node pools, the Rancher UI forces the default of User. User node pools allow you to scale to zero nodes. User node pools don't run any part of the Kubernetes control plane. + +AKS doesn't expose the nodes that run the Kubernetes control plane components. + +### Availability Zones + +[Availability zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview) are unique physical locations within a region. Each zone is made up of one or more data centers equipped with independent power, cooling, and networking. + +Not all regions have support for availability zones. For a list of Azure regions with availability zones, see the [Azure documentation.](https://docs.microsoft.com/en-us/azure/availability-zones/az-region#azure-regions-with-availability-zones) + +### VM Size + +Choose a size for each VM in the node pool. For details about each VM size, see [this page.](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) + +### OS Disk Type + +The nodes in the node pool can have either managed or ephemeral disks. + +[Ephemeral OS disks](https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks) are created on the local virtual machine storage and not saved to the remote Azure Storage. Ephemeral OS disks work well for stateless workloads, where applications are tolerant of individual VM failures, but are more affected by VM deployment time or reimaging of the individual VM instances. With an ephemeral OS disk, you get lower read/write latency to the OS disk and faster VM reimaging. + +[Azure managed disks](https://docs.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview) are block-level storage volumes that are managed by Azure and used with Azure Virtual Machines. Managed disks are designed for 99.999% availability.
Managed disks achieve this by providing you with three replicas of your data, allowing for high durability. + +### OS Disk Size + +The size in GB for the disk for each node. + +### Node Count +The number of nodes in the node pool. The maximum number of nodes may be limited by your [Azure subscription.](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits) + +### Max Pods Per Node +The maximum number of pods per node defaults to 110 with a maximum of 250. + +### Enable Auto Scaling + +When auto scaling is enabled, you will need to enter a minimum and maximum node count. + +When Auto Scaling is enabled, you can't manually scale the node pool. The scale is controlled by the AKS autoscaler. diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md new file mode 100644 index 0000000000..ad5e489d8d --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md @@ -0,0 +1,151 @@ +--- +title: EKS Cluster Configuration Reference +shortTitle: EKS Cluster Configuration +weight: 2 +--- + +### Account Access + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.](../../user-settings/manage-cloud-credentials.md) | + +### Service Role + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Secrets Encryption + +Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) + +### API Server Endpoint Access + +Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Private-only API Endpoints + +If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. 
Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. + +There are two ways to avoid this extra manual step: +- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. +- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). + +### Public Access Endpoints + +Optionally limit access to the public endpoint via explicit CIDR blocks. + +If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. + +One of the following is required to enable private access: +- Rancher's IP must be part of an allowed CIDR block +- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group + +For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Subnet + +| Option | Description | +| ------- | ------------ | +| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | +| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + +### Security Group + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Logging + +Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. 
+ +Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. + +For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) + +### Managed Node Groups + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. + +For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) + +#### Bring your own launch template + +A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. + +| Option | Description | Required/Optional | +| ------ | ----------- | ----------------- | +| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | +| Image ID | Specify a custom AMI for the nodes. Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | +| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | +| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | +| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | +| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | + +#### Rancher-managed launch templates + +If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. 
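If you are unsure which launch templates in your account are Rancher-managed, the tag mentioned above makes them easy to find. A sketch using the AWS CLI (add your usual region or profile flags):

```bash
# List launch templates carrying the Rancher-managed tag so you know not to modify or delete them
aws ec2 describe-launch-templates \
  --filters "Name=tag:rancher-managed-template,Values=do-not-modify-or-delete" \
  --query "LaunchTemplates[].LaunchTemplateName"
```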
+ +#### Custom AMIs + +If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. + +If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. + +:::note + +The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. + +::: + +#### Spot instances + +Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. + +:::note + +Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. + +::: + +#### Node Group Settings + +The following settings are also configurable. All of these except for the "Node Group Name" are editable after the node group is created. + +| Option | Description | +| ------- | ------------ | +| Node Group Name | The name of the node group. | +| Desired ASG Size | The desired number of instances. | +| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Labels | Kubernetes labels applied to the nodes in the managed node group. | +| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. | + + +### Configuring the Refresh Interval + +The `eks-refresh-cron` setting is deprecated. It has been migrated to the `eks-refresh` setting, which is an integer representing seconds. + +The default value is 300 seconds. + +The syncing interval can be changed by running `kubectl edit setting eks-refresh`. + +If the `eks-refresh-cron` setting was previously set, the migration will happen automatically. 
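For example, to check the current interval before editing it, you can read the setting with kubectl against the cluster where Rancher is installed (the `value` field is assumed here to hold the override and is empty when the 300-second default is in effect):

```bash
# Inspect the current eks-refresh value; empty output means the 300-second default applies
kubectl get setting eks-refresh -o jsonpath='{.value}'

# Change the syncing interval, as described above
kubectl edit setting eks-refresh
```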
+ +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. + diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md new file mode 100644 index 0000000000..f3a6d1a869 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md @@ -0,0 +1,51 @@ +--- +title: Private Clusters +weight: 2 +--- + +In GKE, [private clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept) are clusters whose nodes are isolated from inbound and outbound traffic by assigning them internal IP addresses only. Private clusters in GKE have the option of exposing the control plane endpoint as a publicly accessible address or as a private address. This is different from other Kubernetes providers, which may refer to clusters with private control plane endpoints as "private clusters" but still allow traffic to and from nodes. You may want to create a cluster with private nodes, with or without a public control plane endpoint, depending on your organization's networking and security requirements. A GKE cluster provisioned from Rancher can use isolated nodes by selecting "Private Cluster" in the Cluster Options (under "Show advanced options"). The control plane endpoint can optionally be made private by selecting "Enable Private Endpoint". + +### Private Nodes + +Because the nodes in a private cluster only have internal IP addresses, they will not be able to install the cluster agent and Rancher will not be able to fully manage the cluster. This can be overcome in a few ways. + +#### Cloud NAT + +:::caution + +Cloud NAT will [incur charges](https://cloud.google.com/nat/pricing). + +::: + +If restricting outgoing internet access is not a concern for your organization, use Google's [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) service to allow nodes in the private network to access the internet, enabling them to download the required images from Dockerhub and contact the Rancher management server. This is the simplest solution. + +#### Private registry + +:::caution + +This scenario is not officially supported, but is described for cases in which using the Cloud NAT service is not sufficient. + +::: + +If restricting both incoming and outgoing traffic to nodes is a requirement, follow the air-gapped installation instructions to set up a private container image [registry](https://rancher.com/docs/rancher/v2.6/en/installation/other-installation-methods/air-gap/) on the VPC where the cluster is going to be, allowing the cluster nodes to access and download the images they need to run the cluster agent. If the control plane endpoint is also private, Rancher will need [direct access](#direct-access) to it. + +### Private Control Plane Endpoint + +If the cluster has a public endpoint exposed, Rancher will be able to reach the cluster, and no additional steps need to be taken. However, if the cluster has no public endpoint, then considerations must be made to ensure Rancher can access the cluster. 
+ +#### Cloud NAT + +:::caution + +Cloud NAT will [incur charges](https://cloud.google.com/nat/pricing). + +::: + +As above, if restricting outgoing internet access to the nodes is not a concern, then Google's [Cloud NAT](https://cloud.google.com/nat/docs/using-nat) service can be used to allow the nodes to access the internet. While the cluster is provisioning, Rancher will provide a registration command to run on the cluster. Download the [kubeconfig](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl) for the new cluster and run the provided kubectl command on the cluster. Gaining access +to the cluster in order to run this command can be done by creating a temporary node or using an existing node in the VPC, or by logging on to or creating an SSH tunnel through one of the cluster nodes. + +#### Direct access + +If the Rancher server is run on the same VPC as the cluster's control plane, it will have direct access to the control plane's private endpoint. The cluster nodes will need to have access to a [private registry](#private-registry) to download images as described above. + +You can also use services from Google such as [Cloud VPN](https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview) or [Cloud Interconnect VLAN](https://cloud.google.com/network-connectivity/docs/interconnect) to facilitate connectivity between your organization's network and your Google VPC. diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md new file mode 100644 index 0000000000..a827719123 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration.md @@ -0,0 +1,147 @@ +--- +title: K3s Cluster Configuration Reference +shortTitle: K3s Cluster Configuration +weight: 6 +--- + +This section covers the configuration options that are available in Rancher for a new or existing K3s Kubernetes cluster. + +# Overview + +You can configure the Kubernetes options one of two ways: + +- [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create a K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in an K3s installation. + +# Configuration Options in the Rancher UI + +:::tip + +Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the K3s cluster configuration file in YAML. For the complete reference of configurable options for K3s clusters in YAML, see the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +::: + +### Basics +#### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. 
Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). + +#### Encrypt Secrets + +Option to enable or disable secrets encryption. When enabled, secrets will be encrypted using an AES-CBC key. If disabled, any previously created secrets will not be readable until encryption is enabled again. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/advanced/#secrets-encryption-config-experimental) for details. + +#### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +#### SELinux + +Option to enable or disable [SELinux](https://rancher.com/docs/k3s/latest/en/advanced/#selinux-support) support. + +#### CoreDNS + +By default, [CoreDNS](https://coredns.io/) is installed as the default DNS provider. If CoreDNS is not installed, you must install an alternate DNS provider yourself. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#coredns) for details. + +#### Klipper Service LB + +Option to enable or disable the [Klipper](https://github.com/rancher/klipper-lb) service load balancer. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#service-load-balancer) for details. + +#### Traefik Ingress + +Option to enable or disable the [Traefik](https://traefik.io/) HTTP reverse proxy and load balancer. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/networking/#traefik-ingress-controller). + +#### Local Storage + +Option to enable or disable [local storage](https://rancher.com/docs/k3s/latest/en/storage/) on the node(s). + +#### Metrics Server + +Option to enable or disable the [metrics server](https://github.com/kubernetes-incubator/metrics-server). If enabled, ensure port 10250 is opened for inbound TCP traffic. + +### Add-On Config + +Additional Kubernetes manifests, managed as an [add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/helm/#automatically-deploying-manifests-and-helm-charts) for details. + +### Agent Environment Vars + +Option to set environment variables for [K3s agents](https://rancher.com/docs/k3s/latest/en/architecture/). The environment variables can be set using key-value pairs. Refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/) for more details. + +### etcd + +#### Automatic Snapshots + +Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots.
For details, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/backup-restore/#creating-snapshots). + +#### Metrics + +Option to choose whether to expose etcd metrics to the public or only within the cluster. + +### Networking + +#### Cluster CIDR + +IPv4/IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). + +#### Service CIDR + +IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). + +#### Cluster DNS + +IPv4 cluster IP for the CoreDNS service. It should be in your service CIDR range (default: 10.43.0.10). + +#### Cluster Domain + +Select the domain for the cluster. The default is `cluster.local`. + +#### NodePort Service Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. + +#### TLS Alternate Names + +Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. + +#### Authorized Cluster Endpoint + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +### Registries + +Select the image repository to pull Rancher images from. For more details and configuration options, see the [K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/). + +### Upgrade Strategy + +#### Control Plane Concurrency + +Select how many nodes can be upgraded at the same time. This can be a fixed number or a percentage. + +#### Worker Concurrency + +Select how many nodes can be upgraded at the same time. This can be a fixed number or a percentage. + +#### Drain Nodes (Control Plane) + +Option to remove all pods from the node prior to upgrading. + +#### Drain Nodes (Worker Nodes) + +Option to remove all pods from the node prior to upgrading. + +### Advanced + +Option to set kubelet options for different nodes. For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). + +# Cluster Config File + +Instead of using the Rancher UI forms to choose Kubernetes options for the cluster, advanced users can create a K3s config file. Using a config file allows you to set any of the [options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) available in a K3s installation. + +To edit a K3s config file directly from the Rancher UI, click **Edit as YAML**.
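The UI fields above map to standard K3s server options. As a rough, non-authoritative illustration of the kind of options a K3s config file can carry (option names come from the K3s install options reference; all values are examples):

```yaml
# Illustrative K3s server options only; consult the K3s install options reference for the full list
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16
cluster-dns: 10.43.0.10
cluster-domain: cluster.local
service-node-port-range: 30000-32767
secrets-encryption: true
disable:
  - traefik
tls-san:
  - rancher.example.com
```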
+ + diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md new file mode 100644 index 0000000000..cffc95e467 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -0,0 +1,378 @@ +--- +title: RKE Cluster Configuration Reference +shortTitle: RKE Cluster Configuration +weight: 1 +--- + +When Rancher installs Kubernetes, it uses [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) or [RKE2](https://docs.rke2.io/) as the Kubernetes distribution. + +This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. + +- [Overview](#overview) +- [Editing Clusters with a Form in the Rancher UI](#editing-clusters-with-a-form-in-the-rancher-ui) +- [Editing Clusters with YAML](#editing-clusters-with-yaml) +- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) + - [Kubernetes Version](#kubernetes-version) + - [Network Provider](#network-provider) + - [Project Network Isolation](#project-network-isolation) + - [Kubernetes Cloud Providers](#kubernetes-cloud-providers) + - [Private Registries](#private-registries) + - [Authorized Cluster Endpoint](#authorized-cluster-endpoint) + - [Node Pools](#node-pools) + - [NGINX Ingress](#nginx-ingress) + - [Metrics Server Monitoring](#metrics-server-monitoring) + - [Pod Security Policy Support](#pod-security-policy-support) + - [Docker Version on Nodes](#docker-version-on-nodes) + - [Docker Root Directory](#docker-root-directory) + - [Default Pod Security Policy](#default-pod-security-policy) + - [Node Port Range](#node-port-range) + - [Recurring etcd Snapshots](#recurring-etcd-snapshots) + - [Agent Environment Variables](#agent-environment-variables) + - [Updating ingress-nginx](#updating-ingress-nginx) +- [RKE Cluster Config File Reference](#rke-cluster-config-file-reference) + - [Config File Structure in Rancher](#config-file-structure-in-rancher) + - [Default DNS Provider](#default-dns-provider) +- [Rancher Specific Parameters in YAML](#rancher-specific-parameters-in-yaml) + - [docker_root_dir](#docker_root_dir) + - [enable_cluster_monitoring](#enable_cluster_monitoring) + - [enable_network_policy](#enable_network_policy) + - [local_cluster_auth_endpoint](#local_cluster_auth_endpoint) + - [Custom Network Plug-in](#custom-network-plug-in) + +# Overview + +You can configure the Kubernetes options one of two ways: + +- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +The RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) + +In [clusters launched by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can edit any of the remaining options that follow. 
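To make the nesting concrete, here is a minimal sketch of how a few RKE options sit under the `rancher_kubernetes_engine_config` directive when a cluster is edited as YAML; option names follow the RKE `cluster.yml` reference, and the values are only examples:

```yaml
rancher_kubernetes_engine_config:
  # Same option names as a standalone RKE cluster.yml, nested under the directive above
  network:
    plugin: canal
  services:
    kube-api:
      service_node_port_range: 30000-32767
    etcd:
      backup_config:
        interval_hours: 12
        retention: 6
```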
For an example of RKE config file syntax, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). + +The forms in the Rancher UI don't include all advanced options for configuring RKE. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +# Editing Clusters with a Form in the Rancher UI + +To edit your cluster, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster you want to configure and click **⋮ > Edit Config**. + + +# Editing Clusters with YAML + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +RKE clusters (also called RKE1 clusters) are edited differently than RKE2 and K3s clusters. + +To edit an RKE config file directly from the Rancher UI, + +1. Click **☰ > Cluster Management**. +1. Go to the RKE cluster you want to configure and click **⋮ > Edit Config**. This takes you to the RKE configuration form. Note: Because cluster provisioning changed in Rancher 2.6, the **⋮ > Edit as YAML** option can be used for configuring RKE2 clusters, but it can't be used for editing RKE1 configuration. +1. In the configuration form, scroll down and click **Edit as YAML**. +1. Edit the RKE options under the `rancher_kubernetes_engine_config` directive. + +# Configuration Options in the Rancher UI + +:::tip + +Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +::: + +### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). + +### Network Provider + +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ](../../../faq/container-network-interface-providers.md). + +:::caution + +After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications.
+ +::: + +Out of the box, Rancher is compatible with the following network providers: + +- [Canal](https://github.com/projectcalico/canal) +- [Flannel](https://github.com/coreos/flannel#flannel) +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Weave](https://github.com/weaveworks/weave) + +:::note Notes on Weave: + +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File](#rke-cluster-config-file-reference) and the [Weave Network Plug-in Options](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). + +::: + +### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### Kubernetes Cloud Providers + +You can configure a [Kubernetes cloud provider](../../../pages-for-subheaders/set-up-cloud-providers.md). If you want to use dynamically provisioned [volumes and storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. + +:::note + +If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. + +::: + +### Private Registries + +The cluster-level private registry configuration is only used for provisioning clusters. + +There are two main ways to set up private registries in Rancher: by setting up the [global default registry](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. + +The private registry configuration option tells Rancher where to pull the [system images](https://rancher.com/docs/rke/latest/en/config-options/system-images/) or [addon images](https://rancher.com/docs/rke/latest/en/config-options/add-ons/) that will be used in your cluster. + +- **System images** are components needed to maintain the Kubernetes cluster. 
+- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. + +For more information on setting up a private registry for components applied during the provisioning of the cluster, see the [RKE documentation on private registries](https://rancher.com/docs/rke/latest/en/config-options/private-registries/). + +Rancher v2.6 introduced the ability to configure [ECR registries for RKE clusters](https://rancher.com/docs/rke/latest/en/config-options/private-registries/#amazon-elastic-container-registry-ecr-private-registry-setup). + +### Authorized Cluster Endpoint + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. + +:::note + +The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](../../../pages-for-subheaders/rancher-manager-architecture.md#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. + +::: + +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +### Node Pools + +For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +### NGINX Ingress + +If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. + +### Metrics Server Monitoring + +Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). + +Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. + +### Pod Security Policy Support + +Enables [pod security policies](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. + +You must have an existing Pod Security Policy configured before you can use this option. + +### Docker Version on Nodes + +Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. + +If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. 
+ +For details on which Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +### Docker Root Directory + +If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), specify the correct Docker Root Directory in this option. + +### Default Pod Security Policy + +If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. + +### Node Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. + +### Recurring etcd Snapshots + +Option to enable or disable [recurring etcd snapshots](https://rancher.com/docs/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). + +### Agent Environment Variables + +Option to set environment variables for [rancher agents](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. + +### Updating ingress-nginx + +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. + +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. + + + +# RKE Cluster Config File Reference + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available](https://rancher.com/docs/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. + +For the complete reference for configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +### Config File Structure in Rancher + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,](https://rancher.com/docs/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +
+ Example Cluster Config File + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +
+ +### Default DNS provider + +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider](https://rancher.com/docs/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. + +| Rancher version | Kubernetes version | Default DNS provider | +|-------------|--------------------|----------------------| +| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | +| v2.2.5 and higher | v1.13.x and lower | kube-dns | +| v2.2.4 and lower | any | kube-dns | + +# Rancher Specific Parameters in YAML + +Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): + +### docker_root_dir + +See [Docker Root Directory](#docker-root-directory). + +### enable_cluster_monitoring + +Option to enable or disable [Cluster Monitoring](../../../pages-for-subheaders/monitoring-and-alerting.md). + +### enable_network_policy + +Option to enable or disable Project Network Isolation. + +Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### local_cluster_auth_endpoint + +See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). + +Example: + +```yaml +local_cluster_auth_endpoint: + enabled: true + fqdn: "FQDN" + ca_certs: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +### Custom Network Plug-in + +You can add a custom network plug-in by using the [user-defined add-on functionality](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md new file mode 100644 index 0000000000..fa11f57214 --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration.md @@ -0,0 +1,200 @@ +--- +title: RKE2 Cluster Configuration Reference +shortTitle: RKE2 Cluster Configuration +weight: 5 +--- + +This section covers the configuration options that are available in Rancher for a new or existing RKE2 Kubernetes cluster. + +# Overview + +You can configure the Kubernetes options in one of the two following ways: + +- [Rancher UI](#configuration-options-in-the-rancher-ui): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. 
+- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set any of the [options](https://docs.rke2.io/install/install_options/install_options) available in an RKE2 installation. + +# Configuration Options in the Rancher UI + +:::tip + +Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE2 cluster configuration file in YAML. For the complete reference of configurable options for RKE2 Kubernetes clusters in YAML, see the [RKE2 documentation.](https://docs.rke2.io/install/install_options/install_options/) + +::: + +### Basics +#### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). + +#### Container Network Provider + +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. + +:::caution + +After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications. + +::: + +Out of the box, Rancher is compatible with the following network providers: + +- [Canal](https://github.com/projectcalico/canal) +- [Cilium](https://cilium.io/)* +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Multus](https://github.com/k8snetworkplumbingwg/multus-cni) + +\* When using [project network isolation](#project-network-isolation) in the [Cilium CNI](../../../faq/container-network-interface-providers.md#cilium), it is possible to enable cross-node ingress routing. Click the [CNI provider docs](../../../faq/container-network-interface-providers.md#ingress-routing-across-nodes-in-cilium) to learn more. + +For more details on the different networking providers and how to configure them, please view our [RKE2 documentation](https://docs.rke2.io/install/network_options/). + +##### Dual-stack Networking + +[Dual-stack](https://docs.rke2.io/install/network_options/#dual-stack-configuration) networking is supported for all CNI providers. To configure RKE2 in dual-stack mode, set valid IPv4/IPv6 CIDRs for your [Cluster CIDR](#cluster-cidr) and/or [Service CIDR](#service-cidr). + +###### Additional Configuration {#dual-stack-additional-config} + +When using `cilium` or `multus,cilium` as your container network interface provider, ensure the **Enable IPv6 Support** option is also enabled. + +#### Cloud Provider + +You can configure a [Kubernetes cloud provider](../../../pages-for-subheaders/set-up-cloud-providers.md). 
If you want to use dynamically provisioned [volumes and storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. + +:::note + +If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference [this documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. + +::: + +#### Default Pod Security Policy + +Choose the default [pod security policy](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. Please refer to the [RKE2 documentation](https://docs.rke2.io/security/policies/) on the specifications of each available policy. + +#### Worker CIS Profile + +Select a [CIS benchmark](../../../pages-for-subheaders/cis-scan-guides.md) to validate the system configuration against. + +#### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +Project network isolation is available if you are using any RKE2 network plugin that supports the enforcement of Kubernetes network policies, such as Canal. + +#### SELinux + +Option to enable or disable [SELinux](https://docs.rke2.io/security/selinux) support. + +#### CoreDNS + +By default, [CoreDNS](https://coredns.io/) is installed as the DNS provider. If you do not install CoreDNS, you must install an alternate DNS provider yourself. Refer to the [RKE2 documentation](https://docs.rke2.io/networking/#coredns) for additional CoreDNS configurations. + +#### NGINX Ingress + +If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. Refer to the [RKE2 documentation](https://docs.rke2.io/networking/#nginx-ingress-controller) for additional configuration options. + +#### Metrics Server + +Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). + +Each cloud provider capable of launching a cluster using RKE2 can collect metrics and monitor your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. + +### Add-On Config + +Additional Kubernetes manifests, managed as an [Add-on](https://kubernetes.io/docs/concepts/cluster-administration/addons/), to apply to the cluster on startup. Refer to the [RKE2 documentation](https://docs.rke2.io/helm/#automatically-deploying-manifests-and-helm-charts) for details.
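The Add-On Config field accepts plain Kubernetes YAML. For illustration, the manifest below uses RKE2's `HelmChart` resource to deploy a chart on startup; the chart name and repository URL are placeholders, not recommendations.

```yaml
# Illustrative add-on manifest; replace the repo and chart with your own.
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: example-app            # placeholder name
  namespace: kube-system       # RKE2's Helm controller watches this namespace
spec:
  repo: https://charts.example.com   # placeholder chart repository
  chart: example-app
  targetNamespace: default
```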
+ +### Agent Environment Vars + +Option to set environment variables for [Rancher agents](https://rancher.com/docs/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. Refer to the [RKE2 documentation](https://docs.rke2.io/install/install_options/linux_agent_config/) for more details. + +### etcd + +#### Automatic Snapshots + +Option to enable or disable recurring etcd snapshots. If enabled, users have the option to configure the frequency of snapshots. For details, refer to the [RKE2 documentation](https://docs.rke2.io/backup_restore/#creating-snapshots). Note that with RKE2, snapshots are stored on each etcd node. This varies from RKE1 which only stores one snapshot per cluster. + +#### Metrics + +Option to choose whether to expose etcd metrics to the public or only within the cluster. + +### Networking + +#### Cluster CIDR + +IPv4 and/or IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16). + +##### Dual-stack Networking + +To configure [dual-stack](https://docs.rke2.io/install/network_options/#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. + +[Additional configuration](#dual-stack-additional-config) is required when using `cilium` or `multus,cilium` as your [container network](#container-network) interface provider. + +#### Service CIDR + +IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16). + +##### Dual-stack Networking + +To configure [dual-stack](https://docs.rke2.io/install/network_options/#dual-stack-configuration) mode, enter a valid IPv4/IPv6 CIDR. For example `10.42.0.0/16,2001:cafe:42:0::/56`. + +[Additional configuration](#dual-stack-additional-config) is required when using `cilium ` or `multus,cilium` as your [container network](#container-network) interface provider. + +#### Cluster DNS + +IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10). + +#### Cluster Domain + +Select the domain for the cluster. The default is `cluster.local`. + +#### NodePort Service Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). The default is `30000-32767`. + +#### TLS Alternate Names + +Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert. + +#### Authorized Cluster Endpoint + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. + +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. 
For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +### Registries + +Select the image repository to pull Rancher images from. For more details and configuration options, see the [RKE2 documentation](https://docs.rke2.io/install/containerd_registry_configuration/). + +### Upgrade Strategy + +#### Control Plane Concurrency + +Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. + +#### Worker Concurrency + +Select how many nodes can be upgraded at the same time. Can be a fixed number or percentage. + +#### Drain Nodes (Control Plane) + +Option to remove all pods from the node prior to upgrading. + +#### Drain Nodes (Worker Nodes) + +Option to remove all pods from the node prior to upgrading. + +### Advanced + +Option to set kubelet options for different nodes. For available options, refer to the [Kubernetes documentation](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). + +# Cluster Config File + +Instead of using the Rancher UI forms to choose Kubernetes options for the cluster, advanced users can create an RKE2 config file. Using a config file allows you to set any of the [options](https://docs.rke2.io/install/install_options/install_options) available in an RKE2 installation. + +To edit an RKE2 config file directly from the Rancher UI, click **Edit as YAML**. diff --git a/docs/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md b/docs/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md new file mode 100644 index 0000000000..39413de95b --- /dev/null +++ b/docs/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md @@ -0,0 +1,54 @@ +--- +title: Rancher Agent Options +weight: 2500 +--- + +Rancher deploys an agent on each node to communicate with the node. This page describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes](../../../../pages-for-subheaders/use-existing-nodes.md) and add the options to the generated `docker run` command when adding a node.
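For illustration only, a registration command with several of these options appended might look like the following; the image tag, server URL, token, and checksum are placeholders that your own Rancher installation generates.

```plain
docker run -d --privileged --restart=unless-stopped --net=host \
  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
  rancher/rancher-agent:v2.6.0 \
  --server https://rancher.example.com \
  --token <REGISTRATION_TOKEN> \
  --ca-checksum <CHECKSUM> \
  --worker \
  --label foo=bar \
  --taints key=value:NoSchedule
```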
+ +For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.](../../../../pages-for-subheaders/rancher-manager-architecture.md#3-node-agents) + +## General options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | +| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | +| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | +| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | +| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | +| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. (`--taints key=value:effect`) | + +## Role options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | +| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | +| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | +| `--worker` | `WORKER=true` | Apply the role `worker` to the node | + +## IP address options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | +| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | + +### Dynamic IP address options + +For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. 
+ +| Value | Example | Description | +| ---------- | -------------------- | ----------- | +| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | +| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | +| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | +| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | +| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | +| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | +| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | +| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | +| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | +| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | +| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | +| `packetpublic` | `--address packetpublic` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git a/docs/reference-guides/configure-openldap/openldap-config-reference.md b/docs/reference-guides/configure-openldap/openldap-config-reference.md new file mode 100644 index 0000000000..1a02ee9b23 --- /dev/null +++ b/docs/reference-guides/configure-openldap/openldap-config-reference.md @@ -0,0 +1,86 @@ +--- +title: OpenLDAP Configuration Reference +weight: 2 +--- + +This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. + +For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) + +> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users](../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users).
+ +- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) +- [OpenLDAP server configuration](#openldap-server-configuration) +- [User/group schema configuration](#user-group-schema-configuration) + - [User schema configuration](#user-schema-configuration) + - [Group schema configuration](#group-schema-configuration) + +## Background: OpenLDAP Authentication Flow + +1. When a user attempts to log in with LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. +2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. +3. Once the user has been found, they are authenticated with another LDAP bind request using the user's DN and provided password. +4. Once authentication succeeds, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. + +# OpenLDAP Server Configuration + +You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure you have the CA certificate (concatenated with any intermediate certificates) at hand in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +
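For example, the following `ldapsearch` query (the hostname and DNs are placeholders) binds with the service account and prints a single user entry so you can verify the search base and attribute names before filling in the form:

```plain
ldapsearch -x -H ldap://ldap.example.com:389 \
  -D "cn=admin,dc=example,dc=com" -W \
  -b "ou=people,dc=example,dc=com" "(uid=jsmith)"
```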
**OpenLDAP Server Parameters**
+ +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the OpenLDAP server | +| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | +| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. | +| Service Account Password | The password for the service account. | +| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| + +# User/Group Schema Configuration + +If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. + +Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. + +If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +### User Schema Configuration + +The table below details the parameters for the user schema configuration. + +
**User Schema Configuration Parameters**
+ +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | +| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | +| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | +| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | + +### Group Schema Configuration + +The table below details the parameters for the group schema configuration. + +
**Group Schema Configuration Parameters**
+ +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | +| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/docs/reference-guides/installation-references/feature-flags.md b/docs/reference-guides/installation-references/feature-flags.md new file mode 100644 index 0000000000..a3b04a3335 --- /dev/null +++ b/docs/reference-guides/installation-references/feature-flags.md @@ -0,0 +1,37 @@ +--- +title: Feature Flags +--- + +Feature flags were introduced to allow you to try experimental features that are not enabled by default. + +To learn about feature values and how to enable features, refer [here](../../pages-for-subheaders/enable-experimental-features.md). + +The following is a list of the feature flags available in Rancher: + +- `harvester`: This feature flag is available starting in v2.6.1. It is used to manage access to the Virtualization Management page where users can navigate directly to Harvester clusters and access the Harvester UI. For more information, see [this page](../explanations/integrations-in-rancher/harvester.md#feature-flag/). +- `rke2`: Used to enable the ability to provision RKE2 clusters. By default, this feature flag is enabled, which allows users to attempt to provision these type of clusters. +- `fleet`: The previous `fleet` feature flag is now required to be enabled as the Fleet capabilities are leveraged within the new provisioning framework. If you had this feature flag disabled in earlier versions, upon upgrading to Rancher v2.6, the flag will automatically be enabled. See this [page](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) for more information. +- `continuous-delivery`: In Rancher v2.5.x, Fleet came with a GitOps feature that could not be disabled separately from Fleet. In Rancher v2.6, the `continuous-delivery` feature flag was introduced to allow the GitOps feature of Fleet to be disabled. For more information, see [this page.](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/continuous-delivery.md). +- `legacy`: There are a set of features from previous versions that are slowly being phased out of Rancher for newer iterations of the feature. This is a mix of deprecated features as well as features that will eventually be moved to newer variations in Rancher. By default, this feature flag is disabled for new installations. 
If you are upgrading from a previous version, this feature flag would be enabled. +- `token-hashing`: Used to enable new token-hashing feature. Once enabled, existing tokens will be hashed and all new tokens will be hashed automatically using the SHA256 algorithm. Once a token is hashed it cannot be undone. Once this feature flag is enabled, it cannot be disabled. See [hashing of tokens](../reference-guides/about-the-api/api-tokens.md) for more information. +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md). In other words, it enables types for storage providers and provisioners that are not enabled by default. +- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules,](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md) which are traffic management features of Istio. +- `multi-cluster-management`: Used for multi-cluster provisioning and management of Kubernetes clusters. This feature flag can only be set at install time and not changed afterwards. + +The below table shows the availability and default value for feature flags in Rancher: + +| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | +| ----------------------------- | ------------- | ------------ | --------------- |---| +| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | +| `istio-virtual-service-ui` | `true` | GA* | v2.3.2 | | +| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | +| `fleet` | `true` | GA* | v2.5.0 | | +| `fleet` | `true` | Can no longer be disabled | v2.6.0 | N/A | +| `continuous-delivery` | `true` | GA* | v2.6.0 | | +| `token-hashing` | `false` for new installs, `true` for upgrades | GA* | v2.6.0 | | +| `legacy` | `false` for new installs, `true` for upgrades | GA* | v2.6.0 | | +| `multi-cluster-management` | `false` | GA* | v2.5.0 | | +| `harvester` | `true` | Experimental | v2.6.1 | | +| `rke2` | `true` | Experimental | v2.6.0 | | + +\* Generally Available. This feature is included in Rancher and it is not experimental. \ No newline at end of file diff --git a/docs/reference-guides/installation-references/helm-chart-options.md b/docs/reference-guides/installation-references/helm-chart-options.md new file mode 100644 index 0000000000..fadc2f7a74 --- /dev/null +++ b/docs/reference-guides/installation-references/helm-chart-options.md @@ -0,0 +1,285 @@ +--- +title: Rancher Helm Chart Options +weight: 1 +aliases: + - /rancher/v2.6/en/installation/resources/chart-options +--- + +This page is a configuration reference for the Rancher Helm chart. 
+ +For help choosing a Helm chart version, refer to [this page.](../../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) + +For information on enabling experimental features, refer to [this page.](../../pages-for-subheaders/enable-experimental-features.md) + +- [Common Options](#common-options) +- [Advanced Options](#advanced-options) +- [API Audit Log](#api-audit-log) +- [Setting Extra Environment Variables](#setting-extra-environment-variables) +- [TLS Settings](#tls-settings) +- [Customizing your Ingress](#customizing-your-ingress) +- [HTTP Proxy](#http-proxy) +- [Additional Trusted CAs](#additional-trusted-cas) +- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) +- [External TLS Termination](#external-tls-termination) + +### Common Options + +| Option | Default Value | Description | +| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | +| `bootstrapPassword` | " " | `string` - Set the [bootstrap password](#bootstrap-password) for the first admin user. After logging in, the admin will need to reset their password. A randomly generated bootstrap password is used if this value is not set. +| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | +| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | +| `letsEncrypt.email` | " " | `string` - Your email address | +| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | +| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | + +
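As an example, an installation that sets several of the common options at once might look like the following; the hostname and email address are placeholders.

```plain
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set bootstrapPassword=<PASSWORD_FOR_FIRST_ADMIN> \
  --set ingress.tls.source=letsEncrypt \
  --set [email protected] \
  --set letsEncrypt.environment=production
```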
+ +### Advanced Options + +| Option | Default Value | Description | +| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | +| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. Consider using the `restrictedAdmin` option to prevent users from modifying the local cluster._ | +| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | +| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | +| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) level. 0 is off. [0-3] | +| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | +| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs | +| `certmanager.version` | "" | `string` - set cert-manager compatibility | +| `debug` | false | `bool` - set debug flag on rancher server | +| `extraEnv` | [] | `list` - set additional environment variables for Rancher | +| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | +| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. | +| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | +| `ingress.enabled` | true | When set to false, Helm will not install a Rancher ingress. Set the option to false to deploy your own ingress. | +| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. | | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma separated list of hostnames or ip address not to use the proxy | | +| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | +| `rancherImage` | "rancher/rancher" | `string` - rancher image source | +| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | +| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | +| `replicas` | 3 | `int` - Number of Rancher server replicas. Setting to -1 will dynamically choose 1, 2, or 3 based on the number of available nodes in the cluster. 
| +| `resources` | {} | `map` - rancher pod resource requests & limits | +| `restrictedAdmin` | `false` | `bool` - When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#restricted-admin) | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system container images, e.g., http://registry.example.com/ | +| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | + + + +### Bootstrap Password + +When Rancher starts for the first time, a password is randomly generated for the first admin user. When the admin first logs in to Rancher, the UI shows commands that can be used to retrieve the bootstrap password. The admin needs to run those commands and log in with the bootstrap password. Then Rancher gives the admin an opportunity to reset the password. + +If you want to use a specific bootstrap password instead of a randomly generated one, provide the password. + +```plain +--set bootstrapPassword="rancher" +``` + +The password, whether provided or generated, will be stored in a Kubernetes secret. After Rancher is installed, the UI will show instructions for how to retrieve the password using kubectl: + +``` +kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}' +``` + +### API Audit Log + +Enabling the [API Audit Log](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) + +You can collect this log as you would any container log. Enable [logging](../../pages-for-subheaders/logging.md) for the `System` Project on the Rancher server cluster. + +```plain +--set auditLog.level=1 +``` + +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable [logging](../../pages-for-subheaders/logging.md) for the Rancher server cluster or System Project. + +Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. + +### Setting Extra Environment Variables + +You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +### TLS Settings + +When you install Rancher inside of a Kubernetes cluster, TLS is offloaded at the cluster's ingress controller. 
The possible TLS settings depend on the used ingress controller. + +See [TLS settings](tls-settings.md) for more information and options. + +### Import `local` Cluster + +By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. + +:::caution + +If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. + +::: + +If this is a concern in your environment you can set this option to "false" on your initial install. + +This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. + +```plain +--set addLocal="false" +``` + +### Customizing your Ingress + +To customize or use a different ingress with Rancher server you can set your own Ingress annotations. + +Example on setting a custom certificate issuer: + +```plain +--set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name +``` + +Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. + +```plain +--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' +``` + +### HTTP Proxy + +Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. + +Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. + +```plain +--set proxy="http://:@:/" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" +``` + +### Additional Trusted CAs + +If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. + +```plain +--set additionalTrustedCAs=true +``` + +Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. + +```plain +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem +``` + +### Private Registry and Air Gap Installs + +For details on installing Rancher with a private registry, see the [air gap installation docs.](../../pages-for-subheaders/air-gapped-helm-cli-install.md) + +# External TLS Termination + +We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. + +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. 
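For example, an install that offloads TLS to an external load balancer (the hostname is a placeholder) might use:

```plain
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set tls=external
```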
+ +:::note + +If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](../../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to add the CA cert for Rancher. + +::: + +Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. + +### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: 'true' +``` + +### Required Headers + +- `Host` +- `X-Forwarded-Proto` +- `X-Forwarded-Port` +- `X-Forwarded-For` + +### Recommended Timeouts + +- Read Timeout: `1800 seconds` +- Write Timeout: `1800 seconds` +- Connect Timeout: `30 seconds` + +### Health Checks + +Rancher will respond `200` to health checks on the `/healthz` endpoint. + +### Example NGINX config + +This NGINX configuration is tested on NGINX 1.14. + +:::caution + +This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +::: + +- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server IP_NODE_1:80; + server IP_NODE_2:80; + server IP_NODE_3:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
+ proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` diff --git a/content/rancher/v2.6/en/installation/resources/tls-settings/_index.md b/docs/reference-guides/installation-references/tls-settings.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/tls-settings/_index.md rename to docs/reference-guides/installation-references/tls-settings.md diff --git a/docs/reference-guides/kubernetes-concepts.md b/docs/reference-guides/kubernetes-concepts.md new file mode 100644 index 0000000000..8b8c007ab0 --- /dev/null +++ b/docs/reference-guides/kubernetes-concepts.md @@ -0,0 +1,76 @@ +--- +title: Kubernetes Concepts +weight: 4 +--- + +This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified overview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) + +This section covers the following topics: + +- [About Docker](#about-docker) +- [About Kubernetes](#about-kubernetes) +- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) +- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) + - [etcd Nodes](#etcd-nodes) + - [Controlplane Nodes](#controlplane-nodes) + - [Worker Nodes](#worker-nodes) +- [About Helm](#about-helm) + +# About Docker + +Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. + +:::note + +Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. + +::: + +# About Kubernetes + +Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. + +# What is a Kubernetes Cluster? + +A cluster is a group of computers that work together as a single system. + +A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. + +# Roles for Nodes in Kubernetes Clusters + +Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. + +A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. + +### etcd Nodes + +Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. 
+ +The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. + +The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. + +Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. + +Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. + +### Controlplane Nodes + +Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although three or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. + +### Worker Nodes + +Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: + +- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. +- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. + +Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads](../pages-for-subheaders/workloads-and-pods.md). + +# About Helm + +For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. + +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). 
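As a minimal sketch of what this looks like in practice (the chart repository name and hostname are placeholders; refer to the installation guides for the full procedure), a high-availability Rancher install is a chart plus a handful of values:

```plain
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update
helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set replicas=3
```

Changing a value such as `replicas` reconfigures the deployment without editing any static manifests.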
+ +For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) diff --git a/docs/reference-guides/monitoring-v2-configuration/examples.md b/docs/reference-guides/monitoring-v2-configuration/examples.md new file mode 100644 index 0000000000..a9149e462d --- /dev/null +++ b/docs/reference-guides/monitoring-v2-configuration/examples.md @@ -0,0 +1,25 @@ +--- +title: Examples +weight: 400 +--- + + +### ServiceMonitor + +An example ServiceMonitor custom resource can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + +### PodMonitor + +An example PodMonitor can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml) An example Prometheus resource that refers to it can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml) + +### PrometheusRule + +For users who are familiar with Prometheus, a PrometheusRule contains the alerting and recording rules that you would normally place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). + +For a more fine-grained application of PrometheusRules within your cluster, the ruleSelector field on a Prometheus resource allows you to select which PrometheusRules should be loaded onto Prometheus based on the labels attached to the PrometheusRules resources. + +An example PrometheusRule is on [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) + +### Alertmanager Config + +For an example configuration, refer to [this section.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md#example-alertmanager-config) \ No newline at end of file diff --git a/docs/reference-guides/monitoring-v2-configuration/helm-chart-options.md b/docs/reference-guides/monitoring-v2-configuration/helm-chart-options.md new file mode 100644 index 0000000000..17cd311bee --- /dev/null +++ b/docs/reference-guides/monitoring-v2-configuration/helm-chart-options.md @@ -0,0 +1,77 @@ +--- +title: Helm Chart Options +weight: 8 +--- + +- [Configuring Resource Limits and Requests](#configuring-resource-limits-and-requests) +- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) +- [Additional Scrape Configurations](#additional-scrape-configurations) +- [Configuring Applications Packaged within Monitoring V2](#configuring-applications-packaged-within-monitoring-v2) +- [Increase the Replicas of Alertmanager](#increase-the-replicas-of-alertmanager) +- [Configuring the Namespace for a Persistent Grafana Dashboard](#configuring-the-namespace-for-a-persistent-grafana-dashboard) + + +# Configuring Resource Limits and Requests + +The resource requests and limits can be configured when installing `rancher-monitoring`. 
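For example, the Prometheus resources from the table below could be overridden with a values file at install or upgrade time. This is only a sketch; the exact keys come from the chart's `values.yaml`, so verify them against the chart version you are deploying:

```yaml
prometheus:
  prometheusSpec:
    resources:
      requests:
        cpu: 750m
        memory: 1750Mi
      limits:
        cpu: 1000m
        memory: 2500Mi
```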
The default values are in the [values.yaml](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/values.yaml) in the `rancher-monitoring` Helm chart.

The default values in the table below are the minimum required resource limits and requests.

| Resource Name | Memory Limit | CPU Limit | Memory Request | CPU Request |
| ------------- | ------------ | ----------- | ---------------- | ------------------ |
| alertmanager | 500Mi | 1000m | 100Mi | 100m |
| grafana | 200Mi | 200m | 100Mi | 100m |
| kube-state-metrics subchart | 200Mi | 100m | 130Mi | 100m |
| prometheus-node-exporter subchart | 50Mi | 200m | 30Mi | 100m |
| prometheusOperator | 500Mi | 200m | 100Mi | 100m |
| prometheus | 2500Mi | 1000m | 1750Mi | 750m |
| **Total** | **3950Mi** | **2700m** | **2210Mi** | **1250m** |

At least 50Gi storage is recommended.


# Trusted CA for Notifiers

If you need to add a trusted CA to your notifier, follow these steps:

1. Create the `cattle-monitoring-system` namespace.
1. Add your trusted CA secret to the `cattle-monitoring-system` namespace.
1. Deploy or upgrade the `rancher-monitoring` Helm chart. In the chart options, reference the secret in **Alerting > Additional Secrets**.

**Result:** The default Alertmanager custom resource will have access to your trusted CA.


# Additional Scrape Configurations

If the scrape configuration you need cannot currently be expressed as a ServiceMonitor or PodMonitor, you can provide an `additionalScrapeConfigSecret` when deploying or upgrading `rancher-monitoring`.

A [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) specifies a set of targets and parameters describing how to scrape them. In the general case, one scrape configuration specifies a single job.

An example of where this might be used is with Istio. For more information, see [this section.](../../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md)


# Configuring Applications Packaged within Monitoring v2

Monitoring V2 deploys kube-state-metrics and node-exporter alongside Prometheus; node-exporter is deployed as a DaemonSet. In the Monitoring V2 Helm chart's `values.yaml`, each of these applications is packaged as a subchart. Grafana is also deployed, and it is not managed by Prometheus.

Each subchart (kube-state-metrics, for example) exposes many more values than the ones surfaced in the top-level chart. In the top-level chart, however, you can add values that override the values defined in a subchart.

### Increase the Replicas of Alertmanager

As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret.
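As a hedged illustration (assuming the chart passes this value through to the underlying Alertmanager spec, which you should confirm against the chart's `values.yaml`), the replica count can be raised at install or upgrade time:

```plain
--set alertmanager.alertmanagerSpec.replicas=2
```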
For more information on the Alertmanager Config Secret, refer to [this section.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md#multiple-alertmanager-replicas) + +### Configuring the Namespace for a Persistent Grafana Dashboard + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: + +``` +grafana.sidecar.dashboards.searchNamespace=ALL +``` + +Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. \ No newline at end of file diff --git a/docs/reference-guides/monitoring-v2-configuration/receivers.md b/docs/reference-guides/monitoring-v2-configuration/receivers.md new file mode 100644 index 0000000000..24e0b337d4 --- /dev/null +++ b/docs/reference-guides/monitoring-v2-configuration/receivers.md @@ -0,0 +1,319 @@ +--- +title: Receiver Configuration +shortTitle: Receivers +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#3-how-alertmanager-works) + +::: + +- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui) +- [Receiver Configuration](#receiver-configuration) + - [Slack](#slack) + - [Email](#email) + - [PagerDuty](#pagerduty) + - [Opsgenie](#opsgenie) + - [Webhook](#webhook) + - [Custom](#custom) + - [Teams](#teams) + - [SMS](#sms) +- [Configuring Multiple Receivers](#configuring-multiple-receivers) +- [Example Alertmanager Config](examples.md#example-alertmanager-config) +- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts) +- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) + +# Creating Receivers in the Rancher UI + +:::note Prerequisites: + +- The monitoring application needs to be installed. +- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise you will only be able to make changes based on modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to what kinds of Alertmanager Configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement. + +::: + +To create notification receivers in the Rancher UI, + + + + +1. Go to the cluster where you want to create receivers. Click **Monitoring -> Alerting -> AlertManagerConfigs**. +1. Ciick **Create**. +1. Click **Add Receiver**. +1. Enter a **Name** for the receiver. +1. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. +1. Click **Create**. + + + + +1. Go to the cluster where you want to create receivers. 
Click **Monitoring** and click **Receiver**. +2. Enter a name for the receiver. +3. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. +4. Click **Create**. + + + + +**Result:** Alerts can be configured to send notifications to the receiver(s). + +# Receiver Configuration + +The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) + +### Native vs. Non-native Receivers + +By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI. + +For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI. + +Currently the Rancher Alerting Drivers app provides access to the following integrations: +- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver +- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver + +The following types of receivers can be configured in the Rancher UI: + +- Slack +- Email +- PagerDuty +- Opsgenie +- Webhook +- Custom +- Teams +- SMS + +The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. + +# Slack + +| Field | Type | Description | +|------|--------------|------| +| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | +| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | +| Proxy URL | String | Proxy for the webhook notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +# Email + +| Field | Type | Description | +|------|--------------|------| +| Default Recipient Address | String | The email address that will receive notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +SMTP options: + +| Field | Type | Description | +|------|--------------|------| +| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | +| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | +| Use TLS | Bool | Use TLS for encryption. 
| +| Username | String | Enter a username to authenticate with the SMTP server. | +| Password | String | Enter a password to authenticate with the SMTP server. | + +# PagerDuty + +| Field | Type | Description | +|------|------|-------| +| Integration Type | String | `Events API v2` or `Prometheus`. | +| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | +| Proxy URL | String | Proxy for the PagerDuty notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +# Opsgenie + +| Field | Description | +|------|-------------| +| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | +| Proxy URL | Proxy for the Opsgenie notifications. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +Opsgenie Responders: + +| Field | Type | Description | +|-------|------|--------| +| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | +| Send To | String | Id, Name, or Username of the Opsgenie recipient. | + +# Webhook + +| Field | Description | +|-------|--------------| +| URL | Webhook URL for the app of your choice. | +| Proxy URL | Proxy for the webhook notification. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + + + +# Custom + +The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. + +# Teams + +### Enabling the Teams Receiver for Rancher Managed Clusters + +The Teams receiver is not a native receiver and must be enabled before it can be used. You can enable the Teams receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the Teams option selected. + +1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Apps & Marketplace**. +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab. +1. Select the **Teams** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the Teams Receiver + +The Teams receiver can be configured by updating its ConfigMap. For example, the following is a minimal Teams receiver configuration. + +```yaml +[Microsoft Teams] +teams-instance-1: https://your-teams-webhook-url +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). + +Use the example below as the URL where: + +- `ns-1` is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1 +``` + + + +# SMS + +### Enabling the SMS Receiver for Rancher Managed Clusters + +The SMS receiver is not a native receiver and must be enabled before it can be used. 
You can enable the SMS receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the SMS option selected. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to install `rancher-alerting-drivers` and click **Explore**. +1. In the left navigation bar, click +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab +1. Select the **SMS** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the SMS Receiver + +The SMS receiver can be configured by updating its ConfigMap. For example, the following is a minimal SMS receiver configuration. + +```yaml +providers: + telegram: + token: 'your-token-from-telegram' + +receivers: +- name: 'telegram-receiver-1' + provider: 'telegram' + to: + - '123456789' +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). + +Use the example below as the name and URL, where: + +- the name assigned to the receiver, e.g. `telegram-receiver-1`, must match the name in the `receivers.name` field in the ConfigMap, e.g. `telegram-receiver-1` +- `ns-1` in the URL is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +name: telegram-receiver-1 +url http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert +``` + + + + +# Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +It is also possible to send alerts to multiple notification systems. One way is to configure the Receiver using custom YAML, in which case you can add the configuration for multiple notification systems, as long as you are sure that both systems should receive the same messages. + +You can also set up multiple receivers by using the `continue` option for a route, so that the alerts sent to a receiver continue being evaluated in the next level of the routing tree, which could contain another receiver. + + +# Example Alertmanager Configs + +### Slack +To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: + +```yaml +route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 3h + receiver: 'slack-notifications' +receivers: +- name: 'slack-notifications' + slack_configs: + - send_resolved: true + text: '{{ template "slack.rancher.text" . }}' + api_url: +templates: +- /etc/alertmanager/config/*.tmpl +``` + +### PagerDuty +To set up notifications via PagerDuty, use the example below from the [PagerDuty documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) as a guideline. This example sets up a route that captures alerts for a database service and sends them to a receiver linked to a service that will directly notify the DBAs in PagerDuty, while all other alerts will be directed to a default receiver with a different PagerDuty integration key. + +The following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret. 
The `service_key` should be updated to use your PagerDuty integration key and can be found as per the "Integrating with Global Event Routing" section of the PagerDuty documentation. For the full list of configuration options, refer to the [Prometheus documentation](https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config). + +```yaml +route: + group_by: [cluster] + receiver: 'pagerduty-notifications' + group_interval: 5m + routes: + - match: + service: database + receiver: 'database-notifcations' + +receivers: +- name: 'pagerduty-notifications' + pagerduty_configs: + - service_key: 'primary-integration-key' + +- name: 'database-notifcations' + pagerduty_configs: + - service_key: 'database-integration-key' +``` + +# Example Route Config for CIS Scan Alerts + +While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. + +For example, the following example route configuration could be used with a Slack receiver named `test-cis`: + +```yaml +spec: + receiver: test-cis + group_by: +# - string + group_wait: 30s + group_interval: 30s + repeat_interval: 30s + match: + job: rancher-cis-scan +# key: string + match_re: + {} +# key: string +``` + +For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.](../../pages-for-subheaders/cis-scan-guides.md#enabling-alerting-for-rancher-cis-benchmark) + + +# Trusted CA for Notifiers + +If you need to add a trusted CA to your notifier, follow the steps in [this section.](helm-chart-options.md#trusted-ca-for-notifiers) \ No newline at end of file diff --git a/docs/reference-guides/monitoring-v2-configuration/routes.md b/docs/reference-guides/monitoring-v2-configuration/routes.md new file mode 100644 index 0000000000..5b07c72f9f --- /dev/null +++ b/docs/reference-guides/monitoring-v2-configuration/routes.md @@ -0,0 +1,97 @@ +--- +title: Route Configuration +shortTitle: Routes +weight: 5 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The route configuration is the section of the Alertmanager custom resource that controls how the alerts fired by Prometheus are grouped and filtered before they reach the receiver. + +When a Route is changed, the Prometheus Operator regenerates the Alertmanager custom resource to reflect the changes. + +For more information about configuring routes, refer to the [official Alertmanager documentation.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) + +:::note + +This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +::: + +- [Route Restrictions](#route-restrictions) +- [Route Configuration](#route-configuration) + - [Receiver](#receiver) + - [Grouping](#grouping) + - [Matching](#matching) + +# Route Restrictions + +Alertmanager proxies alerts for Prometheus based on its receivers and a routing tree that filters alerts to certain receivers based on labels. + +Alerting drivers proxy alerts for Alertmanager to non-native receivers, such as Microsoft Teams and SMS. + +In the Rancher UI for configuring routes and receivers, you can configure routing trees with one root and then a depth of one more level, for a tree with a depth of two. 
But if you use a `continue` route when configuring Alertmanager directly, you can make the tree deeper. + +Each receiver is for one or more notification providers. So if you know that every alert for Slack should also go to PagerDuty, you can configure both in the same receiver. + +# Route Configuration + +### Note on Labels and Annotations + +Labels should be used for identifying information that can affect the routing of notifications. Identifying information about the alert could consist of a container name, or the name of the team that should be notified. + +Annotations should be used for information that does not affect who receives the alert, such as a runbook url or error message. + + +### Receiver +The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. + +### Grouping + + + + +:::note + +As of Rancher v2.6.5, `Group By` now accepts a list of strings instead of key-value pairs. See the [upstream documentation](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#route) for details. + +::: + +| Field | Default | Description | +|-------|--------------|---------| +| Group By | N/a | List of labels to group by. Labels must not be repeated (unique list). Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list. | +| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | +| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | +| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | + + + + +| Field | Default | Description | +|-------|--------------|---------| +| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | +| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | +| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | +| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | + + + + + + +### Matching + +The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: + +```yaml +match: + [ : , ... ] +``` + +The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: + +```yaml +match_re: + [ : , ... 
] +``` diff --git a/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md new file mode 100644 index 0000000000..a1097240b9 --- /dev/null +++ b/docs/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -0,0 +1,35 @@ +--- +title: ServiceMonitor and PodMonitor Configuration +shortTitle: ServiceMonitors and PodMonitors +weight: 7 +--- + +ServiceMonitors and PodMonitors are both pseudo-CRDs that map the scrape configuration of the Prometheus custom resource. + +These configuration objects declaratively specify the endpoints that Prometheus will scrape metrics from. + +ServiceMonitors are more commonly used than PodMonitors, and we recommend them for most use cases. + +:::note + +This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +::: + +### ServiceMonitors + +This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. + +When a ServiceMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the ServiceMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the ServiceMonitor. + +Any Services in your cluster that match the labels located within the ServiceMonitor `selector` field will be monitored based on the `endpoints` specified on the ServiceMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) provided by Prometheus Operator. + +For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) + +### PodMonitors + +This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. + +When a PodMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the PodMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the PodMonitor. + +Any Pods in your cluster that match the labels located within the PodMonitor `selector` field will be monitored based on the `podMetricsEndpoints` specified on the PodMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitorspec) provided by Prometheus Operator. 
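To make the selector and endpoint relationship concrete, below is a minimal, hypothetical ServiceMonitor. The names, labels, and namespaces are illustrative only; adjust them to match your own Services and your Prometheus resource's selectors:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: default
spec:
  # Scrape any Service whose labels match this selector...
  selector:
    matchLabels:
      app: example-app
  # ...in the listed namespaces...
  namespaceSelector:
    matchNames:
      - default
  # ...on the named Service port, at the given path and interval.
  endpoints:
    - port: http
      path: /metrics
      interval: 30s
```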
diff --git a/content/rancher/v2.6/en/pipelines/concepts/_index.md b/docs/reference-guides/pipelines/concepts.md similarity index 100% rename from content/rancher/v2.6/en/pipelines/concepts/_index.md rename to docs/reference-guides/pipelines/concepts.md diff --git a/docs/reference-guides/pipelines/configure-persistent-data.md b/docs/reference-guides/pipelines/configure-persistent-data.md new file mode 100644 index 0000000000..6c074272a7 --- /dev/null +++ b/docs/reference-guides/pipelines/configure-persistent-data.md @@ -0,0 +1,96 @@ +--- +title: Configuring Persistent Data for Pipeline Components +weight: 600 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +:::note Prerequisites for both parts A and B: + +[Persistent volumes](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) must be available for the cluster. + +::: + +### A. Configuring Persistent Data for Docker Registry + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. + +1. Find the `docker-registry` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the dropdown. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. + +1. Click **Upgrade**. + +### B. Configuring Persistent Data for Minio + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. +1. Go to the `minio` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. 
Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. + +1. Click **Upgrade**. + +**Result:** Persistent storage is configured for your pipeline components. diff --git a/docs/reference-guides/pipelines/example-repositories.md b/docs/reference-guides/pipelines/example-repositories.md new file mode 100644 index 0000000000..02a8d5d8ba --- /dev/null +++ b/docs/reference-guides/pipelines/example-repositories.md @@ -0,0 +1,90 @@ +--- +title: Example Repositories +weight: 500 +--- + +Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: + +- Go +- Maven +- php + +:::note Prerequisites: + +- The example repositories are only available if you have not [configured a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md). +- Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. +- Note that pipelines in Kubernetes 1.21+ are no longer supported. + + 1. In the upper left corner, click **☰ > Global Settings**. + 1. Click **Feature Flags**. + 1. Go to the `legacy` feature flag and click **⋮ > Activate**. + +::: + +To start using these example repositories, + +1. [Enable the example repositories](#1-enable-the-example-repositories) +2. [View the example pipeline](#2-view-the-example-pipeline) +3. [Run the example pipeline](#3-run-the-example-pipeline) + +### 1. Enable the Example Repositories + +By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. In the **Pipelines** tab, click **Configure Repositories**. + + :::note + + Example repositories only display if you haven't fetched your own repos. + + ::: + +1. 
Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**.

**Results:**

- The example repository is enabled to work with a pipeline and is available in the **Pipelines** tab.

- The following workloads are deployed to a new namespace:

  - `docker-registry`
  - `jenkins`
  - `minio`

### 2. View the Example Pipeline

After enabling an example repository, review the pipeline to see how it is set up.

1. In the upper left corner, click **☰ > Cluster Management**.
1. Go to the cluster where you want to configure pipelines and click **Explore**.
1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines.
1. In the left navigation bar, click **Legacy > Project > Pipelines**.
1. In the **Pipelines** tab, click **Configure Repositories**.
1. Find the example repository and select **⋮ > Edit Config**. There are two ways to view the pipeline: in the Rancher UI, click **Edit Config** to view the stages and steps of the pipeline, or click **View/Edit YAML** to see the `./rancher-pipeline.yml` file.

### 3. Run the Example Pipeline

After enabling an example repository, run the pipeline to see how it works.

1. In the upper left corner, click **☰ > Cluster Management**.
1. Go to the cluster where you want to configure pipelines and click **Explore**.
1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines.
1. In the left navigation bar, click **Legacy > Project > Pipelines**.
1. In the **Pipelines** tab, go to the pipeline and select **⋮ > Run**.

   :::note

   When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision the necessary pipeline components.

   :::

**Result:** The pipeline runs. You can see the results in the logs.

### What's Next?

For detailed information about setting up your own pipeline for your repository, [configure a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md), enable a repository, and then configure your pipeline.

diff --git a/docs/reference-guides/pipelines/example-yaml.md b/docs/reference-guides/pipelines/example-yaml.md new file mode 100644 index 0000000000..5a04f2def8 --- /dev/null +++ b/docs/reference-guides/pipelines/example-yaml.md @@ -0,0 +1,72 @@
---
title: Example YAML File
weight: 501
---

Pipelines can be configured either through the UI or using a YAML file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`.

In the [pipeline configuration reference](pipeline-configuration.md), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration.

Below is a full example `rancher-pipeline.yml` for those who want to jump right in.
+ +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} + # Set environment variables in container for the step + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 + # Set environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com + - name: Deploy some workloads + steps: + - applyYamlConfig: + path: ./deployment.yaml +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +# timeout in minutes +timeout: 30 +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` diff --git a/docs/reference-guides/pipelines/pipeline-configuration.md b/docs/reference-guides/pipelines/pipeline-configuration.md new file mode 100644 index 0000000000..da5bf92a6e --- /dev/null +++ b/docs/reference-guides/pipelines/pipeline-configuration.md @@ -0,0 +1,660 @@ +--- +title: Pipeline Configuration Reference +weight: 1 +--- + +In this section, you'll learn how to configure pipelines. + +- [Step Types](#step-types) +- [Step Type: Run Script](#step-type-run-script) +- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) +- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) +- [Step Type: Deploy YAML](#step-type-deploy-yaml) +- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) +- [Notifications](#notifications) +- [Timeouts](#timeouts) +- [Triggers and Trigger Rules](#triggers-and-trigger-rules) +- [Environment Variables](#environment-variables) +- [Secrets](#secrets) +- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) +- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) + - [Executor Quota](#executor-quota) + - [Resource Quota for Executors](#resource-quota-for-executors) + - [Custom CA](#custom-ca) +- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) +- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) + +# Step Types + +Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. + +Step types include: + +- [Run Script](#step-type-run-script) +- [Build and Publish Images](#step-type-build-and-publish-images) +- [Publish Catalog Template](#step-type-publish-catalog-template) +- [Deploy YAML](#step-type-deploy-yaml) +- [Deploy Catalog App](#step-type-deploy-catalog-app) + + + +### Configuring Steps By UI + +If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. + +1. Add stages to your pipeline execution by clicking **Add Stage**. + + 1. 
Enter a **Name** for each stage of your pipeline. + 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. + +1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. + +### Configuring Steps by YAML + +For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com +``` +# Step Type: Run Script + +The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configuring Script by UI + +1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. + +1. Click **Add**. + +### Configuring Script by YAML +```yaml +# example +stages: +- name: Build something + steps: + - runScriptConfig: + image: golang + shellScript: go build +``` +# Step Type: Build and Publish Images + +The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. + +The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. + +### Configuring Building and Publishing Images by UI +1. From the **Step Type** drop-down, choose **Build and Publish**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | + Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | + Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | + Build Context

(**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/).

### Configuring Building and Publishing Images by YAML

You can use specific arguments for the Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include:

Variable Name | Description
------------------------|------------------------------------------------------------
PLUGIN_DRY_RUN | Disable the Docker push
PLUGIN_DEBUG | Run the Docker daemon in debug mode
PLUGIN_MIRROR | Docker daemon registry mirror
PLUGIN_INSECURE | Allow the Docker daemon to use insecure registries
PLUGIN_BUILD_ARGS | Docker build args, a comma-separated list
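For instance, build arguments could be passed to the image build in the same way. This is only a sketch with made-up argument names and values:

```yaml
# Hypothetical example: passing Docker build args to the Build and Publish step
stages:
- name: Publish Image
  steps:
  - publishImageConfig:
      dockerfilePath: ./Dockerfile
      buildContext: .
      tag: repo/app:v1
    env:
      PLUGIN_BUILD_ARGS: "HTTP_PROXY=http://proxy.example.com,APP_VERSION=v1"
```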
+ +```yaml +# This example shows an environment variable being used +# in the Publish Image step. This variable allows you to +# publish an image to an insecure registry: + +stages: +- name: Publish Image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + pushRemote: true + registry: example.com + env: + PLUGIN_INSECURE: "true" +``` + +# Step Type: Publish Catalog Template + +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a git hosted chart repository. It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. + +### Configuring Publishing a Catalog Template by UI + +1. From the **Step Type** drop-down, choose **Publish Catalog Template**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | + Catalog Template Name | The name of the template. For example, wordpress. | + Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | + Protocol | You can choose to publish via HTTP(S) or SSH protocol. | + Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | + Git URL | The Git URL of the chart repository that the template will be published to. | + Git Branch | The Git branch of the chart repository that the template will be published to. | + Author Name | The author name used in the commit message. | + Author Email | The author email used in the commit message. | + + +### Configuring Publishing a Catalog Template by YAML + +You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: + +* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. +* CatalogTemplate: The name of the template. +* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. +* GitUrl: The git URL of the chart repository that the template will be published to. +* GitBranch: The git branch of the chart repository that the template will be published to. +* GitAuthor: The author name used in the commit message. +* GitEmail: The author email used in the commit message. +* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. + +```yaml +# example +stages: +- name: Publish Wordpress Template + steps: + - publishCatalogConfig: + path: ./charts/wordpress/latest + catalogTemplate: wordpress + version: ${CICD_GIT_TAG} + gitUrl: git@github.com:myrepo/charts.git + gitBranch: master + gitAuthor: example-user + gitEmail: user@example.com + envFrom: + - sourceName: publish-keys + sourceKey: DEPLOY_KEY +``` + +# Step Type: Deploy YAML + +This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configure Deploying YAML by UI + +1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. + +1. Enter the **YAML Path**, which is the path to the manifest file in the source code. + +1. Click **Add**. + +### Configure Deploying YAML by YAML + +```yaml +# example +stages: +- name: Deploy + steps: + - applyYamlConfig: + path: ./deployment.yaml +``` + +# Step Type :Deploy Catalog App + +The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. + +### Configure Deploying Catalog App by UI + +1. From the **Step Type** drop-down, choose **Deploy Catalog App**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Catalog | The catalog from which the app template will be used. | + Template Name | The name of the app template. For example, wordpress. | + Template Version | The version of the app template you want to deploy. | + Namespace | The target namespace where you want to deploy the app. | + App Name | The name of the app you want to deploy. | + Answers | Key-value pairs of answers used to deploy the app. | + + +### Configure Deploying Catalog App by YAML + +You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: + +* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. +* Version: The version of the template you want to deploy. +* Answers: Key-value pairs of answers used to deploy the app. +* Name: The name of the app you want to deploy. +* TargetNamespace: The target namespace where you want to deploy the app. + +```yaml +# example +stages: +- name: Deploy App + steps: + - applyAppConfig: + catalogTemplate: cattle-global-data:library-mysql + version: 0.3.8 + answers: + persistence.enabled: "false" + name: testmysql + targetNamespace: test +``` + +# Timeouts + +By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. + +### Configuring Timeouts by UI + +Enter a new value in the **Timeout** field. 
+ +### Configuring Timeouts by YAML + +In the `timeout` section, enter the timeout value in minutes. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +# timeout in minutes +timeout: 30 +``` + +# Notifications + +You can enable notifications to any notifiers based on the build status of a pipeline. Before enabling notifications, Rancher recommends setting up notifiers so it will be easy to add recipients immediately. + +### Configuring Notifications by UI + +1. Within the **Notification** section, turn on notifications by clicking **Enable**. + +1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. + +1. If you don't have any existing notifiers, Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. + + :::note + + Notifiers are configured at a cluster level and require a different level of permissions. + + ::: + +1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. + +### Configuring Notifications by YAML + +In the `notification` section, you will provide the following information: + +* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. + * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. + * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. +* **Condition:** Select which conditions of when you want the notification to be sent. +* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. + +```yaml +# Example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` + +# Triggers and Trigger Rules + +After you configure a pipeline, you can trigger it using different methods: + +- **Manually:** + + After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. 
+ +- **Automatically:** + + When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. + + To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. + +Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: + +- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. + +- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. + +If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. + +Wildcard character (`*`) expansion is supported in `branch` conditions. + +This section covers the following topics: + +- [Configuring pipeline triggers](#configuring-pipeline-triggers) +- [Configuring stage triggers](#configuring-stage-triggers) +- [Configuring step triggers](#configuring-step-triggers) +- [Configuring triggers by YAML](#configuring-triggers-by-yaml) + +### Configuring Pipeline Triggers + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Click on **Show Advanced Options**. +1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. + + 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. + + 1. **Optional:** Add more branches that trigger a build. + +1. Click **Done**. + +### Configuring Stage Triggers + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. +1. Click **Show advanced options**. +1. In the **Trigger Rules** section, configure rules to run or skip the stage. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the stage and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the stage. | + | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + +### Configuring Step Triggers + +1. 
In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. +1. Click **Show advanced options**. +1. In the **Trigger Rules** section, configure rules to run or skip the step. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the step and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the step. | + | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + + +### Configuring Triggers by YAML + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +``` + +# Environment Variables + +When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. + +### Configuring Environment Variables by UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. +1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. +1. Click **Show advanced options**. +1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. +1. Add your environment variable(s) into either the script or file. +1. Click **Save**. + +### Configuring Environment Variables by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 +``` + +# Secrets + +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md). + +### Prerequisite +Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. +
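+A minimal sketch of creating such a secret with `kubectl` is shown below. The secret name (`my-secret`), key (`secret-key`), and namespace are placeholders; replace the namespace with the one used by your pipeline build pods.
+
+```bash
+# Create a generic secret with a single key that the pipeline
+# can later inject as an environment variable.
+kubectl create secret generic my-secret \
+  --from-literal=secret-key=mypassword \
+  -n <pipeline-namespace>
+```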
+ +:::note + +Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). + +::: + +### Configuring Secrets by UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. +1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. +1. Click **Show advanced options**. +1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. +1. Click **Save**. + +### Configuring Secrets by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${ALIAS_ENV} + # environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV +``` + +# Pipeline Variable Substitution Reference + +For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. + +Variable Name | Description +------------------------|------------------------------------------------------------ +`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). +`CICD_GIT_URL` | URL of the Git repository. +`CICD_GIT_COMMIT` | Git commit ID being executed. +`CICD_GIT_BRANCH` | Git branch of this event. +`CICD_GIT_REF` | Git reference specification of this event. +`CICD_GIT_TAG` | Git tag name, set on tag event. +`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). +`CICD_PIPELINE_ID` | Rancher ID for the pipeline. +`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. +`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. +`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. +`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
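+
+For instance, a sketch of how these variables might be referenced in the manifest used by a Deploy YAML step is shown below. The deployment and container names are placeholders, and using the commit ID as the image tag is only an example.
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - name: my-app
+        # Image name from the previous Publish Image step plus the
+        # commit ID as the tag, substituted at execution time.
+        image: ${CICD_IMAGE}:${CICD_GIT_COMMIT}
+```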

[Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) + +# Global Pipeline Execution Settings + +After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. + +### Changing Pipeline Settings + +:::note Prerequisite: + +Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy +features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported. + +1. In the upper left corner, click **☰ > Global Settings**. +1. Click **Feature Flags**. +1. Go to the `legacy` feature flag and click **⋮ > Activate**. + +::: + +To edit these settings: + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. + +- [Executor Quota](#executor-quota) +- [Resource Quota for Executors](#resource-quota-for-executors) +- [Custom CA](#custom-ca) + +### Executor Quota + +Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. + +### Resource Quota for Executors + +Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. + +Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. + +To configure compute resources for pipeline-step containers: + +You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. + +In a step, you will provide the following information: + +* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. +* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. +* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. +* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi +``` + +:::note + +Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. 
+ +::: + +### Custom CA + +If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. + +1. Click **Edit cacerts**. + +1. Paste in the CA root certificates and click **Save cacerts**. + +**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. + +# Persistent Data for Pipeline Components + +The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +For details on setting up persistent storage for pipelines, refer to [this page.](configure-persistent-data.md) + +# Example rancher-pipeline.yml + +An example pipeline configuration file is on [this page.](example-yaml.md) diff --git a/docs/reference-guides/prometheus-federator/rbac.md b/docs/reference-guides/prometheus-federator/rbac.md new file mode 100644 index 0000000000..a08ebb26e8 --- /dev/null +++ b/docs/reference-guides/prometheus-federator/rbac.md @@ -0,0 +1,33 @@ +--- +title: Role-Based Access Control +shortTitle: RBAC +weight: 2 +--- + +This section describes the expectations for Role-Based Access Control (RBAC) for Prometheus Federator. + +As described in the section on [namespaces](../../pages-for-subheaders/prometheus-federator.md#namespaces), Prometheus Federator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings: + +- ClusterRoleBindings +- RoleBindings in the Project Release Namespace + +On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the the binding points to matches a ClusterRole with the name provided under: + +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin` +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit` +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view` + +By default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +:::note + +For Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates. 
+
+:::
+
+If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels:
+
+- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}`
+- `helm.cattle.io/project-helm-chart-role-aggregate-from: `
+
+By default, `rancher-project-monitoring`, the underlying chart deployed by Prometheus Federator, creates three default Roles per Project Release Namespace that give `admin`, `edit`, and `view` users permission to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack, following the principle of least privilege. However, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to those users, or create Roles with the above two labels on them so that Project Owners can assign those RBAC roles to users in their Project Registration namespaces.
\ No newline at end of file
diff --git a/docs/reference-guides/rancher-cluster-tools.md b/docs/reference-guides/rancher-cluster-tools.md
new file mode 100644
index 0000000000..68a105ecfe
--- /dev/null
+++ b/docs/reference-guides/rancher-cluster-tools.md
@@ -0,0 +1,59 @@
+---
+title: Tools for Logging, Monitoring, and Visibility
+weight: 2033
+---
+
+Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into the following categories:
+
+- [Logging](#logging)
+- [Monitoring and Alerts](#monitoring-and-alerts)
+- [Istio](#istio)
+- [OPA Gatekeeper](#opa-gatekeeper)
+- [CIS Scans](#cis-scans)
+
+# Logging
+
+Logging is helpful because it allows you to:
+
+- Capture and analyze the state of your cluster
+- Look for trends in your environment
+- Save your logs to a safe location outside of your cluster
+- Stay informed of events like a container crashing, a pod eviction, or a node dying
+- More easily debug and troubleshoot problems
+
+Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd.
+
+For more information, refer to the logging documentation [here.](../pages-for-subheaders/logging.md)
+
+# Monitoring and Alerts
+
+Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution.
+
+After monitoring is enabled, you can set up alerts and notifiers, which provide the mechanism for receiving them.
+
+Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to the staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks.
+
+Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifiers in Rancher. The scope for alerts can be set at either the cluster or project level.
+ +For more information, refer to the monitoring documentation [here.](../pages-for-subheaders/monitoring-and-alerting.md) + +# Istio + +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. + +Rancher's integration with Istio was improved in Rancher v2.5. + +For more information, refer to the Istio documentation [here.](../pages-for-subheaders/istio.md) +# OPA Gatekeeper + +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](../explanations/integrations-in-rancher/opa-gatekeeper.md) + +# CIS Scans + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. + +For more information, refer to the CIS scan documentation [here.](../pages-for-subheaders/cis-scan-guides.md) \ No newline at end of file diff --git a/docs/reference-guides/rancher-manager-architecture/architecture-recommendations.md b/docs/reference-guides/rancher-manager-architecture/architecture-recommendations.md new file mode 100644 index 0000000000..99022aa691 --- /dev/null +++ b/docs/reference-guides/rancher-manager-architecture/architecture-recommendations.md @@ -0,0 +1,113 @@ +--- +title: Architecture Recommendations +weight: 3 +--- + +If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) + +This section covers the following topics: + +- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) +- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) +- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) +- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) +- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) +- [Architecture for an Authorized Cluster Endpoint (ACE)](#architecture-for-an-authorized-cluster-endpoint-ace) + +# Separation of Rancher and User Clusters + +A user cluster is a downstream Kubernetes cluster that runs your apps and services. + +If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. + +If Rancher is intended to manage downstream Kubernetes clusters, the Kubernetes cluster that the Rancher server runs on should also be separate from the downstream user clusters. + +![Separation of Rancher Server from User Clusters](/img/rancher-architecture-separation-of-rancher-server.svg) + +# Why HA is Better for Rancher in Production + +We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. 
In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. + +We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. + +### K3s Kubernetes Cluster Installations + +One option for the underlying Kubernetes cluster is to use K3s Kubernetes. K3s is Rancher's CNCF certified Kubernetes distribution. It is easy to install and uses half the memory of Kubernetes, all in a binary of less than 100 MB. Another advantage of K3s is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. + +
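+For example, a minimal sketch of starting a K3s server against an external MySQL datastore is shown below; the endpoint, credentials, and database name are placeholders.
+
+```bash
+# Install and run a K3s server that keeps its cluster data in an
+# external MySQL database instead of on the server node itself.
+curl -sfL https://get.k3s.io | sh -s - server \
+  --datastore-endpoint="mysql://username:password@tcp(db.example.com:3306)/k3s"
+```
+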
Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
+![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) + +### RKE Kubernetes Cluster Installations + +In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. + +
Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
+![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) + +# Recommended Load Balancer Configuration for Kubernetes Installations + +We recommend the following configurations for the load balancer and Ingress controllers: + +* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP). +* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
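+As an illustration, a minimal NGINX layer-4 (stream) configuration for such a load balancer might look like the sketch below. The node IP addresses are placeholders, and this is only an example under those assumptions, not a production-ready configuration.
+
+```
+worker_processes 4;
+
+events {
+    worker_connections 8192;
+}
+
+stream {
+    upstream rancher_servers_http {
+        least_conn;
+        # Forward TCP/80 to all three nodes in the Kubernetes cluster.
+        server 10.0.0.1:80 max_fails=3 fail_timeout=5s;
+        server 10.0.0.2:80 max_fails=3 fail_timeout=5s;
+        server 10.0.0.3:80 max_fails=3 fail_timeout=5s;
+    }
+    server {
+        listen 80;
+        proxy_pass rancher_servers_http;
+    }
+
+    upstream rancher_servers_https {
+        least_conn;
+        # Forward TCP/443 to all three nodes; TLS is terminated
+        # by the Ingress controller, not by the load balancer.
+        server 10.0.0.1:443 max_fails=3 fail_timeout=5s;
+        server 10.0.0.2:443 max_fails=3 fail_timeout=5s;
+        server 10.0.0.3:443 max_fails=3 fail_timeout=5s;
+    }
+    server {
+        listen 443;
+        proxy_pass rancher_servers_https;
+    }
+}
+```
+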
Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
+![Rancher HA](/img/ha/rancher2ha.svg) + +# Environment for Kubernetes Installations + +It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. + +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +# Recommended Node Roles for Kubernetes Installations + +The below recommendations apply when Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. + +### K3s Cluster Roles + +In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. + +For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. + +### RKE Cluster Roles + +If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. + +### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters + +Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. + +Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. + +For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. + +![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters](/img/rancher-architecture-node-roles.svg) + +RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. + +We recommend that downstream user clusters should have at least: + +- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available +- **Two nodes with only the controlplane role** to make the master component highly available +- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services + +With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. + +Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. 
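+
+For reference, a sketch of the `nodes` section of an RKE `cluster.yml` for the Rancher server cluster following this recommendation is shown below; the addresses and SSH user are placeholders.
+
+```yaml
+# Three nodes, each carrying all three Kubernetes roles,
+# as recommended for the cluster that runs Rancher itself.
+nodes:
+  - address: 10.0.0.1
+    user: rancher
+    role: [controlplane, worker, etcd]
+  - address: 10.0.0.2
+    user: rancher
+    role: [controlplane, worker, etcd]
+  - address: 10.0.0.3
+    user: rancher
+    role: [controlplane, worker, etcd]
+```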
+ +For more best practices for downstream clusters, refer to the [production checklist](../../pages-for-subheaders/checklist-for-production-ready-clusters.md) or our [best practices guide.](../../pages-for-subheaders/best-practices.md) + +# Architecture for an Authorized Cluster Endpoint (ACE) + +If you are using an [authorized cluster endpoint (ACE),](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. + +If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) and [API keys](../user-settings/api-keys.md#creating-an-api-key) for more information. + +As of Rancher v2.6.3, ACE support is available for registered RKE2 and K3s clusters. To view the manual steps to perform on the downstream cluster to enable the ACE, click [here](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#authorized-cluster-endpoint-support-for-rke2-and-k3s-clusters). diff --git a/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/docs/reference-guides/rancher-manager-architecture/rancher-server-and-components.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/docs/reference-guides/rancher-manager-architecture/rancher-server-and-components.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/reference-guides/rancher-project-tools.md b/docs/reference-guides/rancher-project-tools.md new file mode 100644 index 0000000000..1bfba4bf21 --- /dev/null +++ b/docs/reference-guides/rancher-project-tools.md @@ -0,0 +1,39 @@ +--- +title: Tools for Logging, Monitoring, and Visibility +weight: 2525 +--- + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + + +- [Notifiers and Alerts](#notifiers-and-alerts) +- [Logging](#logging) +- [Monitoring](#monitoring) + + + +## Notifiers and Alerts + +Notifiers and alerts are two features that work together to inform you of events in the Rancher system. Before they can be enabled, the monitoring application must be installed. + +Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. + +Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. 
+
+## Logging
+
+Logging is helpful because it allows you to:
+
+- Capture and analyze the state of your cluster
+- Look for trends in your environment
+- Save your logs to a safe location outside of your cluster
+- Stay informed of events like a container crashing, a pod eviction, or a node dying
+- More easily debug and troubleshoot problems
+
+Rancher can integrate with Elasticsearch, Splunk, Kafka, syslog, and Fluentd.
+
+For details, refer to the [logging section.](../pages-for-subheaders/logging.md)
+
+## Monitoring
+
+Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.](../pages-for-subheaders/monitoring-and-alerting.md)
diff --git a/content/rancher/v2.5/en/security/best-practices/_index.md b/docs/reference-guides/rancher-security/kubernetes-security-best-practices.md
similarity index 100%
rename from content/rancher/v2.5/en/security/best-practices/_index.md
rename to docs/reference-guides/rancher-security/kubernetes-security-best-practices.md
diff --git a/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-hardening-guide-with-cis-v1.6-benchmark.md b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-hardening-guide-with-cis-v1.6-benchmark.md
new file mode 100644
index 0000000000..f2870ee089
--- /dev/null
+++ b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-hardening-guide-with-cis-v1.6-benchmark.md
@@ -0,0 +1,655 @@
+---
+title: RKE Hardening Guide with CIS v1.6 Benchmark
+weight: 100
+aliases:
+  - /rancher/v2.6/en/security/hardening-guides/1.6-hardening-2.6/
+---
+
+This document provides prescriptive guidance for hardening a production installation of an RKE cluster to be used with Rancher v2.6. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+:::note
+
+This hardening guide describes how to secure the nodes in your cluster. It is recommended to follow this guide before installing Kubernetes.
+
+:::
+
+This hardening guide is intended to be used for RKE clusters and is associated with the following specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
+
+| Rancher Version | CIS Benchmark Version | Kubernetes Version |
+| --------------- | --------------------- | ------------------ |
+| Rancher v2.6 | Benchmark v1.6 | Kubernetes v1.18 up to v1.23 |
+
+[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf).
+
+- [Overview](#overview)
+- [Configure Kernel Runtime Parameters](#configure-kernel-runtime-parameters)
+- [Configure `etcd` user and group](#configure-etcd-user-and-group)
+- [Configure `default` service account](#configure-default-service-account)
+- [Configure Network Policy](#configure-network-policy)
+- [Reference Hardened RKE `cluster.yml` Configuration](#reference-hardened-rke-cluster-yml-configuration)
+- [Reference Hardened RKE Template Configuration](#reference-hardened-rke-template-configuration)
+- [Reference Hardened **cloud-config** Configuration](#reference-hardened-cloud-config-configuration)
+
+### Overview
+
+This document provides prescriptive guidance for hardening an RKE cluster to be used for installing Rancher v2.6 with Kubernetes v1.18 up to v1.23, or for provisioning an RKE cluster with Kubernetes v1.18 up to v1.23 to be used within Rancher v2.6. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+For more details about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.6](./rke1-self-assessment-guide-with-cis-v1.6-benchmark.md).
+
+#### Known Issues
+
+- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS v1.6 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
+- When setting `default_pod_security_policy_template_id:` to `restricted` or `restricted-noroot`, based on the pod security policies (PSP) [provided](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) by Rancher, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS v1.6 check 5.1.5 requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured such that they do not provide a service account token and do not have any explicit rights assignments.
+
+### Configure Kernel Runtime Parameters
+
+The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
+
+```ini
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+kernel.panic=10
+kernel.panic_on_oops=1
+kernel.keys.root_maxbytes=25000000
+```
+
+Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
+
+### Configure `etcd` user and group
+
+A user account and group for the **etcd** service must be set up before installing RKE. The **uid** and **gid** for the **etcd** user are used in the RKE **config.yml** to set the proper permissions for files and directories at installation time.
+
+#### Create `etcd` user and group
+
+To create the **etcd** user and group, run the following console commands. The commands below use `52034` for the **uid** and **gid** as an example; any valid unused **uid** or **gid** could also be used in lieu of `52034`.
+ +```bash +groupadd --gid 52034 etcd +useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin +``` + +Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: + +```yaml +services: + etcd: + gid: 52034 + uid: 52034 +``` + +### Configure `default` Service Account + +#### Set `automountServiceAccountToken` to `false` for `default` service accounts + +Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +For each namespace including **default** and **kube-system** on a standard RKE install, the **default** service account must include this value: + +```yaml +automountServiceAccountToken: false +``` + +Save the following configuration to a file called `account_update.yaml`. + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default +automountServiceAccountToken: false +``` + +Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. + +```bash +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do + kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" +done +``` + +### Configure Network Policy + +#### Ensure that all Namespaces have Network Policies defined + +Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints. + +Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. This guide uses [Canal](https://github.com/projectcalico/canal) to provide the policy enforcement. Additional information about CNI providers can be found [here](https://www.suse.com/c/rancher_blog/comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/). + +Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a **permissive** example is provided below. If you want to allow all traffic to all pods in a namespace (even if policies are added that cause some pods to be treated as “isolated”), you can create a policy that explicitly allows all traffic in that namespace. Save the following configuration as `default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) about network policies can be found on the Kubernetes site. 
+ +:::note + +This `NetworkPolicy` is just an example and is not recommended for production use. + +::: + +```yaml +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-allow-all +spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress +``` + +Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to `chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. + +```bash +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do + kubectl apply -f default-allow-all.yaml -n ${namespace} +done +``` + +Execute this script to apply the `default-allow-all.yaml` configuration with the **permissive** `NetworkPolicy` to all namespaces. + +### Reference Hardened RKE `cluster.yml` Configuration + +The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install of Rancher Kubernetes Engine (RKE). RKE install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration in RKE can be found [here](https://rancher.com/docs/rke/latest/en/config-options/nodes/). + +:::note Important: + +For a Kubernetes v1.18 cluster, the configuration `spec.volumes: 'ephemeral'` should be removed from the `PodSecurityPolicy`, since it's not supported in this Kubernetes release. + +::: + +```yaml +# If you intend to deploy Kubernetes in an air-gapped environment, +# please consult the documentation on how to configure custom RKE images. +# https://rancher.com/docs/rke/latest/en/installation/ . + +# The nodes directive is required and will vary depending on your environment. 
+# Documentation for node configuration can be found here: +# https://rancher.com/docs/rke/latest/en/config-options/nodes/ +nodes: [] +services: + etcd: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + external_urls: [] + ca_cert: "" + cert: "" + key: "" + path: "" + uid: 52034 + gid: 52034 + snapshot: false + retention: "" + creation: "" + backup_config: null + kube-api: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + service_cluster_ip_range: "" + service_node_port_range: "" + pod_security_policy: true + always_pull_images: false + secrets_encryption_config: + enabled: true + custom_config: null + audit_log: + enabled: true + configuration: null + admission_configuration: null + event_rate_limit: + enabled: true + configuration: null + kube-controller: + image: "" + extra_args: + feature-gates: RotateKubeletServerCertificate=true + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + bind-address: 127.0.0.1 + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + cluster_cidr: "" + service_cluster_ip_range: "" + scheduler: + image: "" + extra_args: + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + bind-address: 127.0.0.1 + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + kubelet: + image: "" + extra_args: + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: true + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + cluster_domain: cluster.local + infra_container_image: "" + cluster_dns_server: "" + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] +network: + plugin: "" + options: {} + mtu: 0 + node_selector: {} + update_strategy: null +authentication: + strategy: "" + sans: [] + webhook: null +addons: | + # Upstream Kubernetes restricted PSP policy + # https://github.com/kubernetes/website/blob/564baf15c102412522e9c8fc6ef2b5ff5b6e766c/content/en/examples/policy/restricted-psp.yaml + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted-noroot + spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use. + - 'csi' + - 'persistentVolumeClaim' + - 'ephemeral' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'MustRunAsNonRoot' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted-noroot + rules: + - apiGroups: + - extensions + resourceNames: + - restricted-noroot + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted-noroot + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted-noroot + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-allow-all + spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + automountServiceAccountToken: false +addons_include: [] +system_images: + etcd: "" + alpine: "" + nginx_proxy: "" + cert_downloader: "" + kubernetes_services_sidecar: "" + kubedns: "" + dnsmasq: "" + kubedns_sidecar: "" + kubedns_autoscaler: "" + coredns: "" + coredns_autoscaler: "" + nodelocal: "" + kubernetes: "" + flannel: "" + flannel_cni: "" + calico_node: "" + calico_cni: "" + calico_controllers: "" + calico_ctl: "" + calico_flexvol: "" + canal_node: "" + canal_cni: "" + canal_controllers: "" + canal_flannel: "" + canal_flexvol: "" + weave_node: "" + weave_cni: "" + pod_infra_container: "" + ingress: "" + ingress_backend: "" + metrics_server: "" + windows_pod_infra_container: "" +ssh_key_path: "" +ssh_cert_path: "" +ssh_agent_auth: false +authorization: + mode: "" + options: {} +ignore_docker_version: false +kubernetes_version: "" +private_registries: [] +ingress: + provider: "" + options: {} + node_selector: {} + extra_args: {} + dns_policy: "" + extra_envs: [] + extra_volumes: [] + extra_volume_mounts: [] + update_strategy: null + http_port: 0 + https_port: 0 + network_mode: "" +cluster_name: +cloud_provider: + name: "" +prefix_path: "" +win_prefix_path: "" +addon_job_timeout: 0 +bastion_host: + address: "" + port: "" + user: "" + ssh_key: "" + ssh_key_path: "" + ssh_cert: "" + ssh_cert_path: "" +monitoring: + provider: "" + options: {} + node_selector: {} + update_strategy: null + replicas: null +restore: + restore: false + snapshot_name: "" +dns: null +upgrade_strategy: + max_unavailable_worker: "" + max_unavailable_controlplane: "" + drain: null + node_drain_input: null +``` + +### Reference Hardened RKE Template Configuration + +The reference RKE template provides the configuration needed to achieve a hardened install of Kubernetes. RKE templates are used to provision Kubernetes and define Rancher settings. 
Follow the Rancher [documentation](../../../pages-for-subheaders/installation-and-upgrade.md) for additional installation and RKE template details. + +```yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted-noroot +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: true +local_cluster_auth_endpoint: + enabled: true +name: '' +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 45 + authentication: + strategy: x509 + dns: + nodelocal: + ip_address: '' + node_selector: null + update_strategy: {} + enable_cri_dockerd: false + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + default_backend: false + default_ingress_class: true + http_port: 0 + https_port: 0 + provider: nginx + kubernetes_version: v1.21.8-rancher1-1 + monitoring: + provider: metrics-server + replicas: 1 +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + mtu: 0 + options: + flannel_backend_type: vxlan + plugin: canal + rotate_encryption_key: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + scheduler: + extra_args: + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + bind-address: 127.0.0.1 + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + timeout: 300 + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + retention: 72h + snapshot: false + uid: 52034 + gid: 52034 + kube_api: + always_pull_images: false + audit_log: + enabled: true + event_rate_limit: + enabled: true + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube-controller: + extra_args: + feature-gates: RotateKubeletServerCertificate=true + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + bind-address: 127.0.0.1 + kubelet: + extra_args: + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: true + tls-cipher-suites: 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + generate_serving_certificate: true + ssh_agent_auth: false + upgrade_strategy: + max_unavailable_controlplane: '1' + max_unavailable_worker: 10% +windows_prefered_cluster: false +``` + +### Reference Hardened **cloud-config** Configuration + +A **cloud-config** configuration file is generally used in cloud infrastructure environments to allow for configuration management of compute instances. The reference config configures SUSE Linux Enterprise Server (SLES), openSUSE Leap, Red Hat Enterprise Linux (RHEL) and Ubuntu operating system level settings needed before installing Kubernetes. + +#### Reference Hardened **cloud-config** for SUSE Linux Enterprise Server 15 (SLES 15) and openSUSE Leap 15 + +```yaml +#cloud-config +system_info: + default_user: + groups: + - docker +write_files: +- path: "/etc/sysctl.d/90-kubelet.conf" + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxbytes=25000000 +package_update: true +ssh_pwauth: false +runcmd: +# Docker should already be installed in SLES 15 SP3 +- zypper install docker containerd +- systemctl daemon-reload +- systemctl enable docker.service +- systemctl start --no-block docker.service +- sysctl -p /etc/sysctl.d/90-kubelet.conf +- groupadd --gid 52034 etcd +- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin +``` + +#### Reference Hardened **cloud-config** for Red Hat Enterprise Linux 8 (RHEL 8) and Ubuntu 20.04 LTS + +```yaml +#cloud-config +system_info: + default_user: + groups: + - docker +write_files: +- path: "/etc/sysctl.d/90-kubelet.conf" + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxbytes=25000000 +package_update: true +ssh_pwauth: false +runcmd: +# Install Docker from Rancher's Docker installation scripts - github.com/rancher/install-docker +- curl https://releases.rancher.com/install-docker/20.10.sh | sh +- sysctl -p /etc/sysctl.d/90-kubelet.conf +- groupadd --gid 52034 etcd +- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd --shell /usr/sbin/nologin +``` diff --git a/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-self-assessment-guide-with-cis-v1.6-benchmark.md b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-self-assessment-guide-with-cis-v1.6-benchmark.md new file mode 100644 index 0000000000..3e4e703e30 --- /dev/null +++ b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-self-assessment-guide-with-cis-v1.6-benchmark.md @@ -0,0 +1,3104 @@ +--- +title: RKE CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 +weight: 101 +aliases: + - /rancher/v2.6/en/security/hardening-guides/1.6-benchmark-2.6/ +--- + +### RKE CIS v1.6 Kubernetes Benchmark - Rancher v2.6 with Kubernetes v1.18 to v1.23 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). 
+
+#### Overview
+
+This document is a companion to the [Rancher v2.6 RKE security hardening guide](rke1-hardening-guide-with-cis-v1.6-benchmark.md). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide helps you evaluate the security of the hardened cluster against each control in the benchmark.
+
+This guide corresponds to specific versions of the hardening guide, Rancher, the CIS Benchmark, and Kubernetes:
+
+| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version |
+| ----------------------- | --------------- | --------------------- | ------------------- |
+| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6 | CIS v1.6 | Kubernetes v1.18 up to v1.23 |
+
+Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide walks through the various controls and provides updated example commands to audit compliance in Rancher-created clusters.
+
+This document is intended for Rancher operators, security teams, auditors, and decision makers.
+
+For more detail about each audit, including rationales and remediations for failing tests, refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, from the [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/) website.
+
+#### Testing controls methodology
+
+Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files.
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, both of which are required for testing and evaluating the results.
+
+:::note
+
+Only `automated` tests (previously called `scored`) are covered in this guide.
+
+:::
+
+### Controls
+## 1.1 Master Node Configuration Files
+### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
+All configuration is passed in as arguments at container run time.
+
+### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
+All configuration is passed in as arguments at container run time.
+
+### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
+All configuration is passed in as arguments at container run time.
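+
+Because RKE passes all of this configuration as container arguments, you can review the effective settings for these components directly from the container definitions. The following is a minimal sketch, not part of the benchmark's required audits, assuming Docker CLI access on the node and the default RKE container names (`kube-apiserver`, `kube-controller-manager`, `kube-scheduler`):
+
+```bash
+# Print one argument per line for each RKE control plane container so the
+# settings referenced by the controls in this guide can be reviewed in one place.
+for c in kube-apiserver kube-controller-manager kube-scheduler; do
+  echo "== ${c} =="
+  docker inspect --format '{{ range .Args }}{{ println . }}{{ end }}' "${c}"
+done
+```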
+ +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 + +**Audit:** + +```bash +stat -c permissions=%a +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root + +**Audit:** + +```bash +stat -c %U:%G +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c %a /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +A system service account is required for etcd data directory ownership. +Refer to Rancher's hardening guide for more details on how to configure this ownership. 
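+
+As a minimal sketch of that remediation, assuming the default `/var/lib/etcd` data directory and the etcd uid/gid of 52034 used in the hardening guide, the service account and ownership could be applied as follows:
+
+```bash
+# Create the etcd system group and user with the uid/gid expected by the
+# hardened configuration, then give them ownership of the etcd data directory.
+groupadd --gid 52034 etcd
+useradd --comment "etcd service account" --uid 52034 --gid 52034 --shell /usr/sbin/nologin etcd
+chown etcd:etcd /var/lib/etcd
+```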
+ +**Audit:** + +```bash +stat -c %U:%G /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd +``` + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit Script:** `check_files_owner_in_dir.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the owner is set to root:root for +# the given directory and all the files in it +# +# inputs: +# $1 = /full/path/to/directory +# +# outputs: +# true/false + +INPUT_DIR=$1 + +if [[ "${INPUT_DIR}" == "" ]]; then + echo "false" + exit +fi + +if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then + echo "false" + exit +fi + +statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) +while read -r statInfoLine; do + f=$(echo ${statInfoLine} | cut -d' ' -f1) + p=$(echo ${statInfoLine} | cut -d' ' -f2) + + if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then + if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "root:root" ]]; then + echo "false" + exit + fi + fi +done <<< "${statInfoLines}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_owner_in_dir.sh /node/etc/kubernetes/ssl +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod -R 600 /etc/kubernetes/ssl/*key.pem + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--basic-auth-file' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--token-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is not present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'Node' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes RBAC, +for example: +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'RBAC' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... 
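+
+Before applying this remediation, it may help to confirm whether `PodSecurityPolicy` is already enabled, in which case SecurityContextDeny is not needed. A quick sketch, reusing the same process listing approach as the audits in this guide:
+
+```bash
+# If PodSecurityPolicy appears in the extracted plugin list, this control is satisfied
+# without adding SecurityContextDeny.
+/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o 'enable-admission-plugins=[^ ]*'
+```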
+ +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API 
server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. 
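+
+For example, a minimal restricted policy could look like the following (a sketch only, with a hypothetical policy name; tailor the rules to your workloads before applying it):
+
+```bash
+# Create an example PodSecurityPolicy (policy/v1beta1); adjust the rules as needed
+kubectl apply -f - <<EOF
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: restricted-example
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  runAsUser:
+    rule: MustRunAsNonRoot
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  volumes:
+    - configMap
+    - secret
+    - emptyDir
+    - projected
+    - downwardAPI
+    - persistentVolumeClaim
+EOF
+```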
+Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes PodSecurityPolicy: +--enable-admission-plugins=...,PodSecurityPolicy,... +Then restart the API Server. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure 
NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file 
/etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --insecure-bind-address parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--insecure-bind-address' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--insecure-port=0 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'0' is equal to '0' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +6443 is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.21 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example: +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +30 is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +10 is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB: +--audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +100 is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, +--request-timeout=300s + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --service-account-key-file parameter +to the public key file for service accounts: +`--service-account-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate and key file parameters. +`--etcd-certfile=` +`--etcd-keyfile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the TLS certificate and private key file parameters. +`--tls-cert-file=` +`--tls-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the client certificate authority file. +`--client-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate authority file parameter. +`--etcd-cafile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. + +**Audit Script:** `check_encryption_provider_config.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to check the encrption provider config is set to aesbc +# +# outputs: +# true/false + +# TODO: Figure out the file location from the kube-apiserver commandline args +ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" + +if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then + echo "false" + exit +fi + +for provider in "$@" +do + if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then + echo "true" + exit + fi +done + +echo "false" +exit + +``` + +**Audit Execution:** + +```bash +./check_encryption_provider_config.sh aescbc +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +- aescbc: true +``` + +### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM +_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM +_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM +_SHA384 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example: +--terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'true' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --service-account-private-key-file parameter +to the private key file for service accounts. 
+`--service-account-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --root-ca-file parameter to the certificate bundle file`. +`--root-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +Cluster provisioned by RKE handles certificate rotation directly through RKE. + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +## 2 Etcd Node Configuration Files +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. 
+Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. +`--cert-file=` +`--key-file=` + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--cert-file' is present AND '--key-file' is present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 1 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json
+```
+
+### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the etcd service documentation and configure peer TLS encryption as appropriate
+for your etcd cluster.
+Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the
+master node and set the below parameters.
+`--peer-cert-file=`
+`--peer-key-file=`
+
+**Audit:**
+
+```bash
+/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--peer-cert-file' is present AND '--peer-key-file' is present
+```
+
+**Returned Value**:
+
+```console
+etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 5 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. +--peer-auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameter. +`--trusted-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 3 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-policy-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json
+```
+
+### 3.2.2 Ensure that the audit policy covers key security concerns (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Consider modification of the audit policy in use on the cluster to include these items, at a
+minimum.
+
+## 4.1 Worker Node Configuration Files
+### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service.
+All configuration is passed in as arguments at container run time.
+
+### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service.
+All configuration is passed in as arguments at container run time.
+
+### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 $proxykubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected Result**:
+
+```console
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+**Returned Value**:
+
+```console
+600
+```
+
+### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present
+```
+
+### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected Result**:
+
+```console
+'permissions' is not present
+```
+
+### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is equal to 'root:root'
+```
+
+**Returned Value**:
+
+```console
+root:root
+```
+
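+The checks in 4.1.3 through 4.1.6 each probe a single kubeconfig file. As a convenience, the loop below is a minimal sketch (not part of the kube-bench audit) that sweeps every RKE-generated kubeconfig on a node in one pass, so the output can be compared against the 644-or-more-restrictive and root:root expectations above; the `/node/etc/kubernetes/ssl` path and `kubecfg-*.yaml` naming are assumptions taken from the audit commands shown in these checks.
+
+```bash
+#!/usr/bin/env bash
+# Sketch: print the mode and ownership of each RKE-generated kubeconfig file
+# so they can be compared against checks 4.1.3 - 4.1.6
+# (permissions 644 or more restrictive, owner root:root).
+for f in /node/etc/kubernetes/ssl/kubecfg-*.yaml; do
+  [ -e "$f" ] || continue
+  printf '%s permissions=%s owner=%s\n' "$f" "$(stat -c %a "$f")" "$(stat -c %U:%G "$f")"
+done
+```
+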
+For example, +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +`--client-ca-file chmod 644 ` + +**Audit Script:** `check_cafile_permissions.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_permissions.sh +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +`chown root:root ` + +**Audit Script:** `check_cafile_ownership.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_ownership.sh +``` + +**Expected Result**: + +```console +'root:root' is not present +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +## 4.2 Kubelet +### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to +false. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--anonymous-auth=false +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set tlsCertFile to the location +of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +`--tls-cert-file=` +`--tls-private-key-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present AND '' is not present +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line rotateCertificates: true or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'--rotate-certificates' is not present OR '--rotate-certificates' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE handles certificate rotation directly through RKE. + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set TLSCipherSuites: to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +## 5.2 Pod Security Policies +### 5.2.1 Minimize the admission of privileged containers (Manual) + + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that +the .spec.privileged field is omitted or set to false. 
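+
+Although this control is Manual and kube-bench does not score it, a quick check analogous to the automated controls below can be run. The command is a sketch that mirrors the audit pattern used in controls 5.2.2 through 5.2.5, substituting the `.spec.privileged` field, and assumes `kubectl` and `jq` are available on the node:
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.privileged == null) or (.spec.privileged == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+A returned count greater than 0 indicates that at least one policy in the cluster does not admit privileged containers.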
+ +### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostPID field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostIPC field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostNetwork field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.allowPrivilegeEscalation field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.6 Minimize the admission of root containers (Manual) + + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of +UIDs not including 0. + +### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) + + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + +### 5.2.8 Minimize the admission of containers with added capabilities (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that allowedCapabilities is not present in PSPs for the cluster unless +it is set to an empty array. + +### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) + + +**Result:** warn + +**Remediation:** +Review the use of capabilites in applications runnning on your cluster. 
Where a namespace +contains applicaions which do not require any Linux capabities to operate consider adding +a PSP which forbids the admission of containers which do not drop all capabilities. + +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) + + +**Result:** warn + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +**Audit Script:** `check_for_network_policies.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [[ ${policy_count} -eq 0 ]]; then + echo "false" + exit + fi +done + +echo "true" + +``` + +**Audit Execution:** + +```bash +./check_for_network_policies.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 5.4 Secrets Management +### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) + + +**Result:** warn + +**Remediation:** +if possible, rewrite application code to read secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + + +**Result:** warn + +**Remediation:** +Refer to the secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) + + +**Result:** warn + +**Remediation:** +Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you +would need to enable alpha features in the apiserver by passing "--feature- +gates=AllAlpha=true" argument. +Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS +parameter to "--feature-gates=AllAlpha=true" +KUBE_API_ARGS="--feature-gates=AllAlpha=true" +Based on your system, restart the kube-apiserver service. For example: +systemctl restart kube-apiserver.service +Use annotations to enable the docker/default seccomp profile in your pod definitions. An +example is as below: +apiVersion: v1 +kind: Pod +metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default +spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + +### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply security contexts to your pods. 
For a +suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Automated) + + +**Result:** pass + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + +**Audit Script:** `check_for_default_ns.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) +if [[ ${count} -gt 0 ]]; then + echo "false" + exit +fi + +echo "true" + + +``` + +**Audit Execution:** + +```bash +./check_for_default_ns.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + diff --git a/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-hardening-guide-with-cis-v1.6-benchmark.md b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-hardening-guide-with-cis-v1.6-benchmark.md new file mode 100644 index 0000000000..4d4ff7ad2e --- /dev/null +++ b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-hardening-guide-with-cis-v1.6-benchmark.md @@ -0,0 +1,418 @@ +--- +title: RKE2 Hardening Guide with CIS v1.6 Benchmark +weight: 100 +--- + +This document provides prescriptive guidance for hardening a production installation of a RKE2 cluster to be provisioned with Rancher v2.6.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +:::note + +This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +::: + +This hardening guide is intended to be used for RKE2 clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +| Rancher Version | CIS Benchmark Version | Kubernetes Version | +| --------------- | --------------------- | ------------------ | +| Rancher v2.6.5+ | Benchmark v1.6 | Kubernetes v1.21 up to v1.23 | + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf). + +- [Overview](#overview) +- [Host-level requirements](#host-level-requirements) +- [Setting up hosts](#setting-up-hosts) +- [Kubernetes runtime requirements](#kubernetes-runtime-requirements) +- [API Server audit configuration](#api-server-audit-configuration) +- [Known issues](#known-issues) +- [Reference Hardened RKE2 Template Configuration](#reference-hardened-rke2-template-configuration) +- [Conclusion](#conclusion) + +### Overview + +This document provides prescriptive guidance for hardening a RKE2 cluster to be provisioned through Rancher v2.6.5+ with Kubernetes v1.21 up to v1.23. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +For more details about evaluating a hardened RKE2 cluster against the official CIS benchmark, refer to the [RKE2 - CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.6](rke2-self-assessment-guide-with-cis-v1.6-benchmark.md). 
+ +RKE2 is designed to be "hardened by default" and pass the majority of the Kubernetes CIS controls without modification. There are a few notable exceptions to this that require manual intervention to fully pass the CIS Benchmark: + +1. RKE2 will not modify the host operating system. Therefore, you, the operator, must make a few host-level modifications. +2. Certain CIS policy controls for `PodSecurityPolicies` and `NetworkPolicies` will restrict the functionality of the cluster. You must opt into having RKE2 configuring these out of the box. + +To help ensure these above requirements are met, RKE2 can be started with the `profile` flag set to `cis-1.6`. This flag generally does two things: + +1. Checks that host-level requirements have been met. If they haven't, RKE2 will exit with a fatal error describing the unmet requirements. +2. Configures runtime pod security policies and network policies that allow the cluster to pass associated controls. + +:::note + +The profile's flag only valid values are `cis-1.5` or `cis-1.6`. It accepts a string value to allow for other profiles in the future. + +::: + +The following section outlines the specific actions that are taken when the `profile` flag is set to `cis-1.6`. + +### Host-level requirements + +There are two areas of host-level requirements: kernel parameters and etcd process/directory configuration. These are outlined in this section. + +#### Ensure `protect-kernel-defaults` is set + +This is a kubelet flag that will cause the kubelet to exit if the required kernel parameters are unset or are set to values that are different from the kubelet's defaults. + +When the `profile` flag is set, RKE2 will set the flag to `true`. + +:::caution + +`protect-kernel-defaults` is exposed as a configuration flag for RKE2. If you have set `profile` to "cis-1.x" and `protect-kernel-defaults` to `false` explicitly, RKE2 will exit with an error. + +::: + +RKE2 will also check the same kernel parameters that the kubelet does and exit with an error following the same rules as the kubelet. This is done as a convenience to help the operator more quickly and easily identify what kernel parameters are violating the kubelet defaults. + +Both `protect-kernel-defaults` and `profile` flags can be set in RKE2 template configuration file. + +```yaml +spec: + rkeConfig: + machineSelectorConfig: + - config: + profile: cis-1.6 + protect-kernel-defaults: true +``` + +#### Ensure etcd is configured properly + +The CIS Benchmark requires that the etcd data directory be owned by the `etcd` user and group. This implicitly requires the etcd process to be ran as the host-level `etcd` user. To achieve this, RKE2 takes several steps when started with a valid "cis-1.x" profile: + +1. Check that the `etcd` user and group exists on the host. If they don't, exit with an error. +2. Create etcd's data directory with `etcd` as the user and group owner. +3. Ensure the etcd process is ran as the `etcd` user and group by setting the etcd static pod's `SecurityContext` appropriately. + +### Setting up hosts + +This section gives you the commands necessary to configure your host to meet the above requirements. + +#### Set kernel parameters + +The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: + +```ini +vm.panic_on_oom=0 +vm.overcommit_memory=1 +kernel.panic=10 +kernel.panic_on_oops=1 +``` + +Run `sudo sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. 
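+
+To confirm the parameters are active, they can be queried directly. This quick check is not part of the benchmark itself; the expected values in the comment simply restate the configuration above:
+
+```bash
+# Each parameter should report the value set in /etc/sysctl.d/90-kubelet.conf:
+# vm.panic_on_oom = 0, vm.overcommit_memory = 1, kernel.panic = 10, kernel.panic_on_oops = 1
+sysctl vm.panic_on_oom vm.overcommit_memory kernel.panic kernel.panic_on_oops
+```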
+ +Please perform this step only on fresh installations, before actually deploying RKE2 through Rancher. + +#### Create the etcd user + +On some Linux distributions, the `useradd` command will not create a group. The `-U` flag is included below to account for that. This flag tells `useradd` to create a group with the same name as the user. + +```bash +sudo useradd -r -c "etcd user" -s /sbin/nologin -M etcd -U +``` + +### Kubernetes runtime requirements + +The runtime requirements to pass the CIS Benchmark are centered around pod security and network policies. These are outlined in this section. + +#### `PodSecurityPolicies` + +RKE2 always runs with the `PodSecurityPolicy` admission controller turned on. However, when it is **not** started with a valid "cis-1.x" profile, RKE2 will put an unrestricted policy in place that allows Kubernetes to run as though the `PodSecurityPolicy` admission controller was not enabled. + +When ran with a valid "cis-1.x" profile, RKE2 will put a much more restrictive set of policies in place. These policies meet the requirements outlined in section 5.2 of the CIS Benchmark. + +> The Kubernetes control plane components and critical additions such as CNI, DNS, and Ingress are ran as pods in the `kube-system` namespace. Therefore, this namespace will have a policy that is less restrictive so that these components can run properly. + +#### `NetworkPolicies` + +When ran with a valid "cis-1.x" profile, RKE2 will put `NetworkPolicies` in place that passes the CIS Benchmark for Kubernetes' built-in namespaces. These namespaces are: `kube-system`, `kube-public`, `kube-node-lease`, and `default`. + +The `NetworkPolicy` used will only allow pods within the same namespace to talk to each other. The notable exception to this is that it allows DNS requests to be resolved. + +:::note + +Operators must manage network policies as normal for additional namespaces that are created. + +::: +#### Configure `default` service account + +**Set `automountServiceAccountToken` to `false` for `default` service accounts** + +Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The `default` service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +For each namespace including `default` and `kube-system` on a standard RKE2 install, the `default` service account must include this value: + +```yaml +automountServiceAccountToken: false +``` + +For namespaces created by the cluster operator, the following script and configuration file can be used to configure the `default` service account. + +The configuration bellow must be saved to a file called `account_update.yaml`. + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default +automountServiceAccountToken: false +``` + +Create a bash script file called `account_update.sh`. Be sure to `sudo chmod +x account_update.sh` so the script has execute permissions. 
+ +```bash +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o=jsonpath="{.items[*]['metadata.name']}"); do + echo -n "Patching namespace $namespace - " + kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" +done +``` + +Execute this script to apply the `account_update.yaml` configuration to `default` service account in all namespaces. + +### API Server audit configuration + +CIS requirements 1.2.22 to 1.2.25 are related to configuring audit logs for the API Server. When RKE2 is started with the `profile` flag set to `cis-1.6`, it will automatically configure hardened `--audit-log-` parameters in the API Server to pass those CIS checks. + +RKE2's default audit policy is configured to not log requests in the API Server. This is done to allow cluster operators flexibility to customize an audit policy that suits their auditing requirements and needs, as these are specific to each users' environment and policies. + +A default audit policy is created by RKE2 when started with the `profile` flag set to `cis-1.6`. The policy is defined in `/etc/rancher/rke2/audit-policy.yaml`. + +```yaml +apiVersion: audit.k8s.io/v1 +kind: Policy +metadata: + creationTimestamp: null +rules: +- level: None +``` + +To start logging requests to the API Server, at least `level` parameter must be modified, for example, to `Metadata`. Detailed information about policy configuration for the API server can be found in the Kubernetes [documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/). + +After adapting the audit policy, RKE2 must be restarted to load the new configuration. + +```shell +sudo systemctl restart rke2-server.service +``` + +API Server audit logs will be written to `/var/lib/rancher/rke2/server/logs/audit.log`. + +### Known issues + +The following are controls that RKE2 currently does not pass. Each gap will be explained and whether it can be passed through manual operator intervention or if it will be addressed in a future release. + +#### Control 1.1.12 +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. + +**Remediation** +This can be remediated by creating an `etcd` user and group as described above. + +#### Control 5.1.5 +Ensure that default service accounts are not actively used + +**Rationale** Kubernetes provides a `default` service account which is used by cluster workloads where no specific service account is assigned to the pod. + +Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. + +The `default` service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +This can be remediated by updating the `automountServiceAccountToken` field to `false` for the `default` service account in each namespace. + +**Remediation** +You can manually update this field on service accounts in your cluster to pass the control as described above. 
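+
+As an illustration only, the following commands show one way to switch the default policy to `Metadata`-level logging and confirm that events are being recorded. The single `Metadata` rule is an assumption made for this example; adjust the rules to your own auditing requirements before using it:
+
+```bash
+# Overwrite the default RKE2 audit policy with a minimal Metadata-level policy (example only).
+sudo tee /etc/rancher/rke2/audit-policy.yaml > /dev/null <<'EOF'
+apiVersion: audit.k8s.io/v1
+kind: Policy
+rules:
+- level: Metadata
+EOF
+
+# Restart RKE2 to load the new policy, then check that events are being written.
+sudo systemctl restart rke2-server.service
+sudo tail -n 3 /var/lib/rancher/rke2/server/logs/audit.log
+```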
+ +#### Control 5.3.2 +Ensure that all Namespaces have Network Policies defined + +**Rationale** +Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to. A network policy is a specification of how selections of pods are allowed to communicate with each other and other network endpoints. + +Network Policies are namespace scoped. When a network policy is introduced to a given namespace, all traffic not allowed by the policy is denied. However, if there are no network policies in a namespace all traffic will be allowed into and out of the pods in that namespace. + +**Remediation** +This can be remediated by setting `profile: "cis-1.6"` in RKE2 template configuration file. An example can be found below. + +### Reference Hardened RKE2 Template Configuration + +The reference template configuration is used in Rancher to create a hardened RKE2 custom cluster. This reference does not include other required **cluster configuration** directives which will vary depending on your environment. + +```yaml +apiVersion: provisioning.cattle.io/v1 +kind: Cluster +metadata: + name: + annotations: + {} +# key: string + labels: + {} +# key: string + namespace: fleet-default +spec: + defaultPodSecurityPolicyTemplateName: '' + kubernetesVersion: + localClusterAuthEndpoint: + caCerts: '' + enabled: false + fqdn: '' + rkeConfig: + chartValues: + rke2-canal: + {} + etcd: + disableSnapshots: false + s3: +# bucket: string +# cloudCredentialName: string +# endpoint: string +# endpointCA: string +# folder: string +# region: string +# skipSSLVerify: boolean + snapshotRetention: 5 + snapshotScheduleCron: 0 */5 * * * + machineGlobalConfig: + cni: canal + machinePools: +# - cloudCredentialSecretName: string +# controlPlaneRole: boolean +# displayName: string +# drainBeforeDelete: boolean +# etcdRole: boolean +# labels: +# key: string +# machineConfigRef: +# apiVersion: string +# fieldPath: string +# kind: string +# name: string +# namespace: string +# resourceVersion: string +# uid: string +# machineDeploymentAnnotations: +# key: string +# machineDeploymentLabels: +# key: string +# machineOS: string +# maxUnhealthy: string +# name: string +# nodeStartupTimeout: string +# paused: boolean +# quantity: int +# rollingUpdate: +# maxSurge: string +# maxUnavailable: string +# taints: +# - effect: string +# key: string +# timeAdded: string +# value: string +# unhealthyNodeTimeout: string +# unhealthyRange: string +# workerRole: boolean + machineSelectorConfig: + - config: + profile: cis-1.6 + protect-kernel-defaults: true +# - config: +# +# machineLabelSelector: +# matchExpressions: +# - key: string +# operator: string +# values: +# - string +# matchLabels: +# key: string + registries: + configs: + {} + #authConfigSecretName: string +# caBundle: string +# insecureSkipVerify: boolean +# tlsSecretName: string + mirrors: + {} + #endpoint: +# - string +# rewrite: +# key: string + upgradeStrategy: + controlPlaneConcurrency: 10% + controlPlaneDrainOptions: +# deleteEmptyDirData: boolean +# disableEviction: boolean +# enabled: boolean +# force: boolean +# gracePeriod: int +# ignoreDaemonSets: boolean +# ignoreErrors: boolean +# postDrainHooks: +# - annotation: string +# preDrainHooks: +# - annotation: string +# skipWaitForDeleteTimeoutSeconds: int +# timeout: int + workerConcurrency: 10% + workerDrainOptions: +# deleteEmptyDirData: boolean 
+# disableEviction: boolean +# enabled: boolean +# force: boolean +# gracePeriod: int +# ignoreDaemonSets: boolean +# ignoreErrors: boolean +# postDrainHooks: +# - annotation: string +# preDrainHooks: +# - annotation: string +# skipWaitForDeleteTimeoutSeconds: int +# timeout: int +# additionalManifest: string +# etcdSnapshotCreate: +# generation: int +# etcdSnapshotRestore: +# generation: int +# name: string +# restoreRKEConfig: string +# infrastructureRef: +# apiVersion: string +# fieldPath: string +# kind: string +# name: string +# namespace: string +# resourceVersion: string +# uid: string +# provisionGeneration: int +# rotateCertificates: +# generation: int +# services: +# - string +# rotateEncryptionKeys: +# generation: int + machineSelectorConfig: + - config: {} +# agentEnvVars: +# - name: string +# value: string +# cloudCredentialSecretName: string +# clusterAPIConfig: +# clusterName: string +# defaultClusterRoleForProjectMembers: string +# enableNetworkPolicy: boolean +# redeploySystemAgentGeneration: int +__clone: true +``` + +### Conclusion + +If you have followed this guide, your RKE2 custom cluster provisioned by Rancher will be configured to pass the CIS Kubernetes Benchmark. You can review our RKE2 CIS Benchmark Self-Assessment Guide [v1.6](rke2-self-assessment-guide-with-cis-v1.6-benchmark.md) to understand how we verified each of the benchmarks and how you can do the same on your cluster. diff --git a/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-self-assessment-guide-with-cis-v1.6-benchmark.md b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-self-assessment-guide-with-cis-v1.6-benchmark.md new file mode 100644 index 0000000000..2d7a959932 --- /dev/null +++ b/docs/reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-self-assessment-guide-with-cis-v1.6-benchmark.md @@ -0,0 +1,3330 @@ +--- +title: RKE2 CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 +weight: 101 +--- + +### CIS v1.6 Kubernetes Benchmark - Rancher v2.6 RKE2 with Kubernetes v1.21 up to v1.23 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). + +#### Overview + +This document is a companion to the [Rancher v2.6 RKE2 security hardening guide](rke2-hardening-guide-with-cis-v1.6-benchmark.md). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher with RKE2 provisioned clusters, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: + +| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | +| ----------------------- | --------------- | --------------------- | ------------------- | +| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6.5+ | CIS v1.6 | Kubernetes v1.21 up to v1.23 | + +Because Rancher and RKE2 install Kubernetes services as containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. 
+ +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +RKE2 launches control plane components as static pods, managed by the kubelet, and uses containerd as the container runtime. Configuration is defined by arguments passed to the container at the time of initialization or via configuration file. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE2 nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. + +:::note + +Only `automated` tests (previously called `scored`) are covered in this guide. + +::: + +### Controls + +--- +## 1.1 Master Node Configuration Files +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the +master node. +For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 + +**Audit:** + +```bash +stat -c %a +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown root:root + +**Audit:** + +```bash +stat -c %U:%G +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd +``` + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 scheduler + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root scheduler + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod 644 controllermanager + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root controllermanager + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/tls +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.crt +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod -R 600 /etc/kubernetes/pki/*.key + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.key +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +true +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--basic-auth-file' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the `--token-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is not present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ?
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'Node' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes RBAC, +for example: +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'RBAC' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes PodSecurityPolicy: +--enable-admission-plugins=...,PodSecurityPolicy,... +Then restart the API Server. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the --insecure-bind-address parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--insecure-bind-address' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--insecure-port=0 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'0' is equal to '0' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +6443 is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.21 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example: +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +30 is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +10 is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB: +--audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +100 is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, +--request-timeout=300s + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR '--service-account-lookup' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
+```
+
+### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+on the master node and set the --service-account-key-file parameter
+to the public key file for service accounts:
+`--service-account-key-file=<filename>`
+
+**Audit:**
+
+```bash
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--service-account-key-file' is present
+```
+
+**Returned Value**:
+
+```console
+root 5275 5222 15 14:58 ?
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
+```
+
+### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+on the master node and set the etcd certificate and key file parameters.
+`--etcd-certfile=<path/to/client-certificate-file>`
+`--etcd-keyfile=<path/to/client-key-file>`
+
+**Audit:**
+
+```bash
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--etcd-certfile' is present AND '--etcd-keyfile' is present
+```
+
+**Returned Value**:
+
+```console
+root 5275 5222 15 14:58 ?
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
+```
+
+### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml
+on the master node and set the TLS certificate and private key file parameters.
+`--tls-cert-file=<path/to/tls-certificate-file>`
+`--tls-private-key-file=<path/to/tls-key-file>`
+
+**Audit:**
+
+```bash
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--tls-cert-file' is present AND '--tls-private-key-file' is present
+```
+
+**Returned Value**:
+
+```console
+root 5275 5222 15 14:58 ?
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the client certificate authority file. +`--client-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the etcd certificate authority file parameter. +`--etcd-cafile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. + +**Audit:** + +```bash +/bin/sh -c 'if grep aescbc /var/lib/rancher/rke2/server/cred/encryption-config.json; then echo 0; fi' +``` + +**Expected Result**: + +```console +'0' is present +``` + +**Returned Value**: + +```console +{"kind":"EncryptionConfiguration","apiVersion":"apiserver.config.k8s.io/v1","resources":[{"resources":["secrets"],"providers":[{"aescbc":{"keys":[{"name":"aescbckey","secret":"ZP3yNnlCjzcKMBXfmNBmpGbiY+oXne+WP6EM42lZIbE="}]}},{"identity":{}}]}]} 0 +``` + +### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM +_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM +_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM +_SHA384 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example: +--terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'true' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --service-account-private-key-file parameter +to the private key file for service accounts. +`--service-account-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --root-ca-file parameter to the certificate bundle file`. +`--root-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 +``` + +## 2 Etcd Node Configuration Files +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. 
+`--cert-file=` +`--key-file=` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 0 15:07 ? 
00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the +master node and set the below parameters. +`--peer-client-file=` +`--peer-key-file=` + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. +--peer-auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-auto-tls' is not present OR '--peer-auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 
--service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 6 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the +master node and set the below parameter. +`--trusted-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is not present +``` + +**Returned Value**: + +```console +etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key 
--service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 3 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file +``` + +**Expected Result**: + +```console +'audit-policy-file' is equal to 'audit-policy-file' +``` + +**Returned Value**: + +```console +audit-policy-file +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Consider modification of the audit policy in use on the cluster to include these items, at a +minimum. + +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %a /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'permissions' is present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. 
+For example, chown root:root /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is not present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on each worker node. +For example, +chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the following command to modify the file permissions of the `--client-ca-file`: +`chmod 644 /var/lib/rancher/rke2/server/tls/server-ca.crt` + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/tls/server-ca.crt +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. 
+`chown root:root /var/lib/rancher/rke2/agent/client-ca.crt` + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/client-ca.crt; then stat -c %U:%G /var/lib/rancher/rke2/agent/client-ca.crt; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +permissions=644 +``` + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +## 4.2 Kubelet +### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to +false. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--anonymous-auth=false +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'0' is equal to '0' AND '--read-only-port' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--streaming-connection-idle-timeout' is not present OR '--streaming-connection-idle-timeout' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. 
+If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--make-iptables-util-chains' is not present OR '--make-iptables-util-chains' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--event-qps' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set tlsCertFile to the location +of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +`--tls-cert-file=` +`--tls-private-key-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line rotateCertificates: true or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--rotate-certificates' is not present OR '--rotate-certificates' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) + + +**Result:** pass + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'RotateKubeletServerCertificate' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set TLSCipherSuites: to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. 
+Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +## 5.2 Pod Security Policies +### 5.2.1 Minimize the admission of privileged containers (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that +the .spec.privileged field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp global-restricted-psp -o json | jq -r '.spec.runAsUser.rule' +``` + +**Expected Result**: + +```console +'MustRunAsNonRoot' is equal to 'MustRunAsNonRoot' +``` + +**Returned Value**: + +```console +MustRunAsNonRoot +``` + +### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostPID field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostIPC field is omitted or set to false. 
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.hostNetwork field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.allowPrivilegeEscalation field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.6 Minimize the admission of root containers (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+UIDs not including 0.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+
+**Audit:**
+
+```bash
+kubectl get psp global-restricted-psp -o json | jq -r .spec.requiredDropCapabilities[]
+```
+
+**Expected Result**:
+
+```console
+'ALL' is equal to 'ALL'
+```
+
+**Returned Value**:
+
+```console
+ALL
+```
+
+### 5.2.8 Minimize the admission of containers with added capabilities (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+it is set to an empty array.
+
+### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Review the use of capabilities in applications running on your cluster. Where a namespace
+contains applications which do not require any Linux capabilities to operate, consider adding
+a PSP which forbids the admission of containers which do not drop all capabilities.
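+
+As an illustration of the controls in section 5.2, the following is a minimal sketch of a restrictive PodSecurityPolicy that would satisfy checks 5.2.1 through 5.2.9. The policy name `example-restricted-psp`, the volume list, and the group ranges are illustrative assumptions rather than the RKE2 defaults audited above (such as `global-restricted-psp`); adapt them to your workloads before applying.
+
+```bash
+# Sketch only: applies an illustrative restricted PodSecurityPolicy.
+# The name and the volume/group settings below are assumptions for this example.
+cat << EOF | kubectl apply -f -
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: example-restricted-psp
+spec:
+  privileged: false                 # 5.2.1
+  hostPID: false                    # 5.2.2
+  hostIPC: false                    # 5.2.3
+  hostNetwork: false                # 5.2.4
+  allowPrivilegeEscalation: false   # 5.2.5
+  runAsUser:
+    rule: MustRunAsNonRoot          # 5.2.6
+  # allowedCapabilities is intentionally omitted (5.2.8)
+  requiredDropCapabilities:
+    - ALL                           # 5.2.7 and 5.2.9
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: MustRunAs
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: MustRunAs
+    ranges:
+      - min: 1
+        max: 65535
+  volumes:
+    - configMap
+    - emptyDir
+    - projected
+    - secret
+    - downwardAPI
+    - persistentVolumeClaim
+EOF
+```
+
+This sketch assumes a Kubernetes version that still serves the `policy/v1beta1` PodSecurityPolicy API, as the cluster audited above does.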
+ +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports Network Policies (Automated) + + +**Result:** pass + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +**Audit:** + +```bash +kubectl get pods -n kube-system -l k8s-app=canal -o json | jq .items[] | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +**Audit Script:** `check_for_rke2_network_policies.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +for namespace in kube-system kube-public default; do + policy_count=$(/var/lib/rancher/rke2/bin/kubectl get networkpolicy -n ${namespace} -o json | jq -r '.items | length') + if [ ${policy_count} -eq 0 ]; then + echo "false" + exit + fi +done + +echo "true" + +``` + +**Audit Execution:** + +```bash +./check_for_rke2_network_policies.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 5.4 Secrets Management +### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) + + +**Result:** warn + +**Remediation:** +if possible, rewrite application code to read secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + + +**Result:** warn + +**Remediation:** +Refer to the secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.6 The v1.5.1 guide skips 5.6 and goes from 5.5 to 5.7. We are including it here merely for explanation. +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) + + +**Result:** warn + +**Remediation:** +Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you +would need to enable alpha features in the apiserver by passing "--feature- +gates=AllAlpha=true" argument. +Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS +parameter to "--feature-gates=AllAlpha=true" +KUBE_API_ARGS="--feature-gates=AllAlpha=true" +Based on your system, restart the kube-apiserver service. For example: +systemctl restart kube-apiserver.service +Use annotations to enable the docker/default seccomp profile in your pod definitions. 
An +example is as below: +apiVersion: v1 +kind: Pod +metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default +spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + +### 5.7.3 Apply Security Context to Your Pods and Containers (Automated) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply security contexts to your pods. For a +suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + diff --git a/docs/reference-guides/rancher-security/security-advisories-and-cves.md b/docs/reference-guides/rancher-security/security-advisories-and-cves.md new file mode 100644 index 0000000000..28c652786e --- /dev/null +++ b/docs/reference-guides/rancher-security/security-advisories-and-cves.md @@ -0,0 +1,31 @@ +--- +title: Security Advisories and CVEs +weight: 300 +aliases: + - /rancher/v2.x/en/security/cve/ +--- + +Rancher is committed to informing the community of security issues in our products. Rancher will publish security advisories and CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. New security advisories are also published in Rancher's GitHub [security page](https://github.com/rancher/rancher/security/advisories). + +| ID | Description | Date | Resolution | +|----|-------------|------|------------| +| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) Container Network Interface (CNI) when configured through [RKE templates](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. 
| 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | +| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. 
The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.11 and from 2.6.0 up to and including 2.6.2, where an insufficient check of the same-origin policy when downloading Helm charts from a configured private repository can lead to exposure of the repository credentials to a third-party provider. This issue only happens when the user configures access credentials to a private repository in Rancher inside `Apps & Marketplace > Repositories`. The issue was found and reported by Martin Andreas Ullrich. | 14 Apr 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) and [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | +| [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | A vulnerability was discovered in versions of Rancher starting 2.0 up to and including 2.6.3. The `restricted` pod security policy (PSP) provided in Rancher deviated from the upstream `restricted` policy provided in Kubernetes on account of which Rancher's PSP had `runAsUser` set to `runAsAny`, while upstream had `runAsUser` set to `MustRunAsNonRoot`. This allowed containers to run as any user, including a privileged user (`root`), even when Rancher's `restricted` policy was enforced on a project or at the cluster level. | 31 Mar 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | +| [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | A vulnerability was discovered in Rancher versions up to and including 2.4.17, 2.5.11 and 2.6.2. After removing a `Project Role` associated with a group from the project, the bindings that granted access to cluster-scoped resources for those subjects were not deleted. This was due to an incomplete authorization logic check. A user who was a member of the affected group with authenticated access to Rancher could exploit this vulnerability to access resources they shouldn't have had access to. The exposure level would depend on the original permission level granted to the affected project role. This vulnerability only affected customers using group based authentication in Rancher. 
| 31 Mar 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3), [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) and [Rancher v2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | +| [CVE-2021-36776](https://github.com/rancher/rancher/security/advisories/GHSA-gvh9-xgrq-r8hw) | A vulnerability was discovered in Rancher versions starting 2.5.0 up to and including 2.5.9, that allowed an authenticated user to impersonate any user on a cluster through an API proxy, without requiring knowledge of the impersonated user's credentials. This was due to the API proxy not dropping the impersonation header before sending the request to the Kubernetes API. A malicious user with authenticated access to Rancher could use this to impersonate another user with administrator access in Rancher, thereby gaining administrator level access to the cluster. | 31 Mar 2022 | [Rancher v2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) and [Rancher v2.5.10](https://github.com/rancher/rancher/releases/tag/v2.5.10) | +| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher versions 2.0 through the aforementioned fixed versions, where users were granted access to resources regardless of the resource's API group. For example, Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. Resources affected in the **Downstream clusters** and **Rancher management cluster** can be found [here](https://github.com/rancher/rancher/security/advisories/GHSA-f9xf-jq4j-vqw4). There is not a direct mitigation besides upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered in Rancher 2.0.0 through the aforementioned patched versions, where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then correctly removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server and incorrectly return the information. The vulnerability is limited to valid Rancher users with some level of permissions on the cluster. There is not a direct mitigation besides upgrading to the patched Rancher versions. 
| 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher 2.2.0 through the aforementioned patched versions, where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud-credential ID that was valid for a given cloud provider, could call that cloud provider's API through the proxy API, and the cloud-credential would be attached. The exploit is limited to valid Rancher users. There is not a direct mitigation outside of upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. | 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | +| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. 
| 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | +| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. 
| 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | +| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions](../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md). | diff --git a/docs/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md b/docs/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md new file mode 100644 index 0000000000..f25aa83bf1 --- /dev/null +++ b/docs/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md @@ -0,0 +1,67 @@ +--- +title: About rancher-selinux +--- + + +To allow Rancher to work with SELinux, some functionality has to be manually enabled for the SELinux nodes. To help with that, Rancher provides a SELinux RPM. + +The `rancher-selinux` RPM only contains policies for the [rancher-logging application.](https://github.com/rancher/charts/tree/dev-v2.5/charts/rancher-logging) + +The `rancher-selinux` GitHub repository is [here.](https://github.com/rancher/rancher-selinux) + +# Installing the rancher-selinux RPM + +:::note Requirement: + +The rancher-selinux RPM was tested with CentOS 7 and 8. + +::: + +### 1. Set up the yum repo + +Set up the yum repo to install `rancher-selinux` directly on all hosts in the cluster. + +In order to use the RPM repository, on a CentOS 7 or RHEL 7 system, run the following bash snippet: + +``` +# cat << EOF > /etc/yum.repos.d/rancher.repo +[rancher] +name=Rancher +baseurl=https://rpm.rancher.io/rancher/production/centos/7/noarch +enabled=1 +gpgcheck=1 +gpgkey=https://rpm.rancher.io/public.key +EOF +``` + +In order to use the RPM repository, on a CentOS 8 or RHEL 8 system, run the following bash snippet: + +``` +# cat << EOF > /etc/yum.repos.d/rancher.repo +[rancher] +name=Rancher +baseurl=https://rpm.rancher.io/rancher/production/centos/8/noarch +enabled=1 +gpgcheck=1 +gpgkey=https://rpm.rancher.io/public.key +EOF +``` +### 2. 
Installing the RPM + +Install the RPM: + +``` +yum -y install rancher-selinux +``` + +# Configuring the Logging Application to Work with SELinux + +:::note Requirement: + +Logging v2 was tested with SELinux on RHEL/CentOS 7 and 8. + +::: + +Applications do not automatically work once the `rancher-selinux` RPM is installed on the host. They need to be configured to run in an allowed SELinux container domain provided by the RPM. + +To configure the `rancher-logging` chart to be SELinux aware, change `global.seLinux.enabled` to true in the `values.yaml` when installing the chart. \ No newline at end of file diff --git a/docs/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md b/docs/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md new file mode 100644 index 0000000000..dd2ddfbb4b --- /dev/null +++ b/docs/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md @@ -0,0 +1,9 @@ +--- +title: About rke2-selinux +--- + +`rke2-selinux` provides policies for RKE2. It is installed automatically when the RKE2 installer script detects that it is running on an RPM-based distro. + +The `rke2-selinux` GitHub repository is [here.](https://github.com/rancher/rke2-selinux) + +For more information about installing RKE2 on SELinux-enabled hosts, see the [RKE2 documentation.](https://docs.rke2.io/install/methods/#rpm) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml/_index.md b/docs/reference-guides/rke1-template-example-yaml.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/admin-settings/rke-templates/example-yaml/_index.md rename to docs/reference-guides/rke1-template-example-yaml.md diff --git a/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md b/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md new file mode 100644 index 0000000000..c5cca0e829 --- /dev/null +++ b/docs/reference-guides/single-node-rancher-in-docker/advanced-options.md @@ -0,0 +1,115 @@ +--- +title: Advanced Options for Docker Installs +weight: 5 +--- + +When installing Rancher, there are several advanced options that can be enabled: + +- [Custom CA Certificate](#custom-ca-certificate) +- [API Audit Log](#api-audit-log) +- [TLS Settings](#tls-settings) +- [Air Gap](#air-gap) +- [Persistent Data](#persistent-data) +- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) + +### Custom CA Certificate + +If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. + +Use the command example to start a Rancher container with your private CA certificates mounted. + +- The volume flag (`-v`) should specify the host directory containing the CA root certificates. +- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. +- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. +- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. 
+ +The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /host/certs:/container/certs \ + -e SSL_CERT_DIR="/container/certs" \ + --privileged \ + rancher/rancher:latest +``` + +### API Audit Log + +The API Audit Log records all the user and system transactions made through Rancher server. + +The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. + +See [API Audit Log](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) for more information and options. + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /var/log/rancher/auditlog:/var/log/auditlog \ + -e AUDIT_LEVEL=1 \ + --privileged \ + rancher/rancher:latest +``` + +### TLS settings + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_TLS_MIN_VERSION="1.0" \ + --privileged \ + rancher/rancher:latest +``` + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +See [TLS settings](../installation-references/tls-settings.md) for more information and options. + +### Air Gap + +If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + --privileged \ + rancher/rancher:latest +``` + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. + +If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. + +Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. 
+ +To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: + +``` +docker run -d --restart=unless-stopped \ + -p 8080:80 -p 8443:443 \ + --privileged \ + rancher/rancher:latest +``` + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) diff --git a/docs/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md b/docs/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md new file mode 100644 index 0000000000..1e49073b2a --- /dev/null +++ b/docs/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md @@ -0,0 +1,64 @@ +--- +title: HTTP Proxy Configuration +weight: 251 +--- + +If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. + +Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. + +| Environment variable | Purpose | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | +| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | +| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | + +:::note Important: + +NO_PROXY must be in uppercase to use network range (CIDR) notation. + +::: + +## Docker Installation + +Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) are: + +- `localhost` +- `127.0.0.1` +- `0.0.0.0` +- `10.0.0.0/8` +- `cattle-system.svc` +- `.svc` +- `.cluster.local` + +The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e HTTP_PROXY="http://192.168.10.1:3128" \ + -e HTTPS_PROXY="http://192.168.10.1:3128" \ + -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ + --privileged \ + rancher/rancher:latest +``` + +Privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher) + +### Air-gapped proxy configuration + +_New in v2.6.4_ + +You can now provision node driver clusters from an air-gapped cluster configured to use a proxy for outbound connections. + +In addition to setting the default rules for a proxy server as shown above, you will need to add additional rules, shown below, to provision node driver clusters from a proxied Rancher environment. 
+ +You will configure your filepath according to your setup, e.g., `/etc/apt/apt.conf.d/proxy.conf`: + +``` +acl SSL_ports port 22 +acl SSL_ports port 2376 + +acl Safe_ports port 22 # ssh +acl Safe_ports port 2376 # docker port +``` \ No newline at end of file diff --git a/docs/reference-guides/system-tools.md b/docs/reference-guides/system-tools.md new file mode 100644 index 0000000000..f237773e47 --- /dev/null +++ b/docs/reference-guides/system-tools.md @@ -0,0 +1,31 @@ +--- +title: System Tools +weight: 22 +--- + +:::note + +System Tools has been deprecated since June 2022. + +::: +# Logs + +Please use [logs-collector](https://github.com/rancherlabs/support-tools/tree/master/collection/rancher/v2.x/logs-collector) to collect logs from your cluster. + +# Stats + +If you want to replicate the stats command, you can run the following command on your cluster nodes: + +:::note + +This command below requires the package `sysstat` on the cluster node. + +::: + +``` +/usr/bin/sar -u -r -F 1 1 +``` + +# Remove + +Please use the [Rancher Cleanup](https://github.com/rancher/rancher-cleanup) tool. diff --git a/docs/reference-guides/user-settings/api-keys.md b/docs/reference-guides/user-settings/api-keys.md new file mode 100644 index 0000000000..cb3151520c --- /dev/null +++ b/docs/reference-guides/user-settings/api-keys.md @@ -0,0 +1,60 @@ +--- +title: API Keys +weight: 7005 +--- + +## API Keys and User Authentication + +If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. + +An API key is also required for using Rancher CLI. + +API Keys are composed of four components: + +- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. +- **Access Key:** The token's username. +- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. +- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. + +:::note + +Users may opt to enable [token hashing](../about-the-api/api-tokens.md). + +::: + +## Creating an API Key + +1. Select **User Avatar > Account & API Keys** from upper right corner. + +2. Click **Create API Key**. + +3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. + + The API key won't be valid after expiration. Shorter expiration periods are more secure. + + Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. + + A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) for more information. + +4. Click **Create**. + + **Step Result:** Your API Key is created. 
Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed.
+
+   Use the **Bearer Token** to authenticate with Rancher CLI.
+
+5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one.
+
+## What's Next?
+
+- Enter your API key information into the application that will send requests to the Rancher API.
+- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI.
+- API keys are used for API calls and [Rancher CLI](../../pages-for-subheaders/cli-with-rancher.md).
+
+## Deleting API Keys
+
+If you need to revoke an API key, delete it. You should delete API keys:
+
+- That may have been compromised.
+- That have expired.
+
+To delete an API key, select the stale key and click **Delete**.
diff --git a/docs/reference-guides/user-settings/manage-cloud-credentials.md b/docs/reference-guides/user-settings/manage-cloud-credentials.md
new file mode 100644
index 0000000000..3910521175
--- /dev/null
+++ b/docs/reference-guides/user-settings/manage-cloud-credentials.md
@@ -0,0 +1,52 @@
+---
+title: Managing Cloud Credentials
+weight: 7011
+---
+
+When you create a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node.
+
+Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets.
+
+Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers that are not using them yet. These node drivers will not use cloud credentials.
+
+You can create cloud credentials in two contexts:
+
+- [During creation of a node template](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for a cluster.
+- In the **User Settings**.
+
+All cloud credentials are bound to the user profile of the user who created them. They **cannot** be shared across users.
+
+## Creating a Cloud Credential from User Settings
+
+1. Click **☰ > Cluster Management**.
+1. Click **Cloud Credentials**.
+1. Click **Create**.
+1. Click a cloud credential type. The values in this dropdown are based on the `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) in Rancher.
+1. Enter a name for the cloud credential.
+1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider.
+1. Click **Create**.
+
+**Result:** The cloud credential is created and can immediately be used to [create node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates).
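+
+If you need to confirm that a credential was created, you can inspect the Kubernetes secret that backs it on the local (Rancher management) cluster. The following is a minimal sketch rather than an official procedure; the `cattle-global-data` namespace and the `cc-` name prefix are assumptions and may differ between Rancher versions:
+
+```
+# List the secrets that back cloud credentials on the local cluster
+# (assumes cloud credential secrets live in cattle-global-data and use a "cc-" prefix)
+kubectl -n cattle-global-data get secrets | grep 'cc-'
+```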
+
+## Updating a Cloud Credential
+
+When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template.
+
+1. Click **☰ > Cluster Management**.
+1. Click **Cloud Credentials**.
+1. Choose the cloud credential you want to edit and click the **⋮ > Edit Config**.
+1. Update the credential information and click **Save**.
+
+**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md).
+
+## Deleting a Cloud Credential
+
+Before you can delete a cloud credential, there must not be any node templates associated with it. If you are unable to delete the cloud credential, [delete any node templates](manage-node-templates.md#deleting-a-node-template) that are still associated with that cloud credential.
+
+1. Click **☰ > Cluster Management**.
+1. Click **Cloud Credentials**.
+1. You can either delete cloud credentials individually or in bulk.
+
+    - To delete an individual cloud credential, choose the one you want to delete and click the **⋮ > Delete**.
+    - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**.
+1. Confirm that you want to delete these cloud credentials.
diff --git a/docs/reference-guides/user-settings/manage-node-templates.md b/docs/reference-guides/user-settings/manage-node-templates.md
new file mode 100644
index 0000000000..2d57fec003
--- /dev/null
+++ b/docs/reference-guides/user-settings/manage-node-templates.md
@@ -0,0 +1,55 @@
+---
+title: Managing Node Templates
+weight: 7010
+---
+
+When you provision a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts:
+
+- While [provisioning a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md).
+- At any time, from your [user settings](#creating-a-node-template-from-user-settings).
+
+When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer use from your user settings.
+
+## Creating a Node Template
+
+1. Click **☰ > Cluster Management**.
+1. Click **RKE1 Configuration > Node Templates**.
+1. Click **Add Template**.
+1. Select one of the available cloud providers. Then follow the on-screen instructions to configure the template.
+
+**Result:** The template is configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md).
+
+## Updating a Node Template
+
+1. Click **☰ > Cluster Management**.
+1. Click **RKE1 Configuration > Node Templates**.
+1. Choose the node template that you want to edit and click the **⋮ > Edit**.
+ + :::note + + The default `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver, that has fields marked as `password`, are required to use [cloud credentials](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#cloud-credentials). + + ::: + +1. Edit the required information and click **Save**. + +**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. + +## Cloning Node Templates + +When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Find the template you want to clone. Then select **⋮ > Clone**. +1. Complete the rest of the form. + +**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Deleting a Node Template + +When you no longer use a node template, you can delete it from your user settings. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.0-v2.4/en/user-settings/preferences/_index.md b/docs/reference-guides/user-settings/user-preferences.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/user-settings/preferences/_index.md rename to docs/reference-guides/user-settings/user-preferences.md diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf b/docs/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf rename to docs/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf b/docs/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf rename to docs/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf b/docs/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf rename to docs/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf 
b/docs/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf rename to docs/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf diff --git a/docs/security/security-scan/security-scan.md b/docs/security/security-scan/security-scan.md new file mode 100644 index 0000000000..28e59b6b0d --- /dev/null +++ b/docs/security/security-scan/security-scan.md @@ -0,0 +1,6 @@ +--- +title: Security Scans +weight: 299 +--- + +The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/docs/shared-files/_cluster-capabilities-table.md b/docs/shared-files/_cluster-capabilities-table.md new file mode 100644 index 0000000000..878a8a3b6b --- /dev/null +++ b/docs/shared-files/_cluster-capabilities-table.md @@ -0,0 +1,24 @@ +| Action | Rancher Launched Kubernetes Clusters | EKS, GKE and AKS Clusters1 | Other Hosted Kubernetes Clusters | Non-EKS or GKE Registered Clusters | +| --- | --- | ---| ---|----| +| [Using kubectl and a kubeconfig file to Access a Cluster](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Cluster Members](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) | ✓ | ✓ | ✓ | ✓ | +| [Editing and Upgrading Clusters](../pages-for-subheaders/cluster-configuration.md) | ✓ | ✓ | ✓ | ✓2 | +| [Managing Nodes](../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) | ✓ | ✓ | ✓ | ✓3 | +| [Managing Persistent Volumes and Storage Classes](../pages-for-subheaders/create-kubernetes-persistent-storage.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Projects, Namespaces and Workloads](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) | ✓ | ✓ | ✓ | ✓ | +| [Using App Catalogs](../pages-for-subheaders/helm-charts-in-rancher.md) | ✓ | ✓ | ✓ | ✓ | +| Configuring Tools ([Alerts, Notifiers, Monitoring](../pages-for-subheaders/monitoring-and-alerting.md), [Logging](../pages-for-subheaders/logging.md), [Istio](../pages-for-subheaders/istio.md)) | ✓ | ✓ | ✓ | ✓ | +| [Running Security Scans](../pages-for-subheaders/cis-scan-guides.md) | ✓ | ✓ | ✓ | ✓ | +| [Use existing configuration to create additional clusters](../how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md)| ✓ | ✓ | ✓ | | +| [Ability to rotate certificates](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md) | ✓ | ✓ | | | +| Ability to [backup](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) and [restore](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) Rancher-launched clusters | ✓ | ✓ | | ✓4 | +| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) | ✓ | | | | +| [Configuring Pod Security Policies](../how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md) | ✓ | ✓ | || + +1. Registered GKE and EKS clusters have the same options available as GKE and EKS clusters created from the Rancher UI. 
The difference is that when a registered cluster is deleted from the Rancher UI, it is not destroyed. + +2. Cluster configuration options can't be edited for registered clusters, except for [K3s and RKE2 clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) + +3. For registered cluster nodes, the Rancher UI exposes the ability to cordon, drain, and edit the node. + +4. For registered clusters using etcd as a control plane, snapshots must be taken manually outside of the Rancher UI to use for backup and recovery. diff --git a/docs/shared-files/_common-ports-table.md b/docs/shared-files/_common-ports-table.md new file mode 100644 index 0000000000..1835beba03 --- /dev/null +++ b/docs/shared-files/_common-ports-table.md @@ -0,0 +1,19 @@ +| Protocol | Port | Description | +|:--------: |:----------------: |---------------------------------------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 179 | Calico BGP Port | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 8443 | Rancher webhook | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | +| TCP | 9443 | Rancher webhook | +| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | Metrics server communication with all nodes API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-32767 | NodePort port range | diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md new file mode 100644 index 0000000000..9c990589f9 --- /dev/null +++ b/docs/troubleshooting.md @@ -0,0 +1,45 @@ +--- +title: Troubleshooting +weight: 600 +--- + +This section contains information to help you troubleshoot issues when using Rancher. + +- [Kubernetes components](pages-for-subheaders/kubernetes-components.md) + + If you need help troubleshooting core Kubernetes cluster components like: + * `etcd` + * `kube-apiserver` + * `kube-controller-manager` + * `kube-scheduler` + * `kubelet` + * `kube-proxy` + * `nginx-proxy` + +- [Kubernetes resources](troubleshooting/other-troubleshooting-tips/kubernetes-resources.md) + + Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. + +- [Networking](troubleshooting/other-troubleshooting-tips/networking.md) + + Steps to troubleshoot networking issues can be found here. + +- [DNS](troubleshooting/other-troubleshooting-tips/dns.md) + + When you experience name resolution issues in your cluster. + +- [Troubleshooting Rancher installed on Kubernetes](troubleshooting/other-troubleshooting-tips/rancher-ha.md) + + If you experience issues with your [Rancher server installed on Kubernetes](pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + +- [Logging](troubleshooting/other-troubleshooting-tips/logging.md) + + Read more about what log levels can be configured and how to configure a log level. 
+ +- [User ID Tracking in Audit Logs](troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md) + + Read more about how a Rancher Admin can trace an event from the Rancher audit logs and into the Kubernetes audit logs using the external Identity Provider username. + +- [Expired Webhook Certificates](troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md) + + Read more about how to rotate a Rancher webhook certificate secret after it expires on an annual basis. diff --git a/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md new file mode 100644 index 0000000000..2d0c44a212 --- /dev/null +++ b/docs/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -0,0 +1,52 @@ +--- +title: Troubleshooting Controlplane Nodes +weight: 2 +--- + +This section applies to nodes with the `controlplane` role. + +# Check if the Controlplane Containers are Running + +There are three specific containers launched on nodes with the `controlplane` role: + +* `kube-apiserver` +* `kube-controller-manager` +* `kube-scheduler` + +The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver +f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler +bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager +``` + +# Controlplane Container Logging + +:::note + +If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election](../other-troubleshooting-tips/kubernetes-resources.md#kubernetes-leader-election) how to retrieve the current leader. + +::: + +The logging of the containers can contain information on what the problem could be. 
+ +``` +docker logs kube-apiserver +docker logs kube-controller-manager +docker logs kube-scheduler +``` + +# RKE2 Server Logging + +If Rancher provisions an RKE2 cluster that can't communicate with Rancher, you can run this command on a server node in the downstream cluster to get the RKE2 server logs: + +``` +journalctl -u rke2-server -f +``` \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/etcd/_index.md b/docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/etcd/_index.md rename to docs/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md rename to docs/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md rename to docs/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md diff --git a/docs/troubleshooting/other-troubleshooting-tips/dns.md b/docs/troubleshooting/other-troubleshooting-tips/dns.md new file mode 100644 index 0000000000..19fbf53b37 --- /dev/null +++ b/docs/troubleshooting/other-troubleshooting-tips/dns.md @@ -0,0 +1,221 @@ +--- +title: DNS +weight: 103 +--- + +The commands/steps listed on this page can be used to check name resolution issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. + +### Check if DNS pods are running + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns +``` + +Example output when using CoreDNS: +``` +NAME READY STATUS RESTARTS AGE +coredns-799dffd9c4-6jhlz 1/1 Running 0 76m +``` + +Example output when using kube-dns: +``` +NAME READY STATUS RESTARTS AGE +kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s +``` + +### Check if the DNS service is present with the correct cluster-ip + +``` +kubectl -n kube-system get svc -l k8s-app=kube-dns +``` + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s +``` + +### Check if domain names are resolving + +Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. 
+ +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: kubernetes.default +Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local +pod "busybox" deleted +``` + +Check if external names are resolving (in this example, `www.google.com`) + +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: www.google.com +Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net +Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net +pod "busybox" deleted +``` + +If you want to check resolving of domain names on all of the hosts, execute the following steps: + +1. Save the following file as `ds-dnstest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: dnstest + spec: + selector: + matchLabels: + name: dnstest + template: + metadata: + labels: + name: dnstest + spec: + tolerations: + - operator: Exists + containers: + - image: busybox:1.28 + imagePullPolicy: Always + name: alpine + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + ``` + +2. Launch it using `kubectl create -f ds-dnstest.yml` +3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. +4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). + + ``` + export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" + ``` + +5. When this command has finished running, the output indicating everything is correct is: + + ``` + => Start DNS resolve test + => End DNS resolve test + ``` + +If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. + +Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. + +``` +=> Start DNS resolve test +command terminated with exit code 1 +209.97.182.150 cannot resolve www.google.com +=> End DNS resolve test +``` + +Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. + +### CoreDNS specific + +#### Check CoreDNS logging + +``` +kubectl -n kube-system logs -l k8s-app=kube-dns +``` + +#### Check configuration + +CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. + +``` +kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} +``` + +#### Check upstream nameservers in resolv.conf + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
+ +``` +kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' +``` + +#### Enable query logging + +Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: + +``` +kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - +``` + +All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). + +### kube-dns specific + +#### Check upstream nameservers in kubedns container + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). + +Use the following command to check the upstream nameservers used by the kubedns container: + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done +``` + +Example output: +``` +Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x +nameserver 1.1.1.1 +nameserver 8.8.4.4 +``` + +If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: + +* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. +* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): + +``` +services: + kubelet: + extra_args: + resolv-conf: "/run/resolvconf/resolv.conf" +``` + +:::note + +As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. + +::: + +See [Editing Cluster as YAML](../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: + +``` +kubectl delete pods -n kube-system -l k8s-app=kube-dns +pod "kube-dns-5fd74c7488-6pwsf" deleted +``` + +Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). 
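+
+If the new setting does not appear to take effect, you can also check which `resolv.conf` file the `kubelet` was started with. This is a minimal sketch that assumes the `kubelet` runs as a Docker container named `kubelet`, as it does on RKE-provisioned nodes:
+
+```
+# Show the kubelet container configuration and filter for the resolv-conf flag
+docker inspect kubelet | grep resolv-conf
+```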
+ +If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: + +``` +kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' +``` + +Example output: +``` +upstreamNameservers:["1.1.1.1"] +``` diff --git a/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md new file mode 100644 index 0000000000..97cc975e9a --- /dev/null +++ b/docs/troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation.md @@ -0,0 +1,29 @@ +--- +title: Rotation of Expired Webhook Certificates +weight: 120 +--- + +For Rancher versions that have `rancher-webhook` installed, these certificates will expire after one year. It will be necessary for you to rotate your webhook certificate when this occurs. + +Rancher will advise the community once there is a permanent solution in place for this known issue. Currently, there are two methods to work around this issue: + +##### 1. Users with cluster access, run the following commands: +``` +kubectl delete secret -n cattle-system cattle-webhook-tls +kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io --ignore-not-found=true rancher.cattle.io +kubectl delete pod -n cattle-system -l app=rancher-webhook +``` + +##### 2. Users with no cluster access via `kubectl`: + +1. Delete the `cattle-webhook-tls` secret in the `cattle-system` namespace in the local cluster. + +2. Delete the `rancher.cattle.io` mutating webhook + +3. Delete the `rancher-webhook` pod in the `cattle-system` namespace in the local cluster. + +:::note + +The webhook certificate expiration issue is not specific to `cattle-webhook-tls` as listed in the examples. You will fill in your expired certificate secret accordingly. + +::: \ No newline at end of file diff --git a/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md new file mode 100644 index 0000000000..b135139882 --- /dev/null +++ b/docs/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -0,0 +1,271 @@ +--- +title: Kubernetes resources +weight: 101 +--- + +The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. 
+ +- [Nodes](#nodes) + - [Get nodes](#get-nodes) + - [Get node conditions](#get-node-conditions) +- [Kubernetes leader election](#kubernetes-leader-election) + - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) + - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) +- [Ingress controller](#ingress-controller) + - [Pod details](#pod-details) + - [Pod container logs](#pod-container-logs) + - [Namespace events](#namespace-events) + - [Debug logging](#debug-logging) + - [Check configuration](#check-configuration) +- [Rancher agents](#rancher-agents) + - [cattle-node-agent](#cattle-node-agent) + - [cattle-cluster-agent](#cattle-cluster-agent) +- [Jobs and pods](#jobs-and-pods) + - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) + - [Describe pod](#describe-pod) + - [Pod container logs](#pod-container-logs) + - [Describe job](#describe-job) + - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) + - [Evicted pods](#evicted-pods) + - [Job does not complete](#job-does-not-complete) + +# Nodes + +### Get nodes + +Run the command below and check the following: + +- All nodes in your cluster should be listed, make sure there is not one missing. +- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) +- Check if all nodes report the correct version. +- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) + + +``` +kubectl get nodes -o wide +``` + +Example output: + +``` +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +``` + +### Get node conditions + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' +``` + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' +``` + +Example output: + +``` +worker-0: DiskPressure:True +``` + +# Kubernetes leader election + +### Kubernetes Controller Manager leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). 
+ +``` +kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +### Kubernetes Scheduler leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). + +``` +kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +# Ingress Controller + +The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. + +Check if the pods are running on all nodes: + +``` +kubectl -n ingress-nginx get pods -o wide +``` + +Example output: + +``` +kubectl -n ingress-nginx get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE +default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 +nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 +nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 +``` + +If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. + +### Pod details + +``` +kubectl -n ingress-nginx describe pods -l app=ingress-nginx +``` + +### Pod container logs + +``` +kubectl -n ingress-nginx logs -l app=ingress-nginx +``` + +### Namespace events + +``` +kubectl -n ingress-nginx get events +``` + +### Debug logging + +To enable debug logging: + +``` +kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' +``` + +### Check configuration + +Retrieve generated configuration in each pod: + +``` +kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done +``` + +# Rancher agents + +Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. 
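+
+For a quick overview of both agent types at once, you can list them with a single set-based label selector. This is a minimal convenience sketch that relies on the same `app` labels used by the individual commands below:
+
+```
+# List cattle-node-agent and cattle-cluster-agent pods in one command
+kubectl -n cattle-system get pods -l 'app in (cattle-agent,cattle-cluster-agent)' -o wide
+```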
+ +#### cattle-node-agent + +Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 +cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 +cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 +cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 +cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 +cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 +cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 +``` + +Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: + +``` +kubectl -n cattle-system logs -l app=cattle-agent +``` + +#### cattle-cluster-agent + +Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 +``` + +Check logging of cattle-cluster-agent pod: + +``` +kubectl -n cattle-system logs -l app=cattle-cluster-agent +``` + +# Jobs and Pods + +### Check that pods or jobs have status **Running**/**Completed** + +To check, run the command: + +``` +kubectl get pods --all-namespaces +``` + +If a pod is not in **Running** state, you can dig into the root cause by running: + +### Describe pod + +``` +kubectl describe pod POD_NAME -n NAMESPACE +``` + +### Pod container logs + +``` +kubectl logs POD_NAME -n NAMESPACE +``` + +If a job is not in **Completed** state, you can dig into the root cause by running: + +### Describe job + +``` +kubectl describe job JOB_NAME -n NAMESPACE +``` + +### Logs from the containers of pods of the job + +``` +kubectl logs -l job-name=JOB_NAME -n NAMESPACE +``` + +### Evicted pods + +Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). 
+ +Retrieve a list of evicted pods (podname and namespace): + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' +``` + +To delete all evicted pods: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done +``` + +Retrieve a list of evicted pods, scheduled node and the reason: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done +``` + +### Job does not complete + +If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.](../../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) + +Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.6/en/troubleshooting/logging/_index.md b/docs/troubleshooting/other-troubleshooting-tips/logging.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/logging/_index.md rename to docs/troubleshooting/other-troubleshooting-tips/logging.md diff --git a/docs/troubleshooting/other-troubleshooting-tips/networking.md b/docs/troubleshooting/other-troubleshooting-tips/networking.md new file mode 100644 index 0000000000..00863c315d --- /dev/null +++ b/docs/troubleshooting/other-troubleshooting-tips/networking.md @@ -0,0 +1,122 @@ +--- +title: Networking +weight: 102 +--- + +The commands/steps listed on this page can be used to check networking related issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +### Double check if all the required ports are opened in your (host) firewall + +Double check if all the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. +### Check if overlay network is functioning correctly + +The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. 
+ +To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. + +:::note + +This container [does not support ARM nodes](https://github.com/leodotcloud/swiss-army-knife/issues/18), such as a Raspberry Pi. This will be seen in the pod logs as `exec user process caused: exec format error`. + +::: + +1. Save the following file as `overlaytest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: overlaytest + spec: + selector: + matchLabels: + name: overlaytest + template: + metadata: + labels: + name: overlaytest + spec: + tolerations: + - operator: Exists + containers: + - image: rancherlabs/swiss-army-knife + imagePullPolicy: Always + name: overlaytest + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + + ``` + +2. Launch it using `kubectl create -f overlaytest.yml` +3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. +4. Run the following script, from the same location. It will have each `overlaytest` container on every host ping each other: + ``` + #!/bin/bash + echo "=> Start network overlay test" + kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read spod shost + do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read tip thost + do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" + RC=$? + if [ $RC -ne 0 ] + then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost + else echo $shost can reach $thost + fi + done + done + echo "=> End network overlay test" + ``` + +5. When this command has finished running, it will output the state of each route: + + ``` + => Start network overlay test + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 + command terminated with exit code 1 + FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 + cp1 can reach cp1 + cp1 can reach wk1 + command terminated with exit code 1 + FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 + wk1 can reach cp1 + wk1 can reach wk1 + => End network overlay test + ``` + If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. This could be because the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened for `wk2`. +6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. 
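+
+If the overlay test reports failures, it can also help to confirm that the network provider pods themselves are healthy on every node before digging into firewall rules. This is a minimal sketch that assumes one of the default Canal/Flannel/Calico network providers:
+
+```
+# Check that the CNI pods are running on every node
+kubectl -n kube-system get pods -o wide | grep -E 'canal|flannel|calico'
+
+# Inspect the logs of a failing CNI pod (replace POD_NAME with a pod name from the output above)
+kubectl -n kube-system logs POD_NAME
+```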
+
+
+### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices
+
+When the MTU is incorrectly configured (either on hosts running Rancher, on nodes in created/imported clusters, or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to:
+
+* `websocket: bad handshake`
+* `Failed to connect to proxy`
+* `read tcp: i/o timeout`
+
+See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example of how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes.
+
+### Resolved issues
+
+#### Overlay network broken when using Canal/Flannel due to missing node annotations
+
+| | |
+|------------|------------|
+| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) |
+| Resolved in | v2.1.2 |
+
+To check if your cluster is affected, the following command will list the nodes that are broken (this command requires `jq` to be installed):
+
+```
+kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name'
+```
+
+If there is no output, the cluster is not affected.
diff --git a/content/rancher/v2.6/en/troubleshooting/rancherha/_index.md b/docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md
similarity index 100%
rename from content/rancher/v2.6/en/troubleshooting/rancherha/_index.md
rename to docs/troubleshooting/other-troubleshooting-tips/rancher-ha.md
diff --git a/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md
new file mode 100644
index 0000000000..c40978b36f
--- /dev/null
+++ b/docs/troubleshooting/other-troubleshooting-tips/registered-clusters.md
@@ -0,0 +1,68 @@
+---
+title: Registered clusters
+weight: 105
+---
+
+The commands/steps listed on this page can be used to check clusters that you are registering or that are already registered in Rancher.
+
+Make sure you have configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kubeconfig_from_imported_cluster.yml`).
+
+### Rancher agents
+
+Communication to the cluster (Kubernetes API via cattle-cluster-agent) and communication to the nodes are done through Rancher agents.
+
+If the cattle-cluster-agent cannot connect to the configured `server-url`, the cluster will remain in the **Pending** state, showing `Waiting for full cluster configuration`.
+
+#### cattle-node-agent
+
+:::note
+
+cattle-node-agents are only present in clusters created in Rancher with RKE.
+
+:::
+
+Check that the cattle-node-agent pods are present on each node, have the status **Running**, and don't have a high restart count:
+
+```
+kubectl -n cattle-system get pods -l app=cattle-agent -o wide
+```
+
+Example output:
+
+```
+NAME READY STATUS RESTARTS AGE IP NODE
+cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1
+cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1
+cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0
+cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0
+cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0
+cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1
+cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2
+```
+
+Check the logs of a specific cattle-node-agent pod or of all cattle-node-agent pods:
+
+```
+kubectl -n cattle-system logs -l app=cattle-agent
+```
+
+#### cattle-cluster-agent
+
+Check that the cattle-cluster-agent pod is present in the cluster, has the status **Running**, and doesn't have a high restart count:
+
+```
+kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide
+```
+
+Example output:
+
+```
+NAME READY STATUS RESTARTS AGE IP NODE
+cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1
+```
+
+Check the logs of the cattle-cluster-agent pod:
+
+```
+kubectl -n cattle-system logs -l app=cattle-cluster-agent
+```
diff --git a/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md
new file mode 100644
index 0000000000..d7def700d0
--- /dev/null
+++ b/docs/troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs.md
@@ -0,0 +1,23 @@
+---
+title: User ID Tracking in Audit Logs
+weight: 110
+---
+
+The following audit logs are used in Rancher to track events occurring on the local and downstream clusters:
+
+* [Kubernetes Audit Logs](https://rancher.com/docs/rke/latest/en/config-options/audit-log/)
+* [Rancher API Audit Logs](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md)
+
+Audit logs in Rancher v2.6 have been enhanced to include the external Identity Provider name (the common name of the user in the external auth provider) in both the Rancher and downstream Kubernetes audit logs.
+
+Before v2.6, a Rancher Admin could not trace an event from the Rancher audit logs into the Kubernetes audit logs without knowing the mapping of the external Identity Provider username to the userId (`u-xXXX`) used in Rancher.
+To learn this mapping, cluster admins needed access to the Rancher API, the UI, and the local management cluster.
+
+Now, with this feature, a downstream cluster admin should be able to look at the Kubernetes audit logs and know which specific external Identity Provider (IDP) user performed an action, without needing to view anything in Rancher.
+If the audit logs are shipped off the cluster, a user of the logging system should be able to identify the user in the external Identity Provider system.
+A Rancher Admin should now be able to view the Rancher audit logs and follow through to the Kubernetes audit logs by using the external Identity Provider username.
+
+### Feature Description
+
+- When Kubernetes Audit logs are enabled on the downstream cluster, the external Identity Provider's username is now logged for each request, at the "metadata" level.
+- When Rancher API Audit logs are enabled on the Rancher installation, the external Identity Provider's username is also logged now at the `auditLog.level=1` for each request that hits the Rancher API server, including the login requests. diff --git a/docusaurus.config.js b/docusaurus.config.js new file mode 100644 index 0000000000..77d8c1b375 --- /dev/null +++ b/docusaurus.config.js @@ -0,0 +1,117 @@ + +/** @type {import('@docusaurus/types').DocusaurusConfig} */ +module.exports = { + title: 'Rancher Manager', + tagline: '', + url: 'https://rancher.com/docs/rancher', + baseUrl: '/', + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', + favicon: 'img/favicon.png', + organizationName: 'rancher', // Usually your GitHub org/user name. + projectName: 'rancher-docs', // Usually your repo name. + trailingSlash: false, + themeConfig: { + algolia: { + // The application ID provided by Algolia + appId: 'YOUR_APP_ID', + + // Public API key: it is safe to commit it + apiKey: 'YOUR_SEARCH_API_KEY', + + indexName: 'YOUR_INDEX_NAME', + + // Optional: see doc section below + contextualSearch: true, + + // Optional: Specify domains where the navigation should occur through window.location instead on history.push. Useful when our Algolia config crawls multiple documentation sites and we want to navigate with window.location.href to them. + externalUrlRegex: 'external\\.com|domain\\.com', + + // Optional: Algolia search parameters + searchParameters: {}, + + // Optional: path for search page that enabled by default (`false` to disable it) + searchPagePath: 'search', + + //... other Algolia params + }, + colorMode: { + // "light" | "dark" + defaultMode: "light", + + // Hides the switch in the navbar + // Useful if you want to support a single color mode + disableSwitch: true, + }, + prism: { + additionalLanguages: ['rust'], + }, + navbar: { + title: "", + logo: { + alt: 'logo', + src: 'img/rancher-logo-horiz-color.svg', + // href: 'en', + }, + items: [ + { + type: 'doc', + docId: 'rancher-manager', + position: 'right', + label: 'Docs', + className: 'navbar__docs', + }, + { + href: 'https://github.com/rancher/', + label: 'GitHub', + position: 'right', + className: 'navbar__github btn btn-secondary icon-github', + }, + { + type: 'docsVersionDropdown', + position: 'left', + dropdownItemsAfter: [{to: '/versions', label: 'All versions'}], + dropdownActiveClassDisabled: false, + }, + ], + }, + footer: { + style: 'dark', + links: [], + copyright: `Copyright © ${new Date().getFullYear()} SUSE Rancher. All Rights Reserved.`, + }, + }, + presets: [ + [ + '@docusaurus/preset-classic', + { + docs: { + routeBasePath: '/', // Serve the docs at the site's root + /* other docs plugin options */ + sidebarPath: require.resolve('./sidebars.js'), + showLastUpdateTime: true, + editUrl: 'https://github.com/rancher/docs/edit/master/', + lastVersion: 'current', + versions: { + current: { + label: 'v2.6' + }, + 2.5: { + label: 'v2.5', + path: 'v2.5' + }, + '2.0-2.4': { + label: 'v2.0-v2.4', + path: 'v2.0-v2.4' + }, + }, + }, + blog: false, // Optional: disable the blog plugin + // ... + theme: { + customCss: [require.resolve("./src/css/custom.css")], + }, + }, + ], + ], +}; diff --git a/layouts/_default/backup-sngl.html b/layouts/_default/backup-sngl.html deleted file mode 100644 index feb677c475..0000000000 --- a/layouts/_default/backup-sngl.html +++ /dev/null @@ -1,37 +0,0 @@ -{{ define "hero" }} -
-
-
- {{ with .Params.Title }} -

{{ . }}

- {{end}} - - {{ with .Params.Author }} - - {{end}} -
-
-
-{{ end }} - -{{ define "main" }} -
-

default single template

- {{ partial "breadcrumbs.html" . }} -
-
- - {{ if .Params.Image }} - {{ .Params.Title }} - {{end}} - - {{ .Content }} - -
- - -
-
-{{ end }} diff --git a/layouts/_default/feed.json b/layouts/_default/feed.json deleted file mode 100644 index 27f820ee66..0000000000 --- a/layouts/_default/feed.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "events": [ - {{ range $i, $event := (where (where .Site.RegularPages "Section" "events") ".Params.type" "event") }} - {{ if $i }}, {{ end }} - { - "title": "{{ $event.Params.title }}", - "location": "{{$event.Params.location}}", - "eventDate": "{{$event.Params.eventdate}}", - "permalink": "{{.Permalink}}" - } - {{ end }} - ] -} diff --git a/layouts/_default/list.html b/layouts/_default/list.html deleted file mode 100644 index 50583bb751..0000000000 --- a/layouts/_default/list.html +++ /dev/null @@ -1,256 +0,0 @@ -{{ define "title" }} -{{ .Title }} – {{ .Site.Title }} -{{ end }} - -{{ define "hero" }} -{{ end }} - -{{ define "main" }} -
-
-
-
-
-
-
- -
- -
- -
- - {{ $url := urls.Parse .Permalink }} - {{ $path := split $url.Path "/" }} - {{ $product := index $path 1 }} - {{ $version := index $path 2 }} - {{ $productVersion := printf "%s/%s" $product $version}} - {{ if in .Dir "/v2.x" }} -
- We are transitioning to versioned documentation. The v2.x docs will no longer be maintained. Rancher v2.6 docsRancher v2.5 docsRancher v2.0-2.4 docs -
- {{end}} - {{ if in .Dir "/multi-cluster-apps" }} -
- As of Rancher v2.5, multi-cluster apps are deprecated. We now recommend using Fleet for deploying apps across clusters. It is available in the Rancher UI by clicking ☰ > Continuous Delivery. -
- {{end}} - {{ if in .Dir "os/v1.x" }} -
- RancherOS 1.x is currently in a maintain-only-as-essential mode. It is no longer being actively maintained at a code level other than addressing critical or security fixes. For more information about the support status of RancherOS, see this page. -
- {{end}} - {{ if or (in .Dir "/v2.0-v2.4") (in .Dir "/v2.5") }} -
- You are viewing the documentation for an older Rancher release. If you're looking for the documentation for the latest Rancher release, go to this page instead. -
- {{end}} - -
-
- -
- - - {{ partial "docs-side-nav.html" . }} - -
-
-
-
- ? - - Need Help? -  Get free intro and advanced online training - - × -
- - - - {{ with .Params.Title }} -

{{ . }}

-
- {{end}} - - {{ if .Params.ctaBanner }} - {{ with index .Site.Data.cta .Params.ctaBanner }} -
- -

{{ .header }}

- - -
- {{ end }} - {{ end }} - -
- - - - {{ .Content }} - - {{ $paginator := .Scratch.Get "paginator" }} - {{ range $paginator.Pages }} -
-
-

{{.Title}}

- - {{ if .Params.Image }} -
{{ .Params.Title }}
- {{end}} - -

{{ .Summary | safeHTML }}

- {{ if .Truncated }} - - {{ end }} -
-
-
- - {{ with .Params.Author }} -
person_outline By: {{ . }}
- {{end}} - - {{ if eq .Section "events" }} - {{ with .Params.EventDate }} -
event {{ . }}
- {{end}} - - {{ else }} - - {{ with .Params.Date }} -
event {{ .Format "January 2, 2006" }}
- {{end}} - {{end}} - - {{ if eq .Section "blog" }} -
timer Read Time: {{.ReadingTime}} minutes -
- {{end}} - - {{ with .Params.Location }} -
location_on {{ . }}
- {{end}} - -
-
-
- {{ end }} -
- {{ template "_internal/pagination.html" . }} - -
{{ partial "page-edit.html" . }}
- - -
-
-
-
- - - -{{ end }} diff --git a/layouts/_default/single.html b/layouts/_default/single.html deleted file mode 100644 index c7de0eeee7..0000000000 --- a/layouts/_default/single.html +++ /dev/null @@ -1,44 +0,0 @@ -{{ define "title" }} -{{ .Title }} – {{ .Site.Title }} -{{ end }} - -{{ define "hero" }} -
-
Rancher 2.x Docs
- {{ with .Params.Title }} -

{{ . }}

- {{end}} - - {{ with .Params.Author }} - - {{end}} -
-{{ end }} - -{{ define "main" }} - -
-
- - - -
- -
- {{ .Content }} - -
- -
{{ partial "page-edit.html" . }}
-
- -
- -
-{{ end }} \ No newline at end of file diff --git a/layouts/index.headers b/layouts/index.headers deleted file mode 100644 index 9c5050f826..0000000000 --- a/layouts/index.headers +++ /dev/null @@ -1,5 +0,0 @@ -/* - X-Frame-Options: DENY - X-XSS-Protection: 1; mode=block - X-Content-Type-Options: nosniff - Referrer-Policy: origin-when-cross-origin \ No newline at end of file diff --git a/layouts/index.html b/layouts/index.html deleted file mode 100644 index 466c351c04..0000000000 --- a/layouts/index.html +++ /dev/null @@ -1,7 +0,0 @@ - - -{{ define "main" }} -
- {{ .Content }} -
-{{ end }} diff --git a/layouts/index.redirects b/layouts/index.redirects deleted file mode 100644 index 4323285cdd..0000000000 --- a/layouts/index.redirects +++ /dev/null @@ -1,6 +0,0 @@ -# redirects for Netlify - https://www.netlify.com/docs/redirects/ -{{- range $p := .Site.Pages -}} -{{- range .Aliases }} -{{ . }} {{ $p.RelPermalink -}} -{{- end }} -{{- end -}} \ No newline at end of file diff --git a/layouts/partials/docs-nav.html b/layouts/partials/docs-nav.html deleted file mode 100644 index a1bdcf7c5e..0000000000 --- a/layouts/partials/docs-nav.html +++ /dev/null @@ -1,70 +0,0 @@ -{{ $.Scratch.Set "topLevel" . }} -{{ range $product := $.Site.Sections }} - {{ range $version := .Sections }} - {{ range $language := .Sections }} - {{if $.CurrentSection}} - {{ if .IsAncestor $.CurrentSection }} - {{ $.Scratch.Set "topLevel" . }} - {{end}} - {{end}} - {{end}} - {{end}} -{{end}} - - diff --git a/layouts/partials/docs-side-nav.html b/layouts/partials/docs-side-nav.html deleted file mode 100644 index 29857608a9..0000000000 --- a/layouts/partials/docs-side-nav.html +++ /dev/null @@ -1,136 +0,0 @@ -{{ $.Scratch.Set "topLevel" . }} -{{ range $product := $.Site.Sections }} - {{ range $version := .Sections }} - {{ range $language := .Sections }} - {{if $.CurrentSection}} - {{ if .IsAncestor $.CurrentSection }} - {{ $.Scratch.Set "topLevel" . }} - {{end}} - {{end}} - {{end}} - {{end}} -{{end}} - - -
- -
- -
- {{ if .Title }} - {{.Title}} - {{ else if .Params.shortTitle }} - {{.Params.shortTitle}} - {{ else }} - (No Title) - {{end}} - - - - - - -
-
-
    - {{ $activeNode := . }} - {{ range ($.Scratch.Get "topLevel").Sections }} - {{ template "menu" dict "node" . "activeNode" $activeNode }} - {{ end }} -
-
- -{{ define "menu" }} - {{/* .node and .activeNode come from the caller */}} - {{- $activeNode := .activeNode -}} - {{- with .node -}} - {{- $isActive := "" -}} - {{- if eq .UniqueID $activeNode.UniqueID -}} - {{- $isActive = "active" -}} - {{- end -}} - - {{- $isOpen := false -}} - {{- if (or .Params.alwaysOpen (.IsAncestor $activeNode)) -}} - {{- $isOpen = true -}} - {{- end -}} - - {{- if .IsSection -}} - {{- $children := (add (len .Pages) (len .Sections)) -}} -
  • - - {{- if .Params.shortTitle -}} - {{ .Params.shortTitle}} - {{- else -}} - {{ .Title }} - {{- end -}} - {{- if gt $children 0 -}} - - - - - - {{- end -}} - - {{if gt $children 0}} - - {{- end -}} -
  • - {{- else if not .Params.hidden -}} -
  • - - {{- if .Params.shortTitle -}} - {{ .Params.shortTitle}} - {{- else -}} - {{ .Title }} - {{- end -}} - -
  • - {{- end -}} - {{ end }} -{{ end }} diff --git a/layouts/partials/page-edit.html b/layouts/partials/page-edit.html deleted file mode 100755 index 6918c2dba4..0000000000 --- a/layouts/partials/page-edit.html +++ /dev/null @@ -1,9 +0,0 @@ -{{ if not .Lastmod.IsZero }}Last updated on {{ .Lastmod.Format "Jan 2, 2006" }}{{ end }} - diff --git a/layouts/partials/page-nav.html b/layouts/partials/page-nav.html deleted file mode 100644 index 0891b848c4..0000000000 --- a/layouts/partials/page-nav.html +++ /dev/null @@ -1,19 +0,0 @@ - -{{ define "page-nav" }} - {{ range .Sections}} -
  • {{ .Title }} - {{if gt (len .Sections) 0}} -
      - {{ range .Pages }} -
    • {{ .Title }}
    • - {{ end }} - {{ template "page-nav" . }} -
    - {{end}} -
  • - {{ end }} -{{ end }} diff --git a/layouts/partials/seo.html b/layouts/partials/seo.html deleted file mode 100644 index e1ae8ae8a1..0000000000 --- a/layouts/partials/seo.html +++ /dev/null @@ -1,135 +0,0 @@ - - {{ with .Params.metaTitle }} - {{ . }} - {{ else }} - {{ if eq .Section "tags" }} - {{ .Title }} Blog Posts by Rancher - {{ else }} - Rancher Docs: {{ .Title }} - {{ end }} - {{ end }} - - -{{- .Scratch.Set "permalink" .Permalink -}} -{{- if (and .Pages (not .IsHome)) -}} - {{/* - Hugo doesn't generate permalinks for lists with the page number in them, - which makes all the pages of a list look lik the same page to a search - engine, which is bad. - */}} - - {{- $by := .Params.pageBy | default .Site.Params.pageBy | default "default" -}} - {{- $limit := .Site.Params.pageLimit | default 10 -}} - - {{- if (eq .Site.Params.pageBy "newest") -}} - {{- $paginator := .Paginate .Pages.ByDate.Reverse $limit -}} - {{- .Scratch.Set "paginator" $paginator -}} - {{- else if (eq .Site.Params.pageBy "title") -}} - {{- $paginator := .Paginate .Pages.ByTitle $limit -}} - {{- .Scratch.Set "paginator" $paginator -}} - {{- else -}} - {{- $paginator := .Paginate $limit -}} - {{- .Scratch.Set "paginator" $paginator -}} - {{- end -}} - - {{- $paginator := .Scratch.Get "paginator" -}} - {{- if (gt $paginator.PageNumber 1) -}} - {{ .Scratch.Set "permalink" ($paginator.URL | absURL) }} - {{- end -}} - - {{ with $paginator.Prev -}} - - {{- end }} - {{ with $paginator.Next -}} - - {{- end }} -{{- end -}} - - {{ $permalink := .Scratch.Get "permalink" }} - {{ if .Params.canonical }} - - {{ end }} - - {{ if .RSSLink -}} - - {{- end }} - - {{ if eq .Section "tags" }} - - {{ else }} - - {{ end }} - - - - - - - - - - - - - - - - - {{ range .Params.categories }}{{ end }} - {{ if isset .Params "date" }}{{ end }} - -{{- if .IsHome -}} - -{{- else if .IsPage -}} - -{{ end }} diff --git a/layouts/robots.txt b/layouts/robots.txt deleted file mode 100644 index 449f2c85a6..0000000000 --- a/layouts/robots.txt +++ /dev/null @@ -1,12 +0,0 @@ -User-agent: * - -{{ if ne (getenv "HUGO_ENV") "production" }} -Disallow: / -{{ end }} - -{{ range .Pages }} -{{ if in .Dir "rancher/v2.x" }} -Disallow: {{ .RelPermalink }} -{{end}} -{{ end }} -Sitemap: https://rancher.com/sitemap.xml diff --git a/layouts/shortcodes/accordion.html b/layouts/shortcodes/accordion.html deleted file mode 100644 index 0e38ce3b21..0000000000 --- a/layouts/shortcodes/accordion.html +++ /dev/null @@ -1,7 +0,0 @@ -
    - - -
    - {{ .Inner }} -
    -
    diff --git a/layouts/shortcodes/carousel.html b/layouts/shortcodes/carousel.html deleted file mode 100644 index 09d6895db9..0000000000 --- a/layouts/shortcodes/carousel.html +++ /dev/null @@ -1,13 +0,0 @@ -
    -
    -
      - {{ .Inner }} -
    -
    - - - - - - -
    diff --git a/layouts/shortcodes/column.html b/layouts/shortcodes/column.html deleted file mode 100644 index 1033d6aeae..0000000000 --- a/layouts/shortcodes/column.html +++ /dev/null @@ -1 +0,0 @@ -
    {{ .Inner }}
    \ No newline at end of file diff --git a/layouts/shortcodes/img.html b/layouts/shortcodes/img.html deleted file mode 100644 index e7a85ae6d4..0000000000 --- a/layouts/shortcodes/img.html +++ /dev/null @@ -1,14 +0,0 @@ -{{ $img := .Get 0 }} -{{ $alt := .Get 1 }} -{{ with resources.Get $img }} - {{ $thumb10 := .Resize "1000x" }} - {{ $thumb8 := .Resize "800x" }} - {{ $thumb6 := .Resize "600x" }} - {{ $thumb4 := .Resize "400x" }} - {{ $thumb2 := .Resize "200x" }} - {{$alt}} -{{ end }} diff --git a/layouts/shortcodes/include.html b/layouts/shortcodes/include.html deleted file mode 100644 index 0d98507f06..0000000000 --- a/layouts/shortcodes/include.html +++ /dev/null @@ -1,5 +0,0 @@ -{{$file := .Get "file"}} - -{{- with .Site.GetPage $file -}} -{{- .Content | markdownify -}} -{{- end -}} diff --git a/layouts/shortcodes/modal-button.html b/layouts/shortcodes/modal-button.html deleted file mode 100644 index 670f62410e..0000000000 --- a/layouts/shortcodes/modal-button.html +++ /dev/null @@ -1,9 +0,0 @@ - - diff --git a/layouts/shortcodes/ports-custom-nodes.html b/layouts/shortcodes/ports-custom-nodes.html deleted file mode 100644 index b5dfa8f4a2..0000000000 --- a/layouts/shortcodes/ports-custom-nodes.html +++ /dev/null @@ -1,168 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    From / ToRancher Nodesetcd Plane NodesControl Plane NodesWorker Plane NodesExternal Rancher Load BalancerInternet
    Rancher Nodes (1)git.rancher.io
    etcd Plane Nodes443 TCP (3)2379 TCP443 TCP
    2380 TCP
    6443 TCP
    8472 UDP
    4789 UDP (6)
    9099 TCP (4)
    Control Plane Nodes443 TCP (3)2379 TCP443 TCP
    2380 TCP
    6443 TCP
    8472 UDP
    4789 UDP (6)
    10250 TCP
    9099 TCP (4)
    10254 TCP (4)
    Worker Plane Nodes443 TCP (3)6443 TCP443 TCP
    8472 UDP
    4789 UDP (6)
    9099 TCP (4)
    10254 TCP (4)
    Kubernetes API Clients6443 TCP (5)
    Workload Clients or Load Balancer30000-32767 TCP / UDP
    (nodeport)
    80 TCP (Ingress)
    443 TCP (Ingress)
    Notes:

    1. Nodes running standalone server or Rancher HA deployment.
    2. Required to fetch Rancher chart library.
    3. Only without external load balancer in front of Rancher.
    4. Local traffic to the node itself (not across nodes).
    5. Only if Authorized Cluster Endpoints are activated.
    6. Only if using Overlay mode on Windows cluster.
    diff --git a/layouts/shortcodes/ports-iaas-nodes.html b/layouts/shortcodes/ports-iaas-nodes.html deleted file mode 100644 index 45b401149f..0000000000 --- a/layouts/shortcodes/ports-iaas-nodes.html +++ /dev/null @@ -1,159 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    From / ToRancher Nodesetcd Plane NodesControl Plane NodesWorker Plane NodesExternal Rancher Load BalancerInternet
    Rancher Nodes (1)22 TCPgit.rancher.io
    2376 TCP
    etcd Plane Nodes443 TCP (3)2379 TCP443 TCP
    2380 TCP
    6443 TCP
    8472 UDP
    9099 TCP (4)
    Control Plane Nodes443 TCP (3)2379 TCP443 TCP
    2380 TCP
    6443 TCP
    8472 UDP
    10250 TCP
    9099 TCP (4)
    10254 TCP (4)
    Worker Plane Nodes443 TCP (3)6443 TCP443 TCP
    8472 UDP
    9099 TCP (4)
    10254 TCP (4)
    Kubernetes API Clients6443 TCP (5)
    Workload Clients or Load Balancer30000-32767 TCP / UDP
    (nodeport)
    80 TCP (Ingress)
    443 TCP (Ingress)
    Notes:

    1. Nodes running standalone server or Rancher HA deployment.
    2. Required to fetch Rancher chart library.
    3. Only without external load balancer in front of Rancher.
    4. Local traffic to the node itself (not across nodes).
    5. Only if Authorized Cluster Endpoints are activated.
    diff --git a/layouts/shortcodes/ports-imported-hosted.html b/layouts/shortcodes/ports-imported-hosted.html deleted file mode 100644 index 48e4201bae..0000000000 --- a/layouts/shortcodes/ports-imported-hosted.html +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    From / ToRancher NodesHosted / Imported ClusterExternal Rancher Load BalancerInternet
    Rancher Nodes (1)Kubernetes API
    Endpoint Port (2)
    git.rancher.io
    8443 TCP
    9443 TCP
    Hosted / Imported Cluster443 TCP (4)(5)443 TCP (5)
    Kubernetes API ClientsCluster / Provider Specific (6)
    Workload ClientCluster / Provider Specific (7)
    Notes:

    1. Nodes running standalone server or Rancher HA deployment.
    2. Only for hosted clusters.
    3. Required to fetch Rancher chart library.
    4. Only without external load balancer.
    5. From worker nodes.
    6. For direct access to the Kubernetes API without Rancher.
    7. Usually Ingress backed by infrastructure load balancer and/or nodeport.
    diff --git a/layouts/shortcodes/ports-rke-nodes.html b/layouts/shortcodes/ports-rke-nodes.html deleted file mode 100644 index e8afb0e084..0000000000 --- a/layouts/shortcodes/ports-rke-nodes.html +++ /dev/null @@ -1,31 +0,0 @@ -
    -

    RKE node:
    Node that runs the rke commands

    -

    RKE node - Outbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortSourceDestinationDescription
    TCP22RKE node
    • Any node configured in Cluster Configuration File
    SSH provisioning of node by RKE
    TCP6443RKE node
    • controlplane nodes
    Kubernetes apiserver
    -
    diff --git a/layouts/shortcodes/readfile.html b/layouts/shortcodes/readfile.html deleted file mode 100644 index 117c7e8706..0000000000 --- a/layouts/shortcodes/readfile.html +++ /dev/null @@ -1,8 +0,0 @@ -{{$file := .Get "file"}} -{{- if eq (.Get "markdown") "true" -}} -{{- $file | readFile | markdownify -}} -{{- else if (.Get "highlight") -}} -{{- highlight ($file | readFile) (.Get "highlight") "" -}} -{{- else -}} -{{ $file | readFile | safeHTML }} -{{- end -}} diff --git a/layouts/shortcodes/release-channel.html b/layouts/shortcodes/release-channel.html deleted file mode 100644 index ef1e50d854..0000000000 --- a/layouts/shortcodes/release-channel.html +++ /dev/null @@ -1,22 +0,0 @@ -
    -

    - -

    -

    - -

    -

    - -

    -
    diff --git a/layouts/shortcodes/requirements_ports_rancher_rke.html b/layouts/shortcodes/requirements_ports_rancher_rke.html deleted file mode 100644 index 41401055ff..0000000000 --- a/layouts/shortcodes/requirements_ports_rancher_rke.html +++ /dev/null @@ -1,57 +0,0 @@ -
    -
    Cluster External Ports
    -

    These ports should be open between nodes and the external network for communication and management of Rancher.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortDescription
    TCP22SSH for RKE
    TCP80Ingress controller - redirect to HTTPS
    TCP443Ingress controller - HTTPS traffic to Rancher
    TCP6443HTTPS to kube-api, used by kubectl and helm
    -

    -
    Additional Ports Required Between Rancher Cluster Nodes
    -

    In addition to the ports listed above these ports must be open between nodes.

    - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortDescription
    TCP2379-2380etcd
    udp8472Overlay networking
    TCP10250kubelet
    -
    diff --git a/layouts/shortcodes/requirements_ports_rke.html b/layouts/shortcodes/requirements_ports_rke.html deleted file mode 100644 index 1957b00910..0000000000 --- a/layouts/shortcodes/requirements_ports_rke.html +++ /dev/null @@ -1,331 +0,0 @@ -
    -

    etcd nodes:
    Nodes with the role etcd

    -

    etcd nodes - Inbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortSourceDescription
    TCP2376
    • Rancher nodes
    Docker daemon TLS port used by Docker Machine
    (only needed when using Node Driver/Templates)
    TCP2379
    • etcd nodes
    • controlplane nodes
    etcd client requests
    TCP2380
    • etcd nodes
    • controlplane nodes
    etcd peer communication
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • etcd node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    TCP10250
    • Metrics server communications with all nodes
    kubelet
    -

    etcd nodes - Outbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortDestinationDescription
    TCP443
    • Rancher nodes
    Rancher agent
    TCP2379
    • etcd nodes
    etcd client requests
    TCP2380
    • etcd nodes
    etcd peer communication
    TCP6443
    • controlplane nodes
    Kubernetes apiserver
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • etcd node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    -

    controlplane nodes:
    Nodes with the role controlplane

    -

    controlplane nodes - Inbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortSourceDescription
    TCP80
    • Any that consumes Ingress services
    Ingress controller (HTTP)
    TCP443
    • Any that consumes Ingress services
    Ingress controller (HTTPS)
    TCP2376
    • Rancher nodes
    Docker daemon TLS port used by Docker Machine
    (only needed when using Node Driver/Templates)
    TCP6443
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Kubernetes apiserver
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • controlplane node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    TCP10250
    • Metrics server communications with all nodes
    kubelet
    TCP10254
    • controlplane node itself (local traffic, not across nodes)
    See Local node traffic
    Ingress controller livenessProbe/readinessProbe
    TCP/UDP30000-32767
    • Any source that consumes NodePort services
    NodePort port range
    -

    controlplane nodes - Outbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortDestinationDescription
    TCP443
    • Rancher nodes
    Rancher agent
    TCP2379
    • etcd nodes
    etcd client requests
    TCP2380
    • etcd nodes
    etcd peer communication
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • controlplane node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    TCP10250
    • etcd nodes
    • controlplane nodes
    • worker nodes
    kubelet
    TCP10254
    • controlplane node itself (local traffic, not across nodes)
    See Local node traffic
    Ingress controller livenessProbe/readinessProbe
    -

    worker nodes:
    Nodes with the role worker

    -

    worker nodes - Inbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortSourceDescription
    TCP22 -
      -
    • Linux worker nodes only
    • -
    • Any network that you want to be able to remotely access this node from.
    • -
    -
    Remote access over SSH
    TCP3389 -
      -
    • Windows worker nodes only
    • -
    • Any network that you want to be able to remotely access this node from.
    • -
    -
    Remote access over RDP
    TCP80
    • Any that consumes Ingress services
    Ingress controller (HTTP)
    TCP443
    • Any that consumes Ingress services
    Ingress controller (HTTPS)
    TCP2376
    • Rancher nodes
    Docker daemon TLS port used by Docker Machine
    (only needed when using Node Driver/Templates)
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • worker node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    TCP10250
    • Metrics server communications with all nodes
    kubelet
    TCP10254
    • worker node itself (local traffic, not across nodes)
    See Local node traffic
    Ingress controller livenessProbe/readinessProbe
    TCP/UDP30000-32767
    • Any source that consumes NodePort services
    NodePort port range
    -

    worker nodes - Outbound rules

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProtocolPortDestinationDescription
    TCP443
    • Rancher nodes
    Rancher agent
    TCP6443
    • controlplane nodes
    Kubernetes apiserver
    UDP8472
    • etcd nodes
    • controlplane nodes
    • worker nodes
    Canal/Flannel VXLAN overlay networking
    TCP9099
    • worker node itself (local traffic, not across nodes)
    See Local node traffic
    Canal/Flannel livenessProbe/readinessProbe
    TCP10254
    • worker node itself (local traffic, not across nodes)
    See Local node traffic
    Ingress controller livenessProbe/readinessProbe
    -
    -

    Information on local node traffic

    -

    Kubernetes healthchecks (livenessProbe and readinessProbe) are executed on the host itself. On most nodes, this is allowed by default. When you have applied strict host firewall (i.e. iptables) policies on the node, or when you are using nodes that have multiple interfaces (multihomed), this traffic gets blocked. In this case, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as Source or Destination in your security group, that this only applies to the private interface of the nodes/instances. -

    -
    diff --git a/layouts/shortcodes/requirements_software.html b/layouts/shortcodes/requirements_software.html deleted file mode 100644 index 025472b57e..0000000000 --- a/layouts/shortcodes/requirements_software.html +++ /dev/null @@ -1,13 +0,0 @@ -
    - -

    A supported version of Docker is required.

    -

    Supported Versions:

    -
      -
    • 1.12.6
    • -
    • 1.13.1
    • -
    • 17.03.2
    • -
    -
    -

    Docker Documentation: Installation Instructions

    - -
    diff --git a/layouts/shortcodes/row.html b/layouts/shortcodes/row.html deleted file mode 100644 index 6490ab234b..0000000000 --- a/layouts/shortcodes/row.html +++ /dev/null @@ -1 +0,0 @@ -
    {{ .Inner }}
    \ No newline at end of file diff --git a/layouts/shortcodes/saml_caveats.html b/layouts/shortcodes/saml_caveats.html deleted file mode 100644 index c8b6e9a5ae..0000000000 --- a/layouts/shortcodes/saml_caveats.html +++ /dev/null @@ -1,15 +0,0 @@ - -
    -

    SAML Provider Caveats:

    - -
      -
    • SAML Protocol does not support search or lookup for users or groups. Therefore, there is no validation on users or groups when adding them to Rancher.
    • -
    • When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match.
    • - -
    • When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user.

      - -
        -
      • The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of.
      • -
    • -
    -
    diff --git a/layouts/shortcodes/ssl_faq_ha.html b/layouts/shortcodes/ssl_faq_ha.html deleted file mode 100644 index 3b46867e35..0000000000 --- a/layouts/shortcodes/ssl_faq_ha.html +++ /dev/null @@ -1,83 +0,0 @@ -

    How Do I Know if My Certificates are in PEM Format?

    - -

    You can recognize the PEM format by the following traits:

    -
      -
    • The file begins with the following header:
      -----BEGIN CERTIFICATE-----
    • -
    • The header is followed by a long string of characters. Like, really long.
    • -
    • The file ends with a footer:
      -----END CERTIFICATE-----
    • -
    - -

    PEM Certificate Example:

    - -
    -----BEGIN CERTIFICATE-----
    -MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV
    -... more lines
    -VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg==
    ------END CERTIFICATE-----
    -
    - -

    How Can I Encode My PEM Files in base64?

    - -

    To encode your certificates in base64:

    - -
      -
    1. Change directory to where the PEM file resides.
    2. -
    3. Run one of the following commands. Replace FILENAME with the name of your certificate. -
      -# MacOS
      -cat FILENAME | base64
      -# Linux
      -cat FILENAME | base64 -w0
      -# Windows
      -certutil -encode FILENAME FILENAME.base64
      -
      -
    4. -
    - -

    How Can I Verify My Generated base64 String For The Certificates?

    - -

    To decode your certificates in base64:

    - -
      -
    1. Copy the generated base64 string.
    2. -
    3. Run one of the following commands. Replace YOUR_BASE64_STRING with the previously copied base64 - string. -
      -# MacOS
      -echo YOUR_BASE64_STRING | base64 -D
      -# Linux
      -echo YOUR_BASE64_STRING | base64 -d
      -# Windows
      -certutil -decode FILENAME.base64 FILENAME.verify
      -
      -
    4. -
    - - -

    What is the Order of Certificates if I Want to Add My Intermediate(s)?

    - -

    The order of adding certificates is as follows:

    - -
    ------BEGIN CERTIFICATE-----
    -%YOUR_CERTIFICATE%
    ------END CERTIFICATE-----
    ------BEGIN CERTIFICATE-----
    -%YOUR_INTERMEDIATE_CERTIFICATE%
    ------END CERTIFICATE-----
    -
    - -

    How Do I Validate My Certificate Chain?

    - -

    You can validate the certificate chain by using the openssl binary. If the output of the command (see - the command example below) ends with Verify return code: 0 (ok), your certificate chain is valid. The - ca.pem file must be the same as you added to the rancher/rancher container. When using a - certificate signed by a recognized Certificate Authority, you can omit the -CAfile parameter.

    - -

    Command:

    -
    -openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443 -servername rancher.yourdomain.com
    -...
    -    Verify return code: 0 (ok)
    -
    diff --git a/newfile b/newfile deleted file mode 100644 index 3df46ad190..0000000000 --- a/newfile +++ /dev/null @@ -1 +0,0 @@ -fun diff --git a/nginx.conf b/nginx.conf deleted file mode 100644 index 601bf100d1..0000000000 --- a/nginx.conf +++ /dev/null @@ -1,181 +0,0 @@ -map_hash_bucket_size 256; -map $request_uri $redirect_uri { - ~^/docs/rancher/v2.0/(.*)$ /docs/rancher/v2.x/$1; - ~^/docs/rke/v0.1.x(/?.*)$ /docs/rke/latest$1; - ~^/docs/os/quick-start-guide/?$ /rancher-os; - ~^/docs/rancher/v1.0/zh/rancher-compose/?$ /docs/rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/; - ~^/docs/rancher/v1.0/zh/rancher-compose/?$ /docs/rancher/v1.6/en/cattle/rancher-compose/; - ~^/docs/rancher/rancher-ui/applications/stacks/adding-balancers/?$ /docs/rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/; - ~^/docs/os/running-rancheros/server/install-to-disk/?$ /os/v1.x/en/quick-start-guide/; - ~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ /docs/os/v1.x/en/overview/; - ~^/docs/rancher/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/; - ~^/docs/os/running-rancheros/server/install-to-disk/?$ /docs/os/v1.x/en/quick-start-guide/; - ~^/docs/os/running-rancheros/cloud/gce/?$ /docs/os/v1.x/en/installation/running-rancheros/cloud/gce/; - ~^/docs/os/amazon-ecs/?$ /docs/os/v1.x/en/installation/running-rancheros/cloud/aws/; - ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/overview/architecture/; - ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; - ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/overview/architecture/; - ~^/docs/rancher/rancher-compose/?$ /docs/rancher/v2.x/en/; - ~^/docs/os/networking/interfaces/?$ /docs/os/v1.x/en/installation/networking/interfaces/; - ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; - ~^/docs/rancher/concepts/?$ /docs/rancher/v2.x/en/; - ~^/docs/rancher/rancher-services/storage-service/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; - ~^/docs/rancher/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/installation/ha/; - ~^/docs/rancher/upgrading/?$ /docs/rancher/v2.x/en/upgrades/; - ~^/docs/rancher/configuration/access-control/?$ /docs/rancher/v2.x/en/admin-settings/rbac/; - ~^/docs/os/running-rancheros/server/install-to-disk/?$ /docs/os/v1.x/en/installation/running-rancheros/; - ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; - ~^/docs/os/configuration/custom-rancheros-iso/?$ /docs/os/v1.x/en/installation/configuration/; - ~^/docs/rancher/rancher-compose/?$ /docs/rancher/v2.x/en/; - ~^/docs/os/running-rancheros/server/raspberry-pi/?$ /docs/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/; - ~^/docs/rancher/v1.0/zh/installing-rancher/installing-server/?$ /docs/rancher/v1.6/en/installing-rancher/installing-server/; - ~^/docs/rancher/rancher-services/metadata-service/?$ /docs/rancher/v1.6/en/rancher-services/metadata-service/; - ~^/docs/rancher/api/?$ /docs/rancher/v2.x/en/api/; - ~^/docs/os/running-rancheros/server/raspberry-pi/?$ /docs/os/v1.x/en/installation/running-rancheros/server/raspberry-pi/; - ~^/docs/os/quick-start-guide/?$ /docs/os/v1.x/en/quick-start-guide/; - ~^/docs/os/configuration/switching-consoles/?$ /docs/os/v1.x/en/about/recovery-console/; - ~^/docs/os/running-rancheros/server/install-to-disk//?$ /docs/os/v1.x/en/installation/running-rancheros/server/install-to-disk/; - ~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ 
/docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/; - ~^/docs/rancher/v1.2/en/installing-rancher/installing-server/multi-nodes//?$ /docs/rancher/v2.x/en/installation/ha/; - ~^/docs/os/running-rancheros/workstation/boot-from-iso/?$ /docs/os/v1.x/en/installation/running-rancheros/workstation/boot-from-iso/; - ~^/docs/os/system-services/adding-system-services/?$ /docs/os/v1.x/en/installation/system-services/adding-system-services/; - ~^/docs/rancher/installing-rancher/installing-server/?$ /docs/rancher/v2.x/en/installation/; - ~^/docs/rancher/latest/en/installing-rancher/installing-server/multi-nodes/?$ /docs/rancher/v2.x/en/installation/ha/; - ~^/docs/rancher/rancher-services/metadata-service/?$ /docs/rancher/v1.6/en/rancher-services/metadata-service/; - ~^/docs/rancher/rancher-services/health-checks/?$ /docs/rancher/v1.0/en/rancher-services/health-checks/; - ~^/docs/os/cloud-config/?$ /docs/os/v1.x/en/installation/configuration/; - ~^/docs/rancher/api/?$ /docs/rancher/v2.x/en/api/; - - ~^/docs/rancher/v1.0/en/environments/certificates/?$ /docs/rancher/v1.0/en/rancher-ui/infrastructure/certificates/; - ~^/docs/rancher/v1.1/en/api/api-keys/?$ /docs/rancher/v1.1/en/api/v1/api-keys/; - ~^/docs/rancher/v1.1/zh/?$ /docs/rancher/v1.1/en/; - ~^/docs/rancher/v1.2/en/api/api-keys/?$ /docs/rancher/v1.2/en/api/v2-beta/api-keys/; - ~^/docs/rancher/v1.2/zh/?$ /docs/rancher/v1.2/en/; - ~^/docs/rancher/v1.3/en/api/api-keys/?$ /docs/rancher/v1.3/en/api/v2-beta/api-keys/; - ~^/docs/rancher/v1.4/en/api/api-keys/?$ /docs/rancher/v1.4/en/api/v2-beta/api-keys/; - ~^/docs/rancher/v1.4/zh/?$ /docs/rancher/v1.4/en/; - ~^/docs/rancher/v1.5/en/api/api-keys/?$ /docs/rancher/v1.5/en/api/v2-beta/api-keys/; - ~^/docs/rancher/v1.5/zh/?$ /docs/rancher/v1.5/en/; - ~^/docs/rancher/v1.6/en/api/api-keys/?$ /docs/rancher/latest/en/api/v2-beta/api-keys/; - ~^/docs/rancher/v2.x/en/admin-settings/agent-options/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/; - ~^/docs/rancher/v2.x/en/admin-settings/api-audit-log/?$ /docs/rancher/v2.x/en/installation/options/api-audit-log/; - ~^/rancher/v2.x/en/admin-settings/custom-ca-root-certificate/?$ /docs/rancher/v2.x/en/installation/options/custom-ca-root-certificate/; - ~^/docs/rancher/v2.x/en/admin-settings/feature-flags/?$ /docs/rancher/v2.x/en/installation/options/feature-flags/; - ~^/docs/rancher/v2.x/en/admin-settings/removing-rancher/rancher-cluster-nodes/?$ /docs/rancher/v2.x/en/removing-rancher/; - ~^/docs/rancher/v2.x/en/admin-settings/removing-rancher/user-cluster-nodes/?$ /docs/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/; - ~^/docs/rancher/v2.x/en/admin-settings/server-url/?$ /docs/rancher/v2.x/en/admin-settings/; - ~^/docs/rancher/v2.x/en/admin-settings/tls-settings/?$ /docs/rancher/v2.x/en/installation/options/tls-settings/; - ~^/docs/rancher/v2.x/en/cluster-admin/kubectl/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; - ~^/docs/rancher/v2.x/en/cluster-provisioning/cluster-members/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/; - ~^/docs/rancher/v2.x/en/cluster-provisioning/custom-clusters/agent-options/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/; - ~^/docs/rancher/v2.x/en/cluster-provisioning/rancher-agents/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/; - ~^/docs/rancher/v2.x/en/concepts/cli-configuration/?$ /docs/rancher/v2.x/en/cli/; - ~^/docs/rancher/v2.x/en/concepts/volumes-and-storage/?$ 
/docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; - ~^/docs/rancher/v2.x/en/faq/cleaning-cluster-nodes/?$ /docs/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/; - ~^/docs/rancher/v2.x/en/installation/air-gap-high-availability/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; - ~^/docs/rancher/v2.x/en/installation/air-gap-high-availability/provision-hosts/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/; - ~^/docs/rancher/v2.x/en/installation/air-gap-installation/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; - ~^/docs/rancher/v2.x/en/installation/air-gap-installation/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; - ~^/docs/rancher/v2.x/en/installation/air-gap-installation/prepare-private-reg/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/; - ~^/docs/rancher/v2.x/en/installation/air-gap-single-node/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/; - ~^/docs/rancher/v2.x/en/installation/air-gap-single-node/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; - ~^/docs/rancher/v2.x/en/installation/api-auditing/?$ /docs/rancher/v2.x/en/installation/options/api-audit-log/; - ~^/docs/rancher/v2.x/en/installation/backups-and-restoration/ha-backup-and-restoration/?$ /docs/rancher/v2.x/en/backups/backups/ha-backups/; - ~^/docs/rancher/v2.x/en/installation/backups-and-restoration/single-node-backup-and-restoration/?$ /docs/rancher/v2.x/en/backups/backups/single-node-backups/; - ~^/docs/rancher/v2.x/en/installation/ha-server-install-external-lb/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-7-lb/; - ~^/docs/rancher/v2.x/en/installation/ha-server-install/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/; - ~^/docs/rancher/v2.x/en/installation/ha-server-install/nlb/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/layer-4-lb/nlb/; - ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/chart-options/?$ /docs/rancher/v2.x/en/installation/options/chart-options/; - ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/tls-secrets/?$ /docs/rancher/v2.x/en/installation/options/tls-secrets/; - ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/troubleshooting/?$ /docs/rancher/v2.x/en/installation/options/troubleshooting/; - ~^/docs/rancher/v2.x/en/installation/ha/rke-add-on/api-auditing/?$ /docs/rancher/v2.x/en/installation/options/rke-add-on/api-auditing/; - ~^/docs/rancher/v2.x/en/installation/references/?$ /docs/rancher/v2.x/en/installation/requirements/; - ~^/docs/rancher/v2.x/en/installation/single-node-install-external-lb/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/single-node-install-external-lb/; - ~^/docs/rancher/v2.x/en/installation/single-node-install/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/; - ~^/docs/rancher/v2.x/en/installation/single-node/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/editing-clusters/?$ /docs/rancher/v2.x/en/cluster-admin/editing-clusters/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/kubeconfig/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubeconfig/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/kubectl/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; - 
~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/?$ /docs/rancher/v2.x/en/cluster-admin/projects-and-namespaces/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/editing-projects/?$ /docs/rancher/v2.x/en/project-admin/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/project-members/?$ /docs/rancher/v2.x/en/project-admin/project-members/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/projects-and-namespaces/resource-quotas/?$ /docs/rancher/v2.x/en/project-admin/resource-quotas/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/nfs/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/examples/vsphere/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/; - ~^/docs/rancher/v2.x/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/?$ /docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/; - ~^/docs/rancher/v2.x/en/project-admin/tools/pipelines/?$ /docs/rancher/v2.x/en/project-admin/pipelines/; - ~^/docs/rancher/v2.x/en/tasks/clusters/creating-a-cluster/?$ /docs/rancher/v2.x/en/cluster-provisioning/; - ~^/docs/rancher/v2.x/en/tasks/clusters/creating-a-cluster/create-cluster-amazon-ec2/?$ /docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/; - ~^/docs/rancher/v2.x/en/tasks/clusters/using-kubectl-to-access-a-cluster/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; - ~^/docs/rancher/v2.x/en/tools/?$ /docs/rancher/v2.x/en/cluster-admin/tools/; - ~^/docs/rancher/v2.x/en/tools/logging/?$ /docs/rancher/v2.x/en/cluster-admin/tools/logging/; - ~^/docs/rancher/v2.x/en/tools/pipelines/?$ /docs/rancher/v2.x/en/project-admin/pipelines/; - ~^/docs/rancher/v2.x/en/tools/pipelines/docs-for-v2.0.x/?$ /docs/rancher/v2.x/en/project-admin/pipelines/docs-for-v2.0.x/; - ~^/docs/rancher/v2.x/en/upgrades/ha-server-rollbacks/?$ /docs/rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks/; - ~^/docs/rancher/v2.x/en/upgrades/single-node-rollbacks/?$ /docs/rancher/v2.x/en/upgrades/rollbacks/single-node-rollbacks/; - ~^/docs/rancher/v2.x/en/upgrades/upgrades/ha-server-upgrade-helm/?$ /docs/rancher/v2.x/en/upgrades/upgrades/ha/; - ~^/docs/rancher/v2.x/en/upgrades/upgrades/single-node-upgrade/?$ /docs/rancher/v2.x/en/upgrades/upgrades/single-node/; - ~^/docs/rke/latest/en/installation/os/?$ /docs/rke/latest/en/os/; - - ~^/docs/rancher/v2.x/en/k8s-in-rancher/nodes/?$ /docs/rancher/v2.x/en/cluster-admin/nodes/; - ~^/docs/rancher/v2.x/en/installation/air-gap-high-availability/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; - ~^/docs/rancher/v2.x/en/installation/k8s-install-server-install/?$ /docs/rancher/v2.x/en/installation/options/helm2/rke-add-on/layer-4-lb/; - ~^/docs/rancher/v1.0/en/infrastructure/hosts/?$ /docs/rancher/v1.0/en/rancher-ui/infrastructure/hosts/; - ~^/docs/rancher/v2.x/en/cluster-admin/cluster-access/kubeconfig/?$ /docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/; - ~^/docs/os/v1.0/en/configuration/custom-console/?$ /docs/os/latest/en/configuration/switching-consoles/; - ~^/docs/os/latest/en/configuration/switching-consoles/?$ /docs/os/v1.x/en/configuration/switching-consoles/; - ~^/docs/os/v1.1/en/configuration/custom-console/?$ /docs/os/v1.1/en/configuration/switching-consoles/; - 
~^/docs/os/v1.1/en/system-services/built-in-system-services/?$ /docs/os/v1.1/en/boot-process/built-in-system-services/; - ~^/docs/os/v1.2/en/configuration/custom-console/?$ /docs/os/v1.2/en/configuration/switching-consoles/; - ~^/docs/os/v1.2/en/system-services/built-in-system-services/?$ /docs/os/v1.2/en/boot-process/built-in-system-services/; - ~^/docs/rancher/v2.x/en/removing-rancher/?$ /docs/rancher/v2.x/en/faq/removing-rancher/; - ~^/docs/rancher/v2.x/en/installation/ha/?$ /docs/rancher/v2.x/en/installation/k8s-install/; - ~^/docs/rancher/v2.x/en/installation/ha/helm-rancher/?$ /docs/rancher/v2.x/en/installation/k8s-install/helm-rancher/; - ~^/docs/rancher/v2.x/en/installation/other-installation-methods/single-node/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/single-node-docker/; - ~^/docs/rancher/v2.x/en/installation/air-gap/install-rancher/?$ /docs/rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/; - ~^/docs/rancher/v1.0/en/api/v1/access-control/?$ /docs/rancher/v1.0/en/api/v1/api-keys/; - ~^/docs/os/latest/en/storage/additional-mounts/?$ /docs/os/v1.x/en/storage/additional-mounts/; - ~^/docs/os/v1.0/en/configuration/custom-rancheros-iso/?$ /docs/os/latest/custom-builds/custom-rancheros-iso/; - ~^/docs/os/v1.0/en/configuration/custom-kernels/?$ /docs/os/latest/custom-builds/custom-kernels/; - ~^/docs/rancher/v1.0/en/environments/?$ /docs/rancher/v1.0/en/configuration/environments/; - ~^/docs/os/v1.1/en/configuration/custom-kernels/?$ /docs/os/v1.1/en/custom-builds/custom-kernels/; - ~^/docs/os/v1.0/en/system-services/built-in-system-services/?$ /docs/os/latest/boot-process/built-in-system-services/; - ~^/docs/os/latest/custom-builds/custom-rancheros-iso/?$ /docs/os/v1.x/en/custom-builds/custom-rancheros-iso/; - ~^/docs/os/v1.0/en/system-services/?$ /docs/os/latest/en/system-services/adding-system-services/; - ~^/docs/os/v1.0/en/configuration/additional-mounts/?$ /docs/os/latest/en/storage/additional-mounts/; - ~^/docs/os/latest/custom-builds/custom-kernels/?$ /docs/os/v1.x/en/custom-builds/custom-kernels/; - ~^/docs/os/v1.1/en/system-services/?$ /docs/os/v1.1/en/system-services/adding-system-services/; - ~^/docs/os/v1.1/en/configuration/additional-mounts/?$ /docs/os/v1.1/en/storage/additional-mounts/; - ~^/docs/os/latest/boot-process/built-in-system-services/?$ /docs/os/v1.x/en/boot-process/built-in-system-services/; - ~^/docs/os/latest/en/system-services/adding-system-services/?$ /docs/os/v1.x/en/system-services/adding-system-services/; - ~^/docs/rancher/v1.0/en/cattle/rancher-compose/?$ /docs/rancher/v1.0/en/rancher-compose/; - ~^/docs/os/v1.1/en/configuration/custom-docker/?$ /docs/os/v1.1/en/configuration/switching-docker-versions/; - ~^/docs/os/v1.2/en/configuration/custom-kernels/?$ /docs/os/v1.x/en/custom-builds/custom-kernels/; - ~^/docs/os/v1.2/en/configuration/custom-rancheros-iso/?$ /docs/os/v1.x/en/custom-builds/custom-rancheros-iso/; - ~^/docs/os/v1.2/en/system-services/?$ /docs/os/v1.2/en/system-services/adding-system-services/; - ~^/docs/os/v1.2/en/configuration/additional-mounts/?$ /docs/os/v1.2/en/storage/additional-mounts/; - ~^/docs/rancher/v2.x/en/backups/rollbacks/?$ /docs/rancher/v2.x/en/upgrades/; - ~^/docs/rancher/v2.x/en/admin-settings/feature-flags/enable-not-default-storage-drivers/?$ /docs/rancher/v2.x/en/installation/options/feature-flags/enable-not-default-storage-drivers/; - ~^/docs/rancher/v2.x/en/installation/server-tags/?$ /docs/rancher/v2.x/en/installation/options/server-tags/; - 
~^/rancher/v2.x/en/admin-settings/feature-flags/istio-virtual-service-ui/?$ /docs/rancher/v2.x/en/installation/options/feature-flags/istio-virtual-service-ui/; - ~^/docs/os/v1.1/en/configuration/custom-rancheros-iso/?$ /docs/os/v1.1/en/custom-builds/custom-rancheros-iso/; -} - -server { - listen 80; - server_name localhost; - absolute_redirect off; - - location / { - root /usr/share/nginx/html; - index index.html index.htm; - } - - error_page 404 /404.html; - error_page 500 502 503 504 /50x.html; - - if ( $redirect_uri ) { - return 302 $redirect_uri; - } -} diff --git a/package.json b/package.json index adc998cd0f..4d48c42de2 100644 --- a/package.json +++ b/package.json @@ -1,27 +1,40 @@ { "name": "rancher-docs", - "author": "Rancher Labs, Inc.", - "license": "Apache-2.0", - "version": "2.2.0", + "version": "0.0.0", "private": true, "scripts": { - "dev": "./scripts/dev", - "dev-theme": "./scripts/dev -t ../website-theme", - "build-algolia": "node ./scripts/build-algolia.js", - "publish-algolia": "node ./scripts/publish-algolia.js" + "docusaurus": "docusaurus", + "start": "docusaurus start", + "build": "docusaurus build", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "clear": "docusaurus clear", + "serve": "docusaurus serve", + "write-translations": "docusaurus write-translations", + "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "atomic-algolia": "^0.3.15", - "instantsearch.js": "^2.8.0", - "izimodal": "^1.5.1", - "jquery": "^3.5.0", - "jsdom": "^11.11.0", - "lory.js": "^2.4.1", - "md5": "^2.2.1", - "moment": "^2.20.1", - "moment-timezone": "^0.5.26", - "rancher-website-theme": "https://github.com/rancherlabs/website-theme.git", - "request": "^2.87.0", - "tingle.js": "^0.13.2" + "@docusaurus/core": "2.0.0-beta.21", + "@docusaurus/preset-classic": "2.0.0-beta.21", + "@mdx-js/react": "^1.6.22", + "clsx": "^1.1.1", + "prism-react-renderer": "^1.3.3", + "react": "^17.0.2", + "react-dom": "^17.0.2" + }, + "devDependencies": { + "@docusaurus/module-type-aliases": "2.0.0-beta.21" + }, + "browserslist": { + "production": [ + ">0.5%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] } } diff --git a/pull_request_template.md b/pull_request_template.md deleted file mode 100644 index 807614b749..0000000000 --- a/pull_request_template.md +++ /dev/null @@ -1,4 +0,0 @@ -### For Rancher (product) docs only -When contributing to docs, please update the versioned docs. For example, the docs in the v2.6 folder of the `rancher` folder. - -Doc versions older than the latest minor version should only be updated to fix inaccuracies or make minor updates as necessary. The majority of new content should be added to the folder for the latest minor version. diff --git a/scripts/build-algolia.js b/scripts/build-algolia.js deleted file mode 100755 index 2e802de56c..0000000000 --- a/scripts/build-algolia.js +++ /dev/null @@ -1,86 +0,0 @@ -#! 
/usr/bin/env node - -'use strict'; -const jsdom = require("jsdom"); -const { - JSDOM -} = jsdom; -const md5 = require('md5'); -const fs = require('fs'); -const newNodes = []; -const newParagraphs = []; -const rawdata = fs.readFileSync('/output/algolia.json'); -const nodes = JSON.parse(rawdata); - -nodes.forEach(node => { - const dom = new JSDOM(node.content); - const content = dom.window.document.body; //post content wrapped in a body tag - const contentChildren = content.children; // all the children of the body tag - const paragraphOut = { - anchor: '#', - title: '', - content: '', - postref: node.objectID, - objectID: null, - permalink: node.permalink - }; - - let childCount = contentChildren.length - 1; // how many children - - // loop over the content until the next h2 heading -> this is the paragraph of searchable text - while(childCount >= 0) { - const child = contentChildren[childCount]; - - if (child.tagName === "H2") { - //this is our header - paragraphOut.anchor = `#${child.id}`; - paragraphOut.title = child.textContent; - - let next = child.nextElementSibling; - - while(next && next.tagName !== 'H2') { - if (next && next.textContent) { - paragraphOut.content += next.textContent; - } - next = next.nextElementSibling; - } - - } - - childCount--; - } - - // a post without headers - if (paragraphOut.title === '') { - // Set the title to the page title - paragraphOut.title = node.title; - - // pass along the content - paragraphOut.content = content.textContent; - } - - if (paragraphOut.content) { - // limit the content to 10k so we dont blow up just incase someone decides to make a 40k blog post in one paragraph ¯\_(ツ)_/¯ - paragraphOut.content = paragraphOut.content.substr(0, 9000); - - // objectID is not quite unique yet so hash the entire object - paragraphOut.objectID = md5(JSON.stringify(paragraphOut)); - - newParagraphs.push(paragraphOut); - newNodes.push(node); - } - - - // remove potentially large content (see size limits) and replace with the summary so that we don't get results with zero highlightable results - node.content = node.summary; - - // remove summary for dedup - delete node.summary; - -}); - -const merged = [...newParagraphs, ...newNodes]; - -fs.writeFileSync('/output/final.algolia.json', JSON.stringify(merged)); - -process.exit(0); diff --git a/scripts/converters/Dockerfile b/scripts/converters/Dockerfile deleted file mode 100644 index 1a30b8cf80..0000000000 --- a/scripts/converters/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM debian:latest - -RUN apt-get update -RUN apt-get -y install build-essential python3-dev python3-pip python3-setuptools python3-wheel python3-cffi libcairo2 libpango-1.0-0 libpangocairo-1.0-0 libgdk-pixbuf2.0-0 libffi-dev shared-mime-info pandoc jq - -RUN apt-get clean -RUN apt-get autoclean - -RUN pip3 install WeasyPrint - -WORKDIR /doc_tools - -COPY fonts/ fonts/ -COPY css css/ -COPY images images/ -COPY templates templates/ -COPY headers headers/ -COPY scripts scripts/ - -RUN ls -la fonts - -ENTRYPOINT ["scripts/entrypoint.sh"] diff --git a/scripts/converters/README.md b/scripts/converters/README.md deleted file mode 100644 index 045e5cf6c6..0000000000 --- a/scripts/converters/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Converters -A collection of scripts and tools to transform documentation from one format to another. These scripts leverage docker containers to ease portability to different platforms. 
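The converter image above only installs pandoc and WeasyPrint and hands control to `scripts/entrypoint.sh`, which is not included in this diff, so the exact conversion steps it performs are an assumption. As a rough sketch of the markdown-to-PDF path those two tools imply, run directly rather than through Docker (file names here are illustrative):

```bash
# Sketch only: approximates the pandoc + WeasyPrint flow the image wraps.
# The real scripts/entrypoint.sh is not shown in this diff, so treat these
# commands as an illustration, not the actual entrypoint.

INPUT_MD=hardening-guide.md           # illustrative input file
CSS=css/self-assessment-style.css     # stylesheet copied into the image

# 1. Render the markdown to standalone HTML with pandoc.
pandoc -f markdown -t html5 -s --css "$CSS" "$INPUT_MD" -o /tmp/guide.html

# 2. Turn the HTML into a PDF with WeasyPrint.
weasyprint /tmp/guide.html output/output.pdf
```

The Usage section that follows covers the supported, Docker-based way to run the same conversions.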
- -### Usage -To get started first build the local docker image by running the build script: - -``` -build.sh -``` - -#### Kube-bench json results to markdown -This converter takes two positional arguments, a `kube-bench` json results file and the directory of the helper scripts used for the kube-bench execution. - -``` -./run_results_to_md.sh results.json kube-bench/test_helpers -``` - -The conversion is sent to `stdout` which can be redirected to a file to save the conversion. - -#### Markdown to pdf -This converter takes a single argument, a markdown file to convert to pdf format. The resulting pdf file is saved in `output/output.pdf` - -``` -./run_md_to_pdf.sh ../../content/rancher/v2.x/en/security/hardening-2.3.3/_index.md -``` diff --git a/scripts/converters/build.sh b/scripts/converters/build.sh deleted file mode 100755 index 2251ca81a2..0000000000 --- a/scripts/converters/build.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker build --rm -t doc_converters:latest . diff --git a/scripts/converters/css/self-assessment-style.css b/scripts/converters/css/self-assessment-style.css deleted file mode 100644 index 4cdf2a4d2f..0000000000 --- a/scripts/converters/css/self-assessment-style.css +++ /dev/null @@ -1,87 +0,0 @@ -body { - font-family: sans-serif; -} - -h1 { - font-size: 50px; - color: #4a6482; - font-weight: 400; - margin-top: 70px; -} - -h2 { - font-size: 40px; - font-weight: 400; - color: #4a6482; - background-color: ghostwhite; - padding-top: 70px; - padding-left: 10px; - padding-right: 10px; - padding-bottom: 10px; - margin-bottom: 0; -} - -h3 { - font-size: 30px; - font-weight: 400; - color: white; - background-color: #8ea3be; - padding-left: 10px; - padding-right: 10px; - padding-bottom: 10px; - padding-top: 30px; - margin-top: 0; -} - -h4 { - font-size: 20px; - color: #4a6482; - margin-top: 50px; -} - -li { - margin-top: 12px; - margin-bottom: 10px; -} - -a { - text-decoration: none; - color: #0075A8; -} - -p { - margin-top: 20px; - line-height: 25px; - font-size: 15px; -} - -strong { - color: #4a6482; -} -code { - background-color: ghostwhite; - line-height: 25px; - color: #4a6482; - word-wrap: break-word; -} - -h4 > code { - color: #4a6482; -} - -.sourceCode { - color: #333333; - background-color: #f4f4f4; - font-size: 15px; - width: 100%; - display: inline-block; - overflow-wrap: break-word; - word-wrap: break-word; - word-break: normal; - line-break: strict; - hyphens: none; - -webkit-hyphens: none; - -moz-hyphens: none; - white-space: normal; - padding-right: 50px; -} diff --git a/scripts/converters/css/style-portrait.css b/scripts/converters/css/style-portrait.css deleted file mode 100644 index ab19e788e2..0000000000 --- a/scripts/converters/css/style-portrait.css +++ /dev/null @@ -1,372 +0,0 @@ -/* -Theme Name: Linux Academy Study Guide Template 08-14-2019 -*/ - -/* -#@font-face {font-family: Poppins;src: url(fonts/Poppins/Poppins-Regular.ttf);} -@font-face {font-family: Poppins;src: url('https://fonts.googleapis.com/css?family=Poppins&display=swap');} -@font-face {font-family: Roboto;src: url(fonts/truetype/Roboto/Roboto-Regular.ttf);} -@font-face {font-family: PoppinsExtraLight; src: url(fonts/truetype/Poppins/Poppins-ExtraLight.ttf);} -*/ - -/* This lighter one is only used as H1, and in the table of contents */ - -font-family: 'Poppins', sans-serif; -font-family: 'Roboto', sans-serif; - - -@page :first { - size: portrait; - padding:0cm; - border-left-style: none; - background:none; - background: 
url("../images/rancher-logo-stacked-color.png") no-repeat left; - background-size: 50cm; - background-position: top 1cm left; - margin-top:1cm; - margin-bottom:1cm; - margin-left:1cm; - - @top-left { - background: #000; - color:#fff; - content: "v2.3.5"; - height: 1cm; - text-align: center; - width: 5cm; - font-size:1em; - font-weight:bold;} - - @top-center { - background: none; - content: ''; } - @top-right { - background: none; - content: ''; } - -/* - @bottom-left { - height: 1.5cm; - width:4.6cm; - background-color:#000; - content: url("images/white_logo.png"); - - - } -*/ - - @bottom-left img {padding-left:.4cm;float: none;margin-top:-.2cm;} - @bottom-center { - background: none; - content: ''; } - @bottom-right { - background: none; - content: ''; } - @bottom-right-corner { - background: none; - border-left: 1px solid #fff; - content: '';} - } - -@page { - font-family: Poppins; - size: portrait; - margin-top:1cm; - padding:.5cm; - border-left-style: dashed; border-left-width: 1px; border-color: #000; - @top-left { - background: #000; - color:#fff; - content: "" string(title); - height: 1cm; - text-align: center; - width: auto; - font-size:1em; - font-weight:bold; - padding-right:.5cm; - padding-left:.5cm; - } - - @top-center { - background: none; - content: none; - margin-bottom: 2cm; - } - @top-right { - background: none; - content: none;} - @bottom-left { - background: none; - content: none; } - @bottom-right { - content: url("../images/rancher_logo.svg"); - height: 1cm; - vertical-align: middle; - margin-top:.4cm; - width: 5.5cm; } - @bottom-right img {width:10%;/*height:auto;*/padding-right:.2cm;padding-top:.1cm;} - - @bottom-right-corner { - background: #fff; - content: counter(page); - width: 2cm; - height:.5cm; - border-left: 1px solid #000; - margin-top:.5cm; - padding:.2cm; - font-size: 10pt;} - } - -@page :blank { - size: portrait; - @top-left { - padding:0cm; - background: #000; - color:#fff; - content: "Rancher 2.3.x"; - height: 1cm; - text-align: center; - width: 5cm; - font-size:1em; - font-weight:bold;} - @top-center { - content: none; } - @top-right { - content: none; } - @bottom-right { - background: none; - content: ''; } - @bottom-right-corner { - background: none; - border-left: 1px solid #fff; - content: '';} - - } - -@page no-chapter { - size: portrait; - border-left-style: none; - @top-left { - background: #000; - color:#fff; - content: "" string(title); - height: 1cm; - text-align: center; - width: auto; - font-size:1em; - font-weight:bold; - padding-right:.5cm; - padding-left:.5cm;} - @top-center { - content: none; } - @top-right { - content: none; } - @bottom-left { - background: none; - content: none; } - @bottom-center { - content: none; } - @bottom-right { - content: url("../images/rancher_logo.svg"); - font-size: 9pt; - height: 1cm; - vertical-align: middle; - margin-top:.4cm; - width: 5.5cm; } - @bottom-right img {width:20%;/*height:auto;*/padding-right:.2cm;padding-top:.1cm;} - } - -body { font-family: Poppins;color: #000; line-height:1.25em;padding:.25cm;} - -header{/*margin-left:11cm;*/ margin-top:10cm;border-left: 1px solid #aaa;min-height:6.5cm;} -#header_bottom_text{/*position:absolute;bottom:6.2cm;*/} - -h1.title {font-family:PoppinsExtraLight;font-weight:100;font-size:2.5em;line-height:1em; padding-left:1cm;} -p.subtitle{margin-left:1cm;margin-bottom:1.5cm;font-weight:bold;} -p.author{margin-left:1cm;margin-top:0; margin-bottom:0;font-weight:bold;} -p.email{margin-left:1cm;margin-top:0; margin-bottom:0;font-weight:bold;} -p.email a {color:#000;} 
-p.date{margin-left:1cm;margin-top:0; margin-bottom:0;font-weight:bold;} - -#contentsbox { height:1.5cm; width:4cm; border-bottom:1px solid #aaa;margin-top:1.5cm;margin-bottom:1.5cm;} -#contentsbox p {font-family:PoppinsExtraLight;font-weight:100;font-size:2.5em;} - -img {float:right;max-width:50%; height:auto;border-radius:10px;margin-left:1cm;margin-right:-1cm;} - -a:link{outline:none;color:#000;font-weight:bold;} - -pre {font-family: Roboto;font-size:.95em; margin-left:.5cm; background-color:#eee;padding:2px; - white-space: pre-wrap; - word-wrap: normal;} -code {font-family: Roboto;font-size:.95em;background-color:#87dcc5;padding:2px;} - -pre code{background-color:#eee;} - -pre.sourceCode{color:initial;} - -/* ------------------- Lists --------------------------*/ - -/* Top level is bold, next level is numbered */ -ul li {margin-left:-.25cm;font-weight:bold;} -ul li ul {list-style-type: decimal;} -ul li ul li {margin-left:-.25cm;font-weight:lighter;} -ul li ul li ul {list-style-type: none;} - -/* Same as above, but top level is normal weight */ -.nob ul li {margin-left:-.25cm;font-weight:normal;} -.nob ul li ul {list-style-type: decimal;} -.nob ul li ul li {margin-left:-.25cm;font-weight:lighter;} -.nob ul li ul li ul {list-style-type: none;} - -/* Top level is bold, next level is just bullets, no numbers */ -.nonum ul li {margin-left:-.25cm;font-weight:bold;} -.nonum ul li ul {list-style-type: disc;} -.nonum ul li ul li {margin-left:-.25cm;font-weight:lighter;} -.nonum ul li ul li ul {list-style-type: none;} - -h1 {string-set:title content();} - -h2 { - max-width:50%; - line-height:2em; vertical-align: middle;padding-left:1cm; padding-top:.2cm; padding-bottom:.2cm;font-size:.9em; margin-left:-1.5cm;width:60%; background-color:#fff; - border-radius:5px; - border-top: 1px solid #eee; - border-left: 1px solid #eee; - border-right: 1px solid #aaa; - border-bottom: 1px solid #aaa; - page-break-before: always; - font-size:1.5em; - } - -h3 {font-size:1.2em;} -h4 {font-size:1.3em; - line-height:30px; -} -h5 {font-size:1.2em;} -h6 {font-size:1.1em;} - -h1 code {background:none;} -h2 code {background:none;} -h3 code {background:none;} -h4 code {background:none;} -h5 code {background:none;} -h6 code {background:none;} - -/*---------------------------------------- NAV (also known as Table of Contents) -----------------------------------------*/ - -/*-------------------------------------------- -08-14-2019 -By far, the biggest issue with this template -is the fact that the TOC is only one column. 
-When I have recovered from this ruckus, I -will be digging in and trying to make it work ---------------------------------------------*/ - - -nav {position:relative;margin-left:-.5em;background: none;string-set:heading content(); page:no-chapter;page-break-after: always;} -nav ul {margin-top:0;margin-left:-.5em;list-style-type: none;} - -nav ul li {margin-top:.75em; margin-bottom:1em; width:100%;} -nav ul li a { - border-radius:5px; - border-top: 1px solid #eee; - border-left: 1px solid #eee; - border-right: 1px solid #aaa; - border-bottom: 1px solid #aaa; - display:block; - height:2em; - padding-top:.6em; - padding-left:.6em; - margin-left:-1.5em; -} - -nav ul li a::after {content: target-counter(attr(href url), page, decimal); float:right;margin-right:10px;} -nav ul li ul {list-style-type: none; border-left-style: dashed; border-left-width: 1px; border-color: #000; margin-top:1.5em;} -nav ul li ul li {margin-left:-.5em;color:#ff0000;} -nav ul li ul li a {border:none;font-family:PoppinsExtraLight;font-size:.75em;margin-bottom:1.8em;} -nav ul li ul li a::after {font-size:.75em;} -nav code {background:none;} -nav a{text-decoration:none;outline:none;color:#000;} - -table { - border-collapse: collapse; -} -td, th { - border: 1px solid #ddd;padding: 10px; -} - -blockquote{color:#fff; background-color:#777;padding:2mm;border-radius:4px;border:1px solid #000;} -blockquote pre, blockquote code {background-color:#555;} - - -.pagebreak { page-break-before: always; } - - -code { - background-color: ghostwhite; - font-family: monospace; - line-height: 25px; - color: #4a6482; - word-wrap: break-word; -} - -h4 > code { - color: #4a6482; -} - -#.sourceCode { -# color: #333333; -# background-color: #f4f4f4; -# font-size: 15px; -# width: 100%; -# display: inline-block; -# overflow-wrap: break-word; -# word-wrap: break-word; -# word-break: normal; -# line-break: strict; -# hyphens: none; -# -webkit-hyphens: none; -# -moz-hyphens: none; -# white-space: normal; -# padding-right: 50px; -#} - -.sourceCode { - background-color: ghostwhite; - font-family: monospace; - line-height: 25px; - color: #4a6482; - word-wrap: break-word; -} - -/*-------------------------------------------- -08-14-2019 -The Python looks good here, the Bash not -so much. So we're leaving them black, at -least for now, to match the original template ---------------------------------------------*/ - -/* -code.sourceCode.python {color:#d49173;} -.op {color:#89d473;} -.dv {color: #ff0000;} -code.sourceCode.bash {color:#739ed4;} -.ex{color:#b473d4;} -*/ - -/*-------------------------------------------- -08-14-2019 -Here I was trying to dork with image captions, -but they keep showing up on the left ---------------------------------------------*/ - -/* -figure { - display: table; -} -figcaption { - display: table-caption; - caption-side: bottom; - float:right; -} -*/ diff --git a/scripts/converters/fonts/Poppins/OFL.txt b/scripts/converters/fonts/Poppins/OFL.txt deleted file mode 100644 index 450ee68541..0000000000 --- a/scripts/converters/fonts/Poppins/OFL.txt +++ /dev/null @@ -1,93 +0,0 @@ -Copyright 2014-2017 Indian Type Foundry (info@indiantypefoundry.com) - -This Font Software is licensed under the SIL Open Font License, Version 1.1. 
-This license is copied below, and is also available with a FAQ at: -http://scripts.sil.org/OFL - - ------------------------------------------------------------ -SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 ------------------------------------------------------------ - -PREAMBLE -The goals of the Open Font License (OFL) are to stimulate worldwide -development of collaborative font projects, to support the font creation -efforts of academic and linguistic communities, and to provide a free and -open framework in which fonts may be shared and improved in partnership -with others. - -The OFL allows the licensed fonts to be used, studied, modified and -redistributed freely as long as they are not sold by themselves. The -fonts, including any derivative works, can be bundled, embedded, -redistributed and/or sold with any software provided that any reserved -names are not used by derivative works. The fonts and derivatives, -however, cannot be released under any other type of license. The -requirement for fonts to remain under this license does not apply -to any document created using the fonts or their derivatives. - -DEFINITIONS -"Font Software" refers to the set of files released by the Copyright -Holder(s) under this license and clearly marked as such. This may -include source files, build scripts and documentation. - -"Reserved Font Name" refers to any names specified as such after the -copyright statement(s). - -"Original Version" refers to the collection of Font Software components as -distributed by the Copyright Holder(s). - -"Modified Version" refers to any derivative made by adding to, deleting, -or substituting -- in part or in whole -- any of the components of the -Original Version, by changing formats or by porting the Font Software to a -new environment. - -"Author" refers to any designer, engineer, programmer, technical -writer or other person who contributed to the Font Software. - -PERMISSION & CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining -a copy of the Font Software, to use, study, copy, merge, embed, modify, -redistribute, and sell modified and unmodified copies of the Font -Software, subject to the following conditions: - -1) Neither the Font Software nor any of its individual components, -in Original or Modified Versions, may be sold by itself. - -2) Original or Modified Versions of the Font Software may be bundled, -redistributed and/or sold with any software, provided that each copy -contains the above copyright notice and this license. These can be -included either as stand-alone text files, human-readable headers or -in the appropriate machine-readable metadata fields within text or -binary files as long as those fields can be easily viewed by the user. - -3) No Modified Version of the Font Software may use the Reserved Font -Name(s) unless explicit written permission is granted by the corresponding -Copyright Holder. This restriction only applies to the primary font name as -presented to the users. - -4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font -Software shall not be used to promote, endorse or advertise any -Modified Version, except to acknowledge the contribution(s) of the -Copyright Holder(s) and the Author(s) or with their explicit written -permission. - -5) The Font Software, modified or unmodified, in part or in whole, -must be distributed entirely under this license, and must not be -distributed under any other license. 
The requirement for fonts to -remain under this license does not apply to any document created -using the Font Software. - -TERMINATION -This license becomes null and void if any of the above conditions are -not met. - -DISCLAIMER -THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT -OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL -DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM -OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/scripts/converters/fonts/Poppins/Poppins-Black.ttf b/scripts/converters/fonts/Poppins/Poppins-Black.ttf deleted file mode 100644 index 3c8f363713..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Black.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-BlackItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-BlackItalic.ttf deleted file mode 100644 index f2139088da..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-BlackItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Bold.ttf b/scripts/converters/fonts/Poppins/Poppins-Bold.ttf deleted file mode 100644 index 6e26de76d4..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Bold.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-BoldItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-BoldItalic.ttf deleted file mode 100644 index 67d5513059..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-BoldItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-ExtraBold.ttf b/scripts/converters/fonts/Poppins/Poppins-ExtraBold.ttf deleted file mode 100644 index c2ac5271cc..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-ExtraBold.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-ExtraBoldItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-ExtraBoldItalic.ttf deleted file mode 100644 index 9b66592a59..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-ExtraBoldItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-ExtraLight.ttf b/scripts/converters/fonts/Poppins/Poppins-ExtraLight.ttf deleted file mode 100644 index 24e2d0c5c7..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-ExtraLight.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-ExtraLightItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-ExtraLightItalic.ttf deleted file mode 100644 index 1bc58aa192..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-ExtraLightItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Italic.ttf b/scripts/converters/fonts/Poppins/Poppins-Italic.ttf deleted file mode 100644 index 0bc0d35f22..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Italic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Light.ttf b/scripts/converters/fonts/Poppins/Poppins-Light.ttf deleted file mode 100644 index 52d424ba2b..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Light.ttf and /dev/null differ diff --git 
a/scripts/converters/fonts/Poppins/Poppins-LightItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-LightItalic.ttf deleted file mode 100644 index 2d98ab53fc..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-LightItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Medium.ttf b/scripts/converters/fonts/Poppins/Poppins-Medium.ttf deleted file mode 100644 index 89aae6b6af..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Medium.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-MediumItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-MediumItalic.ttf deleted file mode 100644 index 14704d619f..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-MediumItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Regular.ttf b/scripts/converters/fonts/Poppins/Poppins-Regular.ttf deleted file mode 100644 index 441d3baab4..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Regular.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-SemiBold.ttf b/scripts/converters/fonts/Poppins/Poppins-SemiBold.ttf deleted file mode 100644 index 3b8622f67b..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-SemiBold.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-SemiBoldItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-SemiBoldItalic.ttf deleted file mode 100644 index 257a1cf7ed..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-SemiBoldItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-Thin.ttf b/scripts/converters/fonts/Poppins/Poppins-Thin.ttf deleted file mode 100644 index 2d62e19e70..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-Thin.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Poppins/Poppins-ThinItalic.ttf b/scripts/converters/fonts/Poppins/Poppins-ThinItalic.ttf deleted file mode 100644 index bf24d1b135..0000000000 Binary files a/scripts/converters/fonts/Poppins/Poppins-ThinItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/LICENSE.txt b/scripts/converters/fonts/Roboto/LICENSE.txt deleted file mode 100644 index 75b52484ea..0000000000 --- a/scripts/converters/fonts/Roboto/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/scripts/converters/fonts/Roboto/Roboto-Black.ttf b/scripts/converters/fonts/Roboto/Roboto-Black.ttf deleted file mode 100644 index 2d45238365..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Black.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-BlackItalic.ttf b/scripts/converters/fonts/Roboto/Roboto-BlackItalic.ttf deleted file mode 100644 index 29a4359ed0..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-BlackItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Bold.ttf b/scripts/converters/fonts/Roboto/Roboto-Bold.ttf deleted file mode 100644 index d998cf5b46..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Bold.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-BoldItalic.ttf b/scripts/converters/fonts/Roboto/Roboto-BoldItalic.ttf deleted file mode 100644 index b4e2210393..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-BoldItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Italic.ttf b/scripts/converters/fonts/Roboto/Roboto-Italic.ttf deleted file mode 100644 index 5b390ff950..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Italic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Light.ttf b/scripts/converters/fonts/Roboto/Roboto-Light.ttf deleted file mode 100644 index 35267989de..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Light.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-LightItalic.ttf b/scripts/converters/fonts/Roboto/Roboto-LightItalic.ttf deleted file mode 100644 index 46e9bf7c95..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-LightItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Medium.ttf b/scripts/converters/fonts/Roboto/Roboto-Medium.ttf deleted file mode 100644 index f714a514d9..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Medium.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-MediumItalic.ttf b/scripts/converters/fonts/Roboto/Roboto-MediumItalic.ttf deleted file mode 100644 index 5dc6a2dc6c..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-MediumItalic.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Regular.ttf b/scripts/converters/fonts/Roboto/Roboto-Regular.ttf deleted file mode 100644 index 2b6392ffe8..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Regular.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-Thin.ttf b/scripts/converters/fonts/Roboto/Roboto-Thin.ttf deleted file mode 100644 index 4e797cf7ef..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-Thin.ttf and /dev/null differ diff --git a/scripts/converters/fonts/Roboto/Roboto-ThinItalic.ttf b/scripts/converters/fonts/Roboto/Roboto-ThinItalic.ttf deleted file mode 100644 index eea836f4a3..0000000000 Binary files a/scripts/converters/fonts/Roboto/Roboto-ThinItalic.ttf and /dev/null differ diff --git a/scripts/converters/headers/header-2.3.4.md b/scripts/converters/headers/header-2.3.4.md deleted file mode 100644 index 5076aac311..0000000000 --- a/scripts/converters/headers/header-2.3.4.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.4 -weight: 103 ---- - -### CIS Kubernetes Benchmark 1.5 - Rancher 2.3.4 with Kubernetes 1.15 - -[Click here to download a PDF 
version of this document](https://releases.rancher.com/documents/security/2.3.4/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.3.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.4 | Rancher v2.3.4 | Hardening Guide v2.3.4 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Scoring the commands is different in Rancher Labs than in the CIS Benchmark. Where the commands differ from the original CIS benchmark, the commands specific to Rancher Labs are provided for testing. Only **scored** tests will be covered in this guide. - -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the `jq` and `kubectl` (with valid config) commands to provide human-readable formatting. - -### Controls - ---- diff --git a/scripts/converters/headers/header-2.3.5.md b/scripts/converters/headers/header-2.3.5.md deleted file mode 100644 index 38afe8c6a2..0000000000 --- a/scripts/converters/headers/header-2.3.5.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 -weight: 105 ---- - -### CIS Kubernetes Benchmark 1.5 - Rancher 2.3.5 with Kubernetes 1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.3.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
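The testing-controls methodology above amounts to inspecting the arguments RKE passed to each service's Docker container, then formatting the result with `jq` or checking cluster state with `kubectl`. A purely illustrative sketch of that pattern, assuming the RKE default container name `kube-apiserver` and using an arbitrary example flag rather than a control taken from the benchmark:

```bash
# Illustrative audit sketch -- not a command lifted from the benchmark.
# Assumes the RKE default container name "kube-apiserver"; the flag being
# checked is only an example.

docker inspect kube-apiserver --format '{{ json .Args }}' | jq -r '.[]' \
  | grep -q -- '--anonymous-auth=false' \
  && echo "PASS: anonymous auth disabled" \
  || echo "FAIL: flag not found"

# Cluster-level checks use kubectl with a valid kubeconfig, for example:
kubectl get nodes -o wide
```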
- -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- diff --git a/scripts/converters/headers/header-2.4.md b/scripts/converters/headers/header-2.4.md deleted file mode 100644 index d332a92096..0000000000 --- a/scripts/converters/headers/header-2.4.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: CIS Benchmark Rancher Self-Assessment Guide - v2.4 -weight: 105 ---- - -### CIS Kubernetes Benchmark v1.5 - Rancher v2.4 with Kubernetes v1.15 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher v2.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
- -This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: - -Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version ----------------------------|----------|---------|-------|----- -Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -> NOTE: only scored tests are covered in this guide. - -### Controls - ---- diff --git a/scripts/converters/headers/header-2.6-rke2.md b/scripts/converters/headers/header-2.6-rke2.md deleted file mode 100755 index 46f67c4fab..0000000000 --- a/scripts/converters/headers/header-2.6-rke2.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: RKE2 CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 -weight: 101 ---- - -### CIS v1.6 Kubernetes Benchmark - Rancher v2.6 RKE2 with Kubernetes v1.21 up to v1.23 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). - -#### Overview - -This document is a companion to the [Rancher v2.6 RKE2 security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher with RKE2 provisioned clusters, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
- -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: - -| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | -| ----------------------- | --------------- | --------------------- | ------------------- | -| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6.5+ | CIS v1.6 | Kubernetes v1.21 up to v1.23 | - -Because Rancher and RKE2 install Kubernetes services as containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of \`Not Applicable\`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -RKE2 launches control plane components as static pods, managed by the kubelet, and uses containerd as the container runtime. Configuration is defined by arguments passed to the container at the time of initialization or via configuration file. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE2 nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. - -### Controls - ---- diff --git a/scripts/converters/headers/header-2.6.md b/scripts/converters/headers/header-2.6.md deleted file mode 100755 index 4e2380c6b9..0000000000 --- a/scripts/converters/headers/header-2.6.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: RKE CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 -weight: 101 ---- - -### RKE CIS v1.6 Kubernetes Benchmark - Rancher v2.6 with Kubernetes v1.18 to v1.23 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). - -#### Overview - -This document is a companion to the [Rancher v2.6 RKE security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
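Because RKE2 runs the control plane as static pods under the kubelet rather than as Docker containers, the same audit pattern reads arguments out of the pod manifest instead. A rough sketch, with the manifest path assumed from a default RKE2 layout rather than taken from this guide, and an example flag only:

```bash
# Rough sketch of an RKE2-style audit -- the manifest path is an assumed
# default install location and the flag is only an example; verify both
# on your own nodes.
MANIFEST=/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml

grep -q -- '--profiling=false' "$MANIFEST" \
  && echo "PASS: profiling disabled" \
  || echo "FAIL: flag not found in $MANIFEST"

# kubectl (with a valid kubeconfig) and jq cover the cluster-level checks:
kubectl -n kube-system get pods -o json \
  | jq -r '.items[].metadata.name' | grep kube-apiserver
```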
- -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: - -| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | -| ----------------------- | --------------- | --------------------- | ------------------- | -| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6 | CIS v1.6 | Kubernetes v1.18 up to v1.23 | - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of \`Not Applicable\`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. - -### Controls - ---- diff --git a/scripts/converters/headers/header-k3s.md b/scripts/converters/headers/header-k3s.md deleted file mode 100755 index 80461cc9c3..0000000000 --- a/scripts/converters/headers/header-k3s.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: CIS Self Assessment Guide -weight: 90 ---- - -### CIS Kubernetes Benchmark v1.6 - K3s with Kubernetes v1.17 to v1.21 - -#### Overview - -This document is a companion to the [K3s security hardening guide]({{}}/k3s/latest/en/security/hardening_guide/). The hardening guide provides prescriptive guidance for hardening a production installation of K3s, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the CIS Kubernetes Benchmark. It is to be used by K3s operators, security teams, auditors, and decision-makers. - -This guide is specific to the **v1.17**, **v1.18**, **v1.19**, **v1.20** and **v1.21** release line of K3s and the **v1.6** release of the CIS Kubernetes Benchmark. - -For more information about each control, including detailed descriptions and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). 
- -#### Testing controls methodology - -Each control in the CIS Kubernetes Benchmark was evaluated against a K3s cluster that was configured according to the accompanying hardening guide. - -Where control audits differ from the original CIS benchmark, the audit commands specific to K3s are provided for testing. - -These are the possible results for each control: - -- **Pass** - The K3s cluster under test passed the audit outlined in the benchmark. -- **Not Applicable** - The control is not applicable to K3s because of how it is designed to operate. The remediation section will explain why this is so. -- **Warn** - The control is manual in the CIS benchmark and it depends on the cluster's use case or some other factor that must be determined by the cluster operator. These controls have been evaluated to ensure K3s does not prevent their implementation, but no further configuration or auditing of the cluster under test has been performed. - -This guide makes the assumption that K3s is running as a Systemd unit. Your installation may vary and will require you to adjust the "audit" commands to fit your scenario. - -> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. - -### Controls - ---- - diff --git a/scripts/converters/images/rancher-logo-stacked-black.png b/scripts/converters/images/rancher-logo-stacked-black.png deleted file mode 100644 index 8cd47d7b06..0000000000 Binary files a/scripts/converters/images/rancher-logo-stacked-black.png and /dev/null differ diff --git a/scripts/converters/images/rancher-logo-stacked-black.svg b/scripts/converters/images/rancher-logo-stacked-black.svg deleted file mode 100644 index 2599cef9f2..0000000000 --- a/scripts/converters/images/rancher-logo-stacked-black.svg +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - diff --git a/scripts/converters/images/rancher-logo-stacked-color.png b/scripts/converters/images/rancher-logo-stacked-color.png deleted file mode 100644 index 02ab6c99e6..0000000000 Binary files a/scripts/converters/images/rancher-logo-stacked-color.png and /dev/null differ diff --git a/scripts/converters/images/rancher_logo.svg b/scripts/converters/images/rancher_logo.svg deleted file mode 100644 index 205a0e9315..0000000000 --- a/scripts/converters/images/rancher_logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/scripts/converters/results_to_markdown/.terraform-version b/scripts/converters/results_to_markdown/.terraform-version deleted file mode 100644 index ac4a79626c..0000000000 --- a/scripts/converters/results_to_markdown/.terraform-version +++ /dev/null @@ -1 +0,0 @@ -0.14.3 diff --git a/scripts/converters/results_to_markdown/main.tf b/scripts/converters/results_to_markdown/main.tf deleted file mode 100644 index d86b6fcd0f..0000000000 --- a/scripts/converters/results_to_markdown/main.tf +++ /dev/null @@ -1,20 +0,0 @@ -locals { - results = jsondecode(file(var.results_file)) - markdown = templatefile("${path.module}/templates/hardening.template.default.layout.md", - { - rancher_version = var.rancher_version - kubernetes_version = var.kubernetes_version - cis_version = var.cis_version - test_node = var.test_node - test_helper_path = var.test_helper_path - results = local.results - weight = var.index_weight - } - ) -} - -resource "local_file" "output" { - content = local.markdown - file_permission = "0644" - filename = var.output_file -} diff --git a/scripts/converters/results_to_markdown/outputs.tf b/scripts/converters/results_to_markdown/outputs.tf deleted file 
mode 100644 index 1694d4fef8..0000000000 --- a/scripts/converters/results_to_markdown/outputs.tf +++ /dev/null @@ -1,11 +0,0 @@ -/* -output "results" { - value = local.results -} -*/ - -/* -output "markdown" { - value = local.markdown -} -*/ diff --git a/scripts/converters/results_to_markdown/rancher-v2.5-cis-1.6.auto.tfvars.example b/scripts/converters/results_to_markdown/rancher-v2.5-cis-1.6.auto.tfvars.example deleted file mode 100644 index 6441816f8d..0000000000 --- a/scripts/converters/results_to_markdown/rancher-v2.5-cis-1.6.auto.tfvars.example +++ /dev/null @@ -1,8 +0,0 @@ -rancher_version = "v2.5.4" -kubernetes_version = "v1.18" -cis_version = "1.6" -test_node = "cis-aio-0" -test_helper_path = "/home/myuser/repos/rancher-security-scan/package/helper_scripts" -results_file = "/home/myuser/tmp/tf_mk/csr.json" -index_weight = 101 -output_file = "/home/myuser/repos/rancher-docs/content/rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/_index.md" diff --git a/scripts/converters/results_to_markdown/readme.md b/scripts/converters/results_to_markdown/readme.md deleted file mode 100644 index 98d5494f4c..0000000000 --- a/scripts/converters/results_to_markdown/readme.md +++ /dev/null @@ -1 +0,0 @@ -This code is intended to replace the bash script to convert Rancer CIS scan results to markdown format suitable for publishing to the docs site. diff --git a/scripts/converters/results_to_markdown/templates/hardening.template.alternate.layout.md b/scripts/converters/results_to_markdown/templates/hardening.template.alternate.layout.md deleted file mode 100644 index 5156a2d40c..0000000000 --- a/scripts/converters/results_to_markdown/templates/hardening.template.alternate.layout.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: CIS ${cis_version} Benchmark - Self-Assessment Guide - Rancher ${rancher_version} -weight: ${weight} ---- - -# CIS v1.6 Kubernetes Benchmark - Rancher v2.5 with Kubernetes v1.18 - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.6_Benchmark_Assessment.pdf) - -## Overview - -This document is a companion to the Rancher v2.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS 1.5 Benchmark | Rancher v2.5 | CIS v1.5| Kubernetes v1.15 - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. 
You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -## Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools, which are required in the testing and evaluation of test results. - - -## Control Result Details -%{ for section in results ~} - -### ${section.id} ${section.description} -%{ for check in section.checks ~} - -#### ${check.id}: ${check.description} - -Attribute | Details ---- | --- -**Result** | ${check.state} -**Remediation** | ${replace(replace(check.remediation,"|","\\|"),"\n","
    ")} -%{ if check.audit != "" ~} -**Audit** | ${replace(check.audit,"|","\\|")} -%{ endif ~} -%{ if check.expected_result != "" ~} -**Expected Result** | ${check.expected_result} -%{ endif ~} -%{ if check.actual_value_per_node[test_node] != "" ~} - -{{% accordion label="Audit Output" %}} -```console - ${check.actual_value_per_node[test_node]} - ``` -{{% /accordion %}} - %{ endif ~} - -%{ if length(regexall("^\\w+(\\.sh)", "${check.audit}")) == 1 ~} -{{% accordion label="Audit Script" %}} -```bash -${file("${test_helper_path}/${regex("\\S+", check.audit)}") } -``` -{{% /accordion %}} -%{ endif ~} -%{ endfor ~} -%{ endfor ~} diff --git a/scripts/converters/results_to_markdown/templates/hardening.template.default.layout.md b/scripts/converters/results_to_markdown/templates/hardening.template.default.layout.md deleted file mode 100644 index 233e89536c..0000000000 --- a/scripts/converters/results_to_markdown/templates/hardening.template.default.layout.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: CIS ${cis_version} Benchmark - Self-Assessment Guide - Rancher ${rancher_version} -weight: ${weight} ---- - -### CIS ${cis_version} Kubernetes Benchmark - Rancher ${rancher_version} with Kubernetes ${kubernetes_version} - -[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.6_Benchmark_Assessment.pdf) - -#### Overview - -This document is a companion to the Rancher ${rancher_version} security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. - -This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: - -Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version ----------------------------|----------|---------|------- -Hardening Guide with CIS ${cis_version} Benchmark | Rancher ${rancher_version} | CIS ${cis_version}| Kubernetes ${kubernetes_version} - -Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. - -This document is to be used by Rancher operators, security teams, auditors and decision makers. - -For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark ${cis_version}. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). - -#### Testing controls methodology - -Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. - -Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. -When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. 
The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. - -### Controls - -%{ for section in results ~} -## ${section.id} ${section.description} -%{ for check in section.checks ~} -### ${check.id} ${check.description} - -**Result:** ${check.state} - -**Remediation:** -${check.remediation} - -**Audit:** - -```bash -${check.audit} -``` - -%{ if check.expected_result != "" ~} -**Expected Result**: - -```console -${check.expected_result} -``` -%{ endif ~} - -%{ if length(regexall("^\\w+(\\.sh)", "${check.audit}")) == 1 ~} -**Audit Script:** -```bash -${file("${test_helper_path}/${regex("\\S+", check.audit)}") } -``` -%{ endif ~} -%{ if check.actual_value_per_node[test_node] != "" ~} -**Returned Value**: - -```console -${check.actual_value_per_node[test_node]} -``` -%{ endif ~} -%{ endfor ~} -%{ endfor ~} \ No newline at end of file diff --git a/scripts/converters/results_to_markdown/variables.tf b/scripts/converters/results_to_markdown/variables.tf deleted file mode 100644 index 5d2ad18dac..0000000000 --- a/scripts/converters/results_to_markdown/variables.tf +++ /dev/null @@ -1,32 +0,0 @@ -variable "rancher_version" { - type = string -} - -variable "kubernetes_version" { - type = string -} - -variable "cis_version" { - type = string -} - -variable "test_node" { - type = string - -} - -variable "results_file" { - type = string -} - -variable "test_helper_path" { - type = string -} - -variable "index_weight" { - type = number -} - -variable "output_file" { - type = string -} diff --git a/scripts/converters/run_md_to_pdf.sh b/scripts/converters/run_md_to_pdf.sh deleted file mode 100755 index da11beab46..0000000000 --- a/scripts/converters/run_md_to_pdf.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -e - -abs_path() { - echo "$(cd "$(dirname "$1")"; pwd -P)/$(basename "$1")" -} - -md_source=${1:?path to markdown file is a required argument} - -[ -f ${md_source} ] || (echo "file:'${results}' does not exist"; exit 1) - -docker run -v $(abs_path ${md_source}):/source/source.md -v $(pwd)/output:/output -it --rm doc_converters:latest md_to_pdf diff --git a/scripts/converters/run_results_to_md.sh b/scripts/converters/run_results_to_md.sh deleted file mode 100755 index ae3ac700b9..0000000000 --- a/scripts/converters/run_results_to_md.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -e - -results=${1:?path to kube-bench json results is a required argument} -test_helpers=${2:?path to kube-bench test_helpers scripts is a required argument} -header=${3:?path to header file is a required argument} - -[ -f ${results} ] || (echo "file:'${results}' does not exist"; exit 1) -[ -d ${test_helpers} ] || (echo "dir: '${test_helpers}' not a valid directory"; exit 1) - -docker run -v ${results}:/source/results.json -v ${test_helpers}:/test_helpers -v ${header}:/headers/header.md -it --rm doc_converters:latest results_to_md diff --git a/scripts/converters/scripts/entrypoint.sh b/scripts/converters/scripts/entrypoint.sh deleted file mode 100755 index e6821bb37d..0000000000 --- a/scripts/converters/scripts/entrypoint.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -e - -usage() { -echo -n "[command] [command_options] - -commands: - results_to_md: take json output from kube-bench as source and outputs markdown - options: - -s, --source source json from kube-bench default: 
/source/results.json -" -} - -if [[ $# -eq 0 ]]; then - usage - exit 2 -fi - -scripts/${1}.sh diff --git a/scripts/converters/scripts/md_to_pdf.sh b/scripts/converters/scripts/md_to_pdf.sh deleted file mode 100755 index df45ea209f..0000000000 --- a/scripts/converters/scripts/md_to_pdf.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -e - -md_source="${1:-/source/source.md}" - -pandoc -s --template="templates/default.html" -f markdown-smart --toc -c css/style-portrait.css "${md_source}" -o "source.html" -python3 -m weasyprint source.html /output/output.pdf diff --git a/scripts/converters/scripts/results_to_md.sh b/scripts/converters/scripts/results_to_md.sh deleted file mode 100755 index a7d5d47331..0000000000 --- a/scripts/converters/scripts/results_to_md.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash - -results_file="${1:-/source/results.json}" -test_helpers="${2:-/test_helpers}" -header_file="${3:-/headers/header.md}" - -header() { -cat ${header_file} -} - -get_ids() { - jq -r .[].id ${results_file} | sort -n -} - -get_id_text() { - id=${1} - jq -r --arg id "${id}" '.[] | select(.id==$id) | .description' ${results_file} -} - -get_section_ids() { - id=${1} - jq -r --arg id "${id}" '.[] | select(.id==$id) | .checks[].id' ${results_file} | sort -V -} - -get_section_desc() { - id=${1} - section=${2} - jq -r --arg id "${id}" --arg section "${section}" '.[] | select(.id==$id).checks[] | select(.id==$section).description' ${results_file} -} - -get_tests() { - id=${1} - section=${2} - jq -r --arg id "${id}" --arg section "${section}" '.[] | select(.id==$id).checks[] | select(.id==$section).id' ${results_file} -} - -get_test() { - id=${1} - section=${2} - test_number=${3} - jq -r --arg id "${id}" --arg section "${section}" --arg test_number "${test_number}" '.[] | select(.id==$id).checks[] | select(.id==$test_number)' ${results_file} -} - -header - -for id in $(get_ids); do - echo "## ${id} $(get_id_text ${id})" - for section in $(get_section_ids ${id}); do - echo "### ${section} $(get_section_desc ${id} ${section})" - echo - for test in $(get_tests ${id} ${section}); do - result=$(get_test ${id} ${section} ${test}) - test_desc=$(echo ${result} | jq -r '.description') - audit=$(echo ${result} | jq -r '.audit') - audit_config=$(echo ${result} | jq -r '.audit_config') - actual_value=$(echo ${result} | jq -r '.actual_value_per_node[]') - type=$(echo ${result} | jq -r '.test_type') - status=$(echo ${result} | jq -r '.state') - remediation=$(echo ${result} | jq -r '.remediation') - expected_result=$(echo ${result} | jq -r '.expected_result') - echo - if [ "${type}" = "skip" ]; then - echo "**Result:** Not Applicable" - echo - else - echo "**Result:** ${status}" - echo - fi - if [ ! -z "${remediation}" ]; then - echo "**Remediation:**" - echo -e "${remediation//\\n/
    }" - echo - fi - if [ ! -z "${audit}" ] && [ "${status}" != "INFO" ] && [ "${type}" != "skip" ]; then - if [[ ${audit} =~ ".sh" ]]; then - audit_script=$(basename $(echo ${audit} | cut -d ' ' -f1)) - test_helper="${test_helpers}/${audit_script}" - echo "**Audit Script:** \`${audit_script}\`" - echo - echo '```bash' - cat ${test_helper} - echo - echo '```' - echo - echo "**Audit Execution:**" - echo - echo '```bash' - echo "./${audit_script} $(echo ${audit} | awk '{print $2}')" - echo '```' - echo - else - echo "**Audit:**" - echo - echo '```bash' - echo ${audit} - echo '```' - echo - fi - fi - if [ ! -z "${audit_config}" ] && [ ${status} != "INFO" ]; then - echo "**Audit Config:**" - echo - echo '```bash' - echo ${audit_config} - echo '```' - echo - fi - if [ ! -z "${expected_result}" ]; then - echo "**Expected Result**:" - echo - echo '```console' - echo ${expected_result} - echo '```' - echo - fi - if [ ! -z "${actual_value}" ] && [ "${status}" != "PASS" ] && [ "${type}" != "skip" ] && [ "${type}" != "manual" ]; then - echo "**Returned Value**:" - echo - echo '```console' - echo ${actual_value} - echo '```' - echo - fi - done - done -done diff --git a/scripts/converters/templates/default.html b/scripts/converters/templates/default.html deleted file mode 100644 index edfcb20d97..0000000000 --- a/scripts/converters/templates/default.html +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - -$for(author-meta)$ - -$endfor$ -$if(date-meta)$ - -$endif$ -$if(keywords)$ - -$endif$ - $if(title-prefix)$$title-prefix$ – $endif$$pagetitle$ -$for(css)$ - -$endfor$ -$if(math)$ - $math$ -$endif$ -$for(header-includes)$ - $header-includes$ -$endfor$ - - -$for(include-before)$ -$include-before$ -$endfor$ -$if(title)$ -
    - $title$ -$if(subtitle)$ - $subtitle$ -$endif$ -$for(author)$ - $author$ -$endfor$ -$if(email)$ - -$endif$ -$if(date)$ - $date$ -$endif$ - -
    -$endif$ -$if(toc)$ - -$endif$ -$body$ -$for(include-after)$ -$include-after$ -$endfor$ - - diff --git a/scripts/dev b/scripts/dev deleted file mode 100755 index ce9d74edc5..0000000000 --- a/scripts/dev +++ /dev/null @@ -1,127 +0,0 @@ -#!/bin/bash -set -e - -PORT=9001 -IMAGE=rancher/docs -TAG=dev -THEME= -WEBSITE= -BUILD_BUILD= -BUILD_DEV= -SKIP_PULL= -UPLOAD= - -# cd to app root -CWD=$(dirname $0) -if [[ `basename $(pwd)` = 'scripts' ]]; then - cd ../ -else - cd `dirname $CWD` -fi - -print_help() -{ - cat 1>&2 <&2; -} - -absolute() { -# $1 : relative filename - echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" -} - - -while getopts ":bdp:st:u" opt;do - case $opt in - b) - BUILD_BUILD="true" - BUILD_DEV="true" - ;; - d) - BUILD_DEV="true" - ;; - p) - PORT="${OPTARG}" - ;; - s) - SKIP_PULL="true" - ;; - t) - THEME="${OPTARG}" - ;; - u) - UPLOAD="true" - ;; - \?) - echoerr "Invalid arguments" - print_help - exit 1 - ;; - :) - echoerr "Option -${OPTARG} requires an argument." - print_help - exit 1 - ;; - esac -done - -THEMEVOLUME="" -if [[ "$THEME" ]]; then - echo "Using theme from ${THEME}" - ABSOLUTE=$(absolute $THEME) - THEMEVOLUME="-v ${ABSOLUTE}:/run/node_modules/rancher-website-theme" -fi - -if [[ "$BUILD_BUILD" ]]; then - echo "Building ${IMAGE}:build" - docker build --no-cache -f Dockerfile.build --build-arg TWITTER_CONSUMER=${TWITTER_CONSUMER} --build-arg TWITTER_SECRET=${TWITTER_SECRET} -t ${IMAGE}:build . - if [[ "$UPLOAD" ]]; then - docker push ${IMAGE}:build - fi -elif [[ "$SKIP_PULL" ]]; then - echo "Skipping pull of ${IMAGE}:build" -else - echo "Pulling ${IMAGE}:build" - docker pull ${IMAGE}:build -fi - -if [[ "$BUILD_DEV" ]]; then - TAG=local - echo "Building ${IMAGE}:${TAG}" - docker build -f Dockerfile.dev -t ${IMAGE}:${TAG} . 
-elif [[ "$SKIP_PULL" ]]; then - echo "Skipping pull of ${IMAGE}:${TAG}" -else - echo "Pulling ${IMAGE}:${TAG}" - docker pull ${IMAGE}:${TAG} -fi - -echo "Starting server on http://localhost:${PORT}" -docker run --rm -p ${PORT}:${PORT} -it \ - -v $(pwd)/archetypes:/run/archetypes \ - -v $(pwd)/assets:/run/assets \ - -v $(pwd)/content:/run/content \ - -v $(pwd)/data:/run/data \ - -v $(pwd)/layouts:/run/layouts \ - -v $(pwd)/scripts:/run/scripts \ - -v $(pwd)/static:/run/static \ - -v $(pwd)/.git:/run/.git \ - -v $(pwd)/config.toml:/run/config.toml \ - ${THEMEVOLUME} ${IMAGE}:${TAG} --port=${PORT} diff --git a/scripts/dev-windows.ps1 b/scripts/dev-windows.ps1 deleted file mode 100755 index 72350371c7..0000000000 --- a/scripts/dev-windows.ps1 +++ /dev/null @@ -1,74 +0,0 @@ -#Requires -Version 5.0 - -param ( - [parameter(Mandatory = $false,HelpMessage="Build the build & dev images instead of pulling from the registry")] [switch]$buildBuild, - [parameter(Mandatory = $false,HelpMessage="Build the dev image instead of pulling from the registry")] [switch]$buildDev, - [parameter(Mandatory = $false,HelpMessage="Port to listen on")] [string]$port, - [parameter(Mandatory = $false,HelpMessage="Skip pulling build/dev images")] [switch]$skipPull, - [parameter(Mandatory = $false,HelpMessage="Use DIR to for the theme, to devlop the theme at the same time")] [string]$theme, - [parameter(Mandatory = $false,HelpMessage="Upload/push the build image after building")] [switch]$upload -) - -$DefaultPort = 9001 -$ListenPort = $DefaultPort -$Image = "rancher/docs" -$Tag = "dev" -$twitterConsumer = $env:TWITTER_CONSUMER -$twitterSecret = $env:TWITTER_SECRET - -$dirPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$baseDirPath = Get-Location -if ($dirPath -eq $baseDirPath) { - $baseDirPath = (Resolve-Path "$dirPath\..").Path -} -pushd $baseDirPath - -if ($port) { - $ListenPort = $port -} - -$ThemeVolume = "" -if ($theme) { - Write-Host "Using theme from $theme" - $ThemeVolume = "-v ${baseDirPath}/${theme}:/run/node_modules/rancher-website-theme" -} - -if ($buildBuild) { - Write-Host "Building ${Image}:build" - docker build --no-cache -f Dockerfile.build --build-arg TWITTER_CONSUMER=$twitterConsumer --build-arg TWITTER_SECRET=$twitterSecret -t ${Image}:build . - if ($upload) { - docker push ${Image}:build - } - $buildDev = $true -} elseif ($skipPull) { - Write-Host "Skipping pull of ${Image}:build" -} else { - Write-Host "Pulling ${Image}:build" - docker pull ${Image}:build -} - -if ($buildDev) { - $Tag = "local" - Write-Host "Building ${Image}:${Tag}" - docker build -f Dockerfile.dev -t ${Image}:${Tag} . 
-} elseif ($skipPull) { - Write-Host "Skipping pull of ${Image}:${Tag}" -} else { - Write-Host "Pulling ${Image}:${Tag}" - docker pull ${Image}:${Tag} -} - -Write-Host "Starting server on http://localhost:${ListenPORT}" -docker run --rm -p ${ListenPort}:${ListenPort} -it ` - -v ${baseDirPath}/archetypes:/run/archetypes ` - -v ${baseDirPath}/assets:/run/assets ` - -v ${baseDirPath}/content:/run/content ` - -v ${baseDirPath}/data:/run/data ` - -v ${baseDirPath}/layouts:/run/layouts ` - -v ${baseDirPath}/scripts:/run/scripts ` - -v ${baseDirPath}/static:/run/static ` - -v ${baseDirPath}/.git:/run/.git ` - -v ${baseDirPath}/config.toml:/run/config.toml ` - ${ThemeVolume} ${Image}:${Tag} --port=${ListenPort} - -popd diff --git a/scripts/minify-images b/scripts/minify-images deleted file mode 100755 index cea980bad0..0000000000 --- a/scripts/minify-images +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# cd to app root -CWD=$(dirname $0) -if [[ `basename $(pwd)` = 'scripts' ]]; then - cd ../ -else - cd `dirname $CWD` -fi - -PORT=9000 -if [ ! -z "$1" ]; then - PORT=$1 -fi - -docker run --rm -it -v $(pwd):/site rancher/docs:dev minify-images diff --git a/scripts/publish b/scripts/publish deleted file mode 100755 index 0dafe1cff9..0000000000 --- a/scripts/publish +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# cd to app root -CWD=$(dirname $0) -if [[ `basename $(pwd)` = 'scripts' ]]; then - cd ../ -else - cd `dirname $CWD` -fi - -echo "Building Image..." && \ -docker build -t rancher/docs:latest . && \ -echo "Pushing Image..." && \ -docker push rancher/docs:latest && \ -echo "OK" diff --git a/scripts/publish-algolia.js b/scripts/publish-algolia.js deleted file mode 100755 index d468de21ac..0000000000 --- a/scripts/publish-algolia.js +++ /dev/null @@ -1,14 +0,0 @@ -#! /usr/bin/env node -'use strict'; - -const atomicalgolia = require("atomic-algolia"); - -console.log('Publishing to algolia', process.env.ALGOLIA_INDEX_NAME); -atomicalgolia(process.env.ALGOLIA_INDEX_NAME, '/run/final.algolia.json', {verbose: true}, (err, result) => { - console.log(result); - if ( err ) { - process.exit(1); - } else { - process.exit(0); - } -}); diff --git a/sidebars.js b/sidebars.js new file mode 100644 index 0000000000..db6a07af7b --- /dev/null +++ b/sidebars.js @@ -0,0 +1,1368 @@ +/** + * Creating a sidebar enables you to: + - create an ordered group of docs + - render a sidebar for each doc of that group + - provide next/previous navigation + + The sidebars can be generated from the filesystem, or explicitly defined here. + + Create as many sidebars as you want. 
+ */ + +// @ts-check + +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const sidebars = { + // By default, Docusaurus generates a sidebar from the docs folder structure + // tutorialSidebar: [{type: 'autogenerated', dirName: '.'}], + + // But you can create a sidebar manually + tutorialSidebar: [ + + 'rancher-manager', + { + type: 'category', + label: 'Getting Started', + link: { + type: 'doc', + id: "getting-started", + }, + items: [ + { + type: 'category', + label: 'Introduction', + link: { + type: 'doc', + id: "pages-for-subheaders/introduction", + }, + items: [ + "getting-started/introduction/overview", + "getting-started/introduction/what-are-divio-docs", + ], + }, + { + type: 'category', + label: 'Quick Start Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/quick-start-guides", + }, + items: [ + { + type: 'category', + label: 'Deploy Rancher Manager', + link: { + type: 'doc', + id: "pages-for-subheaders/deploy-rancher-manager", + }, + items: [ + "getting-started/quick-start-guides/deploy-rancher-manager/aws", + "getting-started/quick-start-guides/deploy-rancher-manager/aws-marketplace", + "getting-started/quick-start-guides/deploy-rancher-manager/azure", + "getting-started/quick-start-guides/deploy-rancher-manager/digitalocean", + "getting-started/quick-start-guides/deploy-rancher-manager/gcp", + "getting-started/quick-start-guides/deploy-rancher-manager/hetzner-cloud", + "getting-started/quick-start-guides/deploy-rancher-manager/vagrant", + "getting-started/quick-start-guides/deploy-rancher-manager/equinix-metal", + "getting-started/quick-start-guides/deploy-rancher-manager/helm-cli", + ] + }, + { + type: 'category', + label: 'Deploy Rancher Workloads', + link: { + type: 'doc', + id: "pages-for-subheaders/deploy-rancher-workloads", + }, + items: [ + "getting-started/quick-start-guides/deploy-workloads/workload-ingress", + "getting-started/quick-start-guides/deploy-workloads/nodeports", + ], + } + ] + }, + { + type: 'category', + label: 'Installation and Upgrade', + link: { + type: 'doc', + id: "pages-for-subheaders/installation-and-upgrade", + }, + items: [ + { + type: 'category', + label: 'Installation Requirements', + link: { + type: 'doc', + id: "pages-for-subheaders/installation-requirements", + }, + items: [ + "getting-started/installation-and-upgrade/installation-requirements/install-docker", + "getting-started/installation-and-upgrade/installation-requirements/dockershim", + "getting-started/installation-and-upgrade/installation-requirements/port-requirements", + ] + }, + { + type: 'category', + label: 'Install/Upgrade on a Kubernetes Cluster', + link: { + type: 'doc', + id: "pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster", + }, + items: [ + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting", + ] + }, + { + type: 'category', + label: 'Other Installation Methods', + link: { + type: 
'doc', + id: "pages-for-subheaders/other-installation-methods", + }, + items: [ + { + type: 'category', + label: 'Air-Gapped Helm CLI Install', + link: { + type: 'doc', + id: "pages-for-subheaders/air-gapped-helm-cli-install", + }, + items: [ + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands", + ] + }, + { + type: 'category', + label: 'Rancher on a Single Node with Docker', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-on-a-single-node-with-docker", + }, + items: [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting", + ] + }, + { + type: 'category', + label: 'Rancher Behind an HTTP Proxy', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-behind-an-http-proxy", + }, + items: [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher", + ] + } + ], + }, + { + type: 'category', + label: 'Resources', + link: { + type: 'doc', + id: "pages-for-subheaders/resources", + }, + items: [ + "getting-started/installation-and-upgrade/resources/choose-a-rancher-version", + "getting-started/installation-and-upgrade/resources/helm-version-requirements", + "getting-started/installation-and-upgrade/resources/add-tls-secrets", + "getting-started/installation-and-upgrade/resources/custom-ca-root-certificates", + "getting-started/installation-and-upgrade/resources/upgrade-cert-manager", + "getting-started/installation-and-upgrade/resources/update-rancher-certificate", + "getting-started/installation-and-upgrade/resources/bootstrap-password", + "getting-started/installation-and-upgrade/resources/local-system-charts", + ] + }, + "getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes", + + "getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher", + { + type: 'category', + label: 'Advanced Options', + link: { + type: 'doc', + id: "pages-for-subheaders/advanced-options", + }, + items: [ + { + type: 'category', + label: 'Enable Experimental Features', + link: { + type: 'doc', + id: "pages-for-subheaders/enable-experimental-features", + }, + items: [ + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers", + 
"getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/continuous-delivery", + ] + }, + { + type: 'category', + label: 'Advanced Use Cases', + items: [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer", + ] + } + ] + } + ] + } + ] + }, + { + type: 'category', + label: 'How-to Guides', + link: { + type: 'doc', + id: "how-to-guides", + }, + items: [ + { + type: 'category', + label: 'New User Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/new-user-guides", + }, + items: [ + { + type: 'category', + label: 'Kubernetes Cluster Setup', + link: { + type: 'doc', + id: "pages-for-subheaders/kubernetes-cluster-setup", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher", + ], + }, + { + type: 'category', + label: 'Infrastructure Setup', + link: { + type: 'doc', + id: "pages-for-subheaders/infrastructure-setup", + }, + items: [ + "how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2", + "how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds", + "how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer", + "how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer", + ], + }, + { + type: 'category', + label: 'Kubernetes Clusters in Rancher Setup', + link: { + type: 'doc', + id: "pages-for-subheaders/kubernetes-clusters-in-rancher-setup", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters", + { + type: 'category', + label: 'Checklist for Production-Ready Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/checklist-for-production-ready-clusters", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes", + ], + }, + { + type: 'category', + label: 'Set Up Clusters from Hosted Kubernetes Providers', + link: { + type: 'doc', + id: "pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks", + 
"how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei", + ] + }, + { + type: 'category', + label: 'Launch Kubernetes with Rancher', + link: { + type: 'doc', + id: "pages-for-subheaders/launch-kubernetes-with-rancher", + }, + items: [ + { + type: 'category', + label: 'Use New Nodes in an Infra Provider', + link: { + type: 'doc', + id: "pages-for-subheaders/use-new-nodes-in-an-infra-provider", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster", + + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster", + + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster", + { + type: 'category', + label: 'vSphere', + link: { + type: 'doc', + id: "pages-for-subheaders/vsphere", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-a-vm-template", + ] + }, + { + type: 'category', + label: 'Nutanix', + link: { + type: 'doc', + id: "pages-for-subheaders/nutanix", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/nutanix/provision-kubernetes-clusters-in-aos", + ] + } + ] + }, + { + type: 'category', + label: 'Use Windows Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/use-windows-clusters", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway", + ] + }, + { + type: 'category', + label: 'Set Up Cloud Providers', + link: { + type: 'doc', + id: "pages-for-subheaders/set-up-cloud-providers", + }, + items: [ + { + type: 'category', + label: 'Other Cloud Providers', + link: { + type: 'doc', + id: "pages-for-subheaders/other-cloud-providers", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure", + 
"how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine", + ] + }, + { + type: 'category', + label: 'vSphere', + link: { + type: 'doc', + id: "pages-for-subheaders/vsphere-cloud-provider" + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree", + ] + } + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/rke1-vs-rke2-differences", + + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents", + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters", + ] + }, + { + type: 'category', + label: 'Kubernetes Resources Setup', + link: { + type: 'doc', + id: "pages-for-subheaders/kubernetes-resources-setup", + }, + items: [ + { + type: 'category', + label: 'Workloads and Pods', + link: { + type: 'doc', + id: "pages-for-subheaders/workloads-and-pods", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar", + ] + }, + { + type: 'category', + label: 'Horizontal Pod Autoscaler', + link: { + type: 'doc', + id: "pages-for-subheaders/horizontal-pod-autoscaler", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl", + ] + }, + { + type: 'category', + label: 'Load Balancer and Ingress Controller', + link: { + type: 'doc', + id: "pages-for-subheaders/load-balancer-and-ingress-controller", + }, + items: [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing", + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses", + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/ingress-configuration", + ] + }, + "how-to-guides/new-user-guides/kubernetes-resources-setup/create-services", + + "how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication", + + "how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps", + + "how-to-guides/new-user-guides/kubernetes-resources-setup/secrets", + + "how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries", + ], + }, + { + type: 'category', + label: 'Helm Charts in Rancher', + link: { + 
type: 'doc', + id: "pages-for-subheaders/helm-charts-in-rancher", + }, + items: [ + "how-to-guides/new-user-guides/helm-charts-in-rancher/create-apps", + ] + }, + { + type: 'category', + label: 'Deploy Apps Across Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/deploy-apps-across-clusters", + }, + items: [ + "how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet", + "how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps", + ] + }, + { + type: 'category', + label: 'Backup, Restore, and Disaster Recovery', + link: { + type: 'doc', + id: "pages-for-subheaders/backup-restore-and-disaster-recovery", + }, + items: [ + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup", + ] + } + ] + }, + { + type: 'category', + label: 'Advanced User Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/advanced-user-guides", + }, + items: [ + { + type: 'category', + label: 'Authentication, Permissions, and Global Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/authentication-permissions-and-global-configuration", + }, + items: [ + { + type: 'category', + label: 'About Authentication', + link: { + type: 'doc', + id: "pages-for-subheaders/about-authentication", + }, + items: [ + { + type: 'category', + label: 'Authentication Config', + link: { + type: 'doc', + id: "pages-for-subheaders/authentication-config", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-oidc", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak-saml", + 
"how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml", + ] + }, + { + type: 'category', + label: 'Configure Microsoft AD Federation Service (SAML)', + link: { + type: 'doc', + id: "pages-for-subheaders/configure-microsoft-ad-federation-service-saml", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs", + ] + }, + { + type: 'category', + label: 'Configure Shibboleth (SAML)', + link: { + type: 'doc', + id: "pages-for-subheaders/configure-shibboleth-saml", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions", + ] + } + ] + + }, + { + type: 'category', + label: 'Manage Role-Based Access Control (RBAC)', + link: { + type: 'doc', + id: "pages-for-subheaders/manage-role-based-access-control-rbac", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles", + ] + }, + { + type: 'category', + label: 'About Provisioning Drivers', + link: { + type: 'doc', + id: "pages-for-subheaders/about-provisioning-drivers", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers", + ] + }, + { + type: 'category', + label: 'About RKE1 Templates', + link: { + type: 'doc', + id: "pages-for-subheaders/about-rke1-templates", + }, + items: [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates", + 
"how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases", + ] + }, + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-cluster-templates", + + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies", + + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry", + + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/custom-branding", + ] + }, + { + type: 'category', + label: 'Manage Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/manage-clusters", + }, + items: [ + { + type: 'category', + label: 'Access Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/access-clusters", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig", + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint", + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters", + ] + }, + { + type: 'category', + label: 'Install Cluster Autoscaler', + link: { + type: 'doc', + id: "pages-for-subheaders/install-cluster-autoscaler", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups", + ] + }, + { + type: 'category', + label: 'Create Kubernetes Persistent Storage', + link: { + type: 'doc', + id: "pages-for-subheaders/create-kubernetes-persistent-storage", + }, + items: [ + { + type: 'category', + label: 'Manage Persistent Storage', + link: { + type: 'doc', + id: "pages-for-subheaders/manage-persistent-storage", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes", + ] + }, + { + type: 'category', + label: 'Provisioning Storage Examples', + link: { + type: 'doc', + id: "pages-for-subheaders/provisioning-storage-examples", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage", + ] + } + ] + }, + "how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces", + + 
"how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration", + + "how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates", + + "how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools", + + "how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes", + + "how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy", + + "how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies", + ] + }, + { + type: 'category', + label: 'Manage Projects', + link: { + type: 'doc', + id: "pages-for-subheaders/manage-projects", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects", + "how-to-guides/advanced-user-guides/manage-projects/manage-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines", + "how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies", + { + type: 'category', + label: 'Manage Project Resource Quotas', + link: { + type: 'doc', + id: "pages-for-subheaders/manage-project-resource-quotas", + }, + items: [ + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types", + ] + } + ] + }, + { + type: 'category', + label: 'Monitoring/Alerting Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/monitoring-alerting-guides", + }, + items: [ + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring", + { + type: 'category', + label: 'Prometheus Federator Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/prometheus-federator-guides", + }, + items: [ + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/enable-prometheus-federator", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/uninstall-prometheus-federator", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/customize-grafana-dashboards", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/prometheus-federator-guides/set-up-workloads", + ] + } + ] + }, + { + type: 'category', + label: 'Monitoring V2 Configuration Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/monitoring-v2-configuration-guides", + }, + items: [ + { + type: 'category', + label: 'Advanced Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/advanced-configuration", + }, + items: [ + "how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager", + 
"how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus", + "how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules", + ] + } + ] + }, + { + type: 'category', + label: 'Istio Setup Guide', + link: { + type: 'doc', + id: "pages-for-subheaders/istio-setup-guide", + }, + items: [ + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster", + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace", + "how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management", + "how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic", + ] + }, + { + type: 'category', + label: 'CIS Scan Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/cis-scan-guides", + }, + items: [ + "how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule", + "how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests", + "how-to-guides/advanced-user-guides/cis-scan-guides/view-reports", + "how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule", + "how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run", + ] + } + ] + } + ] + }, + { + type: 'category', + label: 'Reference Guides', + link: { + type: 'doc', + id: "reference-guides", + }, + items: [ + { + type: 'category', + label: 'Best Practices', + link: { + type: 'doc', + id: "pages-for-subheaders/best-practices", + }, + items: [ + { + type: 'category', + label: 'Rancher Server', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-server", + }, + items: [ + "reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere", + "reference-guides/best-practices/rancher-server/rancher-deployment-strategy", + "reference-guides/best-practices/rancher-server/tips-for-running-rancher" + ] + }, + { + type: 'category', + label: 'Rancher-Managed Clusters', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-managed-clusters", + }, + items: [ + "reference-guides/best-practices/rancher-managed-clusters/logging-best-practices", + "reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices", + "reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers", + "reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere" + ] + } + ] + }, + { + type: 'category', + label: 'Rancher Manager Architecture', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-manager-architecture", + }, + items: [ + "reference-guides/rancher-manager-architecture/rancher-server-and-components", + "reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters", + "reference-guides/rancher-manager-architecture/architecture-recommendations", + ] + }, + { + type: 'category', + label: 'Cluster Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/cluster-configuration", + 
}, + items: [ + { + type: 'category', + label: 'Rancher Server Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-server-configuration", + }, + items: [ + "reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration", + "reference-guides/cluster-configuration/rancher-server-configuration/rke2-cluster-configuration", + "reference-guides/cluster-configuration/rancher-server-configuration/k3s-cluster-configuration", + "reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration", + "reference-guides/cluster-configuration/rancher-server-configuration/aks-cluster-configuration", + { + type: 'category', + label: 'GKE Cluster Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/gke-cluster-configuration", + }, + items: [ + "reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters", + ] + }, + { + type: 'category', + label: 'Use Existing Nodes', + link: { + type: 'doc', + id: "pages-for-subheaders/use-existing-nodes", + }, + items: [ + "reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options", + ] + }, + "reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters", + ] + }, + { + type: 'category', + label: 'Downstream Cluster Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/downstream-cluster-configuration", + }, + items: [ + { + type: 'category', + label: 'Node Template Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/node-template-configuration", + }, + items: [ + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/nutanix", + ] + }, + { + type: 'category', + label: 'Machine Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/machine-configuration", + }, + items: [ + "reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/amazon-ec2", + "reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/digitalocean", + "reference-guides/cluster-configuration/downstream-cluster-configuration/machine-configuration/azure", + ] + } + ] + } + ] + }, + { + type: 'category', + label: 'Single-Node Rancher in Docker', + link: { + type: 'doc', + id: "pages-for-subheaders/single-node-rancher-in-docker", + }, + items: [ + "reference-guides/single-node-rancher-in-docker/http-proxy-configuration", + "reference-guides/single-node-rancher-in-docker/advanced-options", + ] + }, + { + type: 'category', + label: 'Installation References', + link: { + type: 'doc', + id: "pages-for-subheaders/installation-references", + }, + items: [ + "reference-guides/installation-references/helm-chart-options", + "reference-guides/installation-references/tls-settings", + "reference-guides/installation-references/feature-flags" + ] + }, + { + type: 'category', + label: 'Amazon EKS Permissions', + link: { + type: 'doc', + id: "pages-for-subheaders/amazon-eks-permissions", + 
}, + items: [ + "reference-guides/amazon-eks-permissions/minimum-eks-permissions", + ] + }, + { + type: 'category', + label: 'Backup & Restore Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/backup-restore-configuration", + }, + items: [ + "reference-guides/backup-restore-configuration/backup-configuration", + "reference-guides/backup-restore-configuration/restore-configuration", + "reference-guides/backup-restore-configuration/storage-configuration", + "reference-guides/backup-restore-configuration/examples", + ], + }, + { + type: 'category', + label: 'Configure OpenLDAP', + link: { + type: 'doc', + id: "pages-for-subheaders/configure-openldap", + }, + items: [ + "reference-guides/configure-openldap/openldap-config-reference", + ] + }, + "reference-guides/kubernetes-concepts", + { + type: 'category', + label: 'Monitoring V2 Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/monitoring-v2-configuration", + }, + items: [ + "reference-guides/monitoring-v2-configuration/receivers", + "reference-guides/monitoring-v2-configuration/routes", + "reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors", + "reference-guides/monitoring-v2-configuration/helm-chart-options", + "reference-guides/monitoring-v2-configuration/examples", + ], + }, + { + type: 'category', + label: 'Prometheus Federator', + link: { + type: 'doc', + id: "pages-for-subheaders/prometheus-federator", + }, + items: [ + "reference-guides/prometheus-federator/rbac", + ] + }, + { + type: 'category', + label: 'User Settings', + link: { + type: 'doc', + id: "pages-for-subheaders/user-settings", + }, + items: [ + "reference-guides/user-settings/api-keys", + "reference-guides/user-settings/manage-node-templates", + "reference-guides/user-settings/manage-cloud-credentials", + "reference-guides/user-settings/user-preferences", + ], + }, + { + type: 'category', + label: 'CLI with Rancher', + link: { + type: 'doc', + id: "pages-for-subheaders/cli-with-rancher", + }, + items: [ + "reference-guides/cli-with-rancher/rancher-cli", + "reference-guides/cli-with-rancher/kubectl-utility", + ] + }, + { + type: 'category', + label: 'About the API', + link: { + type: 'doc', + id: "pages-for-subheaders/about-the-api", + }, + items: [ + "reference-guides/about-the-api/api-tokens", + ] + }, + "reference-guides/rancher-cluster-tools", + + "reference-guides/rancher-project-tools", + + "reference-guides/system-tools", + + "reference-guides/rke1-template-example-yaml", + { + type: 'category', + label: 'Pipelines', + link: { + type: 'doc', + id: "pages-for-subheaders/pipelines", + }, + items: [ + "reference-guides/pipelines/concepts", + "reference-guides/pipelines/pipeline-configuration", + "reference-guides/pipelines/configure-persistent-data", + "reference-guides/pipelines/example-repositories", + "reference-guides/pipelines/example-yaml", + ], + }, + { + type: 'category', + label: 'Rancher Security', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-security", + }, + items: [ + { + type: 'category', + label: 'Rancher v2.6 Hardening Guides', + link: { + type: 'doc', + id: "pages-for-subheaders/rancher-v2.6-hardening-guides", + }, + items: [ + "reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-hardening-guide-with-cis-v1.6-benchmark", + "reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke1-self-assessment-guide-with-cis-v1.6-benchmark", + "reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-hardening-guide-with-cis-v1.6-benchmark", + 
"reference-guides/rancher-security/rancher-v2.6-hardening-guides/rke2-self-assessment-guide-with-cis-v1.6-benchmark", + ], + }, + { + type: 'category', + label: 'SELinux RPM', + link: { + type: 'doc', + id: "pages-for-subheaders/selinux-rpm", + }, + items: [ + "reference-guides/rancher-security/selinux-rpm/about-rancher-selinux", + "reference-guides/rancher-security/selinux-rpm/about-rke2-selinux", + ], + }, + "reference-guides/rancher-security/kubernetes-security-best-practices", + + "reference-guides/rancher-security/security-advisories-and-cves", + ], + } + ] + }, + { + type: 'category', + label: 'Explanations', + link: { + type: 'doc', + id: "explanations", + }, + items: [ + { + type: 'category', + label: 'Integrations in Rancher', + link: { + type: 'doc', + id: "pages-for-subheaders/integrations-in-rancher", + }, + items: [ + { + type: 'category', + label: 'CIS Scans', + link: { + type: 'doc', + id: "pages-for-subheaders/cis-scans", + }, + items: [ + "explanations/integrations-in-rancher/cis-scans/configuration-reference", + "explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans", + "explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests", + "explanations/integrations-in-rancher/cis-scans/custom-benchmark", + ], + }, + { + type: 'category', + label: 'Fleet - GitOps at Scale', + link: { + type: 'doc', + id: "pages-for-subheaders/fleet-gitops-at-scale", + }, + items: [ + "explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture", + "explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support", + "explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy", + ] + }, + "explanations/integrations-in-rancher/harvester", + { + type: 'category', + label: 'Istio', + link: { + type: 'doc', + id: "pages-for-subheaders/istio", + }, + items: [ + "explanations/integrations-in-rancher/istio/cpu-and-memory-allocations", + "explanations/integrations-in-rancher/istio/rbac-for-istio", + "explanations/integrations-in-rancher/istio/disable-istio", + { + type: 'category', + label: 'Configuration Options', + link: { + type: 'doc', + id: "pages-for-subheaders/configuration-options", + }, + items: [ + "explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies", + "explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations", + "explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster", + "explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation", + ] + } + ] + }, + "explanations/integrations-in-rancher/longhorn", + { + type: 'category', + label: 'Logging', + link: { + type: 'doc', + id: "pages-for-subheaders/logging", + }, + items: [ + "explanations/integrations-in-rancher/logging/logging-architecture", + "explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging", + "explanations/integrations-in-rancher/logging/rbac-for-logging", + "explanations/integrations-in-rancher/logging/logging-helm-chart-options", + "explanations/integrations-in-rancher/logging/taints-and-tolerations", + { + type: 'category', + label: 'Custom Resource Configuration', + link: { + type: 'doc', + id: "pages-for-subheaders/custom-resource-configuration", + }, + items: [ + "explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows", + "explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs" + ] + } + ] + 
}, + { + type: 'category', + label: 'Monitoring and Alerting', + link: { + type: "doc", + id: "pages-for-subheaders/monitoring-and-alerting", + }, + items: [ + "explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works", + "explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring", + "explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards", + "explanations/integrations-in-rancher/monitoring-and-alerting/windows-support", + "explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions", + ] + }, + "explanations/integrations-in-rancher/neuvector", + + "explanations/integrations-in-rancher/opa-gatekeeper", + ] + } + ] + }, + { + type: 'category', + label: 'FAQ', + link: { + type: 'doc', + id: "faq", + }, + items: [ + "faq/deprecated-features-in-v2.5", + "faq/install-and-configure-kubectl", + "faq/dockershim", + "faq/technical-items", + "faq/security", + "faq/telemetry", + "faq/container-network-interface-providers", + "faq/rancher-is-no-longer-needed", + ] + }, + { + type: 'category', + label: 'Troubleshooting', + link: { + type: 'doc', + id: "troubleshooting", + }, + items: [ + { + type: 'category', + label: 'Kubernetes Components', + link: { + type: 'doc', + id: "pages-for-subheaders/kubernetes-components", + }, + items: [ + "troubleshooting/kubernetes-components/troubleshooting-etcd-nodes", + "troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes", + "troubleshooting/kubernetes-components/troubleshooting-nginx-proxy", + "troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components", + ] + }, + { + type: 'category', + label: 'Other Troubleshooting Tips', + link: { + type: 'doc', + id: "pages-for-subheaders/other-troubleshooting-tips", + }, + items: [ + "troubleshooting/other-troubleshooting-tips/kubernetes-resources", + "troubleshooting/other-troubleshooting-tips/networking", + "troubleshooting/other-troubleshooting-tips/dns", + "troubleshooting/other-troubleshooting-tips/rancher-ha", + "troubleshooting/other-troubleshooting-tips/registered-clusters", + "troubleshooting/other-troubleshooting-tips/logging", + "troubleshooting/other-troubleshooting-tips/user-id-tracking-in-audit-logs", + "troubleshooting/other-troubleshooting-tips/expired-webhook-certificate-rotation", + ], + } + ] + }, + "contribute-to-rancher", + ], +} +module.exports = sidebars; diff --git a/src/css/custom.css b/src/css/custom.css new file mode 100644 index 0000000000..9941cb7794 --- /dev/null +++ b/src/css/custom.css @@ -0,0 +1,147 @@ +/* stylelint-disable docusaurus/copyright-header */ +/** + * Any CSS included here will be global. The classic template + * bundles Infima by default. Infima is a CSS framework designed to + * work well for content-centric websites. + */ +/* Import fonts. 
*/ + +/* poppins */ +@font-face { + font-family: 'Poppins'; + font-style: normal; + font-weight: normal; + src: local(''), + url('../fonts/poppins/poppins-regular.woff') format('woff2'), /* Super Modern Browsers */ + url('../fonts/poppins/poppins-regular.woff2') format('woff'), /* Modern Browsers */ +} + +/* Roboto-Mono */ +@font-face { + font-family: 'Roboto'; + font-style: normal; + font-weight: normal; + src: local(''), + url('../fonts/roboto-mono/roboto-mono-regular.woff') format('woff2'), /* Super Modern Browsers */ + url('../fonts/roboto-mono/roboto-mono-regular.woff2') format('woff'), /* Modern Browsers */ +} + +/* Lato */ +@font-face { + font-family: 'Lato'; + font-style: normal; + font-weight: normal; + src: local(''), + url('../fonts/lato/lato-regular.woff') format('woff2'), /* Super Modern Browsers */ + url('../fonts/lato/lato-regular.woff2') format('woff'), /* Modern Browsers */ +} +/* You can override the default Infima variables here. */ +:root { + --ifm-color-primary-lightest: #80bbe2; + --ifm-color-primary-lighter: #5eaadb; + --ifm-color-primary-light: #53a4d8; + --ifm-color-primary: #3d98d3; + --ifm-color-primary-dark: #2d8bc8; + --ifm-color-primary-darker: #2b83bc; + --ifm-color-primary-darkest: #236c9b; + --ifm-code-font-size: 95%; +} +@media (max-width: 768px) { + #theme-main h1 { + font-size: 50px !important; + line-height: 3rem !important; + font-weight: 700 + } + #theme-main .header-docs { + margin-bottom: 20px; + } +} + +body { + font-family: 'Lato', sans-serif; +} + +h1, h2, h3, h4, h5, h6 { + font-family: 'Poppins', sans-serif; +} + +code { + font-family: 'Roboto Mono', monospace; +} + +hr { + background-image: -webkit-linear-gradient(left,#f3f3f3,#adadb1,#f3f3f3); + margin: 0 auto; +} + +.navbar__brand { + height: 40px; +} + +.btn.navbar__github { + display: inline-block; + font-weight: 400; + color: #212529; + text-align: center; + vertical-align: middle; + user-select: none; + background-color: initial; + border: 2px solid transparent; + padding: .375rem .75rem; + font-size: 1rem; + line-height: 1.66; + border-radius: 3px; + transition: color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out; +} + +a.btn.navbar__github { + border: 2px solid transparent; + padding: 8px 20px 7px 47px; + font-size: 1rem; + line-height: 1.66; + font-weight: 400; + position: relative; +} + +a.btn.navbar__github { + font-family: poppins,sans-serif; + text-decoration: none; + align-items: center; + border-radius: 3px; +} + +.btn-secondary.navbar__github:hover { + color: #fff; + background-color: #273230; + border-color: #222a29; +} + +a.btn.navbar__github::before { + content: ""; + background-image: url("data:image/svg+xml,%3Csvg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 20.5 20%22%3E%3Cdefs/%3E%3Cpath fill=%22%23fff%22 d=%22M10.3.0C4.6.0.0 4.6.0 10.3c0 4.4 2.8 8.3 7 9.7.5.1.7-.2.7-.5v-1.9c-2.6.5-3.2-.6-3.4-1.2-.2-.6-.6-1.1-1-1.5-.4-.2-.9-.7.0-.7.7.1 1.3.5 1.6 1 .6 1.1 1.9 1.4 3 .8.0-.5.3-1 .7-1.4-2.3-.3-4.7-1.1-4.7-5.1.0-1 .4-2 1.1-2.8-.5-.6-.5-1.6-.1-2.5.0.0.9-.3 2.8 1.1 1.7-.5 3.4-.5 5.1.0 2-1.3 2.8-1.1 2.8-1.1.4.9.5 1.9.2 2.8.7.7 1.1 1.7 1.1 2.8.0 3.9-2.4 4.8-4.7 5.1.5.5.7 1.2.7 1.9v2.8c0 .3.2.6.7.5 5.4-1.8 8.3-7.6 6.5-13C18.6 2.8 14.7.0 10.3.0z%22/%3E%3C/svg%3E"); + height: 20px; + width: 20px; + position: absolute; + left: 15px; + top: 10px; +} + + +.icon-github.navbar__github { + margin-left: 20px; +} + +.btn-secondary.navbar__github { + color: #fff!important; + 
border-color: #384745; + background-color: #384745; + box-shadow: inset 0 1px rgb(255 255 255 / 15%), 0 1px 1px rgb(0 0 0 / 8%); +} + +.docusaurus-highlight-code-line { + background-color: rgb(72, 77, 91); + display: block; + margin: 0 calc(-1 * var(--ifm-pre-padding)); + padding: 0 var(--ifm-pre-padding); +} + diff --git a/src/diagrams/auth-providers.xml b/src/diagrams/auth-providers.xml deleted file mode 100644 index 933f142dc6..0000000000 --- a/src/diagrams/auth-providers.xml +++ /dev/null @@ -1 +0,0 @@ -[single-line compressed draw.io diagram payload omitted]
IxkIAY3U50WxuinXpf2HqFYoUZE+JhFGfZ8G/4B6B3vqbr4yfelxg9Wa6X7XHKJD0Ac3xZzmW6pc3TYYrN8oVs4E2rMN/5oFbHXU3Avnq2OWB0c3kMIitVA63Ypi+9v13rilheZupiWkZfohnjY0HLKcpysQ3bR7Kds6mX0gf20u5IqX4PETz71STXGYW7b43Le3wQeD1sNJ+mRvjzf5aOuCbtrOAPTKiPIiHzKLrRWy3Ydq0aojsj+LT8BePuwmiPnmNPg4KOawEbz40zMe5llkbjiOaGLOd9k7GIuXXfjUyoJA+rBOxwHsrEz7JtRts7dSxSxmhGBzby3fD3LSIvzk/aVanFszKeZHQXxn/3Tjq9UU7iYnvsCjHkcr2bReiChfRzFXr84VG05gvtTu5piDAFIovGDwIlJXlP1bF78zzXrnHfZwkiSKYSOvE+cTQdOUSZ8T3isibenoIZcDJ0jnoCn5zXKkq/BbKXLDIw2NXqUS/qyqySTttCUZlELwfEf2w8u/MjnOyFnnzzcQ5O8hZhKYqye3ho0sGFu0SVWv7shqWyou7poHPZ6xjZq5FLNhODRIKMJxaudlhUx5Zx5yAmpCIunL+4euEmrOzWz/sXzb14etKEhdubJNNyABsyIGn9VZJp5K0pj32YveSclNi9Rk5Aa8d/kY/nqMwWvVZUc6K0CD3/pJj43mMHs4ojqhb49zLjwfyjH09ExsJt48SkJDYFc02BshyD1zOs3qCtL5VEKf9dYkIjmGWwIWu4uF6d90Ize0suw7aZaLeqqx1K79mnWKurAFseld2sy/ge5xJszH5ohuGvLHclsUeGO88EplwP7q7yaOzo9wFp84EIsqjm9+w0Fr2VbX8mcFxYeljyLcs+zWsnHeq0OpJBFP3AhL4yXicLJ5V5aketZ5NnagDfffpl0zUIIkIFlwOROasyx6sKKcGVzj5mrNOHluTDLg+ZHQjzGQaB3tqBSNdsDtaTBY/kzUB1XZOx/xZbjS1w3tsjkQ75tSCZnXR1+thgNtPliHdnvn2DiMvkO71yQhuwqqA0zWYxFKsbW115KROHFwjorcQwdcGuFNVWDC7j0Yh+dZxjHy/Yj8ezg+9/nOg9af7ln8bVMhUDfN8gh6UY0iip1UMKNeiZQkSXim6yBcdWWWGyAppweSmkLPuBzb9qrJuBii6FpmM2lMrM3DzV2CeZIP62UT6uZO/jSp4U2oLPZVMTuXlG+CfihrV3s7FWX7Xpt/2c+4Jz/BMl9p+d896Gj+62+3IZVv8pdV2UqS0YLlRJVWzWcoNXsn/zhMJlkYqglwquv1HMrZcn52RVK7/+8yqana0Zf38LHD2R8ADJVZEzF1o+pvs4cr1s0JZ4w3vhojF3IZdp8tg6fpVRvd//He7MhD9pqE9xHnm91DTY8lUS5Q9ALDAuSbsxoQm4o6zt8l33t7DjP4IB+V7LBRgzZ2BQ4mgpchiCo3Z7VtLHDMk7cXUgC9BXvA3yh7wooGqatrWjr98Kj+CM9mH5ZiWDXquiWAD3JMKWRR6WcL7jv0784OV0nhkhcrOXoNWUF3d/3gD5MHn1pHNuYp3MDgUMsPLlmzIG4ktTOUv5Td9rjn3oOm1fcazRgl3cpFlN9+rUlvRfnKFkNypr8Y4wHZVCZl+JuffzHfgbZdD2t1qS1tR03F5u3kccJLlCliEm8cG1zmiIXXSxyAXMqQThVvRtp4/M/9EdZL7Hch6sYTn7z9iwVjTwTWpHCttVnLTZkcktIQLLnGYIs/PJ+S61Pe+B/QSHPcIlIzkRLwPTx5fFsaJl7jI69KALVQwB6UHtuf/FkIzN/Mp/EajuvKCacEfmYZwIs62zo9TAHAYI5tYyw13UnHPXHpKif6MD7rNRmdaE5Kq0fd+QqaeI9fPKXj2KIv4z/67XxRxWzAb1i0kdMj6eervNu7Ycyi+dgwAFOl94VS3UElOREqlC9XOm1KfjqGVp+W+R89+IG8IAPPOh9vU4mNH65u1orDgRn74PiMJY/d3P3rj0tXt1QdGmEN5KDFJr0cM+Z20zkDaCNPtl0Xz+Ctf/+7QjXh/Q5+/pBP5LqjzScr08eaRWaEq/RtM06Qm3a4u/fMo8tILha5bM+C5inYcPza2hdtoV1EjG+TSxwwMjGDfy71y8TrmYwRim5Br3jC8F+Fxmz8+1nKuPr6HY6QFW8rNEjm+PvhSC6NFQF7+o0Aw8nJVvtvj6IheBs7yhl+488cFXmCoGIuSuIkDywIlVE6353zhjrvoRD2pR5gEnmm5PlZ3BHmCMGk+ILV4TMexjqMf41pY2r8sDKqo4dgVEtinwV4ffP2iZFhk9+3VWw6euZYxmqsVi280fnYaOX9jTpf8lJ90lw6QGNn2ZOrWG/zZ+pffLUE0/EwaGV1O2XzSJFjWb5xAMHbYVa/8M4bxhKJyRdPxODiqeIlB5C5Kn+N2Vw415QpIdZHsVVXx1ZgZOPK+87IG8abXGjj609WL9P0cTXIeSw9ZOkS6Wq6Gu05CYIcX+32E9QSLvvvlN4gju0utApohPuTqW4RGtvBBpQPQzD3U6ZS7LJOMmrH7046x0omBa1XQXzUE3+3437MD01Qnij537rOXi0w2x+8HItLM/9arH/EEPPU7IhwsvFH3i9sLbzabZLDF1ZrWDy4Tggo0brRTWJwZZIC89AC7K8SR58eBKKNUyKAHAVM1WGMov+rlDIMIqvkL7My/qz7TdnderYpaY+QYxDom6NIdmE8wJNv7ms/CbxxSAAx2yys8fqsQD2fGNZLj0LASmUT3WaB4Mhkdws2/fR1/J0uJVRXM23B8Q3cdMLrtLBADeXbiaLqAInhxpaAac4Scu008+ub6zKeuuFVS10fS1LUV539dtzMc3vW0cxOO4Lm9wY4uXzX9cq9SRP4FhsPHPVg9P5OdC8k852LyKqcd1edz23ITNiu7DIgdiP2gwDnkEys+1ZKgFow+mJsdf/shweFp7BXN8iY5zV3iJWssXWZawT7/cMMl3qDm/Kaewhf6PhVBqNSK5EOTVx93y+Wl45lgHEQskFXEe20YqSMWHguHwghbpJ0UwR5ZAe4SYzfVu+gZFfAMcagL2AYfM1T60v+/Y6i1HN/0NVldIxWdm5UfxixQnYpFePtSQ4p/tPUHH9/Gg3Wnrv9wTH1q7gdcFvd7i3ZrJH5hbC2/qdYqJKV5Tz0Y3yB5QIvVAKu4B+VEDL3AqWAhZjwa8Vnmodagdfu3oFSp/tXsboRlcv4m9i2m0ILSufvEqk83FVOSwqp6xX8eupusgu6G2zxegVnpDxqxhfCnZEGrG2rAdm+25BvxcQ+CzJK/vOUhOHfeYfk2LaooZy2Sn9CTd8hBp+TJMZ3JIym0Tz1xj7Idm8vfAGAHoPl8wC/2vRAnMXVn6N51mK1i07ttmLURwcIxDt4WaDBOxuEpUjz8dBFdGqg72lGs8gQpRRqVbFeTvZfwvl+ieA2TZUskjvE4YGBqJJ6mK1qcH7Dj5MQ/gKzH1jPckLQGr6/WSpia1nbiEx9ZuC95w0oEWdQWuCaKKt/xtK
bVCmgZi9tcS5JDggyQFCuXvy5WKFIL+fu56eM4EHeo1kbJd5fv/6284j10HM7sC1e8QW5MH50f/RU3vnEzicaM3BTSDkhTih+QegHNLGxVGFP19C/Zosit9IhiueZTZE3IOjnfEzCEfPjmazUIHXtBSeH25klNyTPL+XPCT0grfI3hKAQB5vKll6ZbRS71C2LmP2VjGX81lFc1JctfxuqvE3u82/LzyEJFx2Afe/0wUwZGMcUc9gtYXcP4ewumkn56elp8Ez1cVd9RS7KQulqwC7U/SZVf2vAxnAWVR+/kAH396l71uCsUZd61y10TMaXSinbOtVngmaoyaApoSrJcClMtQ/i1bhgapWllHWdA/VZP+qeTVxSppcv7aN6+Oua8/u2L0GwwR9OFTrP6SdfNNNCCe5YHIMEKkLKogDPyERcDEn+XRQE5TlUAXBC6AdOOL6iNE8lGeGUrr7aBmL3JuwSBjKlHnvx+yNgXzCMLkZ17TZgU5uiIW657lWD01wy5jVS48aK06yqSFr8DFn69vXbZ88mbqpq4U/jbDq5UrBAsJdi5AcvSaiAOIrHHpqC/uP48DXW0ysVTMZ2C98xkroFeBCK7RdU2P6xCfIO8D36TKNInXZMVn1fgfDBoPCuf3XuW5xaiVn+gFwFLHXn4FPGJueq6KFx7cfX4O8Rumvjll90YGrzm92iFc3yGOg/u5vTcFLNRmw5Bfv1Xo/SBXXMcU4Itc/AHptDqU8NzVMonr/fGF4wUNwzB+X48LGjI5TvE1sF7eR9F2y382M36U7ykZw2lwF7YpH+fJauccAAbDAApDCg6IuZBx7BhYIXXOtWNxszyHDJyV4kAXIbp9Xyu6vTf4MkVCbIcK+CMSWCIa8ToGxVtwXyh2ZRbZdE6/IcQi1rYqlKv2Fo9h/bRSeJad/9XRgjskrLNF1PpLgjSRB9QAYTXaJEOPQ5kuWR9wVXYAgvLKUGhj09x3GUvmYef9FWMfnEuDfAP4MU20XI1cvsxyhFqo9TA8TWOK0iCmM7GmDQNJ20uOhSwlU0nQa+M/v0WkxDu1ovJjP4aRZHpz6u6VhdU+uhVTIuwpi+0yjPDwomkwCdSct5VKUp+07P1G8lTovN38gH+Gr42UN7lq/eoDCaNmUZYD6auUkx2jyVq2QA2kLMPrhywcv/tAByAY0/k8UlJdTDAnlNX3e5z8jaBRTe2oUAz0WFFyvIDfI8wf21U+JTTMdH0nZDyG/4dI48GNC6JAVbD0d8cBLSOVpqWR4BvOw8wa8fV+ez6nDLuw+Fv6jTZtOraCfTlrbxgHfWwG+T4NH2zQx7Tj+mgShdwQMSp/3YMVCJz6KqxjrkAbnbBYp4Cyu7D9cG+hxIxqbx68wUr9zmsEpHZuPbn17GSAazoDY/hogEyfHXcYJcvTTiH00VufBJOwwqo3aCwcVROP9EeJFevSDsW6MMvT/G3ReL+MvMdvCB7CdbuVvoEj/uvPAvkRRD0ZSmhcYxmJdNfOYqi32Et0fl6UKrZvZMw4uVFzb3tlGIrIBDWfrZZ4Ysx96+5MftEULZSJhrruvQ3ybjir3bD906hcUUgYCpZgQQGRhaAjQEd+y7DIIQnsBqo4FHlr9a8oNxR+oG0yck99aA2/pVPRNNtxFdsHIJ+Jq/UaeRAH7Cc/JyUB/C3Z+wo2cHYZC8u/I0X733BZnOMN/KhAHkU69mzF+FXugb52NvnYYcIUyLD2kF0PWOFP/k898yHGhHS0XXIr74IZRrslaDo4aB+wUfGGBPpXBf5RU3d0Ys1SEYuvSeXL8XnCntgSLFqv2kZypxLrmxluSomvUtwqiryumK9F1/++Nu/J2orSWTwD4+klmnNyUjiMxKIiaOc4iFv4IcGQ9rYgaHXr/gIkPWtUsZz9xg014N4g2kbMyiuzPXXoIiYyWLTZCqPRs/rdNnyJEid9btvvryJhhtoekklOM7tkVc3h2DdS7CLi/lFg0Y84GRj8TPLOQHrr/qHALi9znAdH6TNKk+yWbzx++17j1+HADXBZn4FEjaN3EaxjJlU0csFKCjzf076UKuj5peMYjSu4PlkizcuSOwpZWWyd7lI04PvCIb73nhhs3H2kUdHTg7RufhTF25ctA4Yfu9Sdsp2CD/8K3l/vwZuRLlaNHvn3I1sefue5h02ZN8BZWpI+W5nltMuaWWHyDb0FcujzuIhxz8MVh8nMpzrGx0tQ56gZbM5HXqGmjUgFmJ01JQao7dmssoNet9ZlJ6QiD8uFjBqK+/SEb68ZwaQctsjOvPJkZVTefKl6h+a0CvnQeqoWwmgg+9wz2ciemFScu5147ULdtS4Rh+zPZGEan/QI787ASczbNDc9Gpon99iDopjjynwsRpDkjg7/iNlSaKINWb/+oWP7w8W3WtA14tiMK1nNmkcmvNvRDrvdaGfXXJKGMoEzoJ3PpSMlNuP/Tj1F3A93v87rqctjVf3qqBlwDeJJ/rrHcifobiwTRINJJV3i8mx0EsDFvqbFrqAe+bXI08uxlVPjNVmr+/jqCAqZmEm0zn2e+7xpabRgFUxDn0BQTByuDxZduaPBk1++PeRP7hXRUPs5WFSrm1AYDgiFLnuguG2rPhXKOrNem9GU0O/55gRka4FrZJTin4PWOAZQ3cfh1oUHh0sSSyK7Ffy2jkNjdsxIcq+QdKrW79X/ivrpTUWrlECyxQJh5uzoMXaZyaZcSRmRQtkTYAq59MGWqIzehgU2SWhn+PaIVVoMEPhJkMWz6wQv2zBMS87/NeIP8fjnULkK7MmVxkEDwCVuGNHHTcqh/h3kqFTJZDZdg5Sm2LruOQUcBP3LzH6rgH3+BUtNhhLdxY9eNqjvauSecXB6cctYrX/pe+fy9/HZksYX5WLQLJ+EuUWd+mCP4n59kE79ch1hGkuEMAk6tJdlk5uWLg2LFBVNjAPRP6Je1TFZhblSRFVNwwkWe3uL6W3HLKVFHA5CnsHLLXr/3IOqAsPGnfy5JIiumCcHXFtB7+11sfuq0GdjWMxOc4S2jSvAAtdqeIZqqquT6alVRR7/blRpRGn0K5J0jPavRDQAgxCj104YAnr76l4koDoxkxespr4AXkdqibYnc5OL2OgSXStmVYr3Q5jieYYDtffUI76m1YUXURQ6QTlNxt9gsLUBy8Aw4rCb7wJv6wKRaHmu5puhNnkUX+01U59mptpt0/2gxTWIrig/Y2QneG1y0+hQilzZKNB2dhKIdqxjaM+SnbOF/NL6QvMXvfbLgu5Sk81tojn3y4EVvvyrA34K+RHXwFkBt1gQVuOZUe4aHJX5q8qDJDlC2tqn0kAChhbr0mnCIKWLXY4yILOubk82xH1qn2xwdmDnbOzMoz3ARPNq2QDlmsqXR9mCc3yIt40MP0OtX9Ezwo8WPpWRzxTB+nkQC3wH2dDdVRfr2QOQz5BLVunjwzy95NajqECvd8Ea78RoUDUkIPsnlJ2+ml3wOaUzqPP5CaF0pUo9u9z8pqCOnaMYAe1879zAOCwvFcc1nF1HHRbYOPSzmqjVfDEiGcB9qzRKIhll5C/0duXi
mx56LYOEXM1zV2u1A8Qg7rtlZXi56Wy/HXbGYM0fe63L6Vyb+QuZNGXGYV43xcgTSujCbPvhVTOp7tEYOC6l5XYbm1tWc4sCT1jS8h1bymhor94/Qaq8X2OIBVZrW5+w0prTCIyH0AZ/xgq3LUXr4X2vT3l+kqK7lzumNMpo7b1YsS0T51bZfZTLwZwdGJ4FtWj/xHZm7sbMA2a/ZJ5IhUlkTkLJh9dfFqXj2Yyg1wEvBgt8/t75zv8pPbFjyAFn2tX2X5Pojm1V6mPEWKM68pMu1K9qMVeHPtdz9Zswe6lD+DNRpbN8/cOUw+NZLd8sOaOOoVUaZctGLCGHM0XelElf0daHT8RhsYQC+G/oz+WKEuYOg3kUCSEvycNcd+2qZUndqQqvIlU+HIU/PcsQowBxs1CUwQaHV75+d/RioxtDypf97rS2pDKt0JvtP93P9sUFUiJvzObebyhPy7wh8+POv5m0Qpf0evdwx447v/RYGGcgP73wcL/nfb7vw4Wxv4Pg4Wp/weDhan/LwYLN+Bb/vo3WJh1EBhqwb9wEauEERgWvHnvfzThEphhvsCLVA/58jbM2qlEQxlqTBnK1K5v8IrM/jIkmTPJZxj55VeBMQaiNYsdiqKnw7lYy3yF/fQbX1HED9dijN+78aLYve1OdTMVfeDnguP4viBRbd0yptlNnC4GwsYrk4owVa+kqlkTptJuK4lFR5fSKQVS338d52sUWUe5UTjyPFfFcMwvdHnDMWJGX3ylFEru1XWehJX2YSuC+pFfm2XaJ+H+jlhydZaxgK9b/LUqGyOfNcOWhq5xDM94ycno7O/vhGHAoOz5mzMxJzA5C/Veo3EK4zMCS3S9Orz/l96fFQOXeIz6foTEazGidYzI7IzEjGBrUuYxCgeg6PMlGZvNvpwQMCnnvBftJXHz/k5NMupjBdMsMmJ9vY9mcCkbZ4x6Z3j2EyCJwmgcyui8KTDHj2Fq/f13p1X8mWEc8v1MRfkZGKO4yfu7EvXNKco9DsHMl/c5Y0OmUIdr404sZhwH3yxcRFqDTU/fR+ptI7H9aYaeOa5xSCRPZf8KxTVFXe/z5xzRkWsQUS261DFz3UjTmAiE/1I7sYKZBeLv08Z/Q9D/NsStufDajSlC1S7/pRxAF4M0MSQ7sWACy6J8ydqZovqoi6rUklWxbM1/iegItueIBkgEyBhlCWHTa2OClFGPG416C58szueoqvDxAv2bDHg1DBITEQZil1XBmEa2H0X4RbEos1ZXX9sDeaqfP7jyaSQtxycaPFMt/8+c+6uAfprghuqrMSYfgwU3j3FhmJ7nNaG29VVXzf0vdNI9tlPgky79L1/vM5sESp9MdDha2hTxZSHuL7dD2WOwM2cYSyUzSvZVpuZrqeWxnEb/k9T+98F+WX9sjkn6wc1thol6u9e+nroBy5Q5MNhb5Vh1/Tfg3WMl7mHY5mHZ4mG5vzqBMTncR/c4u2E5bnAZWf7UavqSibaVnowpXrs8YYEpDJ11UiX1cgY7rm+I8R18gaTY3HAfklW1H8vZzhtlay925bRlxLb91l1X1y/Vf3V1Lirs+6oPx39a1nGU/nAMv779afYVMVDkSqHzWsEb+JaOWSBfw/zV5086hfz+KtLP91Cf1wL+SGtUmhxpqk87ke17mKhw+px2vtqpFV/IVFb2gtQWMgFVbFP5dZl1zD6WkPHMcW8Gt3n1xiVG9yOFdmV+690sV7Bc/IGN8J+UvovLdms/YjgvFgpIqWMHZ12x0UOBjF21ruWXaAkFaPHq+VeFcp+f6i3CAX3WCXQj5HYcd8r9KlRF46xQmChPu1myFSzhVWZu6+u/DznYTD/cs+i5wv6RoAF3QaGp35Zkg1x26npqTBPp+LHP4L9Asv6UqB9Lj8d5qwcKxG6o76aO4YfJUw8DxQ1l8rd5cZvxK5g+hNvJd+A/bJCaqb8j0xTYL57R6bs4zf5VTz8uJ+Fx4KEbjfpOPi4RvgEj8qShjlKrmIGKbqCjkYuvg5tfmo2g7QRTgtg2IhgLKq7LaJqQv7FmVNkhk5g8Tu5XHYE6ZuypcQJh2ytG7Mt45HUwy4THv2cQ1MvezpSYyCo3B+rXTQPpN52BOizzspZLvQcUHGkBWIw5+hr02ieBDlvdYo/+d9mDZYZtkJPwkXI2iVZehnLKwDl24izjfy3kvRTmhn5BND+LUr39tVKcelxziJ5saL0p4WYsWALXXiR8VtlAohtR1wOk1R2EbB9kWVIBtx9D1eHy8U/TTzpKxvAGkMjPDXn1OKpqGVEooaKNd6qlMktEDHlafyfBvS5zGn2/7eqqu7NA1Xp66UGm5buksHjAI1nbOHtD0xhxhZz8tcGxCTr69NdD01yd78SwvBVeW2JJCT+bsl0oSPoAXSpEPK746+64nhFVWWbg/Ge2uWkAQTCggyFKgCRaIPMiGO539ey+6U9TU7cX3NlzFRpVZVQg8lFuMDYRNN9QirDYdY5CoCY6kDo/tjpplCJT7zEElbI87T51PLzBQPPK2SDn2fWyYVDTmPT4Xb8pVH9SzYDnHh4e6IYfbQRyosxg/Kx2hE1Lcdr2pUUOKfhsh5PO3/HY1/toggkP0yAOgrnYoHQfU+gLr8d+ELkzk1+zh4h13JPxIvl73wZkyfJkM6OY3Y9jHxDklXlj3BTjToXH9dfwcSYGfYwIiOceJPNxlfDLVoQNaObKSecn8nNo7kT6zM9hoahIf7JZQ4r4uXkL4a1ubyxNfT98DMwMgopRREo8x6ynVY6Pi2maNoOQpM1bdu+ocpaOWljBHZUiAZ1hMZBmdh3IhIAE4GEc+ExOVT5SFzT+hjL0jAfSLQX0ZLAOXoAeOrC3lQ106axWGbdskItlEzIalR/dabgYrUV57USALmfRkDf4bLrD/xI3IHJgz4WinVaGwL51g+LQn1ZpHmWcI+hzEFEILAQom9MgVf23rwWyubHmfDa5zE/tS2x4gU0GrDxTf5M4TOtZs6RKf0gJs53K4bYTsB2m7vURsasrTj/zZ6+ujOJ4Z/Y5PvMvqFRHF0JM0eCDpF0opn+wUG6CUrIw/GrFOoU8KTKiykEF40Ehj3A+iYDO+6j7FL7+YNVwQfRbWnyxjAk9OpewqPhIvqMOB20RCt/NvS9ca5fDGPDtBFGfIitTxuZXbAPdQor4GydE+CpT2bxOV5zvdNYaZTPnhPGUxjQ/HhhTtCxsqn8qxSt+ocmNIyfnn/HvLBfH2CUFEYxcbXCT6uvFixaX/EiUEfX97ZlfzQzxhivnXrt13n+K4VlKcZ4RRYJ+lrfQemy6fcLe9jI1XmIiII8xj/iQID/i/jYZ0Fd3SGj7jyq/dvVJSdNemSDRXjC2l2wKxGmCf5O8DcU3QECy5IlH1UzSdWxPg8x/z28/bAN/zvFfzN+FTXGxREXmONGCH1SEZkqv0oRv3f2y/E3Fbl9UsSRkl4s6lAVRBzTwiHMP8PKoVmgoe3itA5L9TRq72+JKXHvAHaFYlWf+t8Oy8OD6tWrw17hapNEBlEWMB3VtvEzE86Ci6wmVu6+EFecPBIsE
/bjQkp7CEXpWsn8GosxeSoZn/JwuwCTTiB8wAykWiC/ocDdRmO6jTMMrQzUOsiaTN7aDRLiltVSSVp9ng0gLdvvA+qvdf22E3LViD+Xor87mkM1OnlYMkUaUhSTYTyd65Ofc94AJAgMOsyCpdpD0IC3UIojPcozY9/1xlMJmFsn7fVkuIKeQrFwEApOjDzuoawWO29HxSkC5eQPE//sjzuhZ/CDDqOGgSx4Ja43LkceeqPK/frq85L01LaabXE1Bhm4akcO80JCZG041debdG849i78H4VEAraAiW9BxpRx6q3j8gizxi7un4f/CMSVizeuR6vdgv+KbwHTVkjgqvVJwTUgcsUwUmf6OcQWgJ8heA/Ugp9f+odzJn2ZkqVhhqaDnhvW9gGyiYaGICEGXwpz+MjOW51ElOWpHuq7fPCXHo1wRuEQylHCO0qDIgtZgnHJAWg3ew9L00OKsDAJ8X4l/k/Mqcf/D1bain+0gkSgfyQWhPPKuaIHf7eLLo3EVWH95CdToTlSml0sua4qWc1B5y6Umtv7GJstwd9E8KGuzb6A9JD7O1rGr/6qhY2uhGk1Uv9Gm6P75veEE1OqA127HId7xWZ1dQNCVCSWdMyXV74sDG8qiH+D15coQ93AiVMbdDSiF/6L60QMSCs5zv6IqL270nGjKAolnh1JkCtShkQ7fqsCD1Irs78ED8EkJDN4BRi3pj1ZsQOjWMdS43su4y/ZVrQrHv2r4rwj3/sjsXx3rg2NzFiB8zfkVzGxx9uCaROf9JpaTjyO83N1YtzMaclko8H+uTePLuo8JuMNXEymgM7zgVRr7RrHGsy9RTWoJZULzo8vSrcmMwF3uhxJ+jPxdKRBeuFAIBYyLfgzrxsydkBciKHi9m6kaI/fKF13NoDt438TeU1BmTIiSAJl9sZCBIn4QWgcHZpkk8oVoki7W6Lvuz+74f+UJf8aJpZyWWIqVmlPeL+lr+7X7Ctn2/bZ9ZavDgi9Wsf/9nFNqVhBs8MW4gvLKc6Wua+XFZMF2OIE16m590CG12R1Y8dAMZk/UPK7z1iPDIRSH/XIZ/MSedcRQd5HIVcwekhaemB8rKoshxPeqUF3/GeK3ejK1/oohOFlKdL7WPCJaanz8I5q+o/xVemWizI5SZlEwZJ36RgCavAe0EYvXyeps06zJbmqxAdex0ICSb/erbF/1mIzggJGCleowUyZv6u+vRkoTYvBETU7pt89nfU32n2uu4zcI8BdXZ85e1wL/faD+L09cNSVL/jQX+iv+clAz6LPA67H4V1EimxE7a6plOr17ZXFzx1Og66lvkEffEbGScGO8Tyj9D5aqbuxgkvobp1oxvpo1ksQ79z6pMnPrTb8hDMedPZVJvvhAnA7Ky9VeP7EEOWsYDlayDT5Grl2XS0DLVROTnJ1QM08b1rXE2zxz6cwdu5sQvw+Q0cU1Qx7QC2TUMVfnQj1vPZJwjdLgPsdxTGFx18p0Ml8bkvsrFfKfVQfFKFdlS0pbfg2SQxV1q0n9bls6L0Gx7G9kLws/kM1H6Dn1xZ3GX3JNYtdNh5q6BCpctdS2nexj8H7QkLPK0i/d/75OyAqYHDNqQEmXqvgSCEV2tA8bJ8d7bUrNLkg8Ez+YWIfOlg86PxHNX7X0/XMGPllwP017SBSkvCTopcVgNfj1Gb6g/1oY/52VYpeFS57XntI074LE0V82x4b2WOWOk99sjcSwzIoZ4Yqjwlm5ZH9p/smG/rox5DYYStrWohWGTNyLH5t/8WI/rszZhPWrRY8zDHcIva/W1hSUYboGbAlmhRZ8Os7pcayyjC49DNMSv5ptLzHlkYT5pVxiX5Af1h6nsCYsUpBgtwJITPwNxCe7Hps2NmmYZ/9EZrNvzLdmpzB0+XQh3Mz+if65tSWdhWbVh/Hc8mTEn6OLlcBbG1232JC5u1cB4CUVYaSOBA/dV1mLjheroLz42oTNbnXBvf5f6g1Y/el31haJ6Qnbz8I/vxcakbUsm9vE7CHfd4tkxtZq7A8/FE6QJtVlYrxnshNU7nZmCGjVpv+DIq5t83RvOSCrT18e44q6A67OERjFzR3+nv46x5gIQg+/DiG8/C/qNNq6KMVD5iAkOJiq7OVNCQB3BIfl+jljdaZKsQJQdm52nfMPPwGOcUzdMIl3YwV+Wyr3be0X25i6VRRXwHrA3nfQHAMGG/XE8g/nmP6jsM0m7qSH1qF31j8GYB1nL12mNM1QPQfILsAlqx8f+g8lBZC2G+pMfl+N1tUfSrKCbXfo3VtSQ/8I/l+up/kh1fb3Ka3C2P0mpld1GnrIR30GWQBR7R7eVFtp3osYEOu8MFH60a81sBLInG3Mh/lgXHeNFHkBDk0yVsyWTMbkVveMOkmB1XzjlZquDbrWIfn9/tLPe9c50zR3P3Hny1jLia1unflAXKxBH4cD+6NwTuNIUIBkhxv0DLAmYwoOz6HKIHcMcBTy0bCOuPzXJGpOV5rL0ilBZjampJqrodiC/cvBgTThhxGw63dO4qIQTCwR6QgpQvOGS+6vMxvo72QbG1uwE3CM4ZBX+zxnP0tCb3k6mPSV3AntEHW1MCMu1uKNaEUOOB6cA7S85O6ycWf7mxX/Yi7FTEx2eTZ4nlki/J11ylZswPzt/FQaVWB9KMr+zqwmlmGMzC+O/o2ZFZzNQ5VdgT8e9K7cq4UYjxyhCVnY3Ea3Squ5S+7Z5rP/t5MbkmqhBIiCvQpCdkouZApGLqWqBFGVsewRHhN1zNu6AjNE637kv4bHmZx4yzV6WZSOD++SGeDYcJndkg8bXRF27X3I307aDdmxNTar5HyvmW0oc/VDXZn+vBBUiq+eDK7iid1Vwl9V/KES5cqtep4jTz9grNw/UNwbhcmv6tUryrL4zxPY9IaoubwYH80CkPS66epmHEJqyT0CsBQaQW1sNcad9lT066eB9QPN4iInbg/ugGFPhqHX7cTYn6GKPMObMgPmJ0w3KWJXlpjiSi1Oovrv7PYf742Sn3zGBfeL+PNvUPFKq8yYSWPhm5oEtuUxC9eX805ssBh0oInWokGsICh82JWoO8jD+YVuH10dp+mquv7kGlpHTBt83dOsoL5qZUpGerK/8yjwzA+SoW+eQJxiKpCxJlxeIhKFpRLNX5DBZUsk01YLdcM4ptr1fSOzSn8mNLqsjjh9iqZyN7rGr2Rfn5ZORNluh4/u5son2lvkydK/HK8ddvkKJfmEGfCTzAOOK8W0DoXzMqTrfJbVGOgQUykI2q4Pw/UiWY0XmmbCztf8JYQPlT0vE+k1ybo5QVzceZFgZyvdrrGqN1yiI7PW6JQMam/HcoOKwt+4TSWGHUFY6YAVdAlpdztGz4X87EkEMY8JGfOrVLBYS5YQFHdjAN3xWQYK8N7KNL6/uX60EACyThsDYMjSdhV+eq9x9rL/jRY3xeB22nG/NbIrFla20ALJ+igH9D/UaiU2dr6XU+OdqysgwQEx1+OwNss7h8vCaT6ZfMPToy5Wv9L57S/Nv2xd5idmW4hth9FJ2lDsWzPbnB3oIn0+F4bEay3qdqaAZle
GYk6AXBxpesIQKQN3ZvcGDKVtzmnRXZ0qktAzaiSQdBWrG3VLfjbaNsHA2xeOlFcoferDYO2kb0PNdhupxnDfGCXx0ZJBR2tc7a+NGodt3yj0TwmAuDrZmbYMsao/FG7UKFPVEnTHKSnM4a3DcAhP0i3RpPl7Hpi0OllnTJDm+OjTinONYF+TKg6mnD2LL5sPyM30sTxK/agJigllu/3CyMB6c/3dbE+mEX6mmr8NupWTPVTfduUhtEK6dZb1Ujvic+mPER6Y0wjBNrFr6dCMBFlLnzqMQ00RdVMtYg3KzBEZIi6F/DvQyrBk28vZhBFf9HJKYARF7XV/IN42UOVl0ce+XEEH5RhK0LnZGSSaycjAFynZ8OZE0QMId+8tmjHfpZffZOrFWDYjb2H0CztzD93O9PGI7LXVj6cTZ67pEtsn3ykeOSukWYydHYG/TJchPpMPTO3xvaq4YgbaE7eERmUns5crofrL4uqXrUOy6JR0opDMdwB2elqBAH3CA3Q5deHDbuhFfftelnIpz6HYjKPaEMjlSj8c/9A/035urQ15k25zGcXt1UGvFqYK6zfPSNzCM68lh0RonYrIBuQ34hQnq32mfo4MELwHWA3y0vHwAZrQ6oQf/KFhcXqxOde6gnUsDh7RB0+4+UVSkTqOMVPF/l27DyU2nPBF8gJHbIanLoj3Dq8O0Uy/9TdGdxFKffpkhLtaMhIIMS5Bw/3xZdhzDIic3XvJywdAHFG4N/BfloTA7rcTmoQ+A+bbf+ov1zy3qznR39CPlUEd5e+EjNLwMl3NFT3bHd7BK+iv39l/8QOKvONJo24KXe6N8oDUfFrwK2ngm/OZhjEtp1eSbVJBVHkFyBGDc5Xa7nEccL/sazIkeLlEWp9HRaqwsDyKrnC9egiIwczzfoSc5x3L5p8yrSS55Wz1izSP/ugTwfMfWbjYVBAtoPp4tMmL8uMkiPozBB+t7OmqiSipjm9HWmf9BrZO7sPBXbkp2ZHCcKHw1VVKXJFcqb+P3WLGlsBJX/WpPpR0O9gzCV6zcBOeMDP6iNGkLbJ+gzjJ+NN3V7IpYKSWiMbuAB9E+E2qSkB0e8aq+csejKOsS5VJW1USSVO+XM7uppPlhvelw2R8rwf6ZHlOid0t39cwsXcUM8oRXqai8KBy+j6aJjDhqFHqlyLoGkWmHROGD8bsCbXhX5ZEGiB1Vhc5l4/9WNTvdxISkZiELOh6ooMUAK6Ich28QaKSSexlPZ0sqa8yupgGMdwYsgiHI+bXfwSZuL+mZJrSgALbLCDNArEutsX3w7Oob4PIVodMdL/XdQ6/JOix0mLqbB2/36YXnWh1OJjGqrO4UxijELLqnDTfnOGYEBFTVi+E9BxRKi5Xrubh5vNVW/0j+GyQy8nMC7kMjm8Rv4n+/gt2zRbwztEq95adrasNSxKH40LZrE+B/k+ariNbbhgHnmb2ymGpnGMr75Sz1K0snX7E7xmvbL/vdpMEgaoCCPwSuH+5WCH5n9rv1YlV+xnYkZrwvscQytBQye/K9+bRtoFZxXYPlWaXnIaxEDJ92tIN0K+VmXXQzHGr/HatGaZg+87tnP8LPCiw56eB9M9vPIRCBpO5IniDH9f0fmINciT5ztdnuH4fJ7Afs4XyjxXq4vi564119ArIVi1tYxNcz4CW8qVniHPIZIWg+QPmLfvQkkULvSj16jNrObDUx3gGKNL6vEFKhGlO0AlKr2WJuofjSyB2BccERmgeftC0cHv3vyGn/TW2zxZFg3wU3dbx4jcLuxJJIjsbUr25OLjS0ibnu2UQ5iQrRIrHN19Fu1nBlJVZvj8zb7X2TkBpTcIMvMDgj5AKOBqTk9sHAnO8bL7X4vyFI1dnk9MhSc0M8NsxLcGv3T9RTOsPbEuH2Ra3DATCX8y+xm68m4qci+XQwwuu+lQj9wAwJcVlKAywU9th6PQz+vZA/jpqb5DM4Sw2Yoxa85lPBzLjbM1KjHJypaKy5Sn451ZQsxMw5vFELsqscG39NdiQpGD+qQiThlWkGyjzrc2Ygxl94Ugy4QQYEgxGrjmHsxn3JRF8Zv8WDF4sC44T6NAceGGMjLd5isOM8Zw2mB9BO1OzDOKIsDljJzd5cvn8oCTwGVONVEqSdKVRtnVtIfDQIgmvUTCbuMoNAq3pCOrl2ErLCVZewBF4WwVgvC1u4zFmTahfNFQ7y9B//Z/11xwxmor9eZ3HclhAXsgpmiOPphSdVbvAXR64Wt4/hXRyWV2jNp+ts/a0sVeDGvp273F0q4XckRn2Gc+U/Bo0YJgrS2EHU6n4gYS5CzGpJBb1r1vhUhK/STiWm/HX1ZU1uEM0ZZsxTx7TiE/un6cffS/L7kKDPBMGgHOJCMq2i/udiZgXKqD1/vJbRiq2xJMZ7GRormSn7ANt8PUinp0d3PfgX+LRK6vCUYLCPbV08iv/AHxpeOolzCzFtKyaX0P+00dDebf0+pNuBVDmIazCdgMz4DHAC17CWdQTtCSngskUqzPZrTjSXmMMQxUu9osErGMWD3F2UeFPlhJPEWWWNhojCtdO8y6jMX8j4fSaAmsANbZay/thuKs5tficrlP9NgxIVDxyDD8vg3R5xDCFgrfnitGLDp+qU1DetXst0bdsw9yaaH6mT3jK5oJM88IHnfv52qxlDVxUvjGc5/8swM+/rvPVwJLE4RRv7xKbfqVH3YRf7pEJDN3GH7R3421URFNaZ4vu4wy96CRRM6qpqV6Uf+L1g/Z20rskJAl8+xv3tQta8ciYHcK8SDnckE9S/Ul21+ns86iVPoTGuClD44y6VtBY8gK+KFOYQtFdhuBlf/9qDAemC7MMFEdwoJHZx5dav5X4WAnnUBsc0JmWNz4055SB7GfLBjtOouYeSD0jEmxbkzDTf8JxwIZGWht4gRQVpFFlOz1Sd5DajQKRNJ6ZN3hz6sFYqqHIgdl9dp29TrHhYwa+7Itg5B6UlMQ/NkP8e/xQHhRYD9r1Kv7YT3nKkHAYrla9HwHXEiX3glVTgmRlzfYG6hJbxDalwL7IzrPik5oAHUBUuMiJ6kw55lRKezKGkNoK/vpySICU9KbJMnrF0e+9rk65FunXq/lSfbwxpFh/nMHJr53dZbOZ30S1mIl/Ignf452nFR2IeFi2Vv/88mR4FlRkGQfyTXt2nqmM9oSOlLWVZsWmew8Jw8Ye+AftLJCRnCiTswSyLRVLAfpEBGHvCEBab0uTn6GKl0DafkGDWi5jRevWTM42U13RQkdygwW4h15lo6hefqsQHhGQQOTyk+2R17jgyID+fvrEGRj9lK8PHO8RKhDvZwV5366nkunEvKsQfVqp7lypcQyx2SMVQDWHanGeYaVbIFjfIcsLH7vRwz8Rjtrv3zwe8ylTUsyhTOQK/bUJ3ClP+w8nyWPW4YMaHJVCHpWSVokKrT26GMACTGaEpBX5fsAfaEi2cs7f0jSpeyWZ4vFqQYVaYhgiQZYffyy7X4Il7Mw8nOxI0ou6MW0VKk379Ca7OuIJpJQfuLCdpj2p+/Ki7+sLB0F/D3vTqu/BqPvnUZYing
TPiKD0r+nZjl9FLtq2iGsZP5qlMze+CrpDsZD5N4yZExrKgnGwl67yiOz21z2Huuje2EabVv89fNoI9Hf+VD6EM0IPH17EUf4NdN8PF8NDjGwhnJhoQpD4i1v2I824S/keuuuYhBv1ZP0COYz5/HWHIx5hZWBenr2Wx5lG3NPRrkdzOgtEu75la1j374m/FjDXWpFssd5FOnST0zp3gq4ZdL0E8bE7C4xveDmtOpiQ4sxUfJ74Uk4nMRm09lSw5Ciik0P8abua8+zlaWpx5ouP3DXhqdAgOrNJ4+XOpkXgdh/lMLoJxtLxcITK+jvFoCB2ki487UoZnmQQG/HGCBRshFduGAd3q+y0xn+uG/BBPIh0m9SpJtKjx/IcSXfPnL5kyQWme4GG5J9t4ejsMyHW+amMxIPSGMK3LJzwh07WvykkA4R3NN/iXwzoPLI5UPwqAT1vG7axAbGS5HrvMMmMrhwdjOISx/J336ti8cdY9e9OXhDyccX0evaWKbhRXm2WqY1rhlB0btq1aqoM7Tbiwtc07HHtBWUqzuI5lT3V8EQQqzWE2agUalysu4Zgn+Yynm5R3BiovlvMp2yf4jEHsvCYPqiOUteoNM32vuqTkP4qA7nZRCM+2bIP4tUIwnzeaHzlrL1TzmtKZ2t1hyXWd/mhfpmKbXC0BU45Er7gHarJItmqSpjZtwN1JCDdcPw9x6zARSoamq3rGMED3qgcissE6QlZxz67WNr4jcZ3plawhHshcaDtGpFtyEWKD7jmpihWyXCNoph4q7jT8Hym2ncUg9PNR4/XaWRpQ1ygyKq/amKl/RczNSPtuxwNrqqh+dHLQad4+Y5jotiCepJm89hVl7KPaDGpYZ/a9HzlrDpzwb1vmNmD5f5rfCDL07rCHXqUIdsJoASp2TO4u52kjaRq/EWdE84ms6QIPrQ0tE0ldVcVXRY8U7JQXH1YQo2VF2YKbkT4FnKzO4i1u1eQHJ/dR/kLFuizf51QbKOr+ntdCO42gcLry8zUYBmtYtKl6rJ/ulR65zlPKZsJr7EZL3JPOhprsKD2L+roLdZCmg4LATV67PozsYskfKLXzv714WGtF9uL8YTbTmtVXrwesrYtAavNQb3OHefacOxWie9P3mLLx6wJFvWywdeo2zhqvKc673GaZn2f4YzNYcR3UXkwgSFDDxRFj4v+HnEPXytF6EMZ5pHrwLhMGqbfW/xQL37wrn2DUtsMlGmGmWoFN8X1WcVpntRry/JQVdvuGOZkz78sfIqhebVG+kBcijiM6mparH0mDVVkJcdsEzw+xXK8kSN5aQKJVE5xJakV7lscRPGCCEsSjDVxorMYoxbcG/zYv9CKyRmdZw7dGSnoqwekvNY582VS/AjsxcIjbmgaqKGHxiXGLw+52ic36imLFnSqFjrTbPKzW8nSE5jHSGTZeMd+IjVj6j51G9/8Dt2Rak+ZcW6G4l1G/zYPo8ZsKCCCgLcv9OKgbUlaiEjMUN8QPdUaDIMuSnxQnZBxPUbX2JmyzclmR6x/L7fADTnySOfxbJAp68BkMpH3QZGU5v8Y9cSqbrR+Ss0y1hgzsdxMrByBDq/8TFj368DCXxD3AqvIdhKa5W38FWlX88gmrFfAmRN/28PjP4hQQUzbYDufNDHDFh/DYYcwZbQyh2N425M88HlsF5i/2iyGUl5It6Ukh0VUTj1s2N2PX4zhMCUVQUYpHeA/RLzngZ/VU3psUNbAsgmH4/p1GaT0425d3TROndEzzygUa4/xI33RRH4xjj2C2zaloA7Gjp/Dnhd3qovh022oGALLsfkPxr6MlAl+Hs20KmeAKjGsIdSret0Zuh4GJxnDmeOxwh5uumPM1wHczl4/RofZDkuppWCFqqY/RpeZQrBXQGw3Gy0p9gn3NDx4rDci9tg24N8tSxHs53ge5f1J8p3P82Sc/QmJkokhJ+qzf7mB6IWBRaZnA46l9IWi0XR2os5L2JQMVmz+y2lDdkm3qLLpeWVrQh3FzsGgp+15Ti3m3+5lLKywp48vf3q/EyayjvP6Y17GScEnYmTepk2Nbzmv6y3sFmMcvh534PE1LAZBUSxp8pm+Okj4j8QV6mflOJ60fzEwvJc9l9ASJjOsKjYrSpL8+RyXj8TcChLENTXasqTG9jHB/CQhH63NP+0vhnRz8OHWZwSdBlmw8Jtr9ZPFfEWsg4qHNC1gru00MXHjFcqM0S2AZ0d+MTnwaR3H+5T2GK20UKSDSVkb1cXpEuLIZ4YYs6/z+xB1oP0qK5GSn+k6HOYbvHAxH2jeK/I+cKdscpgh/loZ8H85mDUtvimIEYMDXCf/U8EfupVN8Q7PHL5kCG0fOLXtIv08y1Lil5v0ucuSu60oc8elPm2q3juf43Zo8yBHlqxcxsgB+uJJnymfZyvwSUP9gwEUw8cEXCbo8vKHnN0ZlnV09TvJHUW5dSXP+EGv5/UrX9ckpEpVf1/aPRJ3wo5f14t9LVJvnKD/amyux69QfgHJMtLQouMzVJpOVv7kSn9jOAU+h17/n1UszkcXz5yL0Gxqqubs52BZ9Q9WtzffXxjIdNaQF639I15MeuqSlgel5hy3o1Q4MWWYb3bTgHxqsiUhLNb0fFC4ttXKQ2Th3S8kJLZ21co6zurYXW9a0rE/V6Nmm9/WhDC4LulY0uxfZobF92eALJilEk7vbMamaDwKYon5cn4aMCZ0GnudsZNzf5Nl+Je7qrJwA+L/Z9/oCEESHwqjXElQhfob1GsBtBtYEqEYzJHK6WnquTkn8qCFuxECh/fEqYztQXOaXAmj8hRFZUEddzlNRbJ1QRO6KeQfXFDX4KPGX7ZXfZcuQADTt90rU7w5CuUQTRUYre9EVJSmG50deBtPtlC266AuA7fuiL2bIINB5wMTCo+TBNU34X+00nzgYw2oApK2AqWhSH2jmwhSziCt6eDybQ7Fl8OU7xWtGIWdF/NL+/3DuONrSue5uOWLjaiZ1H0UM3DEg1pW5VrOybabTXX0Qq92qwarm3ePDmEuzZ83kqow+LpJZhjTdAkrBXvkZ2YwUB35nqR4fWjeQVfl7kpbt0R/QYbsrmobVlek5amUfqBEnvhWl0EO6RGglgCU2jrZpN4Zbjiadu8/RBgDaNCfcg5TsjAzc23FkSiwF83JGGMr5khQqqCVPKxU6ffWHvZMh0fNEoeA7m5J8qE/zEm0llZ34nA4WkYYBy2QjgVPjGd5o9wAZE+wFDAkCZpAOdyoDyS6nr3GS1S2qztnNgc7xbLrVcoSXlqiYDY9zB0t5aboLX/+o5AuijSI6j//byu4ZSgQ6Ii0RBe+azm9EWpLMB/7JgbrhcNmS6GTlCMwao/QOM6fZynGJOcAvRJVm39ettjK+U4MC1achWz9nA+u8yQpJSelYTSsWweRjX1HlN35sXTzJTRop9hnBl0Qc8dR4KjhAhNmJG0LpoYJsnO4bDwH8E/oT8nOaAYOxdw1GfjkhahQOcvw/jW0p90rsJKNKH86SnzB/MgOVrjm99LOc3SEUlgiaWC2VhxvnqiqHhG+rLUxn4x0m5+M6
i151JAuPbnxZXvNL9ii5nhmBMd03Qq5Ekf8gkBFcCU+cor84b0o/xUEwwYws532+QIy6lo/cBY18GUv+skrwaPWWZt4II4A4P5TSVR3boDwkmtLXkoldIuU2m153CwS5RXaX4yrm2zIakebhhmozqcyI7j3YWwyrE0AFOXjn2hI9C6Qly1VZjrTH69N7zFxImm6m2NQlFbpMiJLx/HMU4NENwSFD7aSj2gdI2XLr/Ubp0YdosYMudT3IxN2YxQ9omBVBjWV8uHCO3GxgBwvu8gMu7v6Umw8p+POsxIdUlo+XaPmmzniYVmWnzeQb7sLdMccYCiS4iJHPz+faZIBV+fnnoKFr7IeXCW9tlUBtEMn6BJBkrHfeg/kqdi0H0n+8XXWD5r0XcKpn032ZWJtnNnz37hDiuiVVX2Z0xZ7PPO19PksP98UCIeMezqfzcYmuRwFOqORTB5nyOgrvkRrGPRCZg929G3IQajHFLreSQX9kjveATtaeViSP7EIoERj/rUBCAjyxztJFX3o65L5B4ZNi5TR7jwL2tw7xLJZ7V2ecuNb5K71wl2ksm6GLxmg6uF5us9eQGNqv4iQODbLPLZIyvPrHsEICpbFFvlps2EiSg54PctaRC7+m0YHsRnPuI80MZ6j5pN+57pCmIBbA/OWdVC1i7GMepHkMoJwoGm6b3QTMJVicpGtd/uCce3S2jKwaZEneagvqwnBLaJlB49+dlbNiqkl92UwvPZAMWzbi9/PFW910HkWg8cGdxvEMN1WmecnbtnH0Mv8FS/tlhrbiKpuNtYBIKFf6ofYQ5rCeC4hnhP2ycBiZQBOPdRZs+S2YOI8Yv31VUfosLX7/lIYd0uOlKbk6VBTAzAGOOAUF0qvchBcAeGRv/Gg1KrXbR8l/MuaG6pN/+o4ZounLEFICDnAGsUxDdtjmI/ERH/wJSw6xIyxryIlN6iybBqEz1lK/3Ie+ZU0BR2xmavb7QjrlcD2XSGwag/8b1PKHvZXZUmTC/Gt49j0QEab3uR+Uxi2/oT5lC9T/pXEiREMmSeJLwfeLAXCkOs2tP/lTKtNdmH8QgXufBo9oYnvGMgwF1C3y1MIVibW0gjEINho9fqs5qogmuM7y2Lq1hQJtb1Mw+XpZ9G2Cga+vZLH2udXItlcSVJ4m9f17YUG45/0xQBg+AmVixXx4+Q3vYPwPPh7xXGx61y7xkqJMyAmJ85Gv1RwtjrUuZw3OwwyrntQ68r+1ZaVldZqAGleYNa5tcRfWARfTI2qzyP1JAbfSyat7b5qhx2rqjRq6bsdhfwi3XXrsKvLW5hWtIsUKk257ErDY83GzF/uiEUqsBsi/0oOr529+P11P1JtV3YzobhtV2xQsrp17iTqphcYxd8ZoSKyiTfcTWpfoGrKnxKKsuce5Q2sJDWm2YCIQpaLGN+FtuVXZQOygmrd9Hix+Rytddh+wiSVYDXIw+q9KpD4nVD5aTpcN84lCmmcvonfBEQUETGMO5ZBHRqKay9yoMivCip/0awq20W+XhwZohtZViv5PC/3Hlep+rNEFEQbtoEK0X1R+onN/eQMvbZIRbPHewaD1I18q93yIXDktz9fZjLPywzverdS4v1IBxNi2ZSDY6K/0mivfx8qRsK2o1cQRbz8hdy6lJIeOvthuuKUiYtcHNrz67ksxUzUr4tZZ5atwG9XF92+amSCz6An+45z1P1MkPOou/rr3SxRkqam30h9P7ozyKQi+MFEl0gWXOfphPFt5ZfRzQThQEg42vg6+pGOTEcqHL9vW4g7vZLWiG0pRQmrFBsiln9ezmMB9ToYS+OZQ10Rrm9TI8ANuQnBz6gPADUUtXDczfapodedf1dfgIWfOKRNig3K+GCNH0K1p60mYdKSBfHw7sGpiymu9WFuQ2mR3lVB9YRYm7GZRGf8UoNQebH+wzBpabMCjjxw713iUxe4cdq0SghJ7uX8IlVNjhQfrpbOMcTjXchlUIijm4XIZz7pucJLWc5Tw9u1bCRQsE3c+GcNHc2oXx6KSHYiR4azDC3IbWJxNB1WLRrKKGIDARK2i37xeUo7SpuXhqZSMvR3gofr2rbyeB+4YtcXh1aZxXdBeLGSM4QMGbQKh6FnsQYWWe7Rl2e885oAmBsXyPg8OrMqTFR7TCqaj2PxZxdlom2aNm/cXSzgBFYEarhvAW9iETcvKpMgitx/oyJyRYGvNRkK1judftTDdRjfMv7EAFsEinMcY6S4L3YI9CnPzPmhm4/HBY8XGJjKJRdJ1MGJeT1mjcHTQ8xd1dkYPnhcT2AT+M6nlJbFI1ffkzKwOZIDjSCrOiRy+4iMThkTkoNXWAglnvIiA12m9bVz0QdeifUHitHEG3mp8Vy0KsUL9zQXI8qwGCH+dZRh/vLIICDkopKs35lD/uoDrIaiLufSWxkFl13Kavbw/vJqEdCBgadv8XFaHAEZa0N275TuA6C+yqo4aGOUtGkmV6TboTRFISZJ47HJ7SB7BYPieZFy+XE5M/ivK/gBsrLwgkGr+Ydr/95JiproyfSHnRY/8chm5PnmB+WpnbEwezZQ/gwcEq4ZnKWlgDEZa+TxHpw60URoYkJqz6kdgv2+JfkLzQX+cDA7XFR1UR70+mkBs3jlY7xeBH8jZpnJCLylNOI8o+erCI4MnJafBIuPzdc2FdU2h+7sjvs2PMJNmkFhqphLnrPqhBfLbbenMrAFvOVAp0TNMWi97rVqXQ43ZybkgqoG+xNKOL3KdYVG7oI0BgcqT0bXn7iDQNZKC/zqjUmBHI8XtAmz/t2pee5RkDI+D6s1ityIv1OR08XtEO7obtkbCAWEYUsDO8qhN+Blw56y+FiB3K1e1zPZ2RonobVrgJwveqN4iBhHWw1TtiVF0KBTYnPOMCcjWckvEUNc/7qccNlYttmG5rA9/DMGdwceNP5mHZzJtYVxUyD4dPXpD//qrq14XyU1gQ2ZEJTuyfOQxXdUXNug4YZ9qjfIYKbhv757Z25iJbOuNrhhTaMP9YiMy0OMhY3Z0gGSIXffc7bSOlPOL1sqnBtjrPFY2lIfFU3Me3414ya5T3Gpc0xrDqb8Vgian3wt796BbfWFRSi8CrbzcoAXN9BObtg9DV42iOaH1iimqI34dxiJJFnOE37A2EQ2OnEwOFtk1v3+nujPvfDrUsWjXI3Mm9wC8HHp+VXr+nngT4Zqfcaoi4T7+cbPN7DcSuvxb9UAS4Z6fHCtNZ4GX36BmdOOR8gWKWY/VCA2ClxjVNGCiqxgboszbU4UuMFTvPiHlVCqFECI8/P0rnDTocQp8azUniNmqv1F1mnZXUP3Xrm87UB9m3VkuEOL8YS41zBK9AweiT49ob3oyZC9ciAi5bH5O/QcdBMu/NM3sd0xkG0Z0T+RHtNckU0f9AIZPpEmrEI1a+HmgLelMUaDksU+k0W7Nn0sF6w0qaMKyoeUixdVn9h9eA+fqupMs99i38rLHYbgSblcyAaM68EjFqUmfen9NqgZxGsI/2UG1uIvCyv33WDylXH1O07F4AvwSm4NAOS+UfNYw2tJt2lWzC3ZSGbs
cmZXbCY6zZOBmWFkd5DDduAiCmAMzVOztD30IKgYSl+4WUHK4sMZdCRb8ij2FuU7Vc5a6cxIwMb6lanuKSq/u1x8u+ZAooRtrJDP9DbtvZ4qJpF+HRPSt8yX1zfFxRlGVjxwCc7bCTAVA19TS4bmLDc6QmkhfSCBsTwc4nE7+ym9r5DJwY1qyfGOc6H6vmCRHN4qJ71x/9+boLVkFE6JH4RfT7GBKgTJaq+/c5f5e3FUHVWpqzXUv0hd+OvgPFybFYy58cKyaMbtXmY+BuRCBunT39o5cyYfwvSOhyn00QT7tIEbQ7nDJBbXwih7xx6rQJ72N37TOYkwIQppEALLrdtf8ZEkCS/aiilNm5Hw3gH4VG/Nj45KeqOfd7bc7VB0/Ilgs4jr37ioPNeyiDi+vwA6d8natTBhTTJJT89xlhLPXopGTEyNE5MdoCDKXF6nnOLL9w3nZllRnx4oz/SMgwU/qzvWbgSPuseBkjDHVlZxMITFXavRbfPV4lk0/8ZdnfBqvEf6MDA5F0oluHez2R8xuEq4vuwmAJfBArhXQp2LJuEPxx1qry5QM5ZJsNoRGonvteG1Fyg3hL6oLZUFq6w2WnxMlRCzUf4EBr1UXomzFX9/DZpzmQq8h5IhbKZuVP7Y2+OREuhbDZ4OlsIAIQvoKMRGerw16N8JLy/kJhpAa461ynSXXh/DvexZf+2h+hZ/7Rj/Gi61H+a9/RVMvwGuNaj37pozbNKgKEGYCuXnOYID9Qj6YCdjfYe6S4UAZJg0lEV0JMAeoJ/ja7kihZEu+x7sFRT/JYfDuyLQpXAq8Ap0166m+aXIi4WhTSfrRBA7WGe3F/XV956hp2pqH59ot6fEYl7DdxGsL0fMzjnwx0FjTyup4oLipFYDXm6rbx8152SIM5+EzT7muQXrGdb6gJsFFyN6Y48OyulpX+Q4Znel5RBYQWEr8LzXj8EzlzVjw+c5ePUDSY6ZswX34yTWMj9PcVqX/J7XERLnROnntbrP5YzMgkywEwe0nfu5Y1hfbmQ8UWM8+UNEbCsDynP1/gt+s+Kvjk+WtVjKkqH2DArNTXVTLV66zpd2k3OScSt0oqeGqIwXQ757SpiujE4eoGhyG/I14V7R9Y6BarhuLimyZzS7/j47FukJHheY9cmYpofs3VYFvELOS4Pgbz3jimnWVMqNB7QlkS/nbSz+Dvjk65J3QkXmlZYuLKiAVCCr1bgqGpVYf/CPyTVZBXCSEEZenXzJz3qd3M5+Xwqw5NBajhtr1V1sn43w1zqnkfe2up/icaVO9dHzI/NEFVBQjXMOa4MSSrYXMSIB77feWKyg7m+r8+w0O0ZgbaaVPjnykN7ze28uQDCvd5f4+RTXC7uzU8du7vOkf0Ul/S+nBRiaTCpjjCrlLEV0zf5r9k74Ya91iDukk0PeMATJZ13vZrNmZ/zIPrGfVXRNKTjhxLpT1nRxmjQpZTnX6FPVsOjtHtPjWMil+JV/jr90wzxshp2qg4EMEO9W+o9K6xH4e6bYnxnYcsxWzcrSjz1n1C+y3dkXZg9mD5ePD/zFNtj++XC3yNxJWkLPFfbc1E+QLRnxJCqBRijQvFUyzvgaeAob58R3XcbtNq6q3iWCaRXob06qDtJ0IFpO30Fe1FNMRvwlh0aBFp5kq7qa2OTy04wfqQmPze082p6EkibJywerJ39667FwmYs14eRm0v7oc8+lSNDrBzmhJtzAOPeFiigu+kM1DPbu+fwZa1EV6l9e8ytwZQlOvyEQwJDW9H4znA3+dP5NX6KyK1RYq4A4hMz0QC3uRab7J8uvEVmxq4Iqzms5J6AESsiZo8mLUTI5W4AEC4oELTYqY258jBn02YOUXFTnrlXcvbAKt1Vq9oVJ6ooEC2UMs0ExFTbUWqqW0lqSGGEd97YgnffzmL/x05zqrLzOb6FIh58hNLa9+GWIFCTV4FhOclI1jDVS8O1Yiqf0QGppQ9MGt1J0Jm8y+GVwv70b0pWQbrmFDE/UM0uwrmevpS1y5UHY6DfyPvI2yWvvF1a0EUHP+b5rimbYrp2YndwN3nwYMTRMAeEDsQHgGAa++MYYU0qkXof0WXY1ZCn5/a4CjwxnEYnehv7+2tgYMWytkG6r18/RZyPKh1athPPcv+xn4ejEGqDP/L8K/JfxdemAvIbNu7bXqXwxMy0bEjWCvr8iE11Cu6AnSNw/CHONBq7+e8OtRz0TkB4PIGdt6EtyyCzjUKryoqkzkRvikNrgFL6Kxq4W2Xe1nBW95t4S376uRzwaB9nXxBDmW/3yZqAMM2Hz+oc8wEWvB8bq9BQxlw+8hRbkC86IUVxis4M73JabgIkiNKYpN03BWJHpYpx9ZEg6+lCwO7a8cjbrJf7wGTV0kt0+g+RgjXpB45kqo19gCOr3y7rPOFrIrafQqTgNokfHCY1vUPekTd7L4/FEcD+v6bvLr4kImO1yFpBNefe6CF6shebR1qYj2hBA8+5hxbvUWRj+FDi83xASw99kgU6EEwFP1i9HK65cg/7U1jXA6z6n8bEu2sbg1YQOBVAjdq/9aZx+SP3eKMZPe472VJaN2m/TGCj767El1n81mUY1K/5tfd7TPNu674xLdXOb4thAaAWUjMKfl+CgrpACydIfJmGzgp5VxAOdfWWKk8NA2wQGXygir8s2ZIPHPxeaVNiWAvZmlcpEfHtSQFKdN7qeQgpYJ/ojJU+l1Qy+hk12tpUty9pTt0mq4vkaDf94b/kJ0QUitnARgJTzGH9z2IzQf40u3u4RjD46LfbE8loqDWCJ1SB1f4VZRry9wPiwl+f8ucTzr/WYaGwQlKP8vPtqgpuVkRFf/AvHsaAYhvWnEcTei1+wdPkbYlWdCn8QRCM7ChtnTB2PptH6gtlrvw5IGt33a/9YJOUWQAqHRNR39aWu8dNxWmy2goRqPOTZKt1h3JSk4yCMXxSCsuMNNmt4O/7BYAXj+fLSmzW/gSYOACYpSWRNQtnakUHTw62mVmZbT0k72xE8SR+PAySMElpzm/XX9K0TiOlXF4IrGSSO5PK1ebC3juzHT7Oh3obY/HCr2Slkh1tk5Rf4uIZL1N4djQOANvd/E9pv49EsCm13tfzrpyCSvXonP3SSbkEXCgmtDiQ2uTlm15RYFU4CObs5bX8GbQ5nU6rEuSoM18wr7q8lSt2Uz9nFh9z2Zfi4N+BohHFyCMFRsSNomXgDt8NWzJpPP0cb1TYe9GlCX7RG2wjdaLUZS2SJbqGOQ1pRr4cwhEwvYwjsPFmqmB88QbKUeiG2P6lfphrPWibLvuTGlPtNjmnO/vVRm6rnVFnueTRAP+Jgz7yRMwKMhu4fkZc5UQrpYEUqAz3vv7crOrwTtDQjr2UopeNzQoBpPrO61LZEMb9j8vkxvqdgMFALbZCQ5Ti4l75XW6f0cvk9O9UhlhzIb8E1A1NX/mKm/ieMFxlKkT9JdUhPYenlaiHQnYKlxcm1X+z1sfvdA+yBXz4nRR9dlxrD2LsGq+Q3ecb03zQq3r6AZ+dBJvrl/UI9vn6xGSXnKRa8qeSLEl/okws6yaF
MKGS7CoEXHmIgfFTza9zS0yIUsQw3nhQu9MWDl7Qm778Oqy7V3y/00mTzpRyCdI/e5P9RUVUDb207vxItjknY0ytK2Et7huuyaxZ4oJ2xywFdgiQl/UoPQeRCRUKU2F+vI3CxgCzrDd5Zt+rDxDwrqX3mQhvGfY9lKgSloZ76SyeVgW9bXTHzV2+LHm8iiUL/xhMyFNOxKb3Kz5wGKmRjTKT6zy8UE4wJckcQF1LRgrommJG5iYjD7nAY4lPwPtknE13Hd1/noFSdDTq9iV+4jL163Bd2PgDZHeaOkzCzQ3DZUmuWH/2POeCykmz6Bwn2feYxygLM5XFyGnqRUX1dp5zjYvIEnhahCYH5r3Nbx0hqT/SgBxQlfl56kvqa0ytp7HtUyCAbpqmLhRj1vG79eq2ZTPyQ7oFWwzi/O1pCNU2++9dTHhTJ6j1v0a5E7Ui1c3UK8jKpg+a6hYhjP+VX3Iax+IP46+vfIZ3weitTJDPGg/M8iC9NlFI1NpbtuTUB6w2n/yTru03pvQINCXlBzdWPCwwn1B+5XWNapIVC0LlF95crLwNWcaxKCur45dUozN6MI7ye+4cQ4p54TBomsZTMWST9aUqonfzvrUm/1aCIgvutvFxDLyQP3Zgi/9r+Ajcu178t7T62rPtDGx8JeZEz2FlolBZ0As162LDk+dD/bRI1+zMDo966grdALo3JziI6P352lVaJKbZUCGUevc09ACCM6a3SIEEJ4hQ5FtyEZJfxKrA2rNSXhWxlVjiHz70YiIyoXeFprl7jbNN0yYg5nv8buCrM9j0mRGVmKa4Uj7Zho/KcAfRNzSmEjD9bF+W+BwG4XXjbrkX6/BJ4uMDiyEq8uO48tKjwZfBxOHEJMjDmAAWHdxlnDUlku7N5/0Ibl7K/X+bms743rggDjcja2478NXSUtlxZGI2pSD2AxNJXbWt+MaIeJg1sY6dWM4eocvA1qjbodtC3mrX+vSH7m74GvLe5ozw9rGJTVAUrUDCE+CxnxvoILyllzwRTU89pdJYk1a1G9KxdHS6rkxlWrs5ycSLhoXXWbGndzxGDDc60TRb2+xOZkuTvBfjdh9wWmnoh/4E3dHOtH/Wd7mQGx8z9/pkD+7OydgJiBI8b8r/+L2CemFh9b6XqXWe4C7PB+xf49Sm6Lox6+UvPhKdfb0SalEosRZsq5PtBvQyIzR1Mt8Bhm3D/N4evArjExIhtnHI9ecRNESIvk1h+q7WKLQ1GOgnPT1bgfl2sEarg4avf49bfhg9WFpKb6GRFealfvBgvgaCxwqANThrZTrM7ApRenhoJ/ZNkfHbmtQtCo1PuxEPKneZkpoTosJGX5cGzLcgDJBtBKQwL0h1YE35ZBJIUJDeOQ4P/VL76ZlokM/G7kAr5xfOw11y+v+1rBitXSQTqmZ/2Tb3+rrGiz2/o5LxMrG7oJSY8OGNHTjekNtAlRB4mTNyykNjK3b8EN6tmdOAVnKKXegViY/13EP69F68RCwaiwPNWXMZjTrJVLcOI0kJk3+nDgf0MZokcq5GkLu7Ryg5khWAaS2YIrfsfaj6QjOluO6ngZykMJPoSk6QYCsHIv0SxkVaDB0h3Usu1pkU++pifSNy03ZSYv15+QxrM41OzZ4ar1KAz/hnltMHWheyi5xbTb/Dvx8j63dTQkwYlqyfC2ort9sZnhDZg+KVO2i8U6qAsTP9mLmAWjAakSTzgGWHJC5fxgYcRgk3Ljg6QntHXmkHgW8GM7+7vJ+wehPjdqM4UYbMWzLuJ2+JXQr8CLhbX3ocv1Z2A3KciFwCNsQOoMholoy5L4pbCZyihw8BavMMYmwm6YhZ/uJCfsvhyGuQb/6W5fgFU0ENWYlIuENrXnAGd4nVmPOVfCjt/HYIx+3xaqfUwQ1JWNcORX3jqF8l+y6RPdzOuQB1uej+sQJvPSy9rtJ2jtoENw9PgGh1HqFpQUKUAdj8nYlZydWbbT8t4Hh17LMduLql6YnJe7ltDrYRh/+pCwOLEZcUpsJ8feKk613xZlJuDeHSYeGVnlf2aYG4t6vd+itIchimJ2i/sMX+ez/tWEuwbdRdRl8z+iXaayQQftjvOpuVjjKLp1Qh/aQ7Cvm5QDhYtZUOEOqgxlbSbph+nVKki9tE6dwjbkmDDg1AAZQcBdFQxi/1dDqP/XNo3/vmLKnEoqogklPGI79nHg09Xrugw3fPUdrPf5N2ESiBR/RtYy2Cd8Hnkc9QQ/EqFgFrBBVF26y/zIvsTQzKjIUEeo49BxVC5/nzrO2EawhMuolPCJZlP7glivIjdFEzAm6LjdqQWMDKOEc6JwhNPWrlpu9GgGGWmcgmYRbfqBbh/A3qYgfsJubSV1rtqGPmAtSV2RL+8Dlwcy7fc/PkriO9yR++nS/K65LGqLF+mapg/rl/PHjA/2zkamvbm3Sw9g9VPefYhDyodV9ZYQ6+MEsmLcdFPM2nsNBzmLYch0DJerPpT8gL8EB1WYRE8wbdTxQC6PREHUXPExtFJA7AKE2czsK6DiN+/ETcLZrUkoKmoB4vtNj10aaPwKD3GWHa/f0johdG07mn88xlM6gV0wXrZQI2MFk75Dhy4JOYtzsKwBh0YKKGxKlLiIP+Hg0QnJ8KgtpWW0/674zUHdkvNQElzSaAkhSnL6lZ7vfwpbxnfsl/5ynz0x9Lfj0gExjJHSWSAYFKm6PReXEXvC0JbJ+bqfwUGH6HrZ4uGURi2tt5WpMPxCeVERhUaReNtgOExvV7W/rrftoM5EDJh+ztpJy7rchGF8BGb8B/7/HIPTJ7A9saD70DtMV1NF1WV/MotAzgbBNbLvZoJOe6KNHgJS3AfF6ZM100kukVfTXfU/EhiGSmVgNlNsKJ98es2vu5w5F8UBed/ClDcqmUiGyPZnDnAQVjwp26pr4nVDl3zlIaxmlDe/qevZeyE0r8Jf1cJoUjh77PMCKlXmgUFk0dsGSTZwT9AKUADYXaf25+8gP0afow+hFesgFqGsXGKlln5g4lwllo9zDm/mGhwkbYgTSi7F219gwsNkCKDcxDrSZgLz95peLrcZP38HsYzPFsaon+KUsIWg1dSQ2FYLR/EVQucgkzRXPhQpVlKErVVwQFSt3coXlY2Bk9VifH1N9OFalnsRZjAQ1Zn5mQnQ135xWPRFhp2DSFkZhNhs9lFLtM5e+1zDWqMu1MpHxhWoSA7Qt8QmZ/4YJjBZ6iVk0j0FaaKRqH5m+JOY+YZOC9McXNv4yQlkjmn7bAuZdKTy7UMKUB5J//cprb9PRdQ3QQ8hiRoKr6wYyowhrEgCTxrFv1ImJtw3MCNdoUXpRz4uaHSKGMt8UZPEBtt9jphkb3FOBxrFoqHfY1qseZgRfBvDfTAF8943X5HrUHQJqq0CSEQiGYeltFWeCYpgZdrSFa7mpATmltG9PCYejqAopOjPjGP2DPuw55MHUZNAle3VUVDkoXQ9jlIryVfR0AeDRSlquFqxk7biyUxRa4HiKRQ0ioevCTm4RXMBsWXLALiEqVfx18rldgCy7TtaKs8itT/OCLO10gbMw6+Vcx1Xn
oYtFfFDwaZ4C5sMZluMgbLOPR76/NjWiy2RPFYN04TFj4g6OgvduKYSyX/XlOQPgLutxTsP2KMfiqjngYmORy6ekauemIT9+qMtbWzU1AfqwO4jfZ+a8WH6ymKcj5lWabghmzY+VwH2u4E6XXHawzU/PUWz0Ra6IUgtGRtOuZKJo6vg+wiQzroLkHRkwWUUEoUp9/vOt3UOA2vCaQxycjjoF8ONeOM5ckUZAq0vDLerZ71szJDezhcfj6IvAmlgrMm+gWDa+Ow3bZMeY/YQa5SU8AeitT9kJVmA9VscSvs9LOGosZz65kjXlnRiCfP4lvt9cxJXipJ1ynoab4X5urX51tmhOO3q98aQsGFn148yb9x6WRbOCwx7f46lBY9MBzxjHLTp0YrZI1PEGvdzVkoASr27gk4rV+orBosmBcwueulmSbU4mOiMxESVSVBIKhTYHW1TpzBKZlvyqzvYplBXigA9GbCa5MUAByJrJ9jvZcAfw1xGyGDB+nYbhi2/VP93IiAC6C5ZTYyHNkxdnGom67X2TsoT+T/YJjACuT+G7/dwlXs71w4P29KRsT4q97Uv36F19LPXZ2ChH9lgy9cgb7/kPUypKkMgNzGIsES5g5lvZCXXtI2iZBqFOjZ6QQ486BmpNA9SdrF2Cbg1/sZpTHC7Fb1weEqLk+b4g2GF9WHSbPOdJcT1hmVG9WcO9ehh8wlDFbqtL34Nm23mvad1v1Q1h0gzxtT26odMiMhh2srRjAOaGUrmj31Ei2lK2uKEatqamAdox7KSs1wO6/mxecmThI4PjcSmkEWd6yGeqfibZeZcrmNTBeGCqOEZkqfEg3NBAnxy0DfAJ/TZfOEZFgu6xu8BdczBs/fgeQqEZo8m59stv0ttT+v/zOkeLTKPTzsF4zMKDAAOW8egY29zuI7/GIN+/RvlpgBXWIdjjztTv7O2yWlhFB6xaP2yvXXL7KygoI1/n6sZf9Gg3GiWNC/hLRguIujkBLh4wSBRZnxD5cE/YtuHJUy+DGLM0XNk1HpvivynvPeddMk2+fJGPZgPiUdOJtjf46Em+xUt4raupsHkNsQ/2ZuPOG6+jVwx7YInTgqbdWqsMumLFW/zKwptfRrLmdAjVKADb1lTy3P7wUHLT3S+AFHQZhJVsHyUgUAgeMP4s3ZC1RmCmyaThSxbSK1gH2Yj5Dz7cxFs7YUggi9ERgQuoz5avVQGeNDjQcen7NiwsHT+CGbieMvz3J6wVtoG8vnqIB1kdb9oR3tq2475TxtesWrr03yc9/xSAlc0BDIQUteRvh/0VyEVG0Ku7ECQMDgKWUaDQSe9jww8OkmRuWUjb2kIGld96wFD+SK1ADA8wNtGSKIpa0z+1OAxFIduNnCLp2el1cEZEC7Ai/nWZJgORW1wIRI0coZef2Oa9sSQ4l+fmmZGw2Q+6uGvHXVJOwH6AQrPtCVJRdC6+Tm4qKFxj0/4hh/xL7cFDrkA20WuuBBjyEVug128uApCcDICIT4DJ6X1jLp1v+9/2bFegUJnveSPMY3vygaP61mLrHMTLcQqzBFvRkSkt2byqRrpGt8GUh2TZn1d5u+9TsY11mijrLr0Q5kw71z/KXxkIS93umQiLMPFRLuLTQese6MPwphKdWGPZOce3j9gueMt01+K8jFYkirzAUdf3+nmTDoimKJouEYjtggv7Gkh6Rx9HcrUvgi17+KO+CBPJqlfaUV7rsDtGrEyM5IHqiSlkoG+y7YCMqbVEbDZulO2TIcJUgDi6AB/ouifRugWIY/oRbLEpaWo9Fmvt+dJxpaXdSVQv5qWDjquddogVIVR15cuy2gNCABsA8gAJrMETHeJZixacycstztPVB0/2GpjyFRouBVS9If9/mFdbggUW/Szx1CGZAWRTody7lSE1TWtWI+wz0ue3r/BJgE1vmkqbjebB/V/Ehc1YD5coCp2Ejn/777aKAdTJBinuclRwIn7IE9eSCc8DwAVFmmZFE/Agzwk5qgtwPP8fHdpJQktL3Tv/j0KUDdi1hhzgug7utSRDu6JKGABIxDg7//LmZsB9U1VO/mE0bkPDnFVjs9UoT9ef523AeoxJGGBoq2jdHfpBiYuMY4P7mX1Y3YXwPfN1aTJZezP6brs1S8hOCszliLAu8DHYBkvsd9fsmv0NMB8sWwbvS2tmlzj94lDvO5Bzgm/qEow1YuRB1Vdqq3IqvY7WPnPwQXQQLN8Mj/kvceS64ry5bg17w5BKGG0FprzABCa4LQX18I5r63y3rU1bO2PraPJRNMgogIF2t5eLhfgiBCVSyFlKq8lWu7acRo4wVe0nB8O+DZSqbQlgLtgJDWwW/3zkavEVDLRakjPfk+NIp+22xo9A36OuifQVVBhUiGzU7r/mYMXMcbUxzEkdQ8+WumEACDxE00GtakR048DQqvc6Tks3uXVQymFPUcKYnFuiSJcyF0+IykN2n0FQvunGdmeZBm5v9CbFTLZYM3hHNKMi8xZkMxcSLpQzO8w0rmYft7yHiDDe/nt5umhordDRLxJm1swaozxog/vLPrr8dXAuhLRQFv6IA/EN6Q/swB3RmcWHUjur4P4EBq2cTeBGyFjMPn+A4iHXh5K+EvqJtEY5O+Cbs/CacTTy48FOQberF0mJ5Ccq84UOMMYu4pq7FDiGmcKTzZrmmZZHOJHAZijHIKzfocSInuT0HABHujz/ojSqlxUs1x7Z87mZS35oUFjFruHRw9aJcGjrYyHk/XUt9nANHcv/PS8Dp977s11fPWyo80bkNBvzmV9Q8PhrsRoaOGFzMqbDZC8U6g37mxWkAhVeK33A/yYQpyJJTPgwpnmm0KkiFbRLrLY/zkfABZk1HaOnfGOVQwULo5nezREmY1I7YgJE5r9a93ZAUwNyq9fWyn1Ub6AjWrv9M2xXoxIm2ETLNZmRVE0q+6ttePqw/1w5Pc2CkBop7QWzqyCiVxnhzGXteuu8PoFG9TlPw13WEqr3RFRiDrEKnOnl9jSn+baJa1Sab6Gsx+IJ4gvIol+7l/NZGcVeV83Id2QVDlcjR4SBo9zBeHM2WVdeVpQY1MY8/c/TxSWpZ1LEnwFZ/W4sccCPtKzfamUS6TULjn33qCG2P1LXWIE04AIFx/obqKPeBmpXFRrR4kJSG+pw3JyMa41Er7Y7Hj2P8EvCgd3u/U87Gxvy01pINAoqiAGos5j/LHxSx1RCAaohEAOf2tFb0rJyZhWhO5n5SJNtW+XgZtUIppor882j9+5wjraTgl5BaHYqBjBx8unYYY6pBe3AuAz/vLmD4D8ym/QA9I6cDpU1mDDd199frZNIGCy85EwrfQcmpLmx2smUn2wK76rdaZAnTZBt7YDUEsRZarb5XbtmNOueTcW7hqBajCzVVcwp36J/7GZ99eqof8OqNg0FsxeFWl5gcDKETZWZIXVgVa5NmY/2J4+8pY4P4MnVepeWshrLnooLywZlLMOPMhKGk6mNENS8d2ytFs67HIn+OSK258M7Hj/NpkoBL7ogiWA3s4jOB1Ek+qTlqNsIJxQpJriiPiIY9DsyJ7h6dHD
yEqjnY9Pmyf7w/+/PaugNBnMP7yGb8gmZJ+EET9iQc2VSTW0GO6Zcfs05+Pwp6aP8usywxf4DqFIhGhsu461WFoMz8TXBBi17wTDkv2D+HKFWPVBfN9l5RX4+3Y8YxHtowLykmD4vyPv8gOPRojPHq3idQ3qn2C2vw0I7su077Vz7LDLyj5SL7F8ezF//dzLF3Rbt2/37pKpZLwRuxv/d8q1ZtAiMl1pIBzFCBcNTuCdEpsZ/+nF0B3EFUSSYs4vutUXOTksVL/+V72+V5ntPMj2t/cpW8K959a4goj2zR7aDLgxKYF7NLKzcL//bu5DnuvTj4u0qQv2yX8t+I239cwKfQJBSmgdWrC/rdeuGrHD0VqyIMpoB2iyVqO/rcnYuyMDc59BHj+TNYw7zTZ/tfzgNGa4hzeqzmdo0T40QIGHRnP8v57Iv6rFMxc5JT/nRTnW/+31ncn6z5BR/GGUt86Z/+6JzyDlHUMofeTIYGdFAYkkatT605Q+Pz5GF0pqvmM3kQmcERP+LjHzIPPVrR6iBbqwn6Knc5jUnwMMQg1Ot7pTUysOzHPSOmaNa32zaa00Vz522eSTpryhqZnnmNY7mAWek9FmWSSXzclnjKZvEyEusYSlQvB+JsybymIGWUkebBo96sVH8dznMzur+q5jCc0xcvyf2uWM/9ZPfbvfTBpfx0gnrn9VVcHVddr5m/8tvN/+lmfDrJm444Xhu4FX7or8uLfgvV6g+mhPXuHOptkcWGpboz18YP5emVbPPfmaGtiUAZEsK0Domgr5S2CtA4pm6O30mCtCRYgPXy78k6bbuCNjQdlnEJcMqCzxPLbcrtSJPdKRGpeqvjDjJOe/dI5P6N87HFgp0+gWc0XQVC+rnLD15PPcNteQUuF9C38/bXcbJ7Z47Jxa7j0hYR5U+UETTRAck15r1mDwOgZvYl91yTNBH9ntq/j0m6FogX/Hp0OyuaAaLgyX4THLgeyeo0eLsdDIdn9F/0XzOtCw8loyquC4IqLc6dFuhw/vONxTY+VeNbQCN+y/Vl4sAOnva2jnF9pnOJvUCOdcVHyNViJtgZJJzOuiF9KDzOh2igvfKSgpN+QSlD135rVMhKH69+ug+ZlR9RkwhepEsdkYITpnAeC16RREcCUsHRTs74L+tAKk01w0gTm7O+ZQzv1fykmYSO2X2ehR6CRk30n9CXx+Fvz9Q5swcz97RkQsMT7zV8uBs1WISweKC0uOKf4gqp3mGajCMhgUZBCJ2GEKCOgMBAOydQ86VKhIEhsVNrxpTm57GGwQ9nlskTjd3zUAKhmQtiVY8KTHK+G9U8eKq2REWfE9Qxk3E6NRVKQs34EmSeYI2IOZJKqWWcR0ixd/60svjFqb4emQa7LCR0FceR4FNn+Wa4UCSqWbQiOmNTOjn/jT47M6jb3jgrxtgStLkJSLTGqG0mxjhmud88uU4lrNGF5FPGte8Poi6KQE2RqFWIScHip+VejYXuOe+RSARdanlx/vlBvtaxcyLVc2EfKS9p0EbaZ+FjWyqIhV49wS3pgvhgEDxlMJcLkbob3h4rFLeTQlTWs1me4LU25lYKyT5aZsfeYpqqmIwbST8ijfTFgcF2KIO0aMvt4DFYWRrY2cwDBz3Lti7OKCzKeTr+mSIVgh9NB7puhJngPkAmgAIczi9QsHWE2tcC5ItadzIR/vNw31Pu2gIWzc6pJBCPcm9TiyjjTkitFqyezV4CjjIBFrx5Epm1YRJf3+mpgBWoxBD5pqfcdsWQoOSC1K4T1lsPCYgkWrM+xbAHZALlUGlNvzfknQgLrdwneE8InUu3zSL/+r89NTacDiMsrZ8Ns0oCUU51VgyE/qOk26YMHIU6hYoWF8KcI3fejt/2b0RZfIxTuqE0OueTdiOst2Ai0YhIP8QToVVmWGo2oAzwcSs3yA3f1Ah/0HAFwZYzopKEPaPdhVT+n+X0138+N6dGpZZIjBHOZsF0pLz3AJ4+X495j+jDjV0cBGp7IZcB1JpFI+JC0i46jv143ijSMhRhukghzmCjCEWH06qPb/kWaPymwF32ck3ePbJT62taU4VfX90W7VzEZrfK2O87fjKBT8XmIXnmr1UB557c50BcOqa3ptDczuzqpZo/wJ5FocHiMkVnDtGL58TL75u0CWNxB4CwtrFLIvL10tsXLMBif7F2fAalgU2M2aYLAjW6G3+1MKb4UD81VJ+Mb8fgYOlWkjEzOx3ewQ9mNTPt8OvzHy7tsLkUqMSKJQ2qCKkVkvDdEe35SS0UISMijvDMXX6bR2C2lWOS6+zjM9VKyR9XcNpfbv81rjH4B6rQ17eJM22MLMH0GPY18RhfoO9RlmpAPowcnpOtSoZhzswkN562K0XQvZz44vsf1ufSBECkKdU6NUvmHsl88083rBHMEZP8873M7U5Qni+h3NgH1iT5yK7265gbd3Ooq5KE3BXluFJe4eLrS0c7vZku+yXIMSv1CFP3m+vmxUIpUbdCicWy0nwObrwCD+ywqwEzw/dlr58HOd6fh6XLsbQ7OZ2x1f/3yxEdZBYHzns+gTz3dX5OQZe17n6WxtcflBhfaGGuqXd9vAL9n9EEi53t8vM1UqUgInb7HRbkqkHgYc9AGlQ1L7JQb228CqVO16o8xF8mFxuIShgFkb74yrbCmfmGLntGiQp4c7MZtkDfvw+csioztpdN2tTRljT716ZsvnPRYs5sbImKXB7iMHGGnHRUaYtEHI8GnUFhL5Y28QmXVjI9BeuSpkyOzsouU1cVcjEObEa+vvLyg+bSsWtTmN0BhAccjqX2jSzVx6XwUvbbjwDUaXyV8UCX0OjbBvycNDixmVUS5xvQ6bssoSuHhCoANnjHdomH5oBwomPevTKovJJW7T2OIssvXTg6wXNAK/M+HXR0cYBwbn2YOek0JKAgGnO7wiRcca0fVsm62L0Fnt3SuzGbVv1vJliGpr9KF3Cmnf86wbBGnCn97okrQgvFrtnSONkiH3OTVdfjzzYni0G8LK+MAtzW+PZ9imYe3CAfzClXmsMWMF53y7ZdmjjMMOILr3uY6Or74AAz8XIaFLbt4GB4/maO8WV+/1nKlqTyqEDNp+QrY6IXQ3ztcZjTRmzuAnCmqAyEWbM3DdZl5yS+PHcQXLNn9oYs/jy/geHtbsCeePjCZoAs3aYS7VBLGcOeQAjwiNTxQfRLP6VcSIVdWCAvgdgXxx9PROGSR6lN4ZrVtyqTS4dmAJmb7xJFdLB5HDF9Sdx74kb38xz8cMTbx+Je0NieXEXHtfLM2hg+J6jU6z0GN4x+0asUhGBULMbi4Hm3b5d9M7eh1wrlSsT0krjeJmvictR5bHzDJ1qguzYfDkETXADT5ui234PCLlz+yhqU5iF1vyQteb91odVAZmZl6pE3wYXXK4eqqz/dhivw0jZXv8LpSO6P7wC6deW3xx6neMmrVHJ9a0Wpz6s9zBlHWR0HSzEGhIo93PiG5Ek/jY26RkHHkI0BEj0mro4Fu3A8tyZlyO5cJbB/6A8N92ySn8skhAMi7ALOjP+JqJhKLaNzSBJyE
lQmT3Llj3eWb/N6B2xYFboqGVjRSMxkJI57vKGRcNf1Mg/RwSBDpgA9T4tZxeqPQHagUp9sihc6YqoV5ljSJgjgS2NNCYGn2ybcVmFLVNbLscR376lgYyWGulqBrPbP3OVGfRN+ugnnlxBuTVMxqEw9V2ofCULXaO6iAJyWzxDbydQwVYEcckV0M9Ry0jI11rlWOyxMtPyNHj73rIlMcJ1wDNwdxgGV1cKCJOjPkhkCxD4vM5nrrDUqRK9DzyZml6eccv1Q6TTGRtg4qU50DOS8e9BgXNrsIX5wgMdAL7vLD7MTwxuWXxSZuL8CL78jUpZH9F0sP9A0f/KvRE+vUL57YOWKCi4lDpM/rE2LJg+jmq97sXT4uACj9lN6js8oIyx4o2ih4vMC31y08T/ZBfh1y1smDFKRL3rCIJGepWnUFWO08fQ6hAjzqjQV3pJj0zTPfe2O7+E4Hd2dfKIqtNeGeD0Bzf+0GlybtIqq8H9bpsrVZrOKrQecI01bCJwnaBQkDjMqlRxTktbFCnpiySL2DwPgi93DJPysGoXUO/arLw+5YQzLp0GU/JgLytQ0veGTvcZmEHgUaVsQp5WjwvHv+JynW1y+IAoeDBEMF73q34QIflQjsGHU9u+VKQHgH7Bi3qyXgbyU8LUAi4MIf8dLJ79fBd+1ENjau0xB0cMu3ASgTEz2NOt/Hxpb21l9EgPNjZdRf5dd6OU6PIdAUBXU1be4YZsN0tbN8SHxAT592U/NZC56OHBkKT+MF8iw+qWVYmTFZYDeKCGNwXor4aHaFzEn2mZwS7m/rc79fEm/c7OaabjArOA52AZysG+KeOw0omwp75N5mCfAsFwrlYwGSzwPQHovwBfbA4R5vuI+hlAmkzcnueU3VYytIzSWmI17bIEAXnTKMF+GDTQZ2RNCa1g7qKIvNS7jBNu9Sefw10rcFdC4b1LGn8pZQaor27A3WsXqJPvh2yLrQNZ/Z22NlzC03avbXUcLvXBnOQV8VsLqM1JshEBHqFe1LlK05P90Zv6SebNvFSkPKI9vu1Q1g32J5jd34au3NwkNpNBiIn2/elIfU97nWAkAixtBbzr+U7O7dxrwEGfnVkSKVM+dpDOSXQrtJHIIHwe/1S97FSs7e+3sgKTKk+IF6DxvqZthK6kY2SYOk+OTyzfS+C2LxXkD4l8N476WggBD1Q/OKnmc1mZ+S4yO2Psm9QeUmDFvcyWB22KNaZTC+9RecBWKQss5moOzIfc8hKq85zMqDrfwiYnFzmeWFcoSC+fW/2ANr+xyF1jeqQP7575rh9iysj4Md1xeC/Itr2LXTlmoF1/B039MQXIq2OoLR82rV0O/o4bWsyAV1OnrKgjj/KDuPGgACqNO/vppMHNtYhR/1QkUpWmUx6Dsn24zJH8JBLQA0SnZzccjJPay2PXS35P/FS1g+rM6q/OYmX8TYd+rd/7exFoDP/s8+CyJW/2ef/b86hP4//OwzAycWN+D9Dz9ZlfCixwKEqmpwxsRsoqnmzRxok83RUkWTFfUsLcA12DLrn+U7hq6CIq4kPrD1uBlSR3IGobNCWsyKJQ5CS/n4ox0QbPf5ogZD9wHjm8LCfLxidF8hFw8QHr+s8fU+RRrkqvLVQ+W/a/VupiHjWuMM0jFn5Xf9nR5pYDr+y1nAVwFAF8rzhkaMZbZj0S+GxQ05HotmZV04QFYE86Wh3x8S16wXDonUjQLb8mz/ziLrL/bVT3PdRcz6bS3K9njKbkNqFGPuJRNy+ydOHMkhYqMQyVVV+QRUxYRvdfltuQoVs9gj3/9m9JnN5NOqsdS9vs7A5DjfbYrq/0rD0Wuc12GB+Fqj06D3I+gBSes4IOzSRQny+9+qCPKvKFuHHfzMdeLUXmfNSgaTIU3lEhuSHZtBGbQO9O7RPvjXuGsk48rbsDAGGZ4Keq0sOLcseFiLvJjrMXzl61ciHItSozceoq69AyWVkmIjDWRls8uKkDtg8/wL3yYaSoQ6dpQ0IT4SygMKzFkGpXiWBN8PkrhMkS8j6CDWYNjGsG/G+Gj4O2tylksXLg30N2oG+0E2iMkFFmS1eRupMKm1lFxiSxhNxqXFF9hz5mMJJlUlQ9GdG915h4wpx/AHWZQf6EJej4y5FM4nd9LixTn7nwK+/HevWN0KlJZ5dyyIUSzVdlsUPcj5hH4wEYsqDb7Cb3fZ+mSd/vnGEpyx4Gs4jvKX8GNu45fNrbjeFePGvmfuzR2rsyKpQfSX7ZltF5A3bMUarpaETXgE/3GSD4Q/C52labJ+wtxzx4EQNjUZpvgb2819BDEPfESHOvfysml/P4rsKB+c0QL7XZg1Qomk+IsRusyELlwpXlojl4wOQnjUtyyDi7SSr9Mb1i9pkjJK6dS/BcPtz+jV0L5CB4iG8y7HwrCkX7VB8XZ3EddhPo/oD2nFw7ixdHGi7Qyv52aAYxBLLDj6gProl8MiNWNbd3+0kLxSfzzSOQhSqouWrC01OuiRU1/IIUnwvRlBEFi34m+UCYgPbT4BYXZNici7N/N1VXSEUrwJrGKED/bRL2+qk6oL1CMPQb76r3mK8OhYP6X3VaH1Dn0BswLBTbcrHrYEn8hJ32Lf0jkLwrwOknOhORmk8jYCd1wJnOvjH5tGbQzw+JxLb7AFfL6h/UHTLpk8kLutl3g0qjUX0AGZC+nm3rY4tG0EFFR+j0G/NxqE3BmzFYgq5mz1XZC6cNHw1JEMyi8j79RPJo9RLOBxsvw9IeNa48PPuZzrvNSDRpvy0tpCPen6HPsim31XTiAc6R2xWN5uY3J5l10hiAcJCGn8G6YB9YIfPO/I5WBKoKMOK4a4Ramlm0K/TRhcjFdE3jJ+gyIE1zAx5046+bIEtbCa6su2wg6Cj8TNhyR8Lp6yPMKlpr8Xsj9sGmgBtZu6geTfyNEq8gobTVDizUEcS4Xk9x3FcoT20136dNRWh4MRr03pPrUjTXdXhugrss2T3CZtRL+V3jDfRWC3d0oplFSeImDEKe3yH3pMxSJFoNKgoyJJpnIE0ieouFug0ySPLa6uDpob3Jq+Zf3tr3qvOAMVwy8Zl298L4SbUspXhD5yvuRDz9ER86uDCbPdze+raWLJ5s312JaSI/bp9tVGYiLiqlcygAOFAzVJ7ZeS8XnB8B0LW0OE/qh+fy2IbSDn+5kpQIxAU9KW5WRYLDl2A4F+AXGGRN4DX6e88LTDVataF1WSe3W6gDbeejHiQSGiY1H0uNzV6yv5ydtSapNNKL+gdi6UC/blthV1XgH+YXOeGYzmY5dwVAzcy2ectDPhRxOmZK8yDdAMN/+Fv5nVkDr9MCIEPjQSPtYLJPoKDXJSvWmlwrguhroxzVeu0GU19gVbeQ8tSnuUpEuSe3w4vOHQchH5NiuDdjYJPAW+slqi5JMVKH1ds3nqhjkWYNnxeEYnvgtfvI9FyYfdJp4p7ey80bDzu0+LWXwY08cGVivduMZ5aRm+Ago5CzBN7muxuK9PGvC+NNq+kSaVdfoJWzWbBV6ipi6rNVhhTH7eGJi8Rwhi3qHrkNKI8xXxskIhgfO
9grGcpfRkxDp9fnyx5sOC7SJRompGDYIUCiZ0Dv34NT4/jVGzdaeimCwuuM/w4c/PXWTA+cYokfyIURBdIdkdUbwPCwaZtybSm/ZRff91qyJl6XELz0jN0YQO8ZbTpsswW1Q8ZJBGWY7mhD85DstzfbTkswJ5YhgFN3DYeBkppuqeuSvJ0sTL10m+JuLo+0usqtuesJPtebF7X0bZkUF1DqWcljOuIVPZzpUyPazX7gfY00/3E5HZdMmfN58Cy1eayupoHnIeo/lNQBUWpitLz9JlOO+ScU6iSE08Iv/mdLmPMydn/uk3BWFDlDKHeYJCXCalVQt/NqWCF2rbbRDvYTYIeUPP8kR4mLYSktFgk7iQyqJlYdNrZhj62rQr+Lpn30MoPZh2IgxyuKHAEj5qdvKNp+h3bidLqL9zUgdxbfny6EjGvYXPUjnD3FwWhz6rsEqdVkdGHfgjR4KRCPSyvNF9+RX1ydvy2MNf6xgOnkVqE8i2ekzYJtf4yz8+TrifMw/SFWPjVpLVdqvYrTfOvdRGu67Fqmq4c319cjooxejso0gbxN06CdO5jPaW25mYeTsN8sObpWFaZ0c5BghjNWrlx2KaCDFlAneQFUBYhrZ+NXKj4gneW6jxi9q8tkunlO0zo+KmngzpbCtCChgMYjaL59OiLkU4fQpn7phsGRCRbSUS0C5sdlYVRKvHsUhhoqYNlrMMgpLxulVnGLHvay39HJdJouPJ9mvShmZw974+kDlPncudqyAveWqOIsb5lf/5FJWI6RqYEOgOdMc4twdKtaDyJbM2VN7l5Uq2Azf9h7mIKXO40w0PmU4qX4qHOkLEcE2MCKgxJpxRicD6XtbBRcZirM8CvywjL8Xkg92Q+06nPBmuAUxMTiJt2QYR5jX0p2AgKKiOyK6ba7towA4GLWDm1osfD4f5WDm55ZiMkzPnJ9XQwLDmdPaaX+l1YScXEO9YhLaeKUK2PnMNxO7wr56tYl9TGNatP99p9Hsrj9fn0pv1Qyn0RyWuAt6173yyguWkqiE57bstnMPgHZbtFHWSQzh0XmT6yhym2pb+lcXp9xnxzOUNan097uvkb+yW9od0ZmoCizjyS8v5n98Rd2FkJwR+wxrkRA7DIU6VEaONDAAc61/Lyr+IEncZn2Oh1Sw/kiWmQhQEAxxDabtVuqyjdOhS34xy7Owb1sh1B9JjwIzHNfHK3lN3imVjqZPMgBrhIJrAKUwfl44fk0mh49jVRbNASrp9GvoYZkBRQBx5CTKjCmezBrTqwWDuJVeMKqqvuEpvMYEoVYoJLp+H/nGgKITXllY9c5NPo5w1FfwwknS53U5gDPaFE36JOfsBTY1cGKPpDOyaVANTtDlzToOKTLz68y1b5zcZNVzIiiEOQKW4WbAaDiWXZ0woBDaDXirm2dkDZZ29Ab0+mKEqWoo1NoApeHWBdklYmlks2KrhHlBWzztHnLAa09feOsOgcLmYT9EsQc5NNQrP5FpY09Z5z+69PsZqlLfhYVv9KDo0wUtYkO/UhyFhMmBO/25sa1bstxXxyF7CQTEjCf5rTNa/lN0LvhG2Pi55bEG2ymdjF2qvxQqoRoD5OwqA090GwEX/sDVNrW761Sk216Y2c0oQb9b574P+76LGkdis4oDAG+rl3wjvH4JXfB72airz0nF9QbaI3MwP8nXKJlWES/kM0gJ0X7gkvHio0NQwyDmls5i8k9ovcXbLLZc0MgOpSf+4AAW5JcrcZpcq7BtydE6VSHBursu0zyfhSinjmffBGZuT1+tQRFEiIJsTBge+7w0NqwS+Og/gytxlbamcDkS8QhzyA7GEiWI1ARviZVoSVfLkevHkh0qJCA8eGJ7Jm3+Gd9b3ct6bn9YH0b+pjXp7mXK77txGUBbjXti7x4mb8F+vkKpDkAwr7a1+k+6JECuUpLNn4W5wr/RRI5GSb+F1YvQSDyRzQAfOQfWlDA5uv6chcn4HWwlbxYR3XW7C7EjlerxFj20CqBj5uF2O5RrUdF8HjdKEy0dko/J4BuW7JcuIyBchNKjD/rvAvOHq3gsk0EdOtnAQ+kWJJrN9BYE+s0VeSJZ3Lh9+QHTrgThUbiQm6XOy85IfwRsZbEZ3FessczH6qWTbpQsp35gyqKRwDCV859WDwKBQv7FHz6G+eZWFQFCwfxZcwUP9rYsCRrw9iBZEqvMFXP81xOkGPvd24Y2fMhlgE8mReLUo2vvRAVlETuD8TkgFDp1Lvxbqq6/cDaGfId9hanhEvdCCEQkU6cVF7a9/vWWZht1s77g1vN7y4a+KR9ci5UFOGN+LfB1jAU3UjYPkxB5GMaZD5BwyVvltnA+0YKd3QqC3BGhhhqvoznJjjyQ0/U55zumRLJhLu3AiHalYHjzYqy44yc5+XTnRgUQuvBMOhpSPuSGo1E7yOJ+5L8YBZt+3+d0FyR2FRBbEBC4v/hvJO50h6Fc1XH3qroaqkwMSZK+tI5td9QyapW/0Rr63Sv83lqTyzl8v1f9vxcd0BRRpEA6d3l/6nuscgbgQDf5z/cB0VIwFxxZR7n/Qh9E9cwvN6VKMK7iCIKAIALi2F8tanH/XYHAJ5f8HZYdTLKahWJdHqqB/7yLUv49c//4aef39fjT5Wv+7hiJ/1x6PUtX/vgqH/q6l37/fq//eGvjBvy8EDvFki77/z/f/XiNQk//7cvjf46b9Vvxd+rvwXa/+34Vvnc7gZTOk1fOTAUNr3mmvpVnRW9NDQZppfN7PpnWdhucPevAGk767apm2MWenflp+t0LL33//2z3ovqnAZ9dpfq6m37l4g9GVzVk8T8j8vpL+z1XoP1ee13n6gHaU/vsVEebxsUKPwWFM54DUxzGA5TJcv+b96nmVgF9Fm6Xj5ydL73emPy8qmu95O3Be0XZRD7/rAtTfdvYj3cswKldn2NXDnDWZET0u0Qc6aw29R/hMrGRpeawnUCuw1Q4gRSFABMg5BU7+nmisno6Lpb1aU/VjDuznqzm7Vlho82LotnIrQYijG8tyL2cjMDywwSn5ZraaQQFHHt4Tv+LwgTds+24Upkmr3ZSK9ZRKjqJqfYbUf79yNqv5NGRw9vP/ybAn/fcS3NCmO+f3y3GaEG1I4KVN0vZq/67akMXzGv/vwx5Fsw7/u8rxhE1D+d9HGRb+d0ubtO3vv4/CFkQr/N9V3Vtpzvm7Ic3///Vp5EKWe5BoNfMeEa8NXIr1krdd1dmrMAUW4bNcAdFmR9sSU9lI/6xfw7PBh66ZM5YeZPQXW87W6W8Pn7z/fvrh3IvwoUJZ0IX3+9YhK4oi4JYC5fM7fBwxLIvRdnUhFvGvtNjfvTz0r5mNgObW/SJLRTI4F2drOrxC17V2luEoRrJ21fmSCsdiwmFXm93JaoS9rwRIszeCOxBjbnGV7UxvyXBOXkY2pnKFNPn4IJBdzsu/rAMQKRtwHCc8bT1s+wD3UgwKW/d9/h1Glv3dNfkH82HcRH2ZOOAnwWxUFcd/GXWNtuMMZPbgyF/DJ/KkBzirg5Tw5vwLnxDoJX3X4SvUr9SyjVhSe7+ZMYrqe/
wlP6BSWtP7gMaVphU5au7SCpcIj3osDNUzVHrv0ZucK34n6bXTlNB7ADVgfofReuzz0B7ZAVsOKc27+sL+Nuv+VbJheYp8257+YSfjhYnLwUMjVHW46Skd6hFEVmpgE3H/7W82FeM38vH4pp7cnZUgYC1fYkThwvqMK3GB5LVT/0KCFLpDK04B2BoUEC930eNMFd6j60mCyZzhOEryvJsCOcQlShVfY6LY65AQlpFP64T7de/zHgiChommSgsu39JVZL0K4k8QqJRaLiCjB6fIMNvxPJAsKAYDnw0Ds4+UAqLI0C25Z+b9vWXNCb5AfsdI6FA0cj0PRDGD+lg4+oIfUWMRa2RiUn4RxMrM24OFlYKlze+CodI4jqVMLcEl0PAj3TT3/XRaqnVElVawBrCmwT7irNGejBpXnofBR16fp6w0Oxm1jMBV04I68HRkbc6gjkSqHJYq9Ad+vqhidv+E+2GP7rHm59Fm3PNMyOPJObjKwKbkvpBfzse2dKCs6NM+hvLtigdYlwW8vf2pGvhHeGkG1ZUTV9nDEBiYQH8tn8BkGhuoWw/2yCnO/tDHFNR06muN/QmruG7xGh2Sic7lQTf49jzRkDkWMQ1h1cZ1xFSRSL2jqRAFYrIDWTykxkGW5viqbN7D82O3Hwn4NxLz0Qrive94UUv9h15p8b1WgCkSv9r71A1jSJ9k0jPLbSx5xdKnRpm6KGC5K3j25ium/iLEPTMB26YNOPLBRWc0aYjnkFf7DIq1LMCmBroYt4qJ6+ZrRSCcbz5zDRuW5QYeT0TY8vgXyWpNXiUeyD8YBZiquX6+g62kpl5T6LsoLDAyrWggzmkK6ENFLU6EliWxUBIDohXRpY/Y9GYbG1cUfxPdCzD2spP397GdBx3Rknx2AsQrvJbZtG87SJTyOn2ArWbX2IOjM7jpDqoI4ffDQfQCsl6WDcHAzvNJM/kF9XlHgsOQL9iQDlEbhCwKkADs9270Vs8Tncr0l7OBuKHtdbklKMFJ/any3p0MLHzcegJW5RCIxuy7Z0E+xmxjaUXbet4j9dXg6sEKq/B6i4b3O1hJR4/cqZrzer2hPMAFn9c6mWOGoJjWfeN+RxroCDaXyeHpr5ofMlNucWKLr8Up2pCtH6tAx48F/BzmsHrYuXx4RzLpjzNkrX2QlMOTf4+Y/c5D4jIdTvT0YIemZ1GnPmLYkGWJ0Ydo3+2+eGNqeEkMsNqVzp4c5S1buW8gBONTTDuvAaS3ociVsj8CzlQU5Uw873fgL2Yc/bF+YDFoGvKVqjT9JcL48UajdHn8k/hl1pbJ1VG/rfs3s8XEPTDZBj6Mjh+fcoLLIXVm03a/IMOwmeKACW7t40wOJobWV/tZnQ0nQnhNuj1d39qnWLkDMs6Ytq/QvqBqSfZ/Ord2MlvV8bMAoFEis3McR25c9csXCSvIoGK6qVG99h+UBwQFWvIa+BwwflpjoZV1Vw553Nu2O5BYWaaKot/3u+hwrBvzbLEe3006AlnQIwuyWt4+LmjrY+dAG3kh7tKVDtgpuixkgNjHt5e0+tiEM7pvFJUM+D+uVUAvSGXFuksI2vZNgD8lurIN6Dxh1BVFlrx4EFHuaO3xdczxdZVf6fE+KcBuBJynUt1lBJ2KDIzBKNoN95cwrgxZItCx9sEmX1v6WiHjLtuUHyBU/Tnj4nnOmhbzbgqmPQIztryAkEC02DmjgZxb1s0gjgOl9UfkcmdKZdsk6UPnaEpWjxCpfKuFpAHKw4mFWC61/vzDH2oVOku0YPZyxO8LfBPDuL8zMxtaXiCgHJJ6u9LMTOdMRQpzHCpcBKuy2P57MsaKP+y8NCUTf6hOO4G6bLARVgxUaXEZRZ7nlQ6emyPlI9wDXevfnC/3VFR2m5npIeA3+pGEhi6OvPSt3fq1DnMpAoUt9iCN1n7RVNN+EhPN1l9MayvehWSvM/+gqIKmZW7ypDTbXjo/PdgNMe6KQ9QyXhi6UrVhjvoxn+ji8fDcpyNMtQcOcrEhs3rbMle9TVHhr7wjFNmLPTcFntqL1bWVEYMLuuKxXstjvSaL52obXvbqAKuqGqht7lEhAtl6FAPBkWdFJ4k7cX+YjsH6PYf3iS9xDYjt2B9c2bEZfTqKw1X8+OCTRxu4KdLoBUf+sAZzdhSp05xZZw0NxqZzj77HsDlaQ0vj7xSpIZOrEmxOsuyZMlCxzVE+/DN3RrUaCFi3xRspuW3vo/EPESYf94+L5zGmF0AaQumiUVkusTV7vqx1dEan2X4AE6FuHOVS+BJVW6xX8eUCkWKjdBaIAmLE6TLlwP+TmI/sx6ocy+yHbdFyv1/vCEJmuuclDvbymJ+rIWWLH0/CP38Dc3vMQ+51yDqPTOBnhavw/DDfG1Heay1XQZvoqzG6HUxWPsccp4EbMu/vIObsBM630BSOH2vYtW7uwzNnyRkZ+gI+J/8FdoMRG+/wA3yPOXD1N/vgv/w3Ent473h6PFxtNGNzjO9fHV56MmcGHvLgYrqy0rt2LkM78aaKkf476PPRIi5P0S0Hf8TB6ZkLJG/hUNC1/qWwAT/uqilYaoRa/nDCntXZCj1l2p5X6o5rf3N0/TqGpin+wkBa1xv+dLrWcSBBAf/zjLq/Q8t/UfuvdqVnHtzyMacYGmoy1ZflU/aV3c98WkOZb3+r3/q62WPBTx1aS0rRVz1pRhfD7JIRnZKNkaAbJhz4u1iHSoMv3rU816niLdq5+tE5j1MCILdz9W1N+oSb40et1ABkhkEAY8ovTo6WqBH6ucPO3IzbWPRgzrioj1c+7Y8XASpeJEa8AAUXCqCtX9P4mK2J+EdlZawmwX7MlXDfkN9A01UcVmFM8Aj0zJF6LeWkcXr+Qkxc4I25Qfn4AkSY0fSfiJKOpr6iV6K7+RFDwZdxKT9gOC0yToRPYbCViuws3fVqAE87bX1zOJrPL8Oh9dZydRk+3rYyHtiinqn6zW8gtRZFcdiNvZ0lrlaT5hCcrPRq6bnD1uKi54/3MazfHRSZZbKJ+2UKPf/GBfVgCLjmkzXfAmpEe3yFy92JH+0t1tD+oOqNYcvhlCGdKS+2Ai1BBLLefIBIzvQCKzwlnmy+HqIgPt6Fo7WPa4QSvXYE+SYGyFt9yHVo4xnRbKINMXHFB30HU/HeivlRmIfvVMVjby9EpWjE1onMuN/yJ1u1OqUoOSGS3O35xKfTG78THjOqB0D/Knz8apAMUCXz35v61GbrvLc65L1ZXRpDLjPgS8bCMzz6C38dCMYzrYog6ycLDH3k3Wr39EawH8Do2Mchi5l8AUhfQSlld57LIGSAFy2URM5Q6DOsuP0AWtsrQ94w96j05u7yC+uK+YKSS+F9rhCIF3pGsvx42Fg3d8tde8YMJVLonaCxVAX6hqItu3fzea9XY518fcpX0iBtC0owMX3K8iDbhZK1FzcnX145WmJGkiCQMpjlmdcDhQ2i+qRF0VWFUXxDvY0X41cnFYHKVx5gvNemKyzGwsy6iWojRtdCCBYulY9KQNNd9b3tc2oC3oCpTbU9hHEYPO7hGbhBJ3i8T299L2Krik38N
[... base64-encoded payload of the deleted file continues; unreadable content omitted ...]
\ No newline at end of file
diff --git a/src/diagrams/cattle-load-balancer.xml b/src/diagrams/cattle-load-balancer.xml
deleted file mode 100644
index 873947d74b..0000000000
--- a/src/diagrams/cattle-load-balancer.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/diagrams/healthcheck.xml b/src/diagrams/healthcheck.xml
deleted file mode 100644
index 0027497600..0000000000
--- a/src/diagrams/healthcheck.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/diagrams/hostPort.xml b/src/diagrams/hostPort.xml
deleted file mode 100644
index 3cfc54e303..0000000000
--- a/src/diagrams/hostPort.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/diagrams/kubernetes-load-balancer.xml b/src/diagrams/kubernetes-load-balancer.xml
deleted file mode 100644
index b680a4759f..0000000000
--- a/src/diagrams/kubernetes-load-balancer.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/diagrams/nodePort.xml b/src/diagrams/nodePort.xml
deleted file mode 100644
index dabe39be70..0000000000
--- a/src/diagrams/nodePort.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/diagrams/probes.xml b/src/diagrams/probes.xml
deleted file mode 100644
index f4202b615a..0000000000
--- a/src/diagrams/probes.xml
+++ /dev/null
@@ -1 +0,0 @@
-[compressed diagram data omitted]
\ No newline at end of file
diff --git a/src/fonts/lato/lato-regular.woff b/src/fonts/lato/lato-regular.woff
new file mode 100644
index 0000000000..42f1997c35
Binary files /dev/null and b/src/fonts/lato/lato-regular.woff differ
diff --git a/src/fonts/lato/lato-regular.woff2 b/src/fonts/lato/lato-regular.woff2
new file mode 100644
index 0000000000..ff60934dd0
Binary files /dev/null and b/src/fonts/lato/lato-regular.woff2 differ
diff --git a/src/fonts/poppins/poppins-regular.woff b/src/fonts/poppins/poppins-regular.woff
new file mode 100644
index 0000000000..2a04ae3225
Binary files /dev/null and b/src/fonts/poppins/poppins-regular.woff differ
diff --git a/src/fonts/poppins/poppins-regular.woff2 b/src/fonts/poppins/poppins-regular.woff2
new file mode 100644
index 0000000000..b69e0091c2
Binary files /dev/null and b/src/fonts/poppins/poppins-regular.woff2 differ
diff --git a/src/fonts/roboto-mono/roboto-mono-regular.woff b/src/fonts/roboto-mono/roboto-mono-regular.woff
new file mode 100644
index 0000000000..f319fbfa46
Binary files /dev/null and b/src/fonts/roboto-mono/roboto-mono-regular.woff differ
diff --git a/src/fonts/roboto-mono/roboto-mono-regular.woff2 b/src/fonts/roboto-mono/roboto-mono-regular.woff2
new file mode 100644
index 0000000000..ed384d22fd
Binary files /dev/null and b/src/fonts/roboto-mono/roboto-mono-regular.woff2 differ
diff --git a/src/img/rancher/monitoring-components.svg b/src/img/rancher/monitoring-components.svg
deleted file mode 100644
index 6e526036a6..0000000000
--- a/src/img/rancher/monitoring-components.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
Prometheus custom resource
Node Exporter
PushProx Proxy
The node exporter exposes metrics to PushProx through an outbound connection, allowing monitoring on hardened clusters.
ServiceMonitor custom resources
PodMonitor custom resources
ServiceMonitors and PodMonitors declaratively specify how services and pods should be monitored. They use labels to scrape metrics from pods.
When Prometheus Operator observes ServiceMonitors and PodMonitors being created, it updates the scrape configuration of the Prometheus custom resource.
Scrape Configuration
PushProx proxies Prometheus's requests for the metrics defined by ServiceMonitors and PodMonitors.
Prometheus RuleGroup
Alerting Rules
Recording Rules
Prometheus scrapes all targets in the scrape configuration on a recurring schedule based on the scrape interval, storing the results in its time series database.
Rules define what Prometheus metrics or time series database queries should result in alerts being fired.
Recording Rules are not directly used for alerting. They create new time series of precomputed queries. These new time series data can then be queried to generate alerts.
Alertmanager custom resource
Routes
Prometheus evaluates the recording rules against the time series database. It fires alerts whenever an alerting rule evaluates to a positive number.
Alertmanager uses routes to group, label and filter the fired alerts to translate them into useful notifications.
Receivers
Alertmanager uses the Receiver configuration to send notifications to Slack, PagerDuty, SMS, or other types of receivers.
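ServiceMonitors are the piece of this flow that users typically define themselves. As a rough illustration of the shape described above (not taken from this repository; the name, namespace, label selector, and port name are hypothetical placeholders), a minimal ServiceMonitor manifest can be sketched as a Python dict:

```python
# Minimal sketch of a ServiceMonitor manifest expressed as a Python dict.
# The name, namespace, labels, and port name are hypothetical placeholders;
# only the apiVersion/kind and field layout follow the
# monitoring.coreos.com/v1 ServiceMonitor CRD.
import json

service_monitor = {
    "apiVersion": "monitoring.coreos.com/v1",
    "kind": "ServiceMonitor",
    "metadata": {
        "name": "example-app",
        "namespace": "example-namespace",
    },
    "spec": {
        # Services carrying these labels are selected for scraping.
        "selector": {"matchLabels": {"app": "example-app"}},
        # Each endpoint becomes a scrape target in the generated
        # Prometheus scrape configuration.
        "endpoints": [
            {"port": "metrics", "path": "/metrics", "interval": "30s"}
        ],
    },
}

if __name__ == "__main__":
    print(json.dumps(service_monitor, indent=2))
```

When Prometheus Operator observes an object like this, it renders a matching scrape job into the Prometheus custom resource's scrape configuration; removing the ServiceMonitor removes that job again.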
    \ No newline at end of file diff --git a/src/img/rancher/open-rancher-app.png b/src/img/rancher/open-rancher-app.png deleted file mode 100644 index 2817d0efe2..0000000000 Binary files a/src/img/rancher/open-rancher-app.png and /dev/null differ diff --git a/src/img/rancher/search-app-registrations.png b/src/img/rancher/search-app-registrations.png deleted file mode 100644 index 4ab244da88..0000000000 Binary files a/src/img/rancher/search-app-registrations.png and /dev/null differ diff --git a/src/img/rancher/set-hostport.gif b/src/img/rancher/set-hostport.gif deleted file mode 100644 index 9d5100df4c..0000000000 Binary files a/src/img/rancher/set-hostport.gif and /dev/null differ diff --git a/src/img/rancher/set-nodeport.gif b/src/img/rancher/set-nodeport.gif deleted file mode 100644 index 5a328169f8..0000000000 Binary files a/src/img/rancher/set-nodeport.gif and /dev/null differ diff --git a/src/pages/versions.md b/src/pages/versions.md new file mode 100644 index 0000000000..0d0657e9d6 --- /dev/null +++ b/src/pages/versions.md @@ -0,0 +1,200 @@ +--- +title: Rancher Documentation Versions +--- + +### Current versions + +Below are the documentation and release notes for the currently released version of Rancher 2.6.x: + + + + + + + + +
v2.6.8 | Documentation | Release Notes | Support Matrix
    + +Below are the documentation and release notes for the currently released version of Rancher 2.5.x: + + + + + + + + +
v2.5.16 | Documentation | Release Notes | Support Matrix
    + +
    + +### Past versions + +Below are the documentation and release notes for previous versions of Rancher 2.6.x: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
v2.6.7 | Documentation | Release Notes | Support Matrix
v2.6.6 | Documentation | Release Notes | Support Matrix
v2.6.5 | Documentation | Release Notes | Support Matrix
v2.6.4 | Documentation | Release Notes | Support Matrix
v2.6.3 | Documentation | Release Notes | Support Matrix
v2.6.2 | Documentation | Release Notes | Support Matrix
v2.6.1 | Documentation | Release Notes | Support Matrix
v2.6.0 | Documentation | Release Notes | Support Matrix
    + +
    + +Below are the documentation and release notes for previous versions of Rancher 2.5.x: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
v2.5.15 | Documentation | Release Notes | Support Matrix
v2.5.14 | Documentation | Release Notes | Support Matrix
v2.5.13 | Documentation | Release Notes | Support Matrix
v2.5.12 | Documentation | Release Notes | Support Matrix
v2.5.11 | Documentation | Release Notes | Support Matrix
v2.5.10 | Documentation | Release Notes | Support Matrix
v2.5.9 | Documentation | Release Notes | Support Matrix
v2.5.8 | Documentation | Release Notes | Support Matrix
v2.5.7 | Documentation | Release Notes | Support Matrix
v2.5.6 | Documentation | Release Notes | Support Matrix
v2.5.5 | Documentation | Release Notes | Support Matrix
v2.5.4 | Documentation | Release Notes | Support Matrix
v2.5.3 | Documentation | Release Notes | Support Matrix
v2.5.2 | Documentation | Release Notes | Support Matrix
v2.5.1 | Documentation | Release Notes | Support Matrix
v2.5.0 | Documentation | Release Notes | Support Matrix
    + +
    + +### Legacy versions (EOL) + +Below is the documentation for legacy versions of Rancher 2.0 - 2.4.x: + + + + + + +
v2.0 - v2.4 | Documentation
    \ No newline at end of file diff --git a/content/.gitkeep b/static/.nojekyll similarity index 100% rename from content/.gitkeep rename to static/.nojekyll diff --git a/static/fonts/.gitkeep b/static/fonts/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/static/img/.gitkeep b/static/img/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/static/img/rancher/Google-Credentials-tab.png b/static/img/Google-Credentials-tab.png similarity index 100% rename from static/img/rancher/Google-Credentials-tab.png rename to static/img/Google-Credentials-tab.png diff --git a/static/img/rancher/Google-Enable-APIs-Screen.png b/static/img/Google-Enable-APIs-Screen.png similarity index 100% rename from static/img/rancher/Google-Enable-APIs-Screen.png rename to static/img/Google-Enable-APIs-Screen.png diff --git a/static/img/rancher/Google-Enable-Admin-APIs.png b/static/img/Google-Enable-Admin-APIs.png similarity index 100% rename from static/img/rancher/Google-Enable-Admin-APIs.png rename to static/img/Google-Enable-Admin-APIs.png diff --git a/static/img/rancher/Google-OAuth-consent-screen-tab.png b/static/img/Google-OAuth-consent-screen-tab.png similarity index 100% rename from static/img/rancher/Google-OAuth-consent-screen-tab.png rename to static/img/Google-OAuth-consent-screen-tab.png diff --git a/static/img/rancher/Google-Select-UniqueID-column.png b/static/img/Google-Select-UniqueID-column.png similarity index 100% rename from static/img/rancher/Google-Select-UniqueID-column.png rename to static/img/Google-Select-UniqueID-column.png diff --git a/static/img/rancher/Google-svc-acc-step1.png b/static/img/Google-svc-acc-step1.png similarity index 100% rename from static/img/rancher/Google-svc-acc-step1.png rename to static/img/Google-svc-acc-step1.png diff --git a/static/img/rancher/Google-svc-acc-step2.png b/static/img/Google-svc-acc-step2.png similarity index 100% rename from static/img/rancher/Google-svc-acc-step2.png rename to static/img/Google-svc-acc-step2.png diff --git a/static/img/rancher/Google-svc-acc-step3-key-creation.png b/static/img/Google-svc-acc-step3-key-creation.png similarity index 100% rename from static/img/rancher/Google-svc-acc-step3-key-creation.png rename to static/img/Google-svc-acc-step3-key-creation.png diff --git a/src/img/rancher/add-custom-metrics.gif b/static/img/add-custom-metrics.gif similarity index 100% rename from src/img/rancher/add-custom-metrics.gif rename to static/img/add-custom-metrics.gif diff --git a/src/img/rancher/add-ingress-form.png b/static/img/add-ingress-form.png similarity index 100% rename from src/img/rancher/add-ingress-form.png rename to static/img/add-ingress-form.png diff --git a/src/img/rancher/add-ingress.gif b/static/img/add-ingress.gif similarity index 100% rename from src/img/rancher/add-ingress.gif rename to static/img/add-ingress.gif diff --git a/src/img/rancher/add-node-label.gif b/static/img/add-node-label.gif similarity index 100% rename from src/img/rancher/add-node-label.gif rename to static/img/add-node-label.gif diff --git a/static/img/rancher/add-persistent-volume.png b/static/img/add-persistent-volume.png similarity index 100% rename from static/img/rancher/add-persistent-volume.png rename to static/img/add-persistent-volume.png diff --git a/src/img/rancher/add-pod-label.gif b/static/img/add-pod-label.gif similarity index 100% rename from src/img/rancher/add-pod-label.gif rename to static/img/add-pod-label.gif diff --git a/src/img/rancher/add-record.png 
b/static/img/add-record.png similarity index 100% rename from src/img/rancher/add-record.png rename to static/img/add-record.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-1.png b/static/img/adfs/adfs-add-rpt-1.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-1.png rename to static/img/adfs/adfs-add-rpt-1.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-10.png b/static/img/adfs/adfs-add-rpt-10.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-10.png rename to static/img/adfs/adfs-add-rpt-10.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-11.png b/static/img/adfs/adfs-add-rpt-11.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-11.png rename to static/img/adfs/adfs-add-rpt-11.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-2.png b/static/img/adfs/adfs-add-rpt-2.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-2.png rename to static/img/adfs/adfs-add-rpt-2.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-3.png b/static/img/adfs/adfs-add-rpt-3.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-3.png rename to static/img/adfs/adfs-add-rpt-3.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-4.png b/static/img/adfs/adfs-add-rpt-4.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-4.png rename to static/img/adfs/adfs-add-rpt-4.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-5.png b/static/img/adfs/adfs-add-rpt-5.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-5.png rename to static/img/adfs/adfs-add-rpt-5.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-6.png b/static/img/adfs/adfs-add-rpt-6.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-6.png rename to static/img/adfs/adfs-add-rpt-6.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-7.png b/static/img/adfs/adfs-add-rpt-7.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-7.png rename to static/img/adfs/adfs-add-rpt-7.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-8.png b/static/img/adfs/adfs-add-rpt-8.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-8.png rename to static/img/adfs/adfs-add-rpt-8.png diff --git a/assets/img/rancher/adfs/adfs-add-rpt-9.png b/static/img/adfs/adfs-add-rpt-9.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-rpt-9.png rename to static/img/adfs/adfs-add-rpt-9.png diff --git a/assets/img/rancher/adfs/adfs-add-tcr-1.png b/static/img/adfs/adfs-add-tcr-1.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-tcr-1.png rename to static/img/adfs/adfs-add-tcr-1.png diff --git a/assets/img/rancher/adfs/adfs-add-tcr-2.png b/static/img/adfs/adfs-add-tcr-2.png similarity index 100% rename from assets/img/rancher/adfs/adfs-add-tcr-2.png rename to static/img/adfs/adfs-add-tcr-2.png diff --git a/assets/img/rancher/adfs/adfs-edit-cr.png b/static/img/adfs/adfs-edit-cr.png similarity index 100% rename from assets/img/rancher/adfs/adfs-edit-cr.png rename to static/img/adfs/adfs-edit-cr.png diff --git a/assets/img/rancher/adfs/adfs-overview.png b/static/img/adfs/adfs-overview.png similarity index 100% rename from assets/img/rancher/adfs/adfs-overview.png rename to static/img/adfs/adfs-overview.png diff --git a/assets/img/rancher/airgap/edit-system-default-registry.png b/static/img/airgap/edit-system-default-registry.png similarity index 100% rename from 
assets/img/rancher/airgap/edit-system-default-registry.png rename to static/img/airgap/edit-system-default-registry.png diff --git a/assets/img/rancher/airgap/enter-system-default-registry.png b/static/img/airgap/enter-system-default-registry.png similarity index 100% rename from assets/img/rancher/airgap/enter-system-default-registry.png rename to static/img/airgap/enter-system-default-registry.png diff --git a/assets/img/rancher/airgap/privateregistry.svg b/static/img/airgap/privateregistry.svg similarity index 100% rename from assets/img/rancher/airgap/privateregistry.svg rename to static/img/airgap/privateregistry.svg diff --git a/assets/img/rancher/airgap/privateregistrypushpull.svg b/static/img/airgap/privateregistrypushpull.svg similarity index 100% rename from assets/img/rancher/airgap/privateregistrypushpull.svg rename to static/img/airgap/privateregistrypushpull.svg diff --git a/assets/img/rancher/airgap/settings.png b/static/img/airgap/settings.png similarity index 100% rename from assets/img/rancher/airgap/settings.png rename to static/img/airgap/settings.png diff --git a/assets/img/rancher/airgap/system-charts-setting.png b/static/img/airgap/system-charts-setting.png similarity index 100% rename from assets/img/rancher/airgap/system-charts-setting.png rename to static/img/airgap/system-charts-setting.png diff --git a/assets/img/rancher/airgap/system-charts-update.png b/static/img/airgap/system-charts-update.png similarity index 100% rename from assets/img/rancher/airgap/system-charts-update.png rename to static/img/airgap/system-charts-update.png diff --git a/static/img/rancher/alertmanager-ui.png b/static/img/alertmanager-ui.png similarity index 100% rename from static/img/rancher/alertmanager-ui.png rename to static/img/alertmanager-ui.png diff --git a/static/img/rancher/app-readme.png b/static/img/app-readme.png similarity index 100% rename from static/img/rancher/app-readme.png rename to static/img/app-readme.png diff --git a/src/img/rancher/auth-providers.svg b/static/img/auth-providers.svg similarity index 100% rename from src/img/rancher/auth-providers.svg rename to static/img/auth-providers.svg diff --git a/static/img/rancher/backup-container.png b/static/img/backup-container.png similarity index 100% rename from static/img/rancher/backup-container.png rename to static/img/backup-container.png diff --git a/assets/img/rancher/backup_restore/backup/backup.png b/static/img/backup_restore/backup/backup.png similarity index 100% rename from assets/img/rancher/backup_restore/backup/backup.png rename to static/img/backup_restore/backup/backup.png diff --git a/assets/img/rancher/backup_restore/backup/encryption.png b/static/img/backup_restore/backup/encryption.png similarity index 100% rename from assets/img/rancher/backup_restore/backup/encryption.png rename to static/img/backup_restore/backup/encryption.png diff --git a/assets/img/rancher/backup_restore/backup/schedule.png b/static/img/backup_restore/backup/schedule.png similarity index 100% rename from assets/img/rancher/backup_restore/backup/schedule.png rename to static/img/backup_restore/backup/schedule.png diff --git a/assets/img/rancher/backup_restore/backup/storageLocation.png b/static/img/backup_restore/backup/storageLocation.png similarity index 100% rename from assets/img/rancher/backup_restore/backup/storageLocation.png rename to static/img/backup_restore/backup/storageLocation.png diff --git a/assets/img/rancher/backup_restore/restore/default.png b/static/img/backup_restore/restore/default.png similarity index 
100% rename from assets/img/rancher/backup_restore/restore/default.png rename to static/img/backup_restore/restore/default.png diff --git a/assets/img/rancher/backup_restore/restore/encryption.png b/static/img/backup_restore/restore/encryption.png similarity index 100% rename from assets/img/rancher/backup_restore/restore/encryption.png rename to static/img/backup_restore/restore/encryption.png diff --git a/assets/img/rancher/backup_restore/restore/existing.png b/static/img/backup_restore/restore/existing.png similarity index 100% rename from assets/img/rancher/backup_restore/restore/existing.png rename to static/img/backup_restore/restore/existing.png diff --git a/assets/img/rancher/backup_restore/restore/restore.png b/static/img/backup_restore/restore/restore.png similarity index 100% rename from assets/img/rancher/backup_restore/restore/restore.png rename to static/img/backup_restore/restore/restore.png diff --git a/assets/img/rancher/backup_restore/restore/s3store.png b/static/img/backup_restore/restore/s3store.png similarity index 100% rename from assets/img/rancher/backup_restore/restore/s3store.png rename to static/img/backup_restore/restore/s3store.png diff --git a/static/img/rancher/banzai-cloud-logging-operator.png b/static/img/banzai-cloud-logging-operator.png similarity index 100% rename from static/img/rancher/banzai-cloud-logging-operator.png rename to static/img/banzai-cloud-logging-operator.png diff --git a/assets/img/rancher/bpg/hub-and-spoke.png b/static/img/bpg/hub-and-spoke.png similarity index 100% rename from assets/img/rancher/bpg/hub-and-spoke.png rename to static/img/bpg/hub-and-spoke.png diff --git a/assets/img/rancher/bpg/regional.png b/static/img/bpg/regional.png similarity index 100% rename from assets/img/rancher/bpg/regional.png rename to static/img/bpg/regional.png diff --git a/assets/img/rancher/bulk-key-values.gif b/static/img/bulk-key-values.gif similarity index 100% rename from assets/img/rancher/bulk-key-values.gif rename to static/img/bulk-key-values.gif diff --git a/static/img/rancher/calico-diagram.svg b/static/img/calico-diagram.svg similarity index 100% rename from static/img/rancher/calico-diagram.svg rename to static/img/calico-diagram.svg diff --git a/static/img/rancher/calico-logo.png b/static/img/calico-logo.png similarity index 100% rename from static/img/rancher/calico-logo.png rename to static/img/calico-logo.png diff --git a/assets/img/rancher/canal-diagram.png b/static/img/canal-diagram.png similarity index 100% rename from assets/img/rancher/canal-diagram.png rename to static/img/canal-diagram.png diff --git a/static/img/rancher/canal-logo.png b/static/img/canal-logo.png similarity index 100% rename from static/img/rancher/canal-logo.png rename to static/img/canal-logo.png diff --git a/src/img/rancher/cattle-load-balancer.svg b/static/img/cattle-load-balancer.svg similarity index 100% rename from src/img/rancher/cattle-load-balancer.svg rename to static/img/cattle-load-balancer.svg diff --git a/static/img/rancher/choose-release-version.png b/static/img/choose-release-version.png similarity index 100% rename from static/img/rancher/choose-release-version.png rename to static/img/choose-release-version.png diff --git a/static/img/rancher/cilium-logo.png b/static/img/cilium-logo.png similarity index 100% rename from static/img/rancher/cilium-logo.png rename to static/img/cilium-logo.png diff --git a/static/img/rancher/click-endpoints.png b/static/img/click-endpoints.png similarity index 100% rename from 
static/img/rancher/click-endpoints.png rename to static/img/click-endpoints.png diff --git a/static/img/rancher/click-register-new-app.png b/static/img/click-register-new-app.png similarity index 100% rename from static/img/rancher/click-register-new-app.png rename to static/img/click-register-new-app.png diff --git a/static/img/rancher/cloud-provider.png b/static/img/cloud-provider.png similarity index 100% rename from static/img/rancher/cloud-provider.png rename to static/img/cloud-provider.png diff --git a/static/img/rancher/cluster-compute-resources-dashboard.png b/static/img/cluster-compute-resources-dashboard.png similarity index 100% rename from static/img/rancher/cluster-compute-resources-dashboard.png rename to static/img/cluster-compute-resources-dashboard.png diff --git a/static/img/rancher/cluster-options-yaml.png b/static/img/cluster-options-yaml.png similarity index 100% rename from static/img/rancher/cluster-options-yaml.png rename to static/img/cluster-options-yaml.png diff --git a/static/img/rancher/cluster-options.png b/static/img/cluster-options.png similarity index 100% rename from static/img/rancher/cluster-options.png rename to static/img/cluster-options.png diff --git a/static/img/rancher/clusterdiagram.svg b/static/img/clusterdiagram.svg similarity index 100% rename from static/img/rancher/clusterdiagram.svg rename to static/img/clusterdiagram.svg diff --git a/static/img/rancher/cni-logo.png b/static/img/cni-logo.png similarity index 100% rename from static/img/rancher/cni-logo.png rename to static/img/cni-logo.png diff --git a/static/img/rancher/container-port-field.png b/static/img/container-port-field.png similarity index 100% rename from static/img/rancher/container-port-field.png rename to static/img/container-port-field.png diff --git a/static/img/rancher/create-backup-tarball.png b/static/img/create-backup-tarball.png similarity index 100% rename from static/img/rancher/create-backup-tarball.png rename to static/img/create-backup-tarball.png diff --git a/static/img/rancher/default-grafana-dashboards.png b/static/img/default-grafana-dashboards.png similarity index 100% rename from static/img/rancher/default-grafana-dashboards.png rename to static/img/default-grafana-dashboards.png diff --git a/src/img/rancher/deploy-service.gif b/static/img/deploy-service.gif similarity index 100% rename from src/img/rancher/deploy-service.gif rename to static/img/deploy-service.gif diff --git a/src/img/rancher/deploy-workload-hostport.png b/static/img/deploy-workload-hostport.png similarity index 100% rename from src/img/rancher/deploy-workload-hostport.png rename to static/img/deploy-workload-hostport.png diff --git a/src/img/rancher/deploy-workload-load-balancer.png b/static/img/deploy-workload-load-balancer.png similarity index 100% rename from src/img/rancher/deploy-workload-load-balancer.png rename to static/img/deploy-workload-load-balancer.png diff --git a/src/img/rancher/deploy-workload-nodeport.png b/static/img/deploy-workload-nodeport.png similarity index 100% rename from src/img/rancher/deploy-workload-nodeport.png rename to static/img/deploy-workload-nodeport.png diff --git a/static/diagrams/cluster-ports.xml b/static/img/diagrams/cluster-ports.xml similarity index 100% rename from static/diagrams/cluster-ports.xml rename to static/img/diagrams/cluster-ports.xml diff --git a/static/diagrams/clusterdiagram.xml b/static/img/diagrams/clusterdiagram.xml similarity index 100% rename from static/diagrams/clusterdiagram.xml rename to 
static/img/diagrams/clusterdiagram.xml diff --git a/static/diagrams/kubernetes-resource-quota.xml b/static/img/diagrams/kubernetes-resource-quota.xml similarity index 100% rename from static/diagrams/kubernetes-resource-quota.xml rename to static/img/diagrams/kubernetes-resource-quota.xml diff --git a/static/diagrams/rancher-diagram-template.xml b/static/img/diagrams/rancher-diagram-template.xml similarity index 100% rename from static/diagrams/rancher-diagram-template.xml rename to static/img/diagrams/rancher-diagram-template.xml diff --git a/static/diagrams/rancher-resource-quota-override.xml b/static/img/diagrams/rancher-resource-quota-override.xml similarity index 100% rename from static/diagrams/rancher-resource-quota-override.xml rename to static/img/diagrams/rancher-resource-quota-override.xml diff --git a/static/diagrams/rancher-resource-quota.xml b/static/img/diagrams/rancher-resource-quota.xml similarity index 100% rename from static/diagrams/rancher-resource-quota.xml rename to static/img/diagrams/rancher-resource-quota.xml diff --git a/static/diagrams/rancher2ha-l4.xml b/static/img/diagrams/rancher2ha-l4.xml similarity index 100% rename from static/diagrams/rancher2ha-l4.xml rename to static/img/diagrams/rancher2ha-l4.xml diff --git a/static/diagrams/rancher2ha-l7.xml b/static/img/diagrams/rancher2ha-l7.xml similarity index 100% rename from static/diagrams/rancher2ha-l7.xml rename to static/img/diagrams/rancher2ha-l7.xml diff --git a/static/diagrams/ranchercomponentsdiagram.xml b/static/img/diagrams/ranchercomponentsdiagram.xml similarity index 100% rename from static/diagrams/ranchercomponentsdiagram.xml rename to static/img/diagrams/ranchercomponentsdiagram.xml diff --git a/static/img/rancher/dir-backup-tarball-clear.png b/static/img/dir-backup-tarball-clear.png similarity index 100% rename from static/img/rancher/dir-backup-tarball-clear.png rename to static/img/dir-backup-tarball-clear.png diff --git a/static/img/rancher/dir-backup-tarball.png b/static/img/dir-backup-tarball.png similarity index 100% rename from static/img/rancher/dir-backup-tarball.png rename to static/img/dir-backup-tarball.png diff --git a/static/img/rancher/docker-container-ps-output.png b/static/img/docker-container-ps-output.png similarity index 100% rename from static/img/rancher/docker-container-ps-output.png rename to static/img/docker-container-ps-output.png diff --git a/static/img/rancher/downloadsshkeys.png b/static/img/downloadsshkeys.png similarity index 100% rename from static/img/rancher/downloadsshkeys.png rename to static/img/downloadsshkeys.png diff --git a/static/img/rancher/edit-cluster.png b/static/img/edit-cluster.png similarity index 100% rename from static/img/rancher/edit-cluster.png rename to static/img/edit-cluster.png diff --git a/src/img/rancher/edit-migration-workload.gif b/static/img/edit-migration-workload.gif similarity index 100% rename from src/img/rancher/edit-migration-workload.gif rename to static/img/edit-migration-workload.gif diff --git a/src/img/rancher/enable-cluster-monitoring.gif b/static/img/enable-cluster-monitoring.gif similarity index 100% rename from src/img/rancher/enable-cluster-monitoring.gif rename to static/img/enable-cluster-monitoring.gif diff --git a/src/img/rancher/enable-project-monitoring.gif b/static/img/enable-project-monitoring.gif similarity index 100% rename from src/img/rancher/enable-project-monitoring.gif rename to static/img/enable-project-monitoring.gif diff --git a/static/img/rancher/encapsulated-network.png 
b/static/img/encapsulated-network.png similarity index 100% rename from static/img/rancher/encapsulated-network.png rename to static/img/encapsulated-network.png diff --git a/static/img/rancher/enter-azure-reply-url.png b/static/img/enter-azure-reply-url.png similarity index 100% rename from static/img/rancher/enter-azure-reply-url.png rename to static/img/enter-azure-reply-url.png diff --git a/static/img/rancher/example-grafana-link.png b/static/img/example-grafana-link.png similarity index 100% rename from static/img/rancher/example-grafana-link.png rename to static/img/example-grafana-link.png diff --git a/static/img/rancher/example-service-link.png b/static/img/example-service-link.png similarity index 100% rename from static/img/rancher/example-service-link.png rename to static/img/example-service-link.png diff --git a/static/img/farm-k8s-transparent.svg b/static/img/farm-k8s-transparent.svg deleted file mode 100644 index e0a4b5077b..0000000000 --- a/static/img/farm-k8s-transparent.svg +++ /dev/null @@ -1,3069 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/static/img/farm-k8s.svg b/static/img/farm-k8s.svg deleted file mode 100644 index ea2ec5ee2d..0000000000 --- a/static/img/farm-k8s.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/img/rancher/flannel-diagram.png b/static/img/flannel-diagram.png similarity index 100% rename from static/img/rancher/flannel-diagram.png rename to static/img/flannel-diagram.png diff --git a/static/img/rancher/flannel-logo.png b/static/img/flannel-logo.png similarity index 100% rename from static/img/rancher/flannel-logo.png rename to static/img/flannel-logo.png diff --git a/static/img/rancher/fleet-architecture.svg b/static/img/fleet-architecture.svg similarity index 100% rename from static/img/rancher/fleet-architecture.svg rename to static/img/fleet-architecture.svg diff --git a/static/img/rancher/global-menu.png b/static/img/global-menu.png similarity index 100% rename from static/img/rancher/global-menu.png rename to static/img/global-menu.png diff --git a/static/img/rancher/globalpermissionfinal.png b/static/img/globalpermissionfinal.png similarity index 100% rename from static/img/rancher/globalpermissionfinal.png rename to static/img/globalpermissionfinal.png diff --git a/assets/img/rancher/globalpermissionrole.png b/static/img/globalpermissionrole.png similarity index 100% rename from assets/img/rancher/globalpermissionrole.png rename to static/img/globalpermissionrole.png diff --git a/assets/img/rancher/globalpermissionuser.png b/static/img/globalpermissionuser.png similarity index 100% rename from assets/img/rancher/globalpermissionuser.png rename to static/img/globalpermissionuser.png diff --git a/static/img/rancher/grafana-default-dashboard.png b/static/img/grafana-default-dashboard.png similarity index 100% rename from static/img/rancher/grafana-default-dashboard.png rename to static/img/grafana-default-dashboard.png diff --git a/static/img/rancher/grouped-vs-standalone-links.png b/static/img/grouped-vs-standalone-links.png similarity index 100% rename from static/img/rancher/grouped-vs-standalone-links.png rename to static/img/grouped-vs-standalone-links.png diff --git a/assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png b/static/img/ha/nlb/add-targets-targetgroup-443.png similarity index 100% rename from assets/img/rancher/ha/nlb/add-targets-targetgroup-443.png rename to static/img/ha/nlb/add-targets-targetgroup-443.png diff --git a/assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png b/static/img/ha/nlb/added-targets-targetgroup-443.png similarity index 100% rename from assets/img/rancher/ha/nlb/added-targets-targetgroup-443.png rename to static/img/ha/nlb/added-targets-targetgroup-443.png diff --git a/assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png b/static/img/ha/nlb/create-targetgroup-443-advanced.png similarity index 100% rename from assets/img/rancher/ha/nlb/create-targetgroup-443-advanced.png rename to static/img/ha/nlb/create-targetgroup-443-advanced.png diff --git 
a/assets/img/rancher/ha/nlb/create-targetgroup-443.png b/static/img/ha/nlb/create-targetgroup-443.png similarity index 100% rename from assets/img/rancher/ha/nlb/create-targetgroup-443.png rename to static/img/ha/nlb/create-targetgroup-443.png diff --git a/assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png b/static/img/ha/nlb/create-targetgroup-80-advanced.png similarity index 100% rename from assets/img/rancher/ha/nlb/create-targetgroup-80-advanced.png rename to static/img/ha/nlb/create-targetgroup-80-advanced.png diff --git a/assets/img/rancher/ha/nlb/create-targetgroup-80.png b/static/img/ha/nlb/create-targetgroup-80.png similarity index 100% rename from assets/img/rancher/ha/nlb/create-targetgroup-80.png rename to static/img/ha/nlb/create-targetgroup-80.png diff --git a/assets/img/rancher/ha/nlb/ec2-loadbalancing.png b/static/img/ha/nlb/ec2-loadbalancing.png similarity index 100% rename from assets/img/rancher/ha/nlb/ec2-loadbalancing.png rename to static/img/ha/nlb/ec2-loadbalancing.png diff --git a/assets/img/rancher/ha/nlb/edit-targetgroup-443.png b/static/img/ha/nlb/edit-targetgroup-443.png similarity index 100% rename from assets/img/rancher/ha/nlb/edit-targetgroup-443.png rename to static/img/ha/nlb/edit-targetgroup-443.png diff --git a/static/img/rancher/ha/rancher2ha-l7.svg b/static/img/ha/rancher2ha-l7.svg similarity index 100% rename from static/img/rancher/ha/rancher2ha-l7.svg rename to static/img/ha/rancher2ha-l7.svg diff --git a/static/img/rancher/ha/rancher2ha.svg b/static/img/ha/rancher2ha.svg similarity index 100% rename from static/img/rancher/ha/rancher2ha.svg rename to static/img/ha/rancher2ha.svg diff --git a/static/img/header-farm.svg b/static/img/header-farm.svg deleted file mode 100644 index 125d0093b8..0000000000 --- a/static/img/header-farm.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/src/img/rancher/health-check-section.png b/static/img/health-check-section.png similarity index 100% rename from src/img/rancher/health-check-section.png rename to static/img/health-check-section.png diff --git a/src/img/rancher/healthcheck-cmd-exec.png b/static/img/healthcheck-cmd-exec.png similarity index 100% rename from src/img/rancher/healthcheck-cmd-exec.png rename to static/img/healthcheck-cmd-exec.png diff --git a/src/img/rancher/healthcheck.svg b/static/img/healthcheck.svg similarity index 100% rename from src/img/rancher/healthcheck.svg rename to static/img/healthcheck.svg diff --git a/static/img/rancher/helm-app-2.6.png b/static/img/helm-app-2.6.png similarity index 100% rename from static/img/rancher/helm-app-2.6.png rename to static/img/helm-app-2.6.png diff --git a/static/img/rancher/horizontal-pod-autoscaler.jpg b/static/img/horizontal-pod-autoscaler.jpg similarity index 100% rename from static/img/rancher/horizontal-pod-autoscaler.jpg rename to static/img/horizontal-pod-autoscaler.jpg diff --git a/static/img/rancher/horizontal-pod-autoscaler.svg b/static/img/horizontal-pod-autoscaler.svg similarity index 100% rename from static/img/rancher/horizontal-pod-autoscaler.svg rename to static/img/horizontal-pod-autoscaler.svg diff --git a/src/img/rancher/hostPort.svg b/static/img/hostPort.svg similarity index 100% rename from src/img/rancher/hostPort.svg rename to static/img/hostPort.svg diff --git a/static/img/rancher/iaas-scale-nodes.png b/static/img/iaas-scale-nodes.png similarity index 100% rename from static/img/rancher/iaas-scale-nodes.png rename to static/img/iaas-scale-nodes.png diff --git 
a/src/img/rancher/import-yaml-error.png b/static/img/import-yaml-error.png similarity index 100% rename from src/img/rancher/import-yaml-error.png rename to static/img/import-yaml-error.png diff --git a/src/img/rancher/imported-workloads.png b/static/img/imported-workloads.png similarity index 100% rename from src/img/rancher/imported-workloads.png rename to static/img/imported-workloads.png diff --git a/static/img/rancher/istio-ingress.svg b/static/img/istio-ingress.svg similarity index 100% rename from static/img/rancher/istio-ingress.svg rename to static/img/istio-ingress.svg diff --git a/static/img/rancher/k3s-architecture-ha-server.png b/static/img/k3s-architecture-ha-server.png similarity index 100% rename from static/img/rancher/k3s-architecture-ha-server.png rename to static/img/k3s-architecture-ha-server.png diff --git a/static/img/rancher/k3s-architecture-single-server.png b/static/img/k3s-architecture-single-server.png similarity index 100% rename from static/img/rancher/k3s-architecture-single-server.png rename to static/img/k3s-architecture-single-server.png diff --git a/static/img/rancher/k3s-ha-architecture.svg b/static/img/k3s-ha-architecture.svg similarity index 100% rename from static/img/rancher/k3s-ha-architecture.svg rename to static/img/k3s-ha-architecture.svg diff --git a/static/img/k3s/k3s-production-setup.svg b/static/img/k3s-production-setup.svg similarity index 100% rename from static/img/k3s/k3s-production-setup.svg rename to static/img/k3s-production-setup.svg diff --git a/static/img/rancher/k3s-server-storage.svg b/static/img/k3s-server-storage.svg similarity index 100% rename from static/img/rancher/k3s-server-storage.svg rename to static/img/k3s-server-storage.svg diff --git a/static/img/rancher/k3s-single-node-server-architecture.svg b/static/img/k3s-single-node-server-architecture.svg similarity index 100% rename from static/img/rancher/k3s-single-node-server-architecture.svg rename to static/img/k3s-single-node-server-architecture.svg diff --git a/assets/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png b/static/img/keycloak/keycloak-saml-client-builtin-mappers.png similarity index 100% rename from assets/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png rename to static/img/keycloak/keycloak-saml-client-builtin-mappers.png diff --git a/assets/img/rancher/keycloak/keycloak-saml-client-configuration.png b/static/img/keycloak/keycloak-saml-client-configuration.png similarity index 100% rename from assets/img/rancher/keycloak/keycloak-saml-client-configuration.png rename to static/img/keycloak/keycloak-saml-client-configuration.png diff --git a/assets/img/rancher/keycloak/keycloak-saml-client-group-mapper.png b/static/img/keycloak/keycloak-saml-client-group-mapper.png similarity index 100% rename from assets/img/rancher/keycloak/keycloak-saml-client-group-mapper.png rename to static/img/keycloak/keycloak-saml-client-group-mapper.png diff --git a/src/img/rancher/kubernetes-load-balancer.svg b/static/img/kubernetes-load-balancer.svg similarity index 100% rename from src/img/rancher/kubernetes-load-balancer.svg rename to static/img/kubernetes-load-balancer.svg diff --git a/static/img/rancher/kubernetes-resource-quota.svg b/static/img/kubernetes-resource-quota.svg similarity index 100% rename from static/img/rancher/kubernetes-resource-quota.svg rename to static/img/kubernetes-resource-quota.svg diff --git a/src/img/rancher/layer-4-lb-config-map.png b/static/img/layer-4-lb-config-map.png similarity index 100% rename from 
src/img/rancher/layer-4-lb-config-map.png rename to static/img/layer-4-lb-config-map.png diff --git a/assets/img/rancher/ldapsearch-group.png b/static/img/ldapsearch-group.png similarity index 100% rename from assets/img/rancher/ldapsearch-group.png rename to static/img/ldapsearch-group.png diff --git a/assets/img/rancher/ldapsearch-user.png b/static/img/ldapsearch-user.png similarity index 100% rename from assets/img/rancher/ldapsearch-user.png rename to static/img/ldapsearch-user.png diff --git a/src/img/rancher/linked-service-workload.png b/static/img/linked-service-workload.png similarity index 100% rename from src/img/rancher/linked-service-workload.png rename to static/img/linked-service-workload.png diff --git a/static/img/rancher/listening-port-field.png b/static/img/listening-port-field.png similarity index 100% rename from static/img/rancher/listening-port-field.png rename to static/img/listening-port-field.png diff --git a/src/img/rancher/liveness-check.png b/static/img/liveness-check.png similarity index 100% rename from src/img/rancher/liveness-check.png rename to static/img/liveness-check.png diff --git a/src/img/rancher/load-balancer-links.png b/static/img/load-balancer-links.png similarity index 100% rename from src/img/rancher/load-balancer-links.png rename to static/img/load-balancer-links.png diff --git a/src/img/rancher/load-balancer-ssl-certs.png b/static/img/load-balancer-ssl-certs.png similarity index 100% rename from src/img/rancher/load-balancer-ssl-certs.png rename to static/img/load-balancer-ssl-certs.png diff --git a/static/img/rancher/longhorn-architecture.svg b/static/img/longhorn-architecture.svg similarity index 100% rename from static/img/rancher/longhorn-architecture.svg rename to static/img/longhorn-architecture.svg diff --git a/static/img/rancher/longhorn-logo.png b/static/img/longhorn-logo.png similarity index 100% rename from static/img/rancher/longhorn-logo.png rename to static/img/longhorn-logo.png diff --git a/static/img/rancher/longhorn-screenshot.png b/static/img/longhorn-screenshot.png similarity index 100% rename from static/img/rancher/longhorn-screenshot.png rename to static/img/longhorn-screenshot.png diff --git a/src/img/rancher/migrate-schedule-workloads.png b/static/img/migrate-schedule-workloads.png similarity index 100% rename from src/img/rancher/migrate-schedule-workloads.png rename to static/img/migrate-schedule-workloads.png diff --git a/static/img/rancher/monitoring-components.svg b/static/img/monitoring-components.svg similarity index 100% rename from static/img/rancher/monitoring-components.svg rename to static/img/monitoring-components.svg diff --git a/static/img/rancher/monitoring-v2-architecture-overview.svg b/static/img/monitoring-v2-architecture-overview.svg similarity index 100% rename from static/img/rancher/monitoring-v2-architecture-overview.svg rename to static/img/monitoring-v2-architecture-overview.svg diff --git a/assets/img/rancher/monitoring/migration/alert_2.4_to_2.5_source.png b/static/img/monitoring/migration/alert_2.4_to_2.5_source.png similarity index 100% rename from assets/img/rancher/monitoring/migration/alert_2.4_to_2.5_source.png rename to static/img/monitoring/migration/alert_2.4_to_2.5_source.png diff --git a/assets/img/rancher/monitoring/migration/alert_2.4_to_2.5_target.png b/static/img/monitoring/migration/alert_2.4_to_2.5_target.png similarity index 100% rename from assets/img/rancher/monitoring/migration/alert_2.4_to_2.5_target.png rename to 
static/img/monitoring/migration/alert_2.4_to_2.5_target.png diff --git a/static/img/rancher/move-namespaces.png b/static/img/move-namespaces.png similarity index 100% rename from static/img/rancher/move-namespaces.png rename to static/img/move-namespaces.png diff --git a/static/img/rancher/neuvector-architecture.png b/static/img/neuvector-architecture.png similarity index 100% rename from static/img/rancher/neuvector-architecture.png rename to static/img/neuvector-architecture.png diff --git a/static/img/rancher/neuvector-security-containers.png b/static/img/neuvector-security-containers.png similarity index 100% rename from static/img/rancher/neuvector-security-containers.png rename to static/img/neuvector-security-containers.png diff --git a/static/img/rancher/new-app-registration-1.png b/static/img/new-app-registration-1.png similarity index 100% rename from static/img/rancher/new-app-registration-1.png rename to static/img/new-app-registration-1.png diff --git a/static/img/rancher/new-app-registration-2.png b/static/img/new-app-registration-2.png similarity index 100% rename from static/img/rancher/new-app-registration-2.png rename to static/img/new-app-registration-2.png diff --git a/static/img/rancher/new-app-registration.png b/static/img/new-app-registration.png similarity index 100% rename from static/img/rancher/new-app-registration.png rename to static/img/new-app-registration.png diff --git a/static/img/rancher/no-ingress.png b/static/img/no-ingress.png similarity index 100% rename from static/img/rancher/no-ingress.png rename to static/img/no-ingress.png diff --git a/static/img/rancher/node-drain.png b/static/img/node-drain.png similarity index 100% rename from static/img/rancher/node-drain.png rename to static/img/node-drain.png diff --git a/static/img/rancher/node-edit.png b/static/img/node-edit.png similarity index 100% rename from static/img/rancher/node-edit.png rename to static/img/node-edit.png diff --git a/src/img/rancher/node-schedule-advanced-options.png b/static/img/node-schedule-advanced-options.png similarity index 100% rename from src/img/rancher/node-schedule-advanced-options.png rename to static/img/node-schedule-advanced-options.png diff --git a/src/img/rancher/node-schedule-antiaffinity.png b/static/img/node-schedule-antiaffinity.png similarity index 100% rename from src/img/rancher/node-schedule-antiaffinity.png rename to static/img/node-schedule-antiaffinity.png diff --git a/src/img/rancher/node-scheduling-affinity.png b/static/img/node-scheduling-affinity.png similarity index 100% rename from src/img/rancher/node-scheduling-affinity.png rename to static/img/node-scheduling-affinity.png diff --git a/src/img/rancher/node-scheduling-labels.png b/static/img/node-scheduling-labels.png similarity index 100% rename from src/img/rancher/node-scheduling-labels.png rename to static/img/node-scheduling-labels.png diff --git a/src/img/rancher/node-scheduling.png b/static/img/node-scheduling.png similarity index 100% rename from src/img/rancher/node-scheduling.png rename to static/img/node-scheduling.png diff --git a/static/img/rancher/node-template-engine-options-rke1.png b/static/img/node-template-engine-options-rke1.png similarity index 100% rename from static/img/rancher/node-template-engine-options-rke1.png rename to static/img/node-template-engine-options-rke1.png diff --git a/src/img/rancher/nodePort.svg b/static/img/nodePort.svg similarity index 100% rename from src/img/rancher/nodePort.svg rename to static/img/nodePort.svg diff --git 
a/static/img/rancher/nodeport-dropdown.png b/static/img/nodeport-dropdown.png similarity index 100% rename from static/img/rancher/nodeport-dropdown.png rename to static/img/nodeport-dropdown.png diff --git a/src/img/rancher/one-six-schedule.png b/static/img/one-six-schedule.png similarity index 100% rename from src/img/rancher/one-six-schedule.png rename to static/img/one-six-schedule.png diff --git a/static/img/rancher/open-rancher-app.png b/static/img/open-rancher-app.png similarity index 100% rename from static/img/rancher/open-rancher-app.png rename to static/img/open-rancher-app.png diff --git a/static/img/os/.DS_Store b/static/img/os/.DS_Store deleted file mode 100644 index 5008ddfcf5..0000000000 Binary files a/static/img/os/.DS_Store and /dev/null differ diff --git a/static/img/os/RancherOS_aliyun1.jpg b/static/img/os/RancherOS_aliyun1.jpg deleted file mode 100644 index 7cc8c5c0d9..0000000000 Binary files a/static/img/os/RancherOS_aliyun1.jpg and /dev/null differ diff --git a/static/img/os/RancherOS_aliyun2.jpg b/static/img/os/RancherOS_aliyun2.jpg deleted file mode 100644 index cd84c1a7b8..0000000000 Binary files a/static/img/os/RancherOS_aliyun2.jpg and /dev/null differ diff --git a/static/img/os/Rancher_aws7.png b/static/img/os/Rancher_aws7.png deleted file mode 100644 index 3aedfed870..0000000000 Binary files a/static/img/os/Rancher_aws7.png and /dev/null differ diff --git a/static/img/os/Rancher_aws8.png b/static/img/os/Rancher_aws8.png deleted file mode 100644 index 77a52a2452..0000000000 Binary files a/static/img/os/Rancher_aws8.png and /dev/null differ diff --git a/static/img/os/Rancher_aws9.png b/static/img/os/Rancher_aws9.png deleted file mode 100644 index 02698e85d6..0000000000 Binary files a/static/img/os/Rancher_aws9.png and /dev/null differ diff --git a/static/img/os/Rancher_disk1.png b/static/img/os/Rancher_disk1.png deleted file mode 100644 index 9a95220076..0000000000 Binary files a/static/img/os/Rancher_disk1.png and /dev/null differ diff --git a/static/img/os/Rancher_disk2.png b/static/img/os/Rancher_disk2.png deleted file mode 100644 index cd9bd37ae9..0000000000 Binary files a/static/img/os/Rancher_disk2.png and /dev/null differ diff --git a/static/img/os/Rancher_disk3.png b/static/img/os/Rancher_disk3.png deleted file mode 100644 index 8314d0546c..0000000000 Binary files a/static/img/os/Rancher_disk3.png and /dev/null differ diff --git a/static/img/os/Rancher_gce1.png b/static/img/os/Rancher_gce1.png deleted file mode 100644 index 2d73ba7f59..0000000000 Binary files a/static/img/os/Rancher_gce1.png and /dev/null differ diff --git a/static/img/os/Rancher_gce4.png b/static/img/os/Rancher_gce4.png deleted file mode 100644 index bd47979b57..0000000000 Binary files a/static/img/os/Rancher_gce4.png and /dev/null differ diff --git a/static/img/os/Rancher_gce5.png b/static/img/os/Rancher_gce5.png deleted file mode 100644 index 2160f99e4d..0000000000 Binary files a/static/img/os/Rancher_gce5.png and /dev/null differ diff --git a/static/img/os/Rancher_gce6.png b/static/img/os/Rancher_gce6.png deleted file mode 100644 index 92e5327731..0000000000 Binary files a/static/img/os/Rancher_gce6.png and /dev/null differ diff --git a/static/img/os/Rancher_gce7.png b/static/img/os/Rancher_gce7.png deleted file mode 100644 index 226d94153a..0000000000 Binary files a/static/img/os/Rancher_gce7.png and /dev/null differ diff --git a/static/img/os/Rancher_gce8.png b/static/img/os/Rancher_gce8.png deleted file mode 100644 index 1b83dd494f..0000000000 Binary files 
a/static/img/os/Rancher_gce8.png and /dev/null differ diff --git a/static/img/os/Rancher_gce9.png b/static/img/os/Rancher_gce9.png deleted file mode 100644 index 5e72ba7bb9..0000000000 Binary files a/static/img/os/Rancher_gce9.png and /dev/null differ diff --git a/static/img/os/Rancher_iso1.png b/static/img/os/Rancher_iso1.png deleted file mode 100644 index 47b5f3e41c..0000000000 Binary files a/static/img/os/Rancher_iso1.png and /dev/null differ diff --git a/static/img/os/Rancher_iso2.png b/static/img/os/Rancher_iso2.png deleted file mode 100644 index 071af69c17..0000000000 Binary files a/static/img/os/Rancher_iso2.png and /dev/null differ diff --git a/static/img/os/Rancher_iso3.png b/static/img/os/Rancher_iso3.png deleted file mode 100644 index 8a38e8209c..0000000000 Binary files a/static/img/os/Rancher_iso3.png and /dev/null differ diff --git a/static/img/os/Rancher_iso4.png b/static/img/os/Rancher_iso4.png deleted file mode 100644 index 3ad6727057..0000000000 Binary files a/static/img/os/Rancher_iso4.png and /dev/null differ diff --git a/static/img/os/Rancher_iso5.png b/static/img/os/Rancher_iso5.png deleted file mode 100644 index ee6136aed6..0000000000 Binary files a/static/img/os/Rancher_iso5.png and /dev/null differ diff --git a/static/img/os/Rancher_iso6.png b/static/img/os/Rancher_iso6.png deleted file mode 100644 index df3e9abae2..0000000000 Binary files a/static/img/os/Rancher_iso6.png and /dev/null differ diff --git a/static/img/os/Rancher_iso7.png b/static/img/os/Rancher_iso7.png deleted file mode 100644 index b86f083eb1..0000000000 Binary files a/static/img/os/Rancher_iso7.png and /dev/null differ diff --git a/static/img/os/Rancher_iso8.png b/static/img/os/Rancher_iso8.png deleted file mode 100644 index 5259bd8b62..0000000000 Binary files a/static/img/os/Rancher_iso8.png and /dev/null differ diff --git a/static/img/os/Rancher_platform1.png b/static/img/os/Rancher_platform1.png deleted file mode 100644 index 1901cb45ba..0000000000 Binary files a/static/img/os/Rancher_platform1.png and /dev/null differ diff --git a/static/img/os/Rancher_platform2.png b/static/img/os/Rancher_platform2.png deleted file mode 100644 index 02cdc5cba5..0000000000 Binary files a/static/img/os/Rancher_platform2.png and /dev/null differ diff --git a/static/img/os/cloud-config.png b/static/img/os/cloud-config.png deleted file mode 100644 index f5dbe6cb5c..0000000000 Binary files a/static/img/os/cloud-config.png and /dev/null differ diff --git a/src/img/rancher/output-dot-text.png b/static/img/output-dot-text.png similarity index 100% rename from src/img/rancher/output-dot-text.png rename to static/img/output-dot-text.png diff --git a/static/img/rancher/permissions.png b/static/img/permissions.png similarity index 100% rename from static/img/rancher/permissions.png rename to static/img/permissions.png diff --git a/static/img/rancher/persistent-volume.png b/static/img/persistent-volume.png similarity index 100% rename from static/img/rancher/persistent-volume.png rename to static/img/persistent-volume.png diff --git a/static/img/rancher/placeholder-ref-2.png b/static/img/placeholder-ref-2.png similarity index 100% rename from static/img/rancher/placeholder-ref-2.png rename to static/img/placeholder-ref-2.png diff --git a/static/img/rancher/placeholder-ref.png b/static/img/placeholder-ref.png similarity index 100% rename from static/img/rancher/placeholder-ref.png rename to static/img/placeholder-ref.png diff --git a/static/img/rancher/platform.png b/static/img/platform.png similarity index 100% rename from 
static/img/rancher/platform.png rename to static/img/platform.png diff --git a/static/img/rancher/port-communications.svg b/static/img/port-communications.svg similarity index 100% rename from static/img/rancher/port-communications.svg rename to static/img/port-communications.svg diff --git a/static/img/rancher/principal-ID.png b/static/img/principal-ID.png similarity index 100% rename from static/img/rancher/principal-ID.png rename to static/img/principal-ID.png diff --git a/src/img/rancher/probes.svg b/static/img/probes.svg similarity index 100% rename from src/img/rancher/probes.svg rename to static/img/probes.svg diff --git a/static/img/rancher/prometheus-graph-ui.png b/static/img/prometheus-graph-ui.png similarity index 100% rename from static/img/rancher/prometheus-graph-ui.png rename to static/img/prometheus-graph-ui.png diff --git a/static/img/rancher/prometheus-rules-ui.png b/static/img/prometheus-rules-ui.png similarity index 100% rename from static/img/rancher/prometheus-rules-ui.png rename to static/img/prometheus-rules-ui.png diff --git a/static/img/rancher/prometheus-targets-ui.png b/static/img/prometheus-targets-ui.png similarity index 100% rename from static/img/rancher/prometheus-targets-ui.png rename to static/img/prometheus-targets-ui.png diff --git a/static/img/rancher/pushprox-process.svg b/static/img/pushprox-process.svg similarity index 100% rename from static/img/rancher/pushprox-process.svg rename to static/img/pushprox-process.svg diff --git a/static/img/rancher/questions.png b/static/img/questions.png similarity index 100% rename from static/img/rancher/questions.png rename to static/img/questions.png diff --git a/static/img/rancher/rancher-app-2.6.png b/static/img/rancher-app-2.6.png similarity index 100% rename from static/img/rancher/rancher-app-2.6.png rename to static/img/rancher-app-2.6.png diff --git a/static/img/rancher/rancher-architecture-cluster-controller.svg b/static/img/rancher-architecture-cluster-controller.svg similarity index 100% rename from static/img/rancher/rancher-architecture-cluster-controller.svg rename to static/img/rancher-architecture-cluster-controller.svg diff --git a/static/img/rancher/rancher-architecture-node-roles.svg b/static/img/rancher-architecture-node-roles.svg similarity index 100% rename from static/img/rancher/rancher-architecture-node-roles.svg rename to static/img/rancher-architecture-node-roles.svg diff --git a/static/img/rancher/rancher-architecture-rancher-api-server.svg b/static/img/rancher-architecture-rancher-api-server.svg similarity index 100% rename from static/img/rancher/rancher-architecture-rancher-api-server.svg rename to static/img/rancher-architecture-rancher-api-server.svg diff --git a/static/img/rancher/rancher-architecture-rancher-components.svg b/static/img/rancher-architecture-rancher-components.svg similarity index 100% rename from static/img/rancher/rancher-architecture-rancher-components.svg rename to static/img/rancher-architecture-rancher-components.svg diff --git a/static/img/rancher/rancher-architecture-separation-of-rancher-server.svg b/static/img/rancher-architecture-separation-of-rancher-server.svg similarity index 100% rename from static/img/rancher/rancher-architecture-separation-of-rancher-server.svg rename to static/img/rancher-architecture-separation-of-rancher-server.svg diff --git a/static/img/rancher/rancher-architecture.svg b/static/img/rancher-architecture.svg similarity index 100% rename from static/img/rancher/rancher-architecture.svg rename to 
static/img/rancher-architecture.svg diff --git a/static/img/rancher-logo-horiz-blue.svg b/static/img/rancher-logo-horiz-blue.svg deleted file mode 100644 index 1820fb5174..0000000000 --- a/static/img/rancher-logo-horiz-blue.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/rancher-logo-horiz-color.svg b/static/img/rancher-logo-horiz-color.svg similarity index 100% rename from static/imgs/rancher-logo-horiz-color.svg rename to static/img/rancher-logo-horiz-color.svg diff --git a/static/img/rancher-logo-horiz-white.svg b/static/img/rancher-logo-horiz-white.svg deleted file mode 100644 index 9d1d4c9698..0000000000 --- a/static/img/rancher-logo-horiz-white.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/img/rancher/rancher-on-prem-vsphere.svg b/static/img/rancher-on-prem-vsphere.svg similarity index 100% rename from static/img/rancher/rancher-on-prem-vsphere.svg rename to static/img/rancher-on-prem-vsphere.svg diff --git a/static/img/rancher/rancher-resource-quota-override.svg b/static/img/rancher-resource-quota-override.svg similarity index 100% rename from static/img/rancher/rancher-resource-quota-override.svg rename to static/img/rancher-resource-quota-override.svg diff --git a/static/img/rancher/rancher-resource-quota.png b/static/img/rancher-resource-quota.png similarity index 100% rename from static/img/rancher/rancher-resource-quota.png rename to static/img/rancher-resource-quota.png diff --git a/static/img/rancher/rancher-storage.svg b/static/img/rancher-storage.svg similarity index 100% rename from static/img/rancher/rancher-storage.svg rename to static/img/rancher-storage.svg diff --git a/static/img/rancher/rancher-version.png b/static/img/rancher-version.png similarity index 100% rename from static/img/rancher/rancher-version.png rename to static/img/rancher-version.png diff --git a/static/img/rancher/add-custom-metrics.gif b/static/img/rancher/add-custom-metrics.gif deleted file mode 100644 index 9c6405a343..0000000000 Binary files a/static/img/rancher/add-custom-metrics.gif and /dev/null differ diff --git a/static/img/rancher/add-ingress-form.png b/static/img/rancher/add-ingress-form.png deleted file mode 100644 index 405ff3abf1..0000000000 Binary files a/static/img/rancher/add-ingress-form.png and /dev/null differ diff --git a/static/img/rancher/add-ingress.gif b/static/img/rancher/add-ingress.gif deleted file mode 100644 index b9a3f449d5..0000000000 Binary files a/static/img/rancher/add-ingress.gif and /dev/null differ diff --git a/static/img/rancher/add-node-label.gif b/static/img/rancher/add-node-label.gif deleted file mode 100644 index 9c41e77406..0000000000 Binary files a/static/img/rancher/add-node-label.gif and /dev/null differ diff --git a/static/img/rancher/add-pod-label.gif b/static/img/rancher/add-pod-label.gif deleted file mode 100644 index b78da3ce7c..0000000000 Binary files a/static/img/rancher/add-pod-label.gif and /dev/null differ diff --git a/static/img/rancher/add-record.png b/static/img/rancher/add-record.png deleted file mode 100644 index 8838a5ea6f..0000000000 Binary files a/static/img/rancher/add-record.png and /dev/null differ diff --git a/static/img/rancher/auth-providers.svg b/static/img/rancher/auth-providers.svg deleted file mode 100644 index 8b53323d25..0000000000 --- a/static/img/rancher/auth-providers.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Rancher Authentication Proxy; Authentication Providers]
    \ No newline at end of file diff --git a/static/img/rancher/cattle-load-balancer.svg b/static/img/rancher/cattle-load-balancer.svg deleted file mode 100644 index 70db25baa0..0000000000 --- a/static/img/rancher/cattle-load-balancer.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Cattle Environment; Hosts 1-4 running haproxy, web, and chat containers; Load Balancer 1 resolves to Host 1 and Host 2 IPs on port 80; Load Balancer 2 resolves to Host 3 and Host 4 IPs on port 80; web.com/login; chat.com/login]
    \ No newline at end of file diff --git a/static/img/rancher/deploy-service.gif b/static/img/rancher/deploy-service.gif deleted file mode 100644 index bf97d1690e..0000000000 Binary files a/static/img/rancher/deploy-service.gif and /dev/null differ diff --git a/static/img/rancher/deploy-workload-hostport.png b/static/img/rancher/deploy-workload-hostport.png deleted file mode 100644 index ec6193df3c..0000000000 Binary files a/static/img/rancher/deploy-workload-hostport.png and /dev/null differ diff --git a/static/img/rancher/deploy-workload-load-balancer.png b/static/img/rancher/deploy-workload-load-balancer.png deleted file mode 100644 index 4751b599a2..0000000000 Binary files a/static/img/rancher/deploy-workload-load-balancer.png and /dev/null differ diff --git a/static/img/rancher/deploy-workload-nodeport.png b/static/img/rancher/deploy-workload-nodeport.png deleted file mode 100644 index d1cfa67e35..0000000000 Binary files a/static/img/rancher/deploy-workload-nodeport.png and /dev/null differ diff --git a/static/img/rancher/edit-migration-workload.gif b/static/img/rancher/edit-migration-workload.gif deleted file mode 100644 index f9510b8ff9..0000000000 Binary files a/static/img/rancher/edit-migration-workload.gif and /dev/null differ diff --git a/static/img/rancher/enable-cluster-monitoring.gif b/static/img/rancher/enable-cluster-monitoring.gif deleted file mode 100644 index baef3cc248..0000000000 Binary files a/static/img/rancher/enable-cluster-monitoring.gif and /dev/null differ diff --git a/static/img/rancher/enable-project-monitoring.gif b/static/img/rancher/enable-project-monitoring.gif deleted file mode 100644 index f44c67eb8f..0000000000 Binary files a/static/img/rancher/enable-project-monitoring.gif and /dev/null differ diff --git a/static/img/rancher/health-check-section.png b/static/img/rancher/health-check-section.png deleted file mode 100644 index 4a4bfafe12..0000000000 Binary files a/static/img/rancher/health-check-section.png and /dev/null differ diff --git a/static/img/rancher/healthcheck-cmd-exec.png b/static/img/rancher/healthcheck-cmd-exec.png deleted file mode 100644 index 06b6b22ab6..0000000000 Binary files a/static/img/rancher/healthcheck-cmd-exec.png and /dev/null differ diff --git a/static/img/rancher/healthcheck.svg b/static/img/rancher/healthcheck.svg deleted file mode 100644 index 55b573e578..0000000000 --- a/static/img/rancher/healthcheck.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Rancher v1.6 Stack; Nodes running Nginx and the Healthcheck Microservice; 1. the healthcheck microservice checks for an open port (TCP) or makes a GET request (HTTP) across hosts to the monitored container; 2. the monitored container responds to the check (success) or does not respond (failure)]
    \ No newline at end of file diff --git a/static/img/rancher/hostPort.svg b/static/img/rancher/hostPort.svg deleted file mode 100644 index 4e73ab4ae0..0000000000 --- a/static/img/rancher/hostPort.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Kubernetes Cluster; Nodes; Nginx on port 80 exposed with hostPort 9890; External Client reaching <Node_IP>:9890]
    \ No newline at end of file diff --git a/static/img/rancher/import-yaml-error.png b/static/img/rancher/import-yaml-error.png deleted file mode 100644 index 8af7a0878c..0000000000 Binary files a/static/img/rancher/import-yaml-error.png and /dev/null differ diff --git a/static/img/rancher/imported-workloads.png b/static/img/rancher/imported-workloads.png deleted file mode 100644 index 75142fd051..0000000000 Binary files a/static/img/rancher/imported-workloads.png and /dev/null differ diff --git a/static/img/rancher/kubernetes-load-balancer.svg b/static/img/rancher/kubernetes-load-balancer.svg deleted file mode 100644 index bf9de1a398..0000000000 --- a/static/img/rancher/kubernetes-load-balancer.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Kubernetes Cluster; Nodes 1-4 each running an Ingress Controller with web and chat containers; Nginx Global Load Balancer resolves web.com/login and chat.com/login to Node 1-4 IPs on port 80]
    \ No newline at end of file diff --git a/static/img/rancher/layer-4-lb-config-map.png b/static/img/rancher/layer-4-lb-config-map.png deleted file mode 100644 index cf5c9dc168..0000000000 Binary files a/static/img/rancher/layer-4-lb-config-map.png and /dev/null differ diff --git a/static/img/rancher/linked-service-workload.png b/static/img/rancher/linked-service-workload.png deleted file mode 100644 index e0a1da0981..0000000000 Binary files a/static/img/rancher/linked-service-workload.png and /dev/null differ diff --git a/static/img/rancher/liveness-check.png b/static/img/rancher/liveness-check.png deleted file mode 100644 index e88cb297aa..0000000000 Binary files a/static/img/rancher/liveness-check.png and /dev/null differ diff --git a/static/img/rancher/load-balancer-links.png b/static/img/rancher/load-balancer-links.png deleted file mode 100644 index 5121abd079..0000000000 Binary files a/static/img/rancher/load-balancer-links.png and /dev/null differ diff --git a/static/img/rancher/load-balancer-ssl-certs.png b/static/img/rancher/load-balancer-ssl-certs.png deleted file mode 100644 index 246ffd618f..0000000000 Binary files a/static/img/rancher/load-balancer-ssl-certs.png and /dev/null differ diff --git a/static/img/rancher/migrate-schedule-workloads.png b/static/img/rancher/migrate-schedule-workloads.png deleted file mode 100644 index c6ab638ac9..0000000000 Binary files a/static/img/rancher/migrate-schedule-workloads.png and /dev/null differ diff --git a/static/img/rancher/node-schedule-advanced-options.png b/static/img/rancher/node-schedule-advanced-options.png deleted file mode 100644 index 1d83edc767..0000000000 Binary files a/static/img/rancher/node-schedule-advanced-options.png and /dev/null differ diff --git a/static/img/rancher/node-schedule-antiaffinity.png b/static/img/rancher/node-schedule-antiaffinity.png deleted file mode 100644 index 74bd0455b5..0000000000 Binary files a/static/img/rancher/node-schedule-antiaffinity.png and /dev/null differ diff --git a/static/img/rancher/node-scheduling-affinity.png b/static/img/rancher/node-scheduling-affinity.png deleted file mode 100644 index 28d4490823..0000000000 Binary files a/static/img/rancher/node-scheduling-affinity.png and /dev/null differ diff --git a/static/img/rancher/node-scheduling-labels.png b/static/img/rancher/node-scheduling-labels.png deleted file mode 100644 index 4e1a634e74..0000000000 Binary files a/static/img/rancher/node-scheduling-labels.png and /dev/null differ diff --git a/static/img/rancher/node-scheduling.png b/static/img/rancher/node-scheduling.png deleted file mode 100644 index 953208144c..0000000000 Binary files a/static/img/rancher/node-scheduling.png and /dev/null differ diff --git a/static/img/rancher/nodePort.svg b/static/img/rancher/nodePort.svg deleted file mode 100644 index 05508617e6..0000000000 --- a/static/img/rancher/nodePort.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Kubernetes Cluster; External Client reaching <NODE_IP>:30216; kube-proxy on each node exposing a nodePort Service on port 30216 that forwards to Nginx on port 80]
    \ No newline at end of file diff --git a/static/img/rancher/one-six-schedule.png b/static/img/rancher/one-six-schedule.png deleted file mode 100644 index 5bc05d915f..0000000000 Binary files a/static/img/rancher/one-six-schedule.png and /dev/null differ diff --git a/static/img/rancher/output-dot-text.png b/static/img/rancher/output-dot-text.png deleted file mode 100644 index ca39b2867b..0000000000 Binary files a/static/img/rancher/output-dot-text.png and /dev/null differ diff --git a/static/img/rancher/probes.svg b/static/img/rancher/probes.svg deleted file mode 100644 index 007abfda6c..0000000000 --- a/static/img/rancher/probes.svg +++ /dev/null @@ -1,2 +0,0 @@ - -
    [deleted diagram labels: Rancher v2.0 Kubernetes Cluster; Nodes running Nginx and kubelet; 1. the kubelet runs a liveness probe on a running pod, which responds (success) or does not (failure); 2. the kubelet runs a readiness probe on a pod that is restarting, finds it busy, and Kubernetes does not send it any requests]
    \ No newline at end of file diff --git a/static/img/rancher/readiness-check-http.png b/static/img/rancher/readiness-check-http.png deleted file mode 100644 index 1b2b19c2a7..0000000000 Binary files a/static/img/rancher/readiness-check-http.png and /dev/null differ diff --git a/static/img/rancher/readiness-check-tcp.png b/static/img/rancher/readiness-check-tcp.png deleted file mode 100644 index 0ba9869eb7..0000000000 Binary files a/static/img/rancher/readiness-check-tcp.png and /dev/null differ diff --git a/static/img/rancher/readiness-check.png b/static/img/rancher/readiness-check.png deleted file mode 100644 index f978079aff..0000000000 Binary files a/static/img/rancher/readiness-check.png and /dev/null differ diff --git a/static/img/rancher/resolve-affinity.png b/static/img/rancher/resolve-affinity.png deleted file mode 100644 index d705a2c4fd..0000000000 Binary files a/static/img/rancher/resolve-affinity.png and /dev/null differ diff --git a/static/img/rancher/resolve-global.png b/static/img/rancher/resolve-global.png deleted file mode 100644 index 583c500b8f..0000000000 Binary files a/static/img/rancher/resolve-global.png and /dev/null differ diff --git a/static/img/rancher/resolve-health-checks.png b/static/img/rancher/resolve-health-checks.png deleted file mode 100644 index 3b7bfe282d..0000000000 Binary files a/static/img/rancher/resolve-health-checks.png and /dev/null differ diff --git a/static/img/rancher/resolve-links.png b/static/img/rancher/resolve-links.png deleted file mode 100644 index 1f0544268f..0000000000 Binary files a/static/img/rancher/resolve-links.png and /dev/null differ diff --git a/static/img/rancher/resolve-load-balancer.png b/static/img/rancher/resolve-load-balancer.png deleted file mode 100644 index a03951098c..0000000000 Binary files a/static/img/rancher/resolve-load-balancer.png and /dev/null differ diff --git a/static/img/rancher/resolve-ports.png b/static/img/rancher/resolve-ports.png deleted file mode 100644 index 0383ff5f7d..0000000000 Binary files a/static/img/rancher/resolve-ports.png and /dev/null differ diff --git a/static/img/rancher/resolve-pull-image.png b/static/img/rancher/resolve-pull-image.png deleted file mode 100644 index a822469d79..0000000000 Binary files a/static/img/rancher/resolve-pull-image.png and /dev/null differ diff --git a/static/img/rancher/resolve-scale.png b/static/img/rancher/resolve-scale.png deleted file mode 100644 index 5d36dec666..0000000000 Binary files a/static/img/rancher/resolve-scale.png and /dev/null differ diff --git a/static/img/rancher/resource-constraint-settings.png b/static/img/rancher/resource-constraint-settings.png deleted file mode 100644 index 68bf73cfc5..0000000000 Binary files a/static/img/rancher/resource-constraint-settings.png and /dev/null differ diff --git a/static/img/rancher/schedule-specific-node.png b/static/img/rancher/schedule-specific-node.png deleted file mode 100644 index 211bd90a19..0000000000 Binary files a/static/img/rancher/schedule-specific-node.png and /dev/null differ diff --git a/static/img/rancher/scheduled-nodes.png b/static/img/rancher/scheduled-nodes.png deleted file mode 100644 index 14807de68f..0000000000 Binary files a/static/img/rancher/scheduled-nodes.png and /dev/null differ diff --git a/static/img/rancher/separate-check.png b/static/img/rancher/separate-check.png deleted file mode 100644 index d094073c02..0000000000 Binary files a/static/img/rancher/separate-check.png and /dev/null differ diff --git a/static/img/rancher/view-edit-yaml.png 
b/static/img/rancher/view-edit-yaml.png deleted file mode 100644 index 36574ffa61..0000000000 Binary files a/static/img/rancher/view-edit-yaml.png and /dev/null differ diff --git a/static/img/rancher/workload-scale.png b/static/img/rancher/workload-scale.png deleted file mode 100644 index f8aa87a6d5..0000000000 Binary files a/static/img/rancher/workload-scale.png and /dev/null differ diff --git a/static/img/rancher/workload-type-option.png b/static/img/rancher/workload-type-option.png deleted file mode 100644 index 02c74e29a6..0000000000 Binary files a/static/img/rancher/workload-type-option.png and /dev/null differ diff --git a/static/img/rancher/workload-type.png b/static/img/rancher/workload-type.png deleted file mode 100644 index cfa3493381..0000000000 Binary files a/static/img/rancher/workload-type.png and /dev/null differ diff --git a/assets/img/rancher/rancher_overview.png b/static/img/rancher_overview.png similarity index 100% rename from assets/img/rancher/rancher_overview.png rename to static/img/rancher_overview.png diff --git a/assets/img/rancher/rancher_overview_2.png b/static/img/rancher_overview_2.png similarity index 100% rename from assets/img/rancher/rancher_overview_2.png rename to static/img/rancher_overview_2.png diff --git a/static/img/rancher/ranchercomponentsdiagram-2.6.svg b/static/img/ranchercomponentsdiagram-2.6.svg similarity index 100% rename from static/img/rancher/ranchercomponentsdiagram-2.6.svg rename to static/img/ranchercomponentsdiagram-2.6.svg diff --git a/static/img/rancher/ranchercomponentsdiagram.svg b/static/img/ranchercomponentsdiagram.svg similarity index 100% rename from static/img/rancher/ranchercomponentsdiagram.svg rename to static/img/ranchercomponentsdiagram.svg diff --git a/assets/img/rancher/rancherroles1.png b/static/img/rancherroles1.png similarity index 100% rename from assets/img/rancher/rancherroles1.png rename to static/img/rancherroles1.png diff --git a/static/img/rancher/rancherroles2.png b/static/img/rancherroles2.png similarity index 100% rename from static/img/rancher/rancherroles2.png rename to static/img/rancherroles2.png diff --git a/assets/img/rancher/rancheruser.png b/static/img/rancheruser.png similarity index 100% rename from assets/img/rancher/rancheruser.png rename to static/img/rancheruser.png diff --git a/src/img/rancher/readiness-check-http.png b/static/img/readiness-check-http.png similarity index 100% rename from src/img/rancher/readiness-check-http.png rename to static/img/readiness-check-http.png diff --git a/src/img/rancher/readiness-check-tcp.png b/static/img/readiness-check-tcp.png similarity index 100% rename from src/img/rancher/readiness-check-tcp.png rename to static/img/readiness-check-tcp.png diff --git a/src/img/rancher/readiness-check.png b/static/img/readiness-check.png similarity index 100% rename from src/img/rancher/readiness-check.png rename to static/img/readiness-check.png diff --git a/src/img/rancher/resolve-affinity.png b/static/img/resolve-affinity.png similarity index 100% rename from src/img/rancher/resolve-affinity.png rename to static/img/resolve-affinity.png diff --git a/src/img/rancher/resolve-global.png b/static/img/resolve-global.png similarity index 100% rename from src/img/rancher/resolve-global.png rename to static/img/resolve-global.png diff --git a/src/img/rancher/resolve-health-checks.png b/static/img/resolve-health-checks.png similarity index 100% rename from src/img/rancher/resolve-health-checks.png rename to static/img/resolve-health-checks.png diff --git 
a/src/img/rancher/resolve-links.png b/static/img/resolve-links.png similarity index 100% rename from src/img/rancher/resolve-links.png rename to static/img/resolve-links.png diff --git a/src/img/rancher/resolve-load-balancer.png b/static/img/resolve-load-balancer.png similarity index 100% rename from src/img/rancher/resolve-load-balancer.png rename to static/img/resolve-load-balancer.png diff --git a/src/img/rancher/resolve-ports.png b/static/img/resolve-ports.png similarity index 100% rename from src/img/rancher/resolve-ports.png rename to static/img/resolve-ports.png diff --git a/src/img/rancher/resolve-pull-image.png b/static/img/resolve-pull-image.png similarity index 100% rename from src/img/rancher/resolve-pull-image.png rename to static/img/resolve-pull-image.png diff --git a/src/img/rancher/resolve-scale.png b/static/img/resolve-scale.png similarity index 100% rename from src/img/rancher/resolve-scale.png rename to static/img/resolve-scale.png diff --git a/src/img/rancher/resource-constraint-settings.png b/static/img/resource-constraint-settings.png similarity index 100% rename from src/img/rancher/resource-constraint-settings.png rename to static/img/resource-constraint-settings.png diff --git a/static/img/rancher/rke-etcd-backup.png b/static/img/rke-etcd-backup.png similarity index 100% rename from static/img/rancher/rke-etcd-backup.png rename to static/img/rke-etcd-backup.png diff --git a/static/img/rancher/rke-server-storage.svg b/static/img/rke-server-storage.svg similarity index 100% rename from static/img/rancher/rke-server-storage.svg rename to static/img/rke-server-storage.svg diff --git a/src/img/rancher/schedule-specific-node.png b/static/img/schedule-specific-node.png similarity index 100% rename from src/img/rancher/schedule-specific-node.png rename to static/img/schedule-specific-node.png diff --git a/src/img/rancher/scheduled-nodes.png b/static/img/scheduled-nodes.png similarity index 100% rename from src/img/rancher/scheduled-nodes.png rename to static/img/scheduled-nodes.png diff --git a/static/img/rancher/search-app-registrations.png b/static/img/search-app-registrations.png similarity index 100% rename from static/img/rancher/search-app-registrations.png rename to static/img/search-app-registrations.png diff --git a/static/img/rancher/search-azure-ad.png b/static/img/search-azure-ad.png similarity index 100% rename from static/img/rancher/search-azure-ad.png rename to static/img/search-azure-ad.png diff --git a/static/img/rancher/search-enterprise-applications.png b/static/img/search-enterprise-applications.png similarity index 100% rename from static/img/rancher/search-enterprise-applications.png rename to static/img/search-enterprise-applications.png diff --git a/static/img/rancher/select-client-secret.png b/static/img/select-client-secret.png similarity index 100% rename from static/img/rancher/select-client-secret.png rename to static/img/select-client-secret.png diff --git a/static/img/rancher/select-required-permissions-1.png b/static/img/select-required-permissions-1.png similarity index 100% rename from static/img/rancher/select-required-permissions-1.png rename to static/img/select-required-permissions-1.png diff --git a/static/img/rancher/select-required-permissions-2.png b/static/img/select-required-permissions-2.png similarity index 100% rename from static/img/rancher/select-required-permissions-2.png rename to static/img/select-required-permissions-2.png diff --git a/static/img/rancher/select-required-permissions.png 
b/static/img/select-required-permissions.png similarity index 100% rename from static/img/rancher/select-required-permissions.png rename to static/img/select-required-permissions.png diff --git a/src/img/rancher/separate-check.png b/static/img/separate-check.png similarity index 100% rename from src/img/rancher/separate-check.png rename to static/img/separate-check.png diff --git a/assets/img/rancher/set-hostport.gif b/static/img/set-hostport.gif similarity index 100% rename from assets/img/rancher/set-hostport.gif rename to static/img/set-hostport.gif diff --git a/assets/img/rancher/set-nodeport.gif b/static/img/set-nodeport.gif similarity index 100% rename from assets/img/rancher/set-nodeport.gif rename to static/img/set-nodeport.gif diff --git a/static/img/rancher/set-up-scraping.svg b/static/img/set-up-scraping.svg similarity index 100% rename from static/img/rancher/set-up-scraping.svg rename to static/img/set-up-scraping.svg diff --git a/static/img/rancher/shibboleth-with-openldap-groups.svg b/static/img/shibboleth-with-openldap-groups.svg similarity index 100% rename from static/img/rancher/shibboleth-with-openldap-groups.svg rename to static/img/shibboleth-with-openldap-groups.svg diff --git a/static/img/rancher/sign-in-external.png b/static/img/sign-in-external.png similarity index 100% rename from static/img/rancher/sign-in-external.png rename to static/img/sign-in-external.png diff --git a/static/img/rancher/sign-in.png b/static/img/sign-in.png similarity index 100% rename from static/img/rancher/sign-in.png rename to static/img/sign-in.png diff --git a/static/img/rancher/sign-out-local.png b/static/img/sign-out-local.png similarity index 100% rename from static/img/rancher/sign-out-local.png rename to static/img/sign-out-local.png diff --git a/static/img/rancher/solution_overview.drawio.svg b/static/img/solution_overview.drawio.svg similarity index 100% rename from static/img/rancher/solution_overview.drawio.svg rename to static/img/solution_overview.drawio.svg diff --git a/static/img/rancher/splunk/splunk1.jpg b/static/img/splunk/splunk1.jpg similarity index 100% rename from static/img/rancher/splunk/splunk1.jpg rename to static/img/splunk/splunk1.jpg diff --git a/static/img/rancher/splunk/splunk2.jpg b/static/img/splunk/splunk2.jpg old mode 100755 new mode 100644 similarity index 100% rename from static/img/rancher/splunk/splunk2.jpg rename to static/img/splunk/splunk2.jpg diff --git a/static/img/rancher/splunk/splunk3.jpg b/static/img/splunk/splunk3.jpg similarity index 100% rename from static/img/rancher/splunk/splunk3.jpg rename to static/img/splunk/splunk3.jpg diff --git a/static/img/rancher/splunk/splunk4.jpg b/static/img/splunk/splunk4.jpg similarity index 100% rename from static/img/rancher/splunk/splunk4.jpg rename to static/img/splunk/splunk4.jpg diff --git a/static/img/rancher/splunk/splunk5.jpg b/static/img/splunk/splunk5.jpg similarity index 100% rename from static/img/rancher/splunk/splunk5.jpg rename to static/img/splunk/splunk5.jpg diff --git a/static/img/rancher/stop-rancher-container.gif b/static/img/stop-rancher-container.gif similarity index 100% rename from static/img/rancher/stop-rancher-container.gif rename to static/img/stop-rancher-container.gif diff --git a/static/img/rancher/storage-classes.png b/static/img/storage-classes.png similarity index 100% rename from static/img/rancher/storage-classes.png rename to static/img/storage-classes.png diff --git a/static/img/rancher/unencapsulated-network.png b/static/img/unencapsulated-network.png similarity 
index 100% rename from static/img/rancher/unencapsulated-network.png rename to static/img/unencapsulated-network.png diff --git a/static/img/rancher/update-scrape-config.svg b/static/img/update-scrape-config.svg similarity index 100% rename from static/img/rancher/update-scrape-config.svg rename to static/img/update-scrape-config.svg diff --git a/static/img/rancher/user-settings.png b/static/img/user-settings.png similarity index 100% rename from static/img/rancher/user-settings.png rename to static/img/user-settings.png diff --git a/static/img/rancher/users-page.png b/static/img/users-page.png similarity index 100% rename from static/img/rancher/users-page.png rename to static/img/users-page.png diff --git a/src/img/rancher/view-edit-yaml.png b/static/img/view-edit-yaml.png similarity index 100% rename from src/img/rancher/view-edit-yaml.png rename to static/img/view-edit-yaml.png diff --git a/assets/img/rancher/vsphere-cluster-create-1.png b/static/img/vsphere-cluster-create-1.png similarity index 100% rename from assets/img/rancher/vsphere-cluster-create-1.png rename to static/img/vsphere-cluster-create-1.png diff --git a/assets/img/rancher/vsphere-node-driver-cloudprovider.png b/static/img/vsphere-node-driver-cloudprovider.png similarity index 100% rename from assets/img/rancher/vsphere-node-driver-cloudprovider.png rename to static/img/vsphere-node-driver-cloudprovider.png diff --git a/assets/img/rancher/vsphere-node-template-1.png b/static/img/vsphere-node-template-1.png similarity index 100% rename from assets/img/rancher/vsphere-node-template-1.png rename to static/img/vsphere-node-template-1.png diff --git a/assets/img/rancher/vsphere-node-template-2.png b/static/img/vsphere-node-template-2.png similarity index 100% rename from assets/img/rancher/vsphere-node-template-2.png rename to static/img/vsphere-node-template-2.png diff --git a/assets/img/rke/vsphere-nodedriver-enable-uuid.png b/static/img/vsphere-nodedriver-enable-uuid.png similarity index 100% rename from assets/img/rke/vsphere-nodedriver-enable-uuid.png rename to static/img/vsphere-nodedriver-enable-uuid.png diff --git a/assets/img/rancher/vsphere-storage-class.png b/static/img/vsphere-storage-class.png similarity index 100% rename from assets/img/rancher/vsphere-storage-class.png rename to static/img/vsphere-storage-class.png diff --git a/static/img/rancher/weave-logo.png b/static/img/weave-logo.png similarity index 100% rename from static/img/rancher/weave-logo.png rename to static/img/weave-logo.png diff --git a/assets/img/rancher/workload-add-volume.png b/static/img/workload-add-volume.png similarity index 100% rename from assets/img/rancher/workload-add-volume.png rename to static/img/workload-add-volume.png diff --git a/static/img/rancher/workload-persistent-data.png b/static/img/workload-persistent-data.png similarity index 100% rename from static/img/rancher/workload-persistent-data.png rename to static/img/workload-persistent-data.png diff --git a/src/img/rancher/workload-scale.png b/static/img/workload-scale.png similarity index 100% rename from src/img/rancher/workload-scale.png rename to static/img/workload-scale.png diff --git a/src/img/rancher/workload-type-option.png b/static/img/workload-type-option.png similarity index 100% rename from src/img/rancher/workload-type-option.png rename to static/img/workload-type-option.png diff --git a/src/img/rancher/workload-type.png b/static/img/workload-type.png similarity index 100% rename from src/img/rancher/workload-type.png rename to static/img/workload-type.png 
diff --git a/static/imgs/button-arrow.png b/static/imgs/button-arrow.png deleted file mode 100644 index 773985449f..0000000000 Binary files a/static/imgs/button-arrow.png and /dev/null differ diff --git a/static/imgs/docs/icon-document-for-note.svg b/static/imgs/docs/icon-document-for-note.svg deleted file mode 100644 index df18e009f3..0000000000 --- a/static/imgs/docs/icon-document-for-note.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/docs/icon-document.svg b/static/imgs/docs/icon-document.svg deleted file mode 100644 index 8fb8b73fb7..0000000000 --- a/static/imgs/docs/icon-document.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/docs/icon-search.svg b/static/imgs/docs/icon-search.svg deleted file mode 100644 index f3dcbe5544..0000000000 --- a/static/imgs/docs/icon-search.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - diff --git a/static/imgs/docs/screen1-background-ground-lower.svg b/static/imgs/docs/screen1-background-ground-lower.svg deleted file mode 100644 index e66bef2ffd..0000000000 --- a/static/imgs/docs/screen1-background-ground-lower.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/docs/screen1-background-ground-upper.svg b/static/imgs/docs/screen1-background-ground-upper.svg deleted file mode 100644 index eaae309788..0000000000 --- a/static/imgs/docs/screen1-background-ground-upper.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/footer-background.svg b/static/imgs/footer-background.svg deleted file mode 100644 index 7cf4d51cf0..0000000000 --- a/static/imgs/footer-background.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/header-customers-featured-thumbnail.png b/static/imgs/header-customers-featured-thumbnail.png deleted file mode 100644 index 8d775972e5..0000000000 Binary files a/static/imgs/header-customers-featured-thumbnail.png and /dev/null differ diff --git a/static/imgs/icon-facebook.svg b/static/imgs/icon-facebook.svg deleted file mode 100644 index fc380b86fb..0000000000 --- a/static/imgs/icon-facebook.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/icon-github.svg b/static/imgs/icon-github.svg deleted file mode 100644 index 0e3d0dcc8f..0000000000 --- a/static/imgs/icon-github.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/icon-linkedin.svg b/static/imgs/icon-linkedin.svg deleted file mode 100644 index fb78ceb0f6..0000000000 --- a/static/imgs/icon-linkedin.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/icon-search-mobile.svg b/static/imgs/icon-search-mobile.svg deleted file mode 100644 index 80fad42c9c..0000000000 --- a/static/imgs/icon-search-mobile.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - diff --git a/static/imgs/icon-search.svg b/static/imgs/icon-search.svg deleted file mode 100644 index 3f89a02996..0000000000 --- a/static/imgs/icon-search.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - diff --git a/static/imgs/icon-slack.svg b/static/imgs/icon-slack.svg deleted file mode 100644 index adf00c858d..0000000000 --- a/static/imgs/icon-slack.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/icon-twitter.svg b/static/imgs/icon-twitter.svg deleted file mode 100644 index a5ad051813..0000000000 --- a/static/imgs/icon-twitter.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git 
a/static/imgs/icon-youtube.svg b/static/imgs/icon-youtube.svg deleted file mode 100644 index 2ff28e1b37..0000000000 --- a/static/imgs/icon-youtube.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/imgs/rancher-logo-cow-blue.svg b/static/imgs/rancher-logo-cow-blue.svg deleted file mode 100644 index 8353dec673..0000000000 --- a/static/imgs/rancher-logo-cow-blue.svg +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - \ No newline at end of file diff --git a/static/imgs/rancher-logo-horiz-color.png b/static/imgs/rancher-logo-horiz-color.png deleted file mode 100644 index a9d1c626e5..0000000000 Binary files a/static/imgs/rancher-logo-horiz-color.png and /dev/null differ diff --git a/static/imgs/rancher-logo-only-color.png b/static/imgs/rancher-logo-only-color.png deleted file mode 100644 index 3bb46b79ca..0000000000 Binary files a/static/imgs/rancher-logo-only-color.png and /dev/null differ diff --git a/static/imgs/rancher-logo-only-color.svg b/static/imgs/rancher-logo-only-color.svg deleted file mode 100644 index 205a0e9315..0000000000 --- a/static/imgs/rancher-logo-only-color.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/static/xml-sitemap.xsl b/static/xml-sitemap.xsl deleted file mode 100644 index 62f26bbb2b..0000000000 --- a/static/xml-sitemap.xsl +++ /dev/null @@ -1,130 +0,0 @@ - - - - - - - XML Sitemap - - - - - - - -
    [deleted stylesheet text: XML Sitemap, generated by Yoast's WordPress SEO plugin for consumption by search engines, with more information at sitemaps.org; table columns URL, Priority, Images, Change Freq., Last Change]
    \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/backups/backup/backup.md b/versioned_docs/version-2.0-2.4/backups/backup/backup.md new file mode 100644 index 0000000000..9859799570 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/backups/backup/backup.md @@ -0,0 +1,22 @@ +--- +title: Backup +weight: 50 +aliases: + - /rancher/v2.0-v2.4/en/installation/after-installation/ + - /rancher/v2.0-v2.4/en/backups/ + - /rancher/v2.0-v2.4/en/backups/backups + - /rancher/v2.0-v2.4/en/backups/legacy/backup + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/ + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/ + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/ +--- +This section contains information about how to create backups of your Rancher data and how to restore them in a disaster scenario. + + - Rancher server backups: + - [Rancher installed on a K3s Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md) + - [Rancher installed on an RKE Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) + - [Rancher installed with Docker](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) + +For information on backing up Rancher launched Kubernetes clusters, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md) + +If you are looking to back up your [Rancher launched Kubernetes cluster](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), please refer [here](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md). diff --git a/versioned_docs/version-2.0-2.4/backups/restore/restore.md b/versioned_docs/version-2.0-2.4/backups/restore/restore.md new file mode 100644 index 0000000000..98211eedc3 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/backups/restore/restore.md @@ -0,0 +1,16 @@ +--- +title: Restore +weight: 1010 +aliases: + - /rancher/v2.0-v2.4/en/backups/restorations + - /rancher/v2.0-v2.4/en/backups/legacy/restore + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/ +--- +If you lose the data on your Rancher Server, you can restore it if you have backups stored in a safe location. + +- [Restoring backups for Rancher installed with Docker](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) +- [Restoring backups for Rancher installed on an RKE Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) +- [Restoring backups for Rancher installed on a K3s Kubernetes cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md) + +If you are looking to restore your [Rancher launched Kubernetes cluster](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), please refer to [this section](../../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md). 
diff --git a/versioned_docs/version-2.0-2.4/contribute-to-rancher.md b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md new file mode 100644 index 0000000000..da4e11a73a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/contribute-to-rancher.md @@ -0,0 +1,122 @@ +--- +title: Contributing to Rancher +weight: 27 +aliases: + - /rancher/v2.0-v2.4/en/faq/contributing/ +--- + +This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. + +For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). The wiki has resources on many topics, including the following: + +- How to set up the Rancher development environment and run tests +- The typical flow of an issue through the development lifecycle +- Coding guidelines and development best practices +- Debugging and troubleshooting +- Developing the Rancher API + +On the Rancher Users Slack, the channel for developers is **#developer**. + +# Repositories + +All of the repositories are located within our main GitHub organization. There are many repositories used for Rancher, but descriptions of some of the main ones are provided below. + +Repository | URL | Description +-----------|-----|------------- +Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. +Types | https://github.com/rancher/types | This repository contains all the API types for Rancher 2.x. +API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher-style APIs backed by Kubernetes Custom Resources. +User Interface | https://github.com/rancher/ui | This repository is the source of the UI. +(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. +machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. +kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. +RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. +CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. +(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. +Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. 
+loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. + +To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. + +![Rancher diagram](/img/ranchercomponentsdiagram.svg)
+Rancher components used for provisioning/managing Kubernetes clusters. + +# Building + +Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling. + +The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. + +# Bugs, Issues or Questions + +If you find any bugs or are having any trouble, please search the [reported issues](https://github.com/rancher/rancher/issues), as someone may have experienced the same issue or we may already be working on a solution. + +If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about a use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). + +### Checklist for Filing Issues + +Please follow this checklist when filing an issue; it will help us investigate and fix the issue. More information means more data we can use to determine what is causing the issue or what might be related to it. + +>**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. +>**Important:** Please remove any sensitive data as it will be publicly viewable. + +- **Resources:** Provide as much detail as possible about the resources used. As the source of the issue could be many things, including as much detail as possible helps to determine the root cause. See some examples below: + - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce + - **Operating System:** What operating system are you using? Providing specifics helps here, like the output of `cat /etc/os-release` for the exact OS release and `uname -r` for the exact kernel used + - **Docker:** What Docker version are you using, and how did you install it? Most of the details of Docker can be found by supplying the output of `docker version` and `docker info` + - **Environment:** Are you in a proxy environment, are you using recognized CA or self-signed certificates, are you using an external load balancer + - **Rancher:** What version of Rancher are you using? This can be found at the bottom left of the UI or retrieved from the image tag you are running on the host + - **Clusters:** What kind of cluster did you create, how did you create it, and what did you specify when you were creating it +- **Steps to reproduce the issue:** Provide as much detail as possible on how you got into the reported situation. This helps us reproduce the situation you are in. 
+ - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. +- **Logs:** Provide data/logs from the used resources. + - Rancher + - Docker install + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') + ``` + - Kubernetes install using `kubectl` + + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ``` + kubectl -n cattle-system \ + logs \ + -l app=rancher \ + --timestamps=true + ``` + - Docker install using `docker` on each of the nodes in the RKE cluster + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') + ``` + - Kubernetes Install with RKE Add-On + + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ``` + kubectl -n cattle-system \ + logs \ + --timestamps=true \ + -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') + ``` + - System logging (these might not all exist, depending on operating system) + - `/var/log/messages` + - `/var/log/syslog` + - `/var/log/kern.log` + - Docker daemon logging (these might not all exist, depending on operating system) + - `/var/log/docker.log` +- **Metrics:** If you are experiencing performance issues, please provide as much metrics data (files or screenshots) as possible to help determine what is going on. If you have an issue related to a machine, it helps to supply the output of `top`, `free -m`, and `df`, which show process/memory/disk usage. + +# Docs + +If you have any updates to our documentation, please open a pull request against our docs repo. + +- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are in the `content` folder of the repo. + +- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are in the `rancher` folder of the repo. 
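+As a rough illustration of the build flow described in the Building section above, here is a minimal sketch. It assumes a local Docker daemon and a repository that ships the standard `Makefile` and `Dockerfile.dapper`; the `rancher/rancher` clone below is only an example: +
+```
+# Clone one of the repositories listed above (rancher/rancher as an example)
+git clone https://github.com/rancher/rancher.git
+cd rancher
+
+# Run the default `ci` target; Dapper builds an isolated environment from
+# Dockerfile.dapper and runs ./scripts/validate, build, test and package inside it
+make
+
+# Or run a single target, such as the build script only
+make build
+
+# Build artifacts land under ./build/bin and, for most repositories,
+# are also packaged into a Docker image
+ls ./build/bin
+```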
diff --git a/versioned_docs/version-2.0-2.4/explanations.md b/versioned_docs/version-2.0-2.4/explanations.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/cis-scans/skipped-tests/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md new file mode 100644 index 0000000000..b107afb212 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-alerts/default-alerts.md @@ -0,0 +1,60 @@ +--- +title: Default Alerts for Cluster Monitoring +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/alerts/default-alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/cluster-alerts/default-alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts +--- + +When you create a cluster, some alert rules are predefined. These alerts notify you about signs that the cluster could be unhealthy. You can receive these alerts if you configure a [notifier](../notifiers.md) for them. + +Several of the alerts use Prometheus expressions as the metric that triggers the alert. For more information on how expressions work, you can refer to the Rancher [documentation about Prometheus expressions](../cluster-monitoring/expression.md) or the Prometheus [documentation about querying metrics](https://prometheus.io/docs/prometheus/latest/querying/basics/). + +# Alerts for etcd +Etcd is the key-value store that contains the state of the Kubernetes cluster. Rancher provides default alerts if the built-in monitoring detects a potential problem with etcd. You don't have to enable monitoring to receive these alerts. + +A leader is the node that handles all client requests that need cluster consensus. For more information, you can refer to this [explanation of how etcd works.](https://rancher.com/blog/2019/2019-01-29-what-is-etcd/#how-does-etcd-work) + +The leader of the cluster can change in response to certain events. It is normal for the leader to change, but too many changes can indicate a problem with the network or a high CPU load. With longer latencies, the default etcd configuration may cause frequent heartbeat timeouts, which trigger a new leader election. + +| Alert | Explanation | +|-------|-------------| +| A high number of leader changes within the etcd cluster are happening | A warning alert is triggered when the leader changes more than three times in one hour. | +| Database usage close to the quota 500M | A warning alert is triggered when the size of etcd exceeds 500M.| +| Etcd is unavailable | A critical alert is triggered when etcd becomes unavailable. | +| Etcd member has no leader | A critical alert is triggered when the etcd cluster does not have a leader for at least three minutes. 
| + + +# Alerts for Kubernetes Components +Rancher provides alerts when core Kubernetes system components become unhealthy. + +Controllers update Kubernetes resources based on changes in etcd. The [controller manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) monitors the cluster desired state through the Kubernetes API server and makes the necessary changes to the current state to reach the desired state. + +The [scheduler](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) service is a core component of Kubernetes. It is responsible for scheduling cluster workloads to nodes, based on various configurations, metrics, resource requirements and workload-specific requirements. + +| Alert | Explanation | +|-------|-------------| +| Controller Manager is unavailable | A critical warning is triggered when the cluster’s controller-manager becomes unavailable. | +| Scheduler is unavailable | A critical warning is triggered when the cluster’s scheduler becomes unavailable. | + + +# Alerts for Events +Kubernetes events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from the node. In the Rancher UI, from the project view, you can see events for each workload. + +| Alert | Explanation | +|-------|-------------| +| Get warning deployment event | A warning alert is triggered when a warning event happens on a deployment. | + + +# Alerts for Nodes +Alerts can be triggered based on node metrics. Each computing resource in a Kubernetes cluster is called a node. Nodes can be either bare-metal servers or virtual machines. + +| Alert | Explanation | +|-------|-------------| +| High CPU load | A warning alert is triggered if the node uses more than 100 percent of the node’s available CPU seconds for at least three minutes. | +| High node memory utilization | A warning alert is triggered if the node uses more than 80 percent of its available memory for at least three minutes. | +| Node disk is running full within 24 hours | A critical alert is triggered if the disk space on the node is expected to run out in the next 24 hours based on the disk growth over the last 6 hours. | + +# Project-level Alerts +When you enable monitoring for the project, some project-level alerts are provided. 
For details, refer to the [section on project-level alerts.](../../../reference-guides/rancher-project-tools/project-alerts.md) diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/elasticsearch.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/elasticsearch/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/elasticsearch.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/fluentd.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/fluentd/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/fluentd.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/kafka.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/kafka/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/kafka.md diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md new file mode 100644 index 0000000000..ea64c24e5f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/splunk.md @@ -0,0 +1,81 @@ +--- +title: Splunk +weight: 300 +aliases: + - /rancher/v2.0-v2.4/en/tasks/logging/splunk/ + - /rancher/v2.0-v2.4/en/tools/logging/splunk/ + - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging/splunk + - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging/splunk + - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk + - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/splunk/ + - /rancher/v2.x/en/cluster-admin/tools/logging/splunk +--- + +If your organization uses [Splunk](https://www.splunk.com/), you can configure Rancher to send it Kubernetes logs. Afterwards, you can log into your Splunk server to view logs. + +>**Prerequisites:** +> +>- Configure HTTP event collection for your Splunk Server (Splunk Enterprise or Splunk Cloud). +>- Either create a new token or copy an existing token. +> +>For more information, see [Splunk Documentation](http://docs.splunk.com/Documentation/Splunk/7.1.2/Data/UsetheHTTPEventCollector#About_Event_Collector_tokens). + +## Splunk Configuration + +1. In the **Endpoint** field, enter the IP address and port for your Splunk instance (e.g., `http://splunk-server:8088`). + + * Splunk usually uses port `8088`. If you're using Splunk Cloud, you'll need to work with [Splunk support](https://www.splunk.com/en_us/support-and-services.html) to get an endpoint URL. + +1. Enter the **Token** you obtained while completing the prerequisites (i.e., when you created a token in Splunk). + +1. In the **Source** field, enter the name of the token as entered in Splunk. + +1. 
**Optional:** Provide one or more [indexes](http://docs.splunk.com/Documentation/Splunk/7.1.2/Indexer/Aboutindexesandindexers) that are allowed for your token. + +## SSL Configuration + +If your instance of Splunk uses SSL, your **Endpoint** will need to begin with `https://`. With the correct endpoint, the **SSL Configuration** form is enabled and ready to be completed. + +1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. + + - You can use either a self-signed certificate or one provided by a certificate authority. + + - You can generate a self-signed certificate using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + +1. Enter your **Client Key Password**. + +1. Select whether you want to verify your SSL certificate. + + * If you are using a self-signed certificate, select **Enabled - Input trusted server certificate** and provide the **CA Certificate PEM**. You can copy and paste the certificate or upload it using the **Read from a file** button. + * If you are using a certificate from a certificate authority, select **Enabled - Input trusted server certificate**. You do not need to provide a **CA Certificate PEM**. + +## Viewing Logs + +1. Log into your Splunk server. + +1. Click on **Search & Reporting**. The number of **Indexed Events** listed should be increasing. + +1. Click on **Data Summary** and select the **Sources** tab. + ![View Logs](/img/splunk/splunk4.jpg) + +1. To view the actual logs, click on the source that you declared earlier. + ![View Logs](/img/splunk/splunk5.jpg) + +## Troubleshooting + +You can use curl to see if **HEC** is listening for HTTP event data. + +``` +$ curl http://splunk-server:8088/services/collector/event \ + -H 'Authorization: Splunk 8da70994-b1b0-4a79-b154-bfaae8f93432' \ + -d '{"event": "hello world"}' +``` + +If Splunk is configured correctly, you should receive a JSON response with `success code 0`, which means you can +send logging data to HEC. + +If you receive an error, check your configuration in Splunk and Rancher.
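You can also check whether the collector endpoint itself is reachable before sending events. This is an editorial sketch rather than part of the original guide: `splunk-server` is a placeholder hostname, and the `-k` flag is only needed when HEC is served with a self-signed certificate. Splunk exposes a HEC health endpoint at `/services/collector/health`; an HTTP 200 response indicates the collector is up and accepting connections.

```
$ curl -k https://splunk-server:8088/services/collector/health
```

If this check succeeds but the event request above still fails, the problem is most likely the token or the index configuration rather than network connectivity.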
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/syslog.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/cluster-logging/syslog/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-logging/syslog.md diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md new file mode 100644 index 0000000000..f081f977ef --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics.md @@ -0,0 +1,119 @@ +--- +title: Cluster Metrics +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/cluster-metrics + - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/cluster-metrics + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/cluster-metrics/ +--- + +_Available as of v2.2.0_ + +Cluster metrics display the hardware utilization for all nodes in your cluster, regardless of their roles. They give you global monitoring insight into the cluster. + +Some of the biggest metrics to look out for: + +- **CPU Utilization** + + High load either indicates that your cluster is running efficiently or that you're running out of CPU resources. + +- **Disk Utilization** + + Be on the lookout for increased read and write rates on nodes nearing their disk capacity. This advice is especially true for etcd nodes, as running out of storage on an etcd node leads to cluster failure. + +- **Memory Utilization** + + Deltas in memory utilization usually indicate a memory leak. + +- **Load Average** + + Generally, you want your load average to match your number of logical CPUs for the cluster. For example, if your cluster has 8 logical CPUs, the ideal load average would be 8 as well. If your load average is well under the number of logical CPUs for the cluster, you may want to reduce cluster resources. On the other hand, if your average is over 8, your cluster may need more resources. + +## Finding Node Metrics + +1. From the **Global** view, navigate to the cluster whose metrics you want to view. + +1. Select **Nodes** in the navigation bar. + +1. Select a specific node and click on its name. + +1. Click on **Node Metrics**. + +[_Get expressions for Cluster Metrics_](./expression.md#cluster-metrics) + +### Etcd Metrics + +>**Note:** Only supported for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +Etcd metrics display the operations of the etcd database on each of your cluster nodes. After establishing a baseline of normal etcd operational metrics, observe them for abnormal deltas between metric refreshes, which indicate potential issues with etcd. Always address etcd issues immediately! + +You should also pay attention to the text at the top of the etcd metrics, which displays leader election statistics. This text indicates if etcd currently has a leader, which is the etcd instance that coordinates the other etcd instances in your cluster. A large increase in leader changes implies etcd is unstable.
If you notice a change in leader election statistics, you should investigate them for issues. + +Some of the biggest metrics to look out for: + +- **Etcd has a leader** + + etcd is usually deployed on multiple nodes and elects a leader to coordinate its operations. If etcd does not have a leader, its operations are not being coordinated. + +- **Number of leader changes** + + If this statistic suddenly grows, it usually indicates network communication issues that constantly force the cluster to elect a new leader. + +[_Get expressions for Etcd Metrics_](./expression.md#etcd-metrics) + +### Kubernetes Components Metrics + +Kubernetes components metrics display data about the cluster's individual Kubernetes components. Primarily, it displays information about connections and latency for each component: the API server, controller manager, scheduler, and ingress controller. + +>**Note:** The metrics for the controller manager, scheduler and ingress controller are only supported for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +When analyzing Kubernetes component metrics, don't be concerned about any single standalone metric in the charts and graphs that display. Rather, you should establish a baseline for metrics considered normal following a period of observation, e.g. the range of values that your components usually operate within and are considered normal. After you establish this baseline, be on the lookout for large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. + +Some of the more important component metrics to monitor are: + +- **API Server Request Latency** + + Increasing API response times indicate there's a generalized problem that requires investigation. + +- **API Server Request Rate** + + Rising API request rates usually coincide with increased API response times. Increased request rates also indicate a generalized problem requiring investigation. + +- **Scheduler Preemption Attempts** + + If you see a spike in scheduler preemptions, it's an indication that you're running out of hardware resources, as Kubernetes is recognizing it doesn't have enough resources to run all your pods and is prioritizing the more important ones. + +- **Scheduling Failed Pods** + + Failed pods can have a variety of causes, such as unbound persistent volume claims, exhausted hardware resources, non-responsive nodes, etc. + +- **Ingress Controller Request Process Time** + + How fast ingress is routing connections to your cluster services. + +[_Get expressions for Kubernetes Component Metrics_](./expression.md#kubernetes-components-metrics) + +## Rancher Logging Metrics + +Although the Dashboard for a cluster primarily displays data sourced from Prometheus, it also displays information for cluster logging, provided that you have [configured Rancher to use a logging service](../../../pages-for-subheaders/cluster-logging.md). + +[_Get expressions for Rancher Logging Metrics_](./expression.md#rancher-logging-metrics) + +## Finding Workload Metrics + +Workload metrics display the hardware utilization for a Kubernetes workload. You can also view metrics for [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), [stateful sets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) and so on. + +1. 
From the **Global** view, navigate to the project whose workload metrics you want to view. + +1. From the main navigation bar, choose **Resources > Workloads.** In versions before v2.3.0, choose **Workloads** on the main navigation bar. + +1. Select a specific workload and click on its name. + +1. In the **Pods** section, select a specific pod and click on its name. + + - **View the Pod Metrics:** Click on **Pod Metrics**. + - **View the Container Metrics:** In the **Containers** section, select a specific container and click on its name. Click on **Container Metrics**. + +[_Get expressions for Workload Metrics_](./expression.md#workload-metrics) diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md new file mode 100644 index 0000000000..fd90daf406 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/custom-metrics.md @@ -0,0 +1,493 @@ +--- +title: Prometheus Custom Metrics Adapter +weight: 5 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/custom-metrics + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/custom-metrics + - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/custom-metrics/ + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/custom-metrics/ +--- + +After you've enabled [cluster level monitoring](../../../pages-for-subheaders/cluster-monitoring.md), you can view the metrics data in Rancher. You can also deploy the Prometheus custom metrics adapter so that you can use the Horizontal Pod Autoscaler (HPA) with metrics stored in cluster monitoring. + +## Deploy Prometheus Custom Metrics Adapter + +We are going to use the [Prometheus custom metrics adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/releases/tag/v0.5.0), version v0.5.0, which is a good example of a [custom metrics server](https://github.com/kubernetes-incubator/custom-metrics-apiserver). You must be the *cluster owner* to execute the following steps. + +- Get the service account that cluster monitoring is using. It is configured in the workload with ID `statefulset:cattle-prometheus:prometheus-cluster-monitoring`. If you didn't customize anything, the service account name should be `cluster-monitoring`. + +- Grant permissions to that service account. You will need two kinds of permissions. +The first is the `extension-apiserver-authentication-reader` role in `kube-system`, so you will need to create a `RoleBinding` in `kube-system`. This permission lets the adapter read the API aggregation configuration from a ConfigMap in `kube-system`. + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: custom-metrics-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: cluster-monitoring + namespace: cattle-prometheus +``` + +The other is the cluster role `system:auth-delegator`, so you will need to create a `ClusterRoleBinding`. This permission allows the adapter to perform token and subject access reviews for delegated authentication and authorization.
+ +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: custom-metrics:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: cluster-monitoring + namespace: cattle-prometheus +``` + +- Create configuration for custom metrics adapter. Following is an example configuration. There will be a configuration details in next session. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: adapter-config + namespace: cattle-prometheus +data: + config.yaml: | + rules: + - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' + seriesFilters: [] + resources: + overrides: + namespace: + resource: namespace + pod_name: + resource: pod + name: + matches: ^container_(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' + seriesFilters: + - isNot: ^container_.*_seconds_total$ + resources: + overrides: + namespace: + resource: namespace + pod_name: + resource: pod + name: + matches: ^container_(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' + seriesFilters: + - isNot: ^container_.*_total$ + resources: + overrides: + namespace: + resource: namespace + pod_name: + resource: pod + name: + matches: ^container_(.*)$ + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_total$ + resources: + template: <<.Resource>> + name: + matches: "" + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_seconds_total + resources: + template: <<.Resource>> + name: + matches: ^(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: [] + resources: + template: <<.Resource>> + name: + matches: ^(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) + resourceRules: + cpu: + containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>) + nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>) + resources: + overrides: + instance: + resource: node + namespace: + resource: namespace + pod_name: + resource: pod + containerLabel: container_name + memory: + containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) + nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) + resources: + overrides: + instance: + resource: node + namespace: + resource: namespace + pod_name: + resource: pod + containerLabel: container_name + window: 1m +``` + +- Create HTTPS TLS certs for your api server. You can use following command to create a self-signed cert. 
+ +```bash +openssl req -new -newkey rsa:4096 -x509 -sha256 -days 365 -nodes -out serving.crt -keyout serving.key -subj "/C=CN/CN=custom-metrics-apiserver.cattle-prometheus.svc.cluster.local" +# And you will find serving.crt and serving.key in your path. And then you are going to create a secret in cattle-prometheus namespace. +kubectl create secret generic -n cattle-prometheus cm-adapter-serving-certs --from-file=serving.key=./serving.key --from-file=serving.crt=./serving.crt +``` + +- Then you can create the prometheus custom metrics adapter. And you will need a service for this deployment too. Creating it via Import YAML or Rancher would do. Please create those resources in `cattle-prometheus` namespaces. + +Here is the prometheus custom metrics adapter deployment. +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: custom-metrics-apiserver + name: custom-metrics-apiserver + namespace: cattle-prometheus +spec: + replicas: 1 + selector: + matchLabels: + app: custom-metrics-apiserver + template: + metadata: + labels: + app: custom-metrics-apiserver + name: custom-metrics-apiserver + spec: + serviceAccountName: cluster-monitoring + containers: + - name: custom-metrics-apiserver + image: directxman12/k8s-prometheus-adapter-amd64:v0.5.0 + args: + - --secure-port=6443 + - --tls-cert-file=/var/run/serving-cert/serving.crt + - --tls-private-key-file=/var/run/serving-cert/serving.key + - --logtostderr=true + - --prometheus-url=http://prometheus-operated/ + - --metrics-relist-interval=1m + - --v=10 + - --config=/etc/adapter/config.yaml + ports: + - containerPort: 6443 + volumeMounts: + - mountPath: /var/run/serving-cert + name: volume-serving-cert + readOnly: true + - mountPath: /etc/adapter/ + name: config + readOnly: true + - mountPath: /tmp + name: tmp-vol + volumes: + - name: volume-serving-cert + secret: + secretName: cm-adapter-serving-certs + - name: config + configMap: + name: adapter-config + - name: tmp-vol + emptyDir: {} + +``` + +Here is the service of the deployment. +```yaml +apiVersion: v1 +kind: Service +metadata: + name: custom-metrics-apiserver + namespace: cattle-prometheus +spec: + ports: + - port: 443 + targetPort: 6443 + selector: + app: custom-metrics-apiserver +``` + +- Create API service for your custom metric server. + +```yaml +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io +spec: + service: + name: custom-metrics-apiserver + namespace: cattle-prometheus + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 + +``` + +- Then you can verify your custom metrics server by `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1`. If you see the return datas from the api, it means that the metrics server has been successfully set up. + +- You create HPA with custom metrics now. Here is an example of HPA. You will need to create a nginx deployment in your namespace first. 
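As an editorial aid, here is a minimal sketch of such an nginx Deployment (not part of the original walkthrough; the name `nginx` is assumed because it must match the `scaleTargetRef` in the HPA manifest that follows, and the image tag is arbitrary):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  # the name must match scaleTargetRef.name in the HPA below
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.21
        ports:
        - containerPort: 80
```

Apply it to the namespace you plan to autoscale in (for example, `kubectl apply -n <your-namespace> -f nginx-deployment.yaml`), then create the HPA shown below, which targets the `memory_usage_bytes` custom metric exposed by the adapter rules above.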
+ +```yaml +kind: HorizontalPodAutoscaler +apiVersion: autoscaling/v2beta1 +metadata: + name: nginx +spec: + scaleTargetRef: + # point the HPA at the nginx deployment you just created + apiVersion: apps/v1 + kind: Deployment + name: nginx + # autoscale between 1 and 10 replicas + minReplicas: 1 + maxReplicas: 10 + metrics: + # use a "Pods" metric, which takes the average of the + # given metric across all pods controlled by the autoscaling target + - type: Pods + pods: + metricName: memory_usage_bytes + targetAverageValue: 5000000 +``` + +You should then see your nginx deployment scale up, which confirms that the HPA works with custom metrics. + +## Configuration of the Prometheus Custom Metrics Adapter + +> Refer to https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md + +The adapter determines which metrics to expose, and how to expose them, +through a set of "discovery" rules. Each rule is executed independently +(so make sure that your rules are mutually exclusive), and specifies each +of the steps the adapter needs to take to expose a metric in the API. + +Each rule can be broken down into roughly four parts: + +- *Discovery*, which specifies how the adapter should find all Prometheus + metrics for this rule. + +- *Association*, which specifies how the adapter should determine which + Kubernetes resources a particular metric is associated with. + +- *Naming*, which specifies how the adapter should expose the metric in + the custom metrics API. + +- *Querying*, which specifies how a request for a particular metric on one + or more Kubernetes objects should be turned into a query to Prometheus. + +A basic config with one rule might look like: + +```yaml +rules: +# this rule matches cumulative cAdvisor metrics measured in seconds +- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}' + resources: + # skip specifying generic resource<->label mappings, and just + # attach only pod and namespace resources by mapping label names to group-resources + overrides: + namespace: {resource: "namespace"} + pod_name: {resource: "pod"} + # specify that the `container_` and `_seconds_total` suffixes should be removed. + # this also introduces an implicit filter on metric family names + name: + # we use the value of the capture group implicitly as the API name + # we could also explicitly write `as: "$1"` + matches: "^container_(.*)_seconds_total$" + # specify how to construct a query to fetch samples for a given series + # This is a Go template where the `.Series` and `.LabelMatchers` string values + # are available, and the delimiters are `<<` and `>>` to avoid conflicts with + # the prometheus query language + metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!=\"POD\"}[2m])) by (<<.GroupBy>>)" +``` + +### Discovery + +Discovery governs the process of finding the metrics that you want to +expose in the custom metrics API. There are two fields that factor into +discovery: `seriesQuery` and `seriesFilters`. + +`seriesQuery` specifies a Prometheus series query (as passed to the +`/api/v1/series` endpoint in Prometheus) to use to find some set of +Prometheus series. The adapter will strip the label values from this +series, and then use the resulting metric-name-label-names combinations +later on. + +In many cases, `seriesQuery` will be sufficient to narrow down the list of +Prometheus series.
However, sometimes (especially if two rules might +otherwise overlap), it's useful to do additional filtering on metric +names. In this case, `seriesFilters` can be used. After the list of +series is returned from `seriesQuery`, each series has its metric name +filtered through any specified filters. + +Filters may be either: + +- `is: <regex>`, which matches any series whose name matches the specified + regex. + +- `isNot: <regex>`, which matches any series whose name does not match the + specified regex. + +For example: + +```yaml +# match all cAdvisor metrics that aren't measured in seconds +seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}' +seriesFilters: +- isNot: "^container_.*_seconds_total" +``` + +### Association + +Association governs the process of figuring out which Kubernetes resources +a particular metric could be attached to. The `resources` field controls +this process. + +There are two ways to associate resources with a particular metric. In +both cases, the value of the label becomes the name of the particular +object. + +One way is to specify that any label name that matches some particular +pattern refers to some group-resource based on the label name. This can +be done using the `template` field. The pattern is specified as a Go +template, with the `Group` and `Resource` fields representing group and +resource. You don't necessarily have to use the `Group` field (in which +case the group is guessed by the system). For instance: + +```yaml +# any label `kube_<group>_<resource>` becomes <group>.<resource> in Kubernetes +resources: + template: "kube_<<.Group>>_<<.Resource>>" +``` + +The other way is to specify that some particular label represents some +particular Kubernetes resource. This can be done using the `overrides` +field. Each override maps a Prometheus label to a Kubernetes +group-resource. For instance: + +```yaml +# the microservice label corresponds to the apps.deployment resource +resources: + overrides: + microservice: {group: "apps", resource: "deployment"} +``` + +These two can be combined, so you can specify both a template and some +individual overrides. + +The resources mentioned can be any resource available in your Kubernetes +cluster, as long as you've got a corresponding label. + +### Naming + +Naming governs the process of converting a Prometheus metric name into +a metric in the custom metrics API, and vice versa. It's controlled by +the `name` field. + +Naming is controlled by specifying a pattern to extract an API name from +a Prometheus name, and potentially a transformation on that extracted +value. + +The pattern is specified in the `matches` field, and is just a regular +expression. If not specified, it defaults to `.*`. + +The transformation is specified by the `as` field. You can use any +capture groups defined in the `matches` field. If the `matches` field +doesn't contain capture groups, the `as` field defaults to `$0`. If it +contains a single capture group, the `as` field defaults to `$1`. +Otherwise, it's an error not to specify the `as` field. + +For example: + +```yaml +# turn any <name>_total into <name>_per_second +# e.g. http_requests_total becomes http_requests_per_second +name: + matches: "^(.*)_total$" + as: "${1}_per_second" +``` + +### Querying + +Querying governs the process of actually fetching values for a particular +metric. It's controlled by the `metricsQuery` field. + +The `metricsQuery` field is a Go template that gets turned into +a Prometheus query, using input from a particular call to the custom +metrics API.
A given call to the custom metrics API is distilled down to +a metric name, a group-resource, and one or more objects of that +group-resource. These get turned into the following fields in the +template: + +- `Series`: the metric name +- `LabelMatchers`: a comma-separated list of label matchers matching the + given objects. Currently, this is the label for the particular + group-resource, plus the label for namespace, if the group-resource is + namespaced. +- `GroupBy`: a comma-separated list of labels to group by. Currently, + this contains the group-resource label used in `LabelMatchers`. + +For instance, suppose we had a series `http_requests_total` (exposed as +`http_requests_per_second` in the API) with labels `service`, `pod`, +`ingress`, `namespace`, and `verb`. The first four correspond to +Kubernetes resources. Then, if someone requested the metric +`pods/http_request_per_second` for the pods `pod1` and `pod2` in the +`somens` namespace, we'd have: + +- `Series: "http_requests_total"` +- `LabelMatchers: "pod=~\"pod1|pod2",namespace="somens"` +- `GroupBy`: `pod` + +Additionally, there are two advanced fields that are "raw" forms of other +fields: + +- `LabelValuesByName`: a map mapping the labels and values from the + `LabelMatchers` field. The values are pre-joined by `|` + (for used with the `=~` matcher in Prometheus). +- `GroupBySlice`: the slice form of `GroupBy`. + +In general, you'll probably want to use the `Series`, `LabelMatchers`, and +`GroupBy` fields. The other two are for advanced usage. + +The query is expected to return one value for each object requested. The +adapter will use the labels on the returned series to associate a given +series back to its corresponding object. + +For example: + +```yaml +# convert cumulative cAdvisor metrics into rates calculated over 2 minutes +metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)" +``` diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md new file mode 100644 index 0000000000..5e85a78720 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/expression.md @@ -0,0 +1,436 @@ +--- +title: Prometheus Expressions +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/expression + - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/expression + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/expression + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/ +--- + +The PromQL expressions in this doc can be used to configure [alerts.](../../../pages-for-subheaders/cluster-alerts.md) + +> Before expressions can be used in alerts, monitoring must be enabled. 
For more information, refer to the documentation on enabling monitoring [at the cluster level](../../../pages-for-subheaders/cluster-monitoring.md) or [at the project level.](./project-monitoring.md) + +For more information about querying Prometheus, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) + + + +- [Cluster Metrics](#cluster-metrics) + - [Cluster CPU Utilization](#cluster-cpu-utilization) + - [Cluster Load Average](#cluster-load-average) + - [Cluster Memory Utilization](#cluster-memory-utilization) + - [Cluster Disk Utilization](#cluster-disk-utilization) + - [Cluster Disk I/O](#cluster-disk-i-o) + - [Cluster Network Packets](#cluster-network-packets) + - [Cluster Network I/O](#cluster-network-i-o) +- [Node Metrics](#node-metrics) + - [Node CPU Utilization](#node-cpu-utilization) + - [Node Load Average](#node-load-average) + - [Node Memory Utilization](#node-memory-utilization) + - [Node Disk Utilization](#node-disk-utilization) + - [Node Disk I/O](#node-disk-i-o) + - [Node Network Packets](#node-network-packets) + - [Node Network I/O](#node-network-i-o) +- [Etcd Metrics](#etcd-metrics) + - [Etcd Has a Leader](#etcd-has-a-leader) + - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) + - [Number of Failed Proposals](#number-of-failed-proposals) + - [GRPC Client Traffic](#grpc-client-traffic) + - [Peer Traffic](#peer-traffic) + - [DB Size](#db-size) + - [Active Streams](#active-streams) + - [Raft Proposals](#raft-proposals) + - [RPC Rate](#rpc-rate) + - [Disk Operations](#disk-operations) + - [Disk Sync Duration](#disk-sync-duration) +- [Kubernetes Components Metrics](#kubernetes-components-metrics) + - [API Server Request Latency](#api-server-request-latency) + - [API Server Request Rate](#api-server-request-rate) + - [Scheduling Failed Pods](#scheduling-failed-pods) + - [Controller Manager Queue Depth](#controller-manager-queue-depth) + - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) + - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) + - [Ingress Controller Connections](#ingress-controller-connections) + - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) +- [Rancher Logging Metrics](#rancher-logging-metrics) + - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) + - [Fluentd Input Rate](#fluentd-input-rate) + - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) + - [Fluentd Output Rate](#fluentd-output-rate) +- [Workload Metrics](#workload-metrics) + - [Workload CPU Utilization](#workload-cpu-utilization) + - [Workload Memory Utilization](#workload-memory-utilization) + - [Workload Network Packets](#workload-network-packets) + - [Workload Network I/O](#workload-network-i-o) + - [Workload Disk I/O](#workload-disk-i-o) +- [Pod Metrics](#pod-metrics) + - [Pod CPU Utilization](#pod-cpu-utilization) + - [Pod Memory Utilization](#pod-memory-utilization) + - [Pod Network Packets](#pod-network-packets) + - [Pod Network I/O](#pod-network-i-o) + - [Pod Disk I/O](#pod-disk-i-o) +- [Container Metrics](#container-metrics) + - [Container CPU Utilization](#container-cpu-utilization) + - [Container Memory Utilization](#container-memory-utilization) + - [Container Disk I/O](#container-disk-i-o) + + + +# Cluster Metrics + +### Cluster CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | +| 
Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | + +### Cluster Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
    load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    | +| Summary |
    load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    | + +### Cluster Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | + +### Cluster Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | + +### Cluster Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(node_disk_read_bytes_total[5m])) by (instance)`
    written`sum(rate(node_disk_written_bytes_total[5m])) by (instance)`
    | +| Summary |
    read`sum(rate(node_disk_read_bytes_total[5m]))`
    written`sum(rate(node_disk_written_bytes_total[5m]))`
    | + +### Cluster Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    | +| Summary |
    receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    | + +### Cluster Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)
    | +| Summary |
    receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))
    | + +# Node Metrics + +### Node CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | +| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | + +### Node Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
    load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    | +| Summary |
    load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    | + +### Node Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | + +### Node Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | + +### Node Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
    written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
    | +| Summary |
    read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
    written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
    | + +### Node Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    | +| Summary |
    receive-droppedsum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    receive-errssum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    receive-packetssum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    transmit-droppedsum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    transmit-errssum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    transmit-packetssum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    | + +### Node Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)
    | +| Summary |
    receivesum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    transmitsum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))
    | + +# Etcd Metrics + +### Etcd Has a Leader + +`max(etcd_server_has_leader)` + +### Number of Times the Leader Changes + +`max(etcd_server_leader_changes_seen_total)` + +### Number of Failed Proposals + +`sum(etcd_server_proposals_failed_total)` + +### GRPC Client Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
    in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
    out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
    | +| Summary |
    in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
    out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
    | + +### Peer Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
    in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
    out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
    | +| Summary |
    in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
    out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
    | + +### DB Size + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | +| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | + +### Active Streams + +| Catalog | Expression | +| --- | --- | +| Detail |
    lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
    watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
    | +| Summary |
    lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
    watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
    | + +### Raft Proposals + +| Catalog | Expression | +| --- | --- | +| Detail |
    applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
    committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
    pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
    failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
    | +| Summary |
    applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
    committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
    pending`sum(increase(etcd_server_proposals_pending[5m]))`
    failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
    | + +### RPC Rate + +| Catalog | Expression | +| --- | --- | +| Detail |
    total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
    fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
    | +| Summary |
    total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
    fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
    | + +### Disk Operations + +| Catalog | Expression | +| --- | --- | +| Detail |
    commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
    fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
    | +| Summary |
    commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
    fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
    | + +### Disk Sync Duration + +| Catalog | Expression | +| --- | --- | +| Detail |
    wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
    db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
    | +| Summary |
    wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
    db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
    | + +# Kubernetes Components Metrics + +### API Server Request Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | +| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | + +### API Server Request Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | +| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | + +### Scheduling Failed Pods + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | +| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | + +### Controller Manager Queue Depth + +| Catalog | Expression | +| --- | --- | +| Detail |
    volumes`sum(volumes_depth) by instance`
    deployment`sum(deployment_depth) by instance`
    replicaset`sum(replicaset_depth) by instance`
    service`sum(service_depth) by instance`
    serviceaccount`sum(serviceaccount_depth) by instance`
    endpoint`sum(endpoint_depth) by instance`
    daemonset`sum(daemonset_depth) by instance`
    statefulset`sum(statefulset_depth) by instance`
    replicationmanager`sum(replicationmanager_depth) by instance`
    | +| Summary |
    volumes`sum(volumes_depth)`
    deployment`sum(deployment_depth)`
    replicaset`sum(replicaset_depth)`
    service`sum(service_depth)`
    serviceaccount`sum(serviceaccount_depth)`
    endpoint`sum(endpoint_depth)`
    daemonset`sum(daemonset_depth)`
    statefulset`sum(statefulset_depth)`
    replicationmanager`sum(replicationmanager_depth)`
    | + +### Scheduler E2E Scheduling Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | +| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | + +### Scheduler Preemption Attempts + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | +| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | + +### Ingress Controller Connections + +| Catalog | Expression | +| --- | --- | +| Detail |
    reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
    waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
    writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
    accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
    active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
    handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
    | +| Summary |
    reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
    waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
    writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
    accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
    active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
    handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
    | + +### Ingress Controller Request Process Time + +| Catalog | Expression | +| --- | --- | +| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | +| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | + +# Rancher Logging Metrics + + +### Fluentd Buffer Queue Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | + +### Fluentd Input Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | + +### Fluentd Output Errors Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | +| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | + +### Fluentd Output Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | + +# Workload Metrics + +### Workload CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | +| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | + +### Workload Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +# Pod Metrics + +### Pod CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    | +| Summary |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    | + +### Pod Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | +| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | + +### Pod Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | +| Summary |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +### Pod Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | +| Summary |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +### Pod Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
    | +| Summary |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +# Container Metrics + +### Container CPU Utilization + +| Catalog | Expression | +| --- | --- | +| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | + +### Container Memory Utilization + +`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` + +### Container Disk I/O + +| Catalog | Expression | +| --- | --- | +| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md new file mode 100644 index 0000000000..b24ec94a69 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/project-monitoring.md @@ -0,0 +1,84 @@ +--- +title: Project Monitoring +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/project-monitoring + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring +--- + +_Available as of v2.2.4_ + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. + +This section covers the following topics: + +- [Monitoring scope](#monitoring-scope) +- [Permissions to configure project monitoring](#permissions-to-configure-project-monitoring) +- [Enabling project monitoring](#enabling-project-monitoring) +- [Project-level monitoring resource requirements](#project-level-monitoring-resource-requirements) +- [Project metrics](#project-metrics) + +### Monitoring Scope + +Using Prometheus, you can monitor Rancher at both the [cluster level](../../../pages-for-subheaders/cluster-monitoring.md) and project level. For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. + +- [Cluster monitoring](../../../pages-for-subheaders/cluster-monitoring.md/) allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. + + - Kubernetes control plane + - etcd database + - All nodes (including workers) + +- Project monitoring allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. 
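Before wiring any of the workload, pod, or container expressions listed above into dashboards or alerts, it can help to evaluate them against the Prometheus instance that scrapes them. A minimal sketch, assuming the monitoring stack exposes a `prometheus-operated` service in the `cattle-prometheus` namespace (names vary by install) and that `jq` is available:

```bash
# Port-forward the monitoring Prometheus locally (service/namespace names are assumptions).
kubectl -n cattle-prometheus port-forward svc/prometheus-operated 9090:9090 &

# Evaluate one of the workload expressions above for a concrete namespace and pod pattern.
curl -s 'http://127.0.0.1:9090/api/v1/query' \
  --data-urlencode 'query=sum(rate(container_network_receive_bytes_total{namespace="default",pod_name=~"nginx.*",container_name!=""}[5m])) by (pod_name)' \
  | jq '.data.result'

# Stop the port-forward when you are done.
kill %1
```

If a query returns an empty result set, check the label names first; older cAdvisor metrics use `pod_name`/`container_name`, as shown in the tables above.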
+ +### Permissions to Configure Project Monitoring + +Only [administrators](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure project level monitoring. Project members can only view monitoring metrics. + +### Enabling Project Monitoring + +> **Prerequisite:** Cluster monitoring must be [enabled.](../../../pages-for-subheaders/cluster-monitoring.md) + +1. Go to the project where monitoring should be enabled. Note: When cluster monitoring is enabled, monitoring is also enabled by default in the **System** project. + +1. Select **Tools > Monitoring** in the navigation bar. + +1. Select **Enable** to show the [Prometheus configuration options](../../../pages-for-subheaders/cluster-monitoring.md/prometheus/). Enter in your desired configuration options. + +1. Click **Save**. + +### Project-Level Monitoring Resource Requirements + +Container| CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable +---------|---------------|---------------|-------------|-------------|------------- +Prometheus|750m| 750Mi | 1000m | 1000Mi | Yes +Grafana | 100m | 100Mi | 200m | 200Mi | No + + +**Result:** A single application,`project-monitoring`, is added as an [application](../../../pages-for-subheaders/helm-charts-in-rancher.md) to the project. After the application is `active`, you can start viewing project metrics through the [Rancher dashboard](../../../pages-for-subheaders/cluster-monitoring.md/) or directly from Grafana. + +> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. + +### Project Metrics +[Workload metrics](./expression.md#workload-metrics) are available for the project if monitoring is enabled at the [cluster level](../../../pages-for-subheaders/cluster-monitoring.md/) and at the [project level.](#enabling-project-monitoring) + +You can monitor custom metrics from any [exporters.](https://prometheus.io/docs/instrumenting/exporters/) You can also expose some custom endpoints on deployments without needing to configure Prometheus for your project. + +> **Example:** +> A [Redis](https://redis.io/) application is deployed in the namespace `redis-app` in the project `Datacenter`. It is monitored via [Redis exporter](https://github.com/oliver006/redis_exporter). After enabling project monitoring, you can edit the application to configure the Advanced Options -> Custom Metrics section. Enter the `Container Port` and `Path` and select the `Protocol`. + +To access a project-level Grafana instance, + +1. From the **Global** view, navigate to a cluster that has monitoring enabled. + +1. Go to a project that has monitoring enabled. + +1. 
From the project view, click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. + +1. Go to the `project-monitoring` application. + +1. In the `project-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. + +1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. + +**Results:** You will be logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md new file mode 100644 index 0000000000..fee3aa1aeb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/prometheus.md @@ -0,0 +1,112 @@ +--- +title: Prometheus Configuration +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/prometheus + - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/prometheus/ + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/prometheus/ +--- + +_Available as of v2.2.0_ + +While configuring monitoring at either the [cluster level](../../../pages-for-subheaders/cluster-monitoring.md) or [project level](./project-monitoring.md), there are multiple options that can be configured. + +- [Basic Configuration](#basic-configuration) +- [Advanced Options](#advanced-options) +- [Node Exporter](#node-exporter) +- [Persistent Storage](#persistent-storage) +- [Remote Storage](#remote-storage) + +# Basic Configuration + +Option | Description +-------|------------- +Data Retention | How long your Prometheus instance retains monitoring data scraped from Rancher objects before it's purged. +[Enable Node Exporter](#node-exporter) | Whether or not to deploy the node exporter. +Node Exporter Host Port | The host port on which data is exposed, i.e. data that Prometheus collects from your node hardware. Required if you have enabled the node exporter. +[Enable Persistent Storage](#persistent-storage) for Prometheus | Whether or not to configure storage for Prometheus so that metrics can be retained even if the Prometheus pod fails. +[Enable Persistent Storage](#persistent-storage) for Grafana | Whether or not to configure storage for Grafana so that the Grafana dashboards and configuration can be retained even if the Grafana pod fails. +Prometheus [CPU Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU resource limit for the Prometheus pod. 
+Prometheus [CPU Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) | CPU reservation for the Prometheus pod. +Prometheus [Memory Limit](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource limit for the Prometheus pod. +Prometheus [Memory Reservation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-memory) | Memory resource requests for the Prometheus pod. +Selector | Ability to select the nodes in which Prometheus and Grafana pods are deployed to. To use this option, the nodes must have labels. + +# Advanced Options + +Since monitoring is an [application](https://github.com/rancher/system-charts/tree/dev/charts/rancher-monitoring) from the [Rancher catalog](../../../pages-for-subheaders/helm-charts-in-rancher.md), it can be configured like any other catalog application, by passing in values to Helm. + +> **Warning:** Any modification to the application without understanding the entire application can lead to catastrophic errors. + +### Prometheus RemoteRead and RemoteWrite + +_Available as of v2.4.0_ + +Prometheus RemoteRead and RemoteWrite can be configured as custom answers in the **Advanced Options** section. + +For more information on remote endpoints and storage, refer to the [Prometheus documentation.](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) + +The Prometheus operator documentation contains the full [RemoteReadSpec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) + +An example configuration would be: + +| Variable | Value | +|--------------|------------| +| `prometheus.remoteWrite[0].url` | `http://mytarget.com` | + +### LivenessProbe and ReadinessProbe + +_Available as of v2.4.0_ + +Prometheus LivenessProbe and ReadinessProbe can be configured as custom answers in the **Advanced Options** section. + +The Kubernetes probe spec is [here.](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#probe-v1-core) + +Some example key-value pairs are: + +| Variable | Value | +|--------------|------------| +| `prometheus.livenessProbe.timeoutSeconds` | 60 | +| `prometheus.readinessProbe.timeoutSeconds` | 60 | + +# Node Exporter + +The [node exporter](https://github.com/prometheus/node_exporter/blob/master/README.md) is a popular open source exporter, which exposes the metrics for hardware and \*NIX kernels OS. It is designed to monitor the host system. However, there are still issues with namespaces when running it in a container, mostly around filesystem mount spaces. In order to monitor actual network metrics for the container network, the node exporter must be deployed with the `hostNetwork` mode. + +When configuring Prometheus and enabling the node exporter, enter a host port in the **Node Exporter Host Port** that will not produce port conflicts with existing applications. 
The host port chosen must be open to allow internal traffic between Prometheus and the Node Exporter. + +>**Warning:** In order for Prometheus to collect the metrics of the node exporter, after enabling cluster monitoring, you must open the Node Exporter Host Port in the host firewall rules to allow intranet access. By default, `9796` is used as that host port. + +# Persistent Storage + +>**Prerequisite:** Configure one or more StorageClasses to use as [persistent storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) for your Prometheus or Grafana pod. + +By default, when you enable Prometheus for either a cluster or project, all monitoring data that Prometheus collects is stored on its own pod. With local storage, if the Prometheus or Grafana pods fail, all the data is lost. Rancher recommends configuring an external persistent storage to the cluster. With the external persistent storage, if the Prometheus or Grafana pods fail, the new pods can recover using data from the persistent storage. + +When enabling persistent storage for Prometheus or Grafana, specify the size of the persistent volume and select the StorageClass. + +# Remote Storage + +>**Prerequisite:** Need a remote storage endpoint to be available. The possible list of integrations is available [here](https://prometheus.io/docs/operating/integrations/) + +Using advanced options, remote storage integration for the Prometheus installation can be configured as follows: + +``` +prometheus.remoteWrite[0].url = http://remote1/push +prometheus.remoteWrite[0].remoteTimeout = 33s + +prometheus.remoteWrite[1].url = http://remote2/push + + +prometheus.remoteRead[0].url = http://remote1/read +prometheus.remoteRead[0].proxyUrl = http://proxy.url +prometheus.remoteRead[0].bearerToken = token-value + +prometheus.remoteRead[1].url = http://remote2/read +prometheus.remoteRead[1].remoteTimeout = 33s +prometheus.remoteRead[1].readRecent = true +``` + +Additional fields can be set up based on the [ReadSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec) and [RemoteWriteSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec) diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md new file mode 100644 index 0000000000..d5d795a387 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics.md @@ -0,0 +1,66 @@ +--- +title: Viewing Metrics +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring/viewing-metrics + - /rancher/v2.0-v2.4/en/cluster-admin/tools/monitoring/viewing-metrics + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring/viewing-metrics + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/viewing-metrics/ +--- + +_Available as of v2.2.0_ + +After you've enabled monitoring at either the [cluster 
level](../../../pages-for-subheaders/cluster-monitoring.md) or [project level](./project-monitoring.md), you will want to be start viewing the data being collected. There are multiple ways to view this data. + +## Rancher Dashboard + +>**Note:** This is only available if you've enabled monitoring at the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/). Project specific analytics must be viewed using the project's Grafana instance. + +Rancher's dashboards are available at multiple locations: + +- **Cluster Dashboard**: From the **Global** view, navigate to the cluster. +- **Node Metrics**: From the **Global** view, navigate to the cluster. Select **Nodes**. Find the individual node and click on its name. Click **Node Metrics.** +- **Workload Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Click **Workload Metrics.** +- **Pod Metrics**: From the **Global** view, navigate to the project. Select **Workloads > Workloads**. Find the individual workload and click on its name. Find the individual pod and click on its name. Click **Pod Metrics.** +- **Container Metrics**: From the **Global** view, navigate to the project. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the individual workload and click on its name. Find the individual pod and click on its name. Find the individual container and click on its name. Click **Container Metrics.** + +Prometheus metrics are displayed and are denoted with the Grafana icon. If you click on the icon, the metrics will open a new tab in Grafana. + +Within each Prometheus metrics widget, there are several ways to customize your view. + +- Toggle between two views: + - **Detail**: Displays graphs and charts that let you view each event in a Prometheus time series + - **Summary** Displays events in a Prometheus time series that are outside the norm. +- Change the range of the time series that you're viewing to see a more refined or expansive data sample. +- Customize the data sample to display data between specific dates and times. + +When analyzing these metrics, don't be concerned about any single standalone metric in the charts and graphs. Rather, you should establish a baseline for your metrics over the course of time, e.g. the range of values that your components usually operate within and are considered normal. After you establish the baseline, be on the lookout for any large deltas in the charts and graphs, as these big changes usually indicate a problem that you need to investigate. + +## Grafana + +If you've enabled monitoring at either the [cluster level](monitoring-alerting/legacy/monitoring/cluster-monitoring/) or [project level](project-admin/tools/monitoring/), Rancher automatically creates a link to Grafana instance. Use this link to view monitoring data. + +Grafana allows you to query, visualize, alert, and ultimately, understand your cluster and workload data. For more information on Grafana and its capabilities, visit the [Grafana website](https://grafana.com/grafana). 
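The link that Rancher generates is the supported entry point, but when troubleshooting it can be convenient to reach the Grafana service directly with a port-forward. A sketch, assuming the monitoring apps run in the `cattle-prometheus` namespace; the service name and port are assumptions, so confirm them first:

```bash
# List the monitoring services to find the Grafana one (names vary by install).
kubectl -n cattle-prometheus get svc

# Forward it locally and browse to http://127.0.0.1:3000
kubectl -n cattle-prometheus port-forward svc/access-grafana 3000:80
```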
+ +### Authentication + +Rancher determines which users can access the new Grafana instance, as well as the objects they can view within it, by validating them against the user's [cluster or project roles](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md). In other words, a user's access in Grafana mirrors their access in Rancher. + +When you go to the Grafana instance, you will be logged in with the username `admin` and the password `admin`. If you log out and log in again, you will be prompted to change your password. You will only have access to the URL of the Grafana instance if you have access to view the corresponding metrics in Rancher. So for example, if your Rancher permissions are scoped to the project level, you won't be able to see the Grafana instance for cluster-level metrics. + +### Accessing the Cluster-level Grafana Instance + +1. From the **Global** view, navigate to a cluster that has monitoring enabled. + +1. Go to the **System** project view. This project is where the cluster-level Grafana instance runs. + +1. Click **Apps.** In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. + +1. Go to the `cluster-monitoring` application. + +1. In the `cluster-monitoring` application, there are two `/index.html` links: one that leads to a Grafana instance and one that leads to a Prometheus instance. When you click the Grafana link, it will redirect you to a new webpage for Grafana, which shows metrics for the cluster. + +1. You will be signed in to the Grafana instance automatically. The default username is `admin` and the default password is `admin`. For security, we recommend that you log out of Grafana, log back in with the `admin` password, and change your password. + +**Results:** You are logged into Grafana from the Grafana instance. After logging in, you can view the preset Grafana dashboards, which are imported via the [Grafana provisioning mechanism](http://docs.grafana.org/administration/provisioning/#dashboards), so you cannot modify them directly. For now, if you want to configure your own dashboards, clone the original and modify the new copy. 
diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/resources/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/disable-istio.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/disabling-istio/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/disable-istio.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/rbac-for-istio.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/rbac/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/rbac-for-istio.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/release-notes.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/release-notes/_index.md rename to versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/istio/release-notes.md diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md new file mode 100644 index 0000000000..14c9dc514f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/notifiers.md @@ -0,0 +1,207 @@ +--- +title: Notifiers +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/notifiers + - /rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/notifiers + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/notifiers/ + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/default-alerts/ +--- + +Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. + +Rancher integrates with a variety of popular IT services, including: + +- **Slack**: Send alert notifications to your Slack channels. +- **Email**: Choose email recipients for alert notifications. +- **PagerDuty**: Route notifications to staff by phone, SMS, or personal email. +- **WebHooks**: Update a webpage with alert notifications. +- **WeChat**: (Available as of v2.2.0) Send alert notifications to your Enterprise WeChat contacts. +- **DingTalk**: (Available as of v2.4.6) Send alert notifications to DingTalk using a webhook. +- **Microsoft Teams**: (Available as of v2.4.6) Send alert notifications to Teams using a webhook. 
+ +This section covers the following topics: + +- [Roles-based access control for notifiers](#roles-based-access-control-for-notifiers) +- [Adding notifiers](#adding-notifiers) +- [Configuration](#configuration) +- [Managing notifiers](#managing-notifiers) +- [Example payload for a webhook alert notifier](#example-payload-for-a-webhook-alert-notifier) + +# Roles-based Access Control for Notifiers + +Notifiers are configured at the cluster level. This model ensures that only cluster owners need to configure notifiers, leaving project owners to simply configure alerts in the scope of their projects. You don't need to dispense privileges like SMTP server access or cloud account access. + +# Adding Notifiers + +Set up a notifier so that you can begin configuring and sending alerts. + +1. From the **Global View**, open the cluster that you want to add a notifier. +1. From the main menu, select **Tools > Notifiers**. Then click **Add Notifier**. +1. Select the service you want to use as your notifier, and then fill out the form. For help filling out the form, refer to the configuration section below. +1. Click **Test.** You should receive a notification confirming that the notifier is configured correctly. +1. Click **Add** to complete adding the notifier. + +**Result:** Your notifier is added to Rancher. + +# Configuration + +- [Slack](#slack) +- [Email](#email) +- [PagerDuty](#pagerduty) +- [Webhook](#webhook) +- [WeChat](#wechat) +- [DingTalk](#dingtalk) +- [Microsoft Teams](#microsoft-teams) + +### Slack + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| URL | From Slack, create a webhook. For instructions, see the [Slack Documentation](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack). Then enter the Slack webhook URL. | +| Default Channel | Enter the name of the channel that you want to send alert notifications in the following format: `#`. Both public and private channels are supported. | +| Proxy URL | Proxy for the Slack webhook. | +| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +**Validation:** Click **Test**. If the test is successful, the Slack channel you're configuring for the notifier outputs **Slack setting validated.** + +### Email + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| Default Recipient Address | Enter the email address that you want to receive the notification. | +| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +SMTP Server Configuration: + +| Field | Explanation | +|----------|----------------------| +| Sender | Enter an email address available on your mail server that you want to send the notification. | +| Host | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com` | +| Port | In the **Port** field, enter the port used for email. Typically, TLS uses `587` and SSL uses `465`. | +| Use TLS | If you're using TLS, make sure **Use TLS** is selected. | +| Username | Username to authenticate with the SMTP server. | +| Password | Password to authenticate with the SMTP server. | + +**Validation:** Click **Test**. If the test is successful, Rancher prints **settings validated** and you receive a test notification email. 
+ +### PagerDuty + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| Default Integration Key | From PagerDuty, create a Prometheus integration. For instructions, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. +| Service Key | The same as the integration key. For instructions on creating a Prometheus integration, see the [PagerDuty Documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). Then enter the integration key. | +| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +**Validation:** Click **Test**. If the test is successful, your PagerDuty endpoint outputs **PagerDuty setting validated.** + +### Webhook + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| URL | Using the app of your choice, create a webhook URL. | +| Proxy URL | Proxy for the webhook. | +| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +**Validation:** Click **Test**. If the test is successful, the URL you're configuring as a notifier outputs **Webhook setting validated.** + +### WeChat + +_Available as of v2.2.0_ + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| Corporation ID | Enter the "EnterpriseID" of your corporation. You can get it fro the [Profile page](https://work.weixin.qq.com/wework_admin/frame#profile). | +| Application Agent ID | From Enterprise WeChat, create an application in the [Application page](https://work.weixin.qq.com/wework_admin/frame#apps), and then enter the "AgentId" of this application. You will also need to enter the application secret. | +| Application Secret | The secret that corresponds to the Application Agent ID. | +| Recipient Type | Party, tag, or user. | +| Default Recipient | The default recipient ID should correspond to the recipient type. It should be the party ID, tag ID or user account that you want to receive the notification. You could get contact information from [Contacts page](https://work.weixin.qq.com/wework_admin/frame#contacts). | +| Proxy URL | If you are using a proxy, enter the proxy URL. | +| Send Resolved Alerts | _Available as of v2.3.0_ Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +**Validation:** Click **Test.** If the test is successful, you should receive an alert message. + +### DingTalk + +_Available as of v2.4.6_ + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| Webhook URL | Enter the DingTalk webhook URL. For help setting up the webhook, refer to the [DingTalk documentation.](https://www.alibabacloud.com/help/doc-detail/52872.htm) | +| Secret | Optional: Enter a secret for the DingTalk webhook. | +| Proxy URL | Optional: Enter a proxy for the DingTalk webhook. | +| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. 
[Resolved] High CPU Usage) | + +**Validation:** Click **Test.** If the test is successful, the DingTalk notifier output is **DingTalk setting validated.** + +### Microsoft Teams + +_Available as of v2.4.6_ + +| Field | Explanation | +|----------|----------------------| +| Name | Enter a **Name** for the notifier. | +| Webhook URL | Enter the Microsoft Teams webhook URL. For help setting up the webhook, refer to the [Teams Documentation.](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook) | +| Proxy URL | Optional: Enter a proxy for the Teams webhook. | +| Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage) | + +**Validation:** Click **Test.** If the test is successful, the Teams notifier output is **MicrosoftTeams setting validated.** + +# Managing Notifiers + +After you set up notifiers, you can manage them. From the **Global** view, open the cluster that you want to manage your notifiers. Select **Tools > Notifiers**. You can: + +- **Edit** their settings that you configured during their initial setup. +- **Clone** them, to quickly setup slightly different notifiers. +- **Delete** them when they're no longer necessary. + +# Example Payload for a Webhook Alert Notifier + +```json +{ + "receiver": "c-2a3bc:kube-components-alert", + "status": "firing", + "alerts": [ + { + "status": "firing", + "labels": { + "alert_name": "Scheduler is unavailable", + "alert_type": "systemService", + "cluster_name": "mycluster (ID: c-2a3bc)", + "component_name": "scheduler", + "group_id": "c-2a3bc:kube-components-alert", + "logs": "Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused", + "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service", + "severity": "critical" + }, + "annotations": {}, + "startsAt": "2020-01-30T19:18:13.321684733Z", + "endsAt": "0001-01-01T00:00:00Z", + "generatorURL": "" + } + ], + "groupLabels": { + "component_name": "scheduler", + "rule_id": "c-2a3bc:kube-components-alert_scheduler-system-service" + }, + "commonLabels": { + "alert_name": "Scheduler is unavailable", + "alert_type": "systemService", + "cluster_name": "mycluster (ID: c-2a3bc)" + } +} +``` +# What's Next? + +After creating a notifier, set up alerts to receive notifications of Rancher system events. + +- [Cluster owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) can set up alerts at the [cluster level](../../pages-for-subheaders/cluster-alerts.md). +- [Project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can set up alerts at the [project level](../../reference-guides/rancher-project-tools/project-alerts.md). 
diff --git a/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md new file mode 100644 index 0000000000..287a22f629 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/explanations/integrations-in-rancher/opa-gatekeeper.md @@ -0,0 +1,99 @@ +--- +title: OPA Gatekeeper +weight: 17 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/opa-gatekeeper + - /rancher/v2.0-v2.4/en/opa-gatekeper/Open%20Policy%20Agent + - /rancher/v2.0-v2.4/en/opa-gatekeper +--- +_Available as of v2.4.0_ + +To ensure consistency and compliance, every organization needs the ability to define and enforce policies in its environment in an automated way. [OPA (Open Policy Agent)](https://www.openpolicyagent.org/) is a policy engine that facilitates policy-based control for cloud native environments. Rancher provides the ability to enable OPA Gatekeeper in Kubernetes clusters, and also installs a couple of built-in policy definitions, which are also called constraint templates. + +OPA provides a high-level declarative language that lets you specify policy as code and ability to extend simple APIs to offload policy decision-making. + +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is a project that provides integration between OPA and Kubernetes. OPA Gatekeeper provides: + +- An extensible, parameterized policy library. +- Native Kubernetes CRDs for instantiating the policy library, also called “constraints." +- Native Kubernetes CRDs for extending the policy library, also called "constraint templates." +- Audit functionality. + +To read more about OPA, please refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/) + +# How the OPA Gatekeeper Integration Works + +Kubernetes provides the ability to extend API server functionality via admission controller webhooks, which are invoked whenever a resource is created, updated or deleted. Gatekeeper is installed as a validating webhook and enforces policies defined by Kubernetes custom resource definitions. In addition to the admission control usage, Gatekeeper provides the capability to audit existing resources in Kubernetes clusters and mark current violations of enabled policies. + +OPA Gatekeeper is made available via Rancher's Helm system chart, and it is installed in a namespace named `gatekeeper-system.` + +# Enabling OPA Gatekeeper in a Cluster + +> **Prerequisites:** +> +> - Only administrators and cluster owners can enable OPA Gatekeeper. +> - The dashboard needs to be enabled using the `dashboard` feature flag. For more information, refer to the [section on enabling experimental features.](../../pages-for-subheaders/enable-experimental-features.md) + +1. Navigate to the cluster's **Dashboard** view. +1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** +1. To install Gatekeeper with the default configuration, click on **Enable Gatekeeper (v0.1.0) with defaults.** +1. To change any default configuration, click on **Customize Gatekeeper yaml configuration.** + +# Constraint Templates + +[Constraint templates](https://github.com/open-policy-agent/gatekeeper#constraint-templates) are Kubernetes custom resources that define the schema and Rego logic of the OPA policy to be applied by Gatekeeper. 
For more information on the Rego policy language, refer to the [official documentation.](https://www.openpolicyagent.org/docs/latest/policy-language/) + +When OPA Gatekeeper is enabled, Rancher installs some templates by default. + +To list the constraint templates installed in the cluster, go to the left side menu under OPA Gatekeeper and click on **Templates.** + +Rancher also provides the ability to create your own constraint templates by importing YAML definitions. + +# Creating and Configuring Constraints + +[Constraints](https://github.com/open-policy-agent/gatekeeper#constraints) are Kubernetes custom resources that define the scope of objects to which a specific constraint template applies to. The complete policy is defined by constraint templates and constraints together. + +> **Prerequisites:** OPA Gatekeeper must be enabled in the cluster. + +To list the constraints installed, go to the left side menu under OPA Gatekeeper, and click on **Constraints.** + +New constraints can be created from a constraint template. + +Rancher provides the ability to create a constraint by using a convenient form that lets you input the various constraint fields. + +The **Edit as yaml** option is also available to configure the the constraint's yaml definition. + +### Exempting Rancher's System Namespaces from Constraints + +When a constraint is created, ensure that it does not apply to any Rancher or Kubernetes system namespaces. If the system namespaces are not excluded, then it is possible to see many resources under them marked as violations of the constraint. + +To limit the scope of the constraint only to user namespaces, always specify these namespaces under the **Match** field of the constraint. + +Also, the constraint may interfere with other Rancher functionality and deny system workloads from being deployed. To avoid this, exclude all Rancher-specific namespaces from your constraints. + +# Enforcing Constraints in your Cluster + +When the **Enforcement Action** is **Deny,** the constraint is immediately enabled and will deny any requests that violate the policy defined. By default, the enforcement value is **Deny.** + +When the **Enforcement Action** is **Dryrun,** then any resources that violate the policy are only recorded under the constraint's status field. + +To enforce constraints, create a constraint using the form. In the **Enforcement Action** field, choose **Deny.** + +# Audit and Violations in your Cluster + +OPA Gatekeeper runs a periodic audit to check if any existing resource violates any enforced constraint. The audit-interval (default 300s) can be configured while installing Gatekeeper. + +On the Gatekeeper page, any violations of the defined constraints are listed. + +Also under **Constraints,** the number of violations of the constraint can be found. + +The detail view of each constraint lists information about the resource that violated the constraint. + +# Disabling Gatekeeper + +1. Navigate to the cluster's Dashboard view +1. On the left side menu, expand the cluster menu and click on **OPA Gatekeeper.** +1. Click the **⋮ > Disable**. + +**Result:** Upon disabling OPA Gatekeeper, all constraint templates and constraints will also be deleted. 
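Constraint templates and the constraints created from them are removed along with Gatekeeper, so it can be worth recording what is currently installed before you disable it. A quick sketch with kubectl; the `k8srequiredlabels` kind is only an example from the Gatekeeper library, and your cluster may define different kinds:

```bash
# List the installed constraint templates.
kubectl get constrainttemplates

# Each template defines its own cluster-scoped constraint kind under this API group.
kubectl api-resources --api-group=constraints.gatekeeper.sh

# List the constraints of one example kind to capture current violations/settings.
kubectl get k8srequiredlabels
```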
+ diff --git a/versioned_docs/version-2.0-2.4/faq.md b/versioned_docs/version-2.0-2.4/faq.md new file mode 100644 index 0000000000..2c580cdfba --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq.md @@ -0,0 +1,72 @@ +--- +title: FAQ +weight: 25 +aliases: + - /rancher/v2.0-v2.4/en/about/ +--- + +This FAQ is a work in progress designed to answers the questions our users most frequently ask about Rancher v2.x. + +See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions. + +
    + +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** + +When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. + +
    + +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + +Yes. + +
    + +**Does Rancher support Windows?** + +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.](pages-for-subheaders/use-windows-clusters.md) + +
+ +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.](pages-for-subheaders/istio.md) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). + +
    + +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** + +Secrets management is on our roadmap but we haven't assigned it to a specific release yet. + +
    + +**Does Rancher v2.x support RKT containers as well?** + +At this time, we only support Docker. + +
    + +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and imported Kubernetes?** + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
    + +**Are you planning on supporting Traefik for existing setups?** + +We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. + +
+ +**Can I import OpenShift Kubernetes clusters into v2.x?** + +Our goal is to run any upstream Kubernetes cluster. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. + +
    + +**Are you going to integrate Longhorn?** + +Yes. Longhorn was on a bit of a hiatus while we were working on v2.0. We plan to re-engage on the project. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md new file mode 100644 index 0000000000..4eb3d0b562 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/container-network-interface-providers.md @@ -0,0 +1,154 @@ +--- +title: Container Network Interface (CNI) Providers +description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you +weight: 2300 +--- + +## What is CNI? + +CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. + +Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. + +![CNI Logo](/img/cni-logo.png) + +For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). + +### What Network Models are Used in CNI? + +CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). + +#### What is an Encapsulated Network? + +This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. + +In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. + +This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. + +CNI network providers using this network model include Flannel, Canal, and Weave. + +![Encapsulated Network](/img/encapsulated-network.png) + +#### What is an Unencapsulated Network? + +This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. 
Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). + +In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. + +This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. + +CNI network providers using this network model include Calico and Romana. + +![Unencapsulated Network](/img/unencapsulated-network.png) + +### What CNI Providers are Provided by Rancher? + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave (Weave is available as of v2.2.0). You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Canal + +![Canal Logo](/img/canal-logo.png) + +Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. + +In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) + +![](/img/canal-diagram.png) + +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) + +#### Flannel + +![Flannel Logo](/img/flannel-logo.png) + +Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). + +Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. 
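When troubleshooting VXLAN connectivity, it helps to confirm that the ports above (UDP `8472`, TCP `9099`) are actually open between workers. A quick sketch from a node shell; the peer IP is a placeholder, and `ss`/`nc` must be installed on the node:

```bash
# On a worker node, check that the overlay and health-check ports are bound locally.
ss -lun | grep 8472   # VXLAN (UDP)
ss -ltn | grep 9099   # health check (TCP)

# From another worker, probe UDP 8472 toward a peer node (UDP probes are best-effort;
# a firewall reject will surface as an error, while silence usually means the port is open).
nc -vzu -w 3 10.0.0.12 8472
```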
+ +![Flannel Diagram](/img/flannel-diagram.png) + +For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). + +#### Calico + +![Calico Logo](/img/calico-logo.png) + +Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. + +Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. + +Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +![Calico Diagram](/img/calico-diagram.svg) + +For more information, see the following pages: + +- [Project Calico Official Site](https://www.projectcalico.org/) +- [Project Calico GitHub Page](https://github.com/projectcalico/calico) + + +#### Weave + +![Weave Logo](/img/weave-logo.png) + +_Available as of v2.2.0_ + +Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. + +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +For more information, see the following pages: + +- [Weave Net Official Site](https://www.weave.works/) + +### CNI Features by Provider + +The following table summarizes the different features available for each CNI network provider provided by Rancher. + +| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | +| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | +| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | +| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | +| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | +| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | + +- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) + +- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. + +- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. 
+ +- Mesh: This feature allows service-to-service network communication between distinct Kubernetes clusters. + +- External Datastore: CNI network providers with this feature need an external datastore for their data. + +- Encryption: This feature encrypts the network control and data planes, keeping traffic secure. + +- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. + +#### CNI Community Popularity + +The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. + +| Provider | Project | Stars | Forks | Contributors | +| ---- | ---- | ---- | ---- | ---- | +| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | +| Flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | +| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | +| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | + +
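If you want to confirm which CNI provider a cluster is actually running, one rough check is to look at the DaemonSets in the `kube-system` namespace. The names used below (`canal`, `flannel`, `calico`, `weave`) vary by provider and version, so treat them as examples rather than exact values:

```
kubectl -n kube-system get daemonsets
kubectl -n kube-system get pods -o wide | grep -E 'canal|flannel|calico|weave'
```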
    +### Which CNI Provider Should I Use? + +It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. + +As of Rancher v2.0.7, Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. + +### How can I configure a CNI network provider? + +Please see [Cluster Options](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) and the options for [Network Plug-ins](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.0-v2.4/en/faq/kubectl/_index.md b/versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/faq/kubectl/_index.md rename to versioned_docs/version-2.0-2.4/faq/install-and-configure-kubectl.md diff --git a/versioned_docs/version-2.0-2.4/faq/networking.md b/versioned_docs/version-2.0-2.4/faq/networking.md new file mode 100644 index 0000000000..580786492c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/networking.md @@ -0,0 +1,9 @@ +--- +title: Networking +weight: 8005 +--- + +Networking FAQ's + +- [CNI Providers](container-network-interface-providers.md) + diff --git a/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md new file mode 100644 index 0000000000..d2180ad5d7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/rancher-is-no-longer-needed.md @@ -0,0 +1,57 @@ +--- +title: Rancher is No Longer Needed +weight: 8010 +aliases: + - /rancher/v2.0-v2.4/en/installation/removing-rancher/cleaning-cluster-nodes/ + - /rancher/v2.0-v2.4/en/installation/removing-rancher/ + - /rancher/v2.0-v2.4/en/admin-settings/removing-rancher/ + - /rancher/v2.0-v2.4/en/admin-settings/removing-rancher/rancher-cluster-nodes/ +--- + +This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. + +- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) +- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) +- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) +- [What if I don't want my imported cluster managed by Rancher?](#what-if-i-don-t-want-my-imported-cluster-managed-by-rancher) +- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) + +### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? 
+ +If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. + +### If the Rancher server is deleted, how do I access my downstream clusters? + +The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: + +- **Imported clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. +- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. +- **RKE clusters:** To access an [RKE cluster,](../pages-for-subheaders/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../pages-for-subheaders/rancher-manager-architecture.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. + +### What if I don't want Rancher anymore? + +If you [installed Rancher on a Kubernetes cluster,](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [System Tools](../reference-guides/system-tools.md) with the `remove` subcommand. + +If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. + +Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) + +### What if I don't want my imported cluster managed by Rancher? + +If an imported cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was imported into Rancher. + +To detach the cluster, + +1. From the **Global** view in Rancher, go to the **Clusters** tab. +2. Go to the imported cluster that should be detached from Rancher and click **⋮ > Delete.** +3. Click **Delete.** + +**Result:** The imported cluster is detached from Rancher and functions normally outside of Rancher. + +### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? + +At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. 
+ +The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) + +For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/versioned_docs/version-2.0-2.4/faq/security.md b/versioned_docs/version-2.0-2.4/faq/security.md new file mode 100644 index 0000000000..aee42e0fb9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/security.md @@ -0,0 +1,15 @@ +--- +title: Security +weight: 8007 + +--- + +**Is there a Hardening Guide?** + +The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. + +
    + +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** + +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.0-2.4/faq/technical-items.md b/versioned_docs/version-2.0-2.4/faq/technical-items.md new file mode 100644 index 0000000000..50752cdfec --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/technical-items.md @@ -0,0 +1,196 @@ +--- +title: Technical +weight: 8006 +--- + +### How can I reset the administrator password? + +Docker Install: +``` +$ docker exec -ti reset-password +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_rancher-cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password +New password for default administrator (user-xxxxx): + +``` + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +Kubernetes install (RKE add-on): +``` +$ KUBECONFIG=./kube_config_rancher-cluster.yml +$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- reset-password +New password for default administrator (user-xxxxx): + +``` + +### I deleted/deactivated the last admin, how can I fix it? +Docker Install: +``` +$ docker exec -ti ensure-default-admin +New default administrator (user-xxxxx) +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_rancher-cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin +New password for default administrator (user-xxxxx): + +``` + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +Kubernetes install (RKE add-on): +``` +$ KUBECONFIG=./kube_config_rancher-cluster.yml +$ kubectl --kubeconfig $KUBECONFIG exec -n cattle-system $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name=="cattle-server") | .metadata.name') -- ensure-default-admin +New password for default admin user (user-xxxxx): + +``` + +### How can I enable debug logging? + +See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) + +### My ClusterIP does not respond to ping + +ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. 
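For example, with a hypothetical service named `my-service` (substitute the ClusterIP and port that `kubectl` reports for your own service):

```
kubectl get service my-service   # shows the ClusterIP and port, e.g. 10.43.0.25:80
curl -v http://10.43.0.25:80       # a healthy ClusterIP answers here even though it ignores ping
```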
+ +### Where can I manage Node Templates? + +Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. + +### Why is my Layer-4 Load Balancer in `Pending` state? + +The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](../pages-for-subheaders/set-up-cloud-providers.md) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) + +### Where is the state of Rancher stored? + +- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. +- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. + +### How are the supported Docker versions determined? + +We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. + +### How can I access nodes created by Rancher? + +SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. + +![Download Keys](/img/downloadsshkeys.png) + +Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) + +``` +$ ssh -i id_rsa user@ip_of_node +``` + +### How can I automate task X in Rancher? + +The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: + +* Visit `https://your_rancher_ip/v3` and browse the API options. +* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) + +### The IP address of a node changed, how can I recover? + +A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. + +When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) to clean the node. + +When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. + +### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? 
+ +You can add additional arguments/binds/environment variables via the [Config File](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). + +### How do I check if my certificate chain is valid? + +Use the `openssl verify` command to validate your certificate chain: + +>**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. If you already have this certificate, you can use it in the verification of the certificate like shown below: + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). + +``` +-----BEGIN CERTIFICATE----- +%YOUR_CERTIFICATE% +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +%YOUR_INTERMEDIATE_CERTIFICATE% +-----END CERTIFICATE----- +``` + +If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: + +``` +openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem +subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com +issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA +``` + +### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? + +Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. + +Check `Common Name`: + +``` +openssl x509 -noout -subject -in cert.pem +subject= /CN=rancher.my.org +``` + +Check `Subject Alternative Names`: + +``` +openssl x509 -noout -in cert.pem -text | grep DNS + DNS:rancher.my.org +``` + +### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? 
+ +This is due to a combination of the following default Kubernetes settings: + +* kubelet + * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) +* kube-controller-manager + * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) + * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) + * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) + +See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. + +In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. + +* kube-apiserver (Kubernetes v1.13 and up) + * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. + * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. + +### Can I use keyboard shortcuts in the UI? + +Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. diff --git a/content/rancher/v2.6/en/faq/telemetry/_index.md b/versioned_docs/version-2.0-2.4/faq/telemetry.md similarity index 100% rename from content/rancher/v2.6/en/faq/telemetry/_index.md rename to versioned_docs/version-2.0-2.4/faq/telemetry.md diff --git a/versioned_docs/version-2.0-2.4/faq/upgrades-to-2x.md b/versioned_docs/version-2.0-2.4/faq/upgrades-to-2x.md new file mode 100644 index 0000000000..bae4da16a6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/faq/upgrades-to-2x.md @@ -0,0 +1,106 @@ +--- +title: Questions about Upgrading to Rancher v2.x +weight: 1 +aliases: + - /rancher/v2.x/en/faq/upgrades-to-2x/ +--- + +This page contains frequently asked questions about the changes between Rancher v1.x and v2.x, and how to upgrade from Rancher v1.x to v2.x. + +# Kubernetes + +**What does it mean when you say Rancher v2.x is built on Kubernetes?** + +Rancher v2.x is a complete container management platform built 100% on Kubernetes leveraging its Custom Resource and Controller framework. All features are written as a CustomResourceDefinition (CRD) which extends the existing Kubernetes API and can leverage native features such as RBAC. + +
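A quick way to see this in practice (assuming `kubectl` access to the cluster where Rancher itself runs) is to list some of these custom resources directly; the resource names below are illustrative of the `management.cattle.io` API group:

```
# Rancher registers its API types as CRDs under the *.cattle.io API groups
kubectl get crd | grep cattle.io

# For example, the clusters and projects Rancher manages are custom resources
kubectl get clusters.management.cattle.io
kubectl get projects.management.cattle.io --all-namespaces
```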
    + +**Do you plan to implement upstream Kubernetes, or continue to work on your own fork?** + +We're still going to provide our distribution when you select the default option of having us create your Kubernetes cluster, but it will be very close to upstream. + +
    + +**Does this release mean that we need to re-train our support staff in Kubernetes?** + +Yes. Rancher will offer the native Kubernetes functionality via `kubectl` but will also offer our own UI dashboard to allow you to deploy Kubernetes workload without having to understand the full complexity of Kubernetes. However, to fully leverage Kubernetes, we do recommend understanding Kubernetes. We do plan on improving our UX with subsequent releases to make Kubernetes easier to use. + +
    + +**Is a Rancher compose going to make a Kubernetes pod? Do we have to learn both now? We usually use the filesystem layer of files, not the UI.** + +No. Unfortunately, the differences were enough such that we cannot support Rancher compose anymore in 2.x. We will be providing both a tool and guides to help with this migration. + +
    + +**If we use Kubernetes native YAML files for creating resources, should we expect that to work as expected, or do we need to use Rancher/Docker compose files to deploy infrastructure?** + +Absolutely. + +# Cattle + +**How does Rancher v2.x affect Cattle?** + +Cattle will not supported in v2.x as Rancher has been re-architected to be based on Kubernetes. You can, however, expect majority of Cattle features you use will exist and function similarly on Kubernetes. We will develop migration tools in Rancher v2.1 to help you transform your existing Rancher Compose files into Kubernetes YAML files. + +
    + +**Can I migrate existing Cattle workloads into Kubernetes?** + +Yes. In the upcoming Rancher v2.1 release we will provide a tool to help translate existing Cattle workloads in Compose format to Kubernetes YAML format. You will then be able to deploy those workloads on the v2.x platform. + +# Feature Changes + +**Can we still add our own infrastructure services, which had a separate view/filter in 1.6.x?** + +Yes. You can manage Kubernetes storage, networking, and its vast ecosystem of add-ons. + +
    + +**Are there changes to default roles available now or going forward? Will the Kubernetes alignment impact plans for roles/RBAC?** + +The default roles will be expanded to accommodate the new Rancher 2.x features, and will also take advantage of the Kubernetes RBAC (Role-Based Access Control) capabilities to give you more flexibility. + +
    + +**Will there be any functions like network policies to separate a front-end container from a back-end container through some kind of firewall in v2.x?** + +Yes. You can do so by leveraging Kubernetes' network policies. + +
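As a sketch of what such a policy can look like (the namespace, labels, and port here are hypothetical, so adjust them to your own workloads), the following only lets front-end pods reach back-end pods on one port:

```
cat <<EOF | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-frontend
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend          # the pods being protected
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend # only these pods may connect
      ports:
        - protocol: TCP
          port: 8080
EOF
```

Note that such policies are only enforced when the cluster's CNI provider supports them, for example Canal or Calico.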
    + +**What about the CLI? Will that work the same way with the same features?** + +Yes. Definitely. + +# Environments & Clusters + +**Can I still create templates for environments and clusters?** + +Starting with 2.0, the concept of an environment has now been changed to a Kubernetes cluster as going forward, only the Kubernetes orchestration engine is supported. + +Kubernetes RKE Templates is on our roadmap for 2.x. Please refer to our Release Notes and documentation for all the features that we currently support. + +
    + +**Can you still add an existing host to an environment? (i.e. not provisioned directly from Rancher)** + +Yes. We still provide you with the same way of executing our Rancher agents directly on hosts. + +# Upgrading/Migrating + +**How would the migration from v1.x to v2.x work?** + +Due to the technical difficulty in transforming a Docker container into a pod running Kubernetes, upgrading will require users to "replay" those workloads from v1.x into new v2.x environments. We plan to ship with a tool in v2.1 to translate existing Rancher Compose files into Kubernetes YAML files. You will then be able to deploy those workloads on the v2.x platform. + +
    + +**Is it possible to upgrade from Rancher v1.x to v2.x without any disruption to Cattle and Kubernetes clusters?** + +At this time, we are still exploring this scenario and taking feedback. We anticipate that you will need to launch a new Rancher instance and then relaunch on v2.x. Once you've moved to v2.x, upgrades will be in place, as they are in v1.6. + +# Support + +**Are you planning some long-term support releases for Rancher v1.6?** + +That is definitely the focus of the v1.6 stream. We're continuing to improve that release, fix bugs, and maintain it. New releases of the v1.6 stream are announced in the [Rancher forums.](https://forums.rancher.com/c/announcements) The Rancher wiki contains the [v1.6 release notes.](https://github.com/rancher/rancher/wiki/Rancher-1.6) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started.md b/versioned_docs/version-2.0-2.4/getting-started.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md new file mode 100644 index 0000000000..ff2a54ba95 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher.md @@ -0,0 +1,344 @@ +--- +title: 4. Install Rancher +weight: 400 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-installation/install-rancher/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher + - /rancher/v2.0-v2.4/en/installation/air-gap/install-rancher + - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/install-rancher + - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/install-rancher/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + + + + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes Installation is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher in five parts: + +- [A. Add the Helm Chart Repository](#a-add-the-helm-chart-repository) +- [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration) +- [C. Render the Rancher Helm Template](#c-render-the-rancher-helm-template) +- [D. Install Rancher](#d-install-rancher) +- [E. For Rancher versions before v2.3.0, Configure System Charts](#e-for-rancher-versions-before-v2-3-0-configure-system-charts) + +### A. 
Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, initialize `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements](../../../resources/choose-a-rancher-version.md) to choose a version of Helm to install Rancher. + ```plain + helm init -c + ``` + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../resources/choose-a-rancher-version.md). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. +```plain +helm fetch rancher-/rancher +``` + +> Want additional options? See the Rancher [Helm chart options](../../../../../reference-guides/installation-references/helm-chart-options.md). + +### B. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../../reference-guides/installation-references/helm-chart-options#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
    This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no | + +### C. Render the Rancher Helm Template + +When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. + +| Chart Option | Chart Value | Description | +| ----------------------- | -------------------------------- | ---- | +| `certmanager.version` | `` | Configure the proper Rancher TLS issuer depending on the running cert-manager version. | +| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | + +Based on the choice you made in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), complete one of the procedures below. + +
    + Option A-Default Self-Signed Certificate + +By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. + +> **Note:** +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](../../../resources/upgrade-cert-manager.md). + +1. From a system connected to the internet, add the cert-manager repo to Helm. + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.14.2 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + ```plain + helm template ./cert-manager-v0.14.2.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.14/deploy/manifests/00-crds.yaml + ``` +1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + + Placeholder | Description + ------------|------------- + `` | The version number of the output tarball. + `` | The DNS name you pointed at your load balancer. + `` | The DNS name for your private registry. + `` | Cert-manager version running on k8s cluster. + + ```plain + helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +
    + +
    + Option B: Certificates From Files using Kubernetes Secrets + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | + +```plain + helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +Then refer to [Adding TLS Secrets](../../../resources/add-tls-secrets.md) to publish the certificate files so Rancher and the ingress controller can use them. + +
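In practice, publishing the certificate files comes down to creating Kubernetes secrets in the `cattle-system` namespace once that namespace exists (it is created during the install step that follows). Roughly, assuming your files are named `tls.crt` and `tls.key` (plus `cacerts.pem` when using a private CA); the linked page has the authoritative steps:

```plain
kubectl -n cattle-system create secret tls tls-rancher-ingress \
  --cert=tls.crt \
  --key=tls.key

# Only needed when you passed --set privateCA=true above
kubectl -n cattle-system create secret generic tls-ca \
  --from-file=cacerts.pem
```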
    + +### D. Install Rancher + +Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. + +Use `kubectl` to create namespaces and apply the rendered manifests. + +If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. + +
+ Self-Signed Certificate Installs - Install Cert-manager + +If you are using self-signed certificates, install cert-manager: + +1. Create the namespace for cert-manager. +```plain +kubectl create namespace cert-manager +``` + +1. Create the cert-manager CustomResourceDefinitions (CRDs). +```plain +kubectl apply -f cert-manager/cert-manager-crd.yaml +``` + +> **Important:** +> If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Launch cert-manager. +```plain +kubectl apply -R -f ./cert-manager +``` + +
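Before moving on to the Rancher install, it can help to confirm that the cert-manager pods come up cleanly:

```plain
kubectl get pods --namespace cert-manager
```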
    + +Install Rancher: + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` + +**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. + +### E. For Rancher versions before v2.3.0, Configure System Charts + +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../../resources/local-system-charts.md). + +### Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](../../../../../reference-guides/installation-references/helm-chart-options.md) +- [Adding TLS secrets](../../../resources/add-tls-secrets.md) +- [Troubleshooting Rancher Kubernetes Installations](../../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) + +
    + + +The Docker installation is for Rancher users that are wanting to **test** out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. **Important: If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | + +> **Do you want to...** +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](../../../../../reference-guides/installation-references/helm-chart-options#additional-trusted-cas). +> - Record all transactions with the Rancher API? See [API Auditing](../../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). + +- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](installation/options/local-system-charts/) + +Choose from the following options: + +
    + Option A-Default Self-Signed Certificate + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](../../../resources/choose-a-rancher-version.md) that you want to install. | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
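Once the container is up, a quick way to confirm that Rancher started correctly (the container ID is whatever `docker ps` reports for the `rancher/rancher` container):

```
docker ps                       # note the ID of the rancher/rancher container
docker logs -f <container_id>   # follow the startup logs; the UI is served on ports 80/443
```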
    +
    + Option B-Bring Your Own Certificate: Self-Signed + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/options/server-tags/) that you want to install. | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
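If you still need to generate the self-signed certificate referenced above, one way to do it is with OpenSSL. This is a sketch: `rancher.yourdomain.com` is a placeholder hostname, and the `-addext` flag requires OpenSSL 1.1.1 or newer:

```plain
openssl req -x509 -newkey rsa:4096 -sha256 -nodes -days 365 \
  -keyout key.pem -out cert.pem \
  -subj "/CN=rancher.yourdomain.com" \
  -addext "subjectAltName=DNS:rancher.yourdomain.com"
```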
    +
    + Option C-Bring Your Own Certificate: Signed by Recognized CA + +In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisite:** The certificate files must be in PEM format. + +After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/options/server-tags/) that you want to install. | + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
+ +If you are installing Rancher v2.3.0+, the installation is complete. + +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted on GitHub, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](installation/options/local-system-charts/). + +
    +
    diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md new file mode 100644 index 0000000000..0d7508f8f2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes.md @@ -0,0 +1,84 @@ +--- +title: '3. Install Kubernetes with RKE (Kubernetes Installs Only)' +weight: 300 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-kube + - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/launch-kubernetes + - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/launch-kubernetes/ +--- + +This section is about how to prepare to launch a Kubernetes cluster which is used to deploy Rancher server for your air gapped environment. + +Since a Kubernetes Installation requires a Kubernetes cluster, we will create a Kubernetes cluster using [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE). Before being able to start your Kubernetes cluster, you'll need to [install RKE](https://rancher.com/docs/rke/latest/en/installation/) and create a RKE config file. + +- [A. Create an RKE Config File](#a-create-an-rke-config-file) +- [B. Run RKE](#b-run-rke) +- [C. Save Your Files](#c-save-your-files) + +### A. Create an RKE Config File + +From a system that can access ports 22/tcp and 6443/tcp on your host nodes, use the sample below to create a new file named `rancher-cluster.yml`. This file is a Rancher Kubernetes Engine configuration file (RKE config file), which is a configuration for the cluster you're deploying Rancher to. + +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the [3 nodes](./prepare-nodes.md) you created. + +> **Tip:** For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +
    RKE Options
    + +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gap network. | +| `user` | ✓ | A user that can run docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### B. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### C. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### [Next: Install Rancher](../../../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md new file mode 100644 index 0000000000..c570c82755 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry.md @@ -0,0 +1,282 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-installation/prepare-private-reg/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/prepare-private-registry/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/prepare-private-registry/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-for-private-reg/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/populate-private-registry + - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/populate-private-registry/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> **Prerequisites:** You must have a [private registry](https://docs.docker.com/registry/deploying/) available to use. +> +> **Note:** Populating the private registry with images is the same process for HA and Docker installations, the differences in this section is based on whether or not you are planning to provision a Windows cluster or not. + +By default, all images used to [provision Kubernetes clusters](../../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any [tools](../../../../../reference-guides/rancher-cluster-tools.md) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gap installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +By default, we provide the steps of how to populate your private registry assuming you are provisioning Linux only clusters, but if you plan on provisioning any [Windows clusters](../../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed for a Windows cluster. + + + + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +A. Find the required assets for your Rancher version
    +B. Collect all the required images
    +C. Save the images to your workstation
    +D. Populate the private registry + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets*.* + +2. From the release's **Assets** section, download the following files: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### B. Collect all the required images (For Kubernetes Installs using Rancher Generated Self-Signed Certificate) + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../../resources/upgrade-cert-manager.md/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.14.2 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### C. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### D. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. 
Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag, and push the images from `rancher-images.tar.gz` (as listed in `rancher-images.txt`) to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` + +
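+To spot-check that the push succeeded, you can query the registry's HTTP API directly. This is only a hedged sketch: the registry address and credentials below are placeholders for your own values.
+
+```bash
+# List repositories now served by the registry (Docker Registry HTTP API v2).
+# Add -k if the registry uses a self-signed certificate.
+curl -s -u rancher:yourpassword https://registry.example.com:5000/v2/_catalog
+
+# Confirm the tags for a specific repository, e.g. rancher/rancher.
+curl -s -u rancher:yourpassword https://registry.example.com:5000/v2/rancher/rancher/tags/list
+```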
    + + +_Available as of v2.3.0_ + +For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +### Windows Steps + +The Windows images need to be collected and pushed from a Windows server workstation. + +A. Find the required assets for your Rancher version
    +B. Save the images to your Windows Server workstation
    +C. Prepare the Docker daemon
    +D. Populate the private registry + +
    + Collecting and Populating Windows Images into the Private Registry"%}} + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + +| Release File | Description | +|------------------------|-------------------| +| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | +| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | +| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + +### B. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + + ```plain + ./rancher-save-images.ps1 + ``` + + **Step Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + +### C. Prepare the Docker daemon + +Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ``` + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + +### D. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. + +1. Using `powershell`, log into your private registry if required: + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.ps1 --registry + ``` + +
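+To confirm that a Windows image, including the foreign layers normally served by `mcr.microsoft.com`, is now available from the private registry, you can inspect its manifest from the same workstation. This is a hedged sketch: the registry address and tag are placeholders, and `docker manifest` may require experimental CLI features on older Docker versions.
+
+```bash
+# If the command returns a manifest, the image and its layers were pushed successfully.
+# Add --insecure if the registry uses a self-signed certificate.
+docker manifest inspect registry.example.com:5000/rancher/rancher-agent:v2.4.0
+```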
+ +### Linux Steps + +The Linux images need to be collected and pushed from a Linux host, but this _must be done after_ populating the Windows images into the private registry. These steps are different from the Linux-only steps because the Linux images that are pushed will actually be manifests that support both Windows and Linux images. + +A. Find the required assets for your Rancher version
    +B. Collect all the required images
    +C. Save the images to your Linux workstation
    +D. Populate the private registry + +
    + Collecting and Populating Linux Images into the Private Registry + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +### A. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +|----------------------------|------| +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### B. Collect all the required images + +**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + + 1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../../resources/upgrade-cert-manager.md). + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.14.2 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + + 2. Sort and unique the images list to remove any overlap between the sources: + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### C. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. 
Check that the output is in the directory. + +### D. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. The `rancher-images.txt` / `rancher-windows-images.txt` image list is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. + +1. Log into your private registry if required: + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry + ``` + +
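+Once both passes are complete, images shared between Linux and Windows clusters should now be manifest lists carrying both platforms. A hedged way to check one of them (the registry address and tag are placeholders, and `docker manifest` may require experimental CLI features on older Docker clients):
+
+```bash
+# A manifest list should show one entry with "os": "linux" and one with "os": "windows".
+docker manifest inspect registry.example.com:5000/rancher/rancher-agent:v2.4.0 | grep -E '"(os|architecture)"'
+```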
    + +
    +
    + +### [Next: Kubernetes Installs - Launch a Kubernetes Cluster with RKE](../../../other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) + +### [Next: Docker Installs - Install Rancher](../../../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md new file mode 100644 index 0000000000..c00632bebc --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes.md @@ -0,0 +1,112 @@ +--- +title: '1. Prepare your Node(s)' +weight: 100 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/provision-hosts + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/provision-host + - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2/prepare-nodes + - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/prepare-nodes/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section is about how to prepare your node(s) to install Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + +# Prerequisites + + + + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.](../../../../../pages-for-subheaders/installation-requirements.md) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). + +### CLI Tools + +The following CLI tools are required for the Kubernetes Install. Make sure these tools are installed on your workstation and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke](https://rancher.com/docs/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](../../../resources/choose-a-rancher-version.md) to choose a version of Helm to install Rancher. + + + + + +### OS, Docker, Hardware, and Networking + +Make sure that your node(s) fulfill the general [installation requirements.](../../../../../pages-for-subheaders/installation-requirements.md) + +### Private Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [Docker documentation](https://docs.docker.com/registry/). 
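+If you do not already have a registry available inside the air gap, the following is a minimal sketch of running one with Docker's official `registry:2` image. It assumes the image has already been carried into the air gapped environment (for example, loaded from a tarball), and a production setup would also need TLS and authentication as described in the Docker documentation.
+
+```bash
+# Run a bare-bones private registry on port 5000, storing data on the host.
+docker run -d \
+  --restart=always \
+  --name registry \
+  -p 5000:5000 \
+  -v /opt/registry:/var/lib/registry \
+  registry:2
+```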
+ + + + +# Set up Infrastructure + + + + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
    Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers
    + +![Rancher HA](/img/ha/rancher2ha.svg) + +### A. Provision three air gapped Linux hosts according to our requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). + +### B. Set up your Load Balancer + +When setting up the Kubernetes cluster that will run the Rancher server components, an Ingress controller pod will be deployed on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. + +You will need to configure a load balancer as a basic Layer 4 TCP forwarder to direct traffic to these ingress controller pods. The exact configuration will vary depending on your environment. + +> **Important:** +> Only use this load balancer (i.e, the `local` cluster Ingress) to load balance the Rancher server. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. + +**Load Balancer Configuration Samples:** + +- For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) +- For an example showing how to set up an Amazon NLB load balancer, refer to [this page.](../../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +
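+Before moving on, it can help to confirm that the load balancer actually forwards TCP/80 and TCP/443. The sketch below is only illustrative; the hostname and node addresses are placeholders for your own environment, and the node ports will only answer once the ingress controller pods are deployed in the next step.
+
+```bash
+LB=rancher.example.com                        # DNS name or IP of the layer 4 load balancer
+NODES="10.10.3.187 10.10.3.254 10.10.3.89"    # the three cluster nodes
+
+# Check the load balancer itself, then each node's ingress controller ports.
+for host in $LB $NODES; do
+  for port in 80 443; do
+    nc -zv -w 5 "$host" "$port"
+  done
+done
+```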
    + + +The Docker installation is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker installation to a Kubernetes Installation. + +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +### A. Provision a single, air gapped Linux host according to our Requirements + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +View hardware and software requirements for each of your cluster nodes in [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). + + +
    + +### [Next: Collect and Publish Images to your Private Registry](../../../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md new file mode 100644 index 0000000000..3af4b3fe53 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca.md @@ -0,0 +1,165 @@ +--- +title: Template for an RKE Cluster with a Certificate Signed by Recognized CA and a Layer 4 Load Balancer +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate-recognizedca + - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate-recognizedca/ +--- + +RKE uses a cluster.yml file to install and configure your Kubernetes cluster. + +This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. + +The following template can be used for the cluster.yml if you have a setup with: + +- Certificate signed by a recognized CA +- Layer 4 load balancer +- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) + +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +```yaml +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +addons: |- + --- + kind: Namespace + apiVersion: v1 + metadata: + name: cattle-system + --- + kind: ServiceAccount + apiVersion: v1 + metadata: + name: cattle-admin + namespace: cattle-system + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: cattle-crb + namespace: cattle-system + subjects: + - kind: ServiceAccount + name: cattle-admin + namespace: cattle-system + roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-ingress + namespace: cattle-system + type: Opaque + data: + tls.crt: # ssl cert for ingress. If self-signed, must be signed by same CA as cattle server + tls.key: # ssl key for ingress. 
If self-signed, must be signed by same CA as cattle server + --- + apiVersion: v1 + kind: Service + metadata: + namespace: cattle-system + name: cattle-service + labels: + app: cattle + spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: cattle + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: # FQDN to access cattle server + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + tls: + - secretName: cattle-keys-ingress + hosts: + - # FQDN to access cattle server + --- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + # Rancher install via RKE addons is only supported up to v2.0.8 + - image: rancher/rancher:v2.0.8 + args: + - --no-cacerts + imagePullPolicy: Always + name: cattle-server + # env: + # - name: HTTP_PROXY + # value: "http://your_proxy_address:port" + # - name: HTTPS_PROXY + # value: "http://your_proxy_address:port" + # - name: NO_PROXY + # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" + livenessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 60 + periodSeconds: 60 + readinessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 20 + periodSeconds: 10 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md new file mode 100644 index 0000000000..86c06424c7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate.md @@ -0,0 +1,180 @@ +--- +title: Template for an RKE Cluster with a Self-signed Certificate and Layer 4 Load Balancer +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-certificate + - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-certificate/ +--- +RKE uses a cluster.yml file to install and configure your Kubernetes cluster. + +This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. 
+ +The following template can be used for the cluster.yml if you have a setup with: + +- Self-signed SSL +- Layer 4 load balancer +- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) + +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +```yaml +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +addons: |- + --- + kind: Namespace + apiVersion: v1 + metadata: + name: cattle-system + --- + kind: ServiceAccount + apiVersion: v1 + metadata: + name: cattle-admin + namespace: cattle-system + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: cattle-crb + namespace: cattle-system + subjects: + - kind: ServiceAccount + name: cattle-admin + namespace: cattle-system + roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-ingress + namespace: cattle-system + type: Opaque + data: + tls.crt: # ssl cert for ingress. If selfsigned, must be signed by same CA as cattle server + tls.key: # ssl key for ingress. If selfsigned, must be signed by same CA as cattle server + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: # CA cert used to sign cattle server cert and key + --- + apiVersion: v1 + kind: Service + metadata: + namespace: cattle-system + name: cattle-service + labels: + app: cattle + spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + app: cattle + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: # FQDN to access cattle server + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + tls: + - secretName: cattle-keys-ingress + hosts: + - # FQDN to access cattle server + --- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + # Rancher install via RKE addons is only supported up to v2.0.8 + - image: rancher/rancher:v2.0.8 + imagePullPolicy: Always + name: cattle-server + # env: + # - name: HTTP_PROXY + # value: "http://your_proxy_address:port" + # - name: HTTPS_PROXY + # value: "http://your_proxy_address:port" + # - name: NO_PROXY + # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" + livenessProbe: + httpGet: + path: /ping + port: 80 + 
initialDelaySeconds: 60 + periodSeconds: 60 + readinessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 20 + periodSeconds: 10 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + volumeMounts: + - mountPath: /etc/rancher/ssl + name: cattle-keys-volume + readOnly: true + volumes: + - name: cattle-keys-volume + secret: + defaultMode: 420 + secretName: cattle-keys-server +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md new file mode 100644 index 0000000000..59cf796e1e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate.md @@ -0,0 +1,161 @@ +--- +title: Template for an RKE Cluster with a Self-signed Certificate and SSL Termination on Layer 7 Load Balancer +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-certificate + - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-certificate/ +--- + +RKE uses a cluster.yml file to install and configure your Kubernetes cluster. + +This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. + +The following template can be used for the cluster.yml if you have a setup with: + +- Layer 7 load balancer with self-signed SSL termination (HTTPS) +- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) + +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). 
+ +```yaml +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +addons: |- + --- + kind: Namespace + apiVersion: v1 + metadata: + name: cattle-system + --- + kind: ServiceAccount + apiVersion: v1 + metadata: + name: cattle-admin + namespace: cattle-system + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: cattle-crb + namespace: cattle-system + subjects: + - kind: ServiceAccount + name: cattle-admin + namespace: cattle-system + roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: # CA cert used to sign cattle server cert and key + --- + apiVersion: v1 + kind: Service + metadata: + namespace: cattle-system + name: cattle-service + labels: + app: cattle + spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: cattle + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl + spec: + rules: + - host: + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + --- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + # Rancher install via RKE addons is only supported up to v2.0.8 + - image: rancher/rancher:v2.0.8 + imagePullPolicy: Always + name: cattle-server + # env: + # - name: HTTP_PROXY + # value: "http://your_proxy_address:port" + # - name: HTTPS_PROXY + # value: "http://your_proxy_address:port" + # - name: NO_PROXY + # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" + livenessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 60 + periodSeconds: 60 + readinessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 20 + periodSeconds: 10 + ports: + - containerPort: 80 + protocol: TCP + volumeMounts: + - mountPath: /etc/rancher/ssl + name: cattle-keys-volume + readOnly: true + volumes: + - name: cattle-keys-volume + secret: + defaultMode: 420 + secretName: cattle-keys-server +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md new file mode 100644 index 0000000000..57107fbcc4 --- /dev/null +++ 
b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca.md @@ -0,0 +1,145 @@ +--- +title: Template for an RKE Cluster with a Recognized CA Certificate and SSL Termination on Layer 7 Load Balancer +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/cluster-yml-templates/3-node-externalssl-recognizedca + - /rancher/v2.x/en/installation/resources/advanced/cluster-yml-templates/3-node-externalssl-recognizedca/ +--- + +RKE uses a cluster.yml file to install and configure your Kubernetes cluster. + +This template is intended to be used for RKE add-on installs, which are only supported up to Rancher v2.0.8. Please use the Rancher Helm chart if you are installing a newer Rancher version. + +The following template can be used for the cluster.yml if you have a setup with: + +- Layer 7 load balancer with SSL termination (HTTPS) +- [NGINX Ingress controller](https://kubernetes.github.io/ingress-nginx/) + +> For more options, refer to [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +```yaml +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +addons: |- + --- + kind: Namespace + apiVersion: v1 + metadata: + name: cattle-system + --- + kind: ServiceAccount + apiVersion: v1 + metadata: + name: cattle-admin + namespace: cattle-system + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: cattle-crb + namespace: cattle-system + subjects: + - kind: ServiceAccount + name: cattle-admin + namespace: cattle-system + roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io + --- + apiVersion: v1 + kind: Service + metadata: + namespace: cattle-system + name: cattle-service + labels: + app: cattle + spec: + ports: + - port: 80 + targetPort: 80 + protocol: TCP + name: http + selector: + app: cattle + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/ssl-redirect: "false" # Disable redirect to ssl + spec: + rules: + - host: + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + --- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + # Rancher install via RKE addons is only supported up to v2.0.8 + - image: rancher/rancher:v2.0.8 + args: + - --no-cacerts + imagePullPolicy: Always + name: cattle-server + # env: + # - name: HTTP_PROXY + # value: "http://your_proxy_address:port" + # - name: HTTPS_PROXY + # value: 
"http://your_proxy_address:port" + # - name: NO_PROXY + # value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,your_network_ranges_that_dont_need_proxy_to_access" + livenessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 60 + periodSeconds: 60 + readinessProbe: + httpGet: + path: /ping + port: 80 + initialDelaySeconds: 20 + periodSeconds: 10 + ports: + - containerPort: 80 + protocol: TCP +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md new file mode 100644 index 0000000000..0b5d6ee338 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md @@ -0,0 +1,257 @@ +--- +title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer +weight: 252 +aliases: + - /rancher/v2.0-v2.4/en/installation/single-node/single-node-install-external-lb/ + - /rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb + - /rancher/v2.0-v2.4/en/installation/options/single-node-install-external-lb + - /rancher/v2.0-v2.4/en/installation/single-node-install-external-lb +--- + +For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. + +A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. + +This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. + +> **Want to skip the external load balancer?** +> See [Docker Installation](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) instead. + +## Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.](../../../../pages-for-subheaders/installation-requirements.md) + +## Installation Outline + + + +- [1. Provision Linux Host](#1-provision-linux-host) +- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) +- [3. Configure Load Balancer](#3-configure-load-balancer) + + + +## 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements](../../../../pages-for-subheaders/installation-requirements.md) to launch your Rancher Server. + +## 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> **Do you want to...** +> +> - Complete an Air Gap Installation? +> - Record all transactions with the Rancher API? +> +> See [Advanced Options](#advanced-options) below before continuing. + +Choose from the following options: + +
    + Option A-Bring Your Own Certificate: Self-Signed + +If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. + +> **Prerequisites:** +> Create a self-signed certificate. +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Self-Signed Cert:** + +1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest + ``` + +
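+If you still need a certificate to test with, the following is a minimal sketch of generating a self-signed certificate and key in PEM format with `openssl` (the hostname is a placeholder). Because a plain self-signed certificate is its own issuer, the same certificate file is typically what you mount into the container as `cacerts.pem` and install on the load balancer.
+
+```bash
+# Create a self-signed certificate/key pair valid for one year.
+openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+  -keyout privkey.pem -out cert.pem \
+  -subj "/CN=rancher.example.com"
+```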
    +
    + Option B-Bring Your Own Certificate: Signed by Recognized CA +If your cluster is public facing, it's best to use a certificate signed by a recognized CA. + +> **Prerequisites:** +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Cert Signed by a Recognized CA:** + +If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. + +1. Enter the following command. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest --no-cacerts + ``` + +
    + +## 3. Configure Load Balancer + +When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https` header, this redirect is disabled. + +The load balancer or proxy has to be configured to support the following: + +- **WebSocket** connections +- **SPDY** / **HTTP/2** protocols +- Passing / setting the following headers: + + | Header | Value | Description | + |--------|-------|-------------| + | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. + | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

    **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. + | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. + | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. +### Example NGINX configuration + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server rancher-server:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` + +
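+After placing the configuration, a quick hedged smoke test (assuming `rancher.example.com` is the FQDN used above and NGINX can reach the Rancher container):
+
+```bash
+# Validate the configuration and reload NGINX.
+nginx -t && nginx -s reload
+
+# The plain HTTP listener should answer with a 301 redirect to HTTPS.
+curl -sI http://rancher.example.com/ | head -n 1
+
+# Rancher should respond on its health endpoint through the proxy.
+curl -ks https://rancher.example.com/ping
+```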
    + +## What's Next? + +- **Recommended:** Review [Single Node Backup and Restore](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
    + +## FAQ and Troubleshooting + +For help troubleshooting certificates, see [this section.](../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +## Advanced Options + +### API Auditing + +If you want to record all transactions with the Rancher API, enable the [API Auditing](../../../installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) feature by adding the flags below into your install command. + + -e AUDIT_LEVEL=1 \ + -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ + -e AUDIT_LOG_MAXAGE=20 \ + -e AUDIT_LOG_MAXBACKUP=20 \ + -e AUDIT_LOG_MAXSIZE=100 \ + +### Air Gap + +If you are visiting this page to complete an [Air Gap Installation](../../../../pages-for-subheaders/air-gap-helm2.md), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + rancher/rancher:latest +``` + +This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +``` +upstream rancher { + server rancher-server:80; +} + +map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; +} + +server { + listen 443 ssl http2; + server_name rancher.yourdomain.com; + ssl_certificate /etc/your_certificate_directory/fullchain.pem; + ssl_certificate_key /etc/your_certificate_directory/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } +} + +server { + listen 80; + server_name rancher.yourdomain.com; + return 301 https://$server_name$request_uri; +} +``` + +
    + diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md new file mode 100644 index 0000000000..9590438ced --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md @@ -0,0 +1,569 @@ +--- +title: Enabling the API Audit Log to Record System Events +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/api-audit-log/ + - /rancher/v2.0-v2.4/en/installation/api-auditing +--- + +You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. + +You can enable API Auditing during Rancher installation or upgrade. + +## Enabling API Audit Log + +The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. + +- [Docker Install](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +- [Kubernetes Install](../../../../reference-guides/installation-references/helm-chart-options.md#api-audit-log) + +## API Audit Log Options + +The usage below defines rules about what the audit log should record and what data it should include: + +| Parameter | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
    `1` - Log event metadata.
    `2` - Log event metadata and request body.
    `3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

See [Audit Log Levels](#audit-log-levels) for a table that displays what each setting logs. | +| `AUDIT_LOG_PATH` | Log path for the Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to the host.

    Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| +| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. | +| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | +| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it gets rotated. Default size is 100M. | + +
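+For a Docker install, these options are passed to the Rancher container as environment variables, roughly as in the sketch below (the log paths and retention values are illustrative):
+
+```bash
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -e AUDIT_LEVEL=1 \
+  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
+  -e AUDIT_LOG_MAXAGE=20 \
+  -e AUDIT_LOG_MAXBACKUP=20 \
+  -e AUDIT_LOG_MAXSIZE=100 \
+  -v /var/log/rancher/auditlog:/var/log/auditlog \
+  rancher/rancher:latest
+```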
    + +### Audit Log Levels + +The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. + +| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | +| --------------------- | ---------------- | ------------ | ----------------- | ------------- | +| `0` | | | | | +| `1` | ✓ | | | | +| `2` | ✓ | ✓ | | | +| `3` | ✓ | ✓ | ✓ | ✓ | + +## Viewing API Audit Logs + +### Docker Install + +Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. + +### Kubernetes Install + +Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. + +The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. + +#### CLI + +```bash +kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log +``` + +#### Rancher Web GUI + +1. From the context menu, select **Cluster: local > System**. +1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. +1. Pick one of the `rancher` pods and select **⋮ > View Logs**. +1. From the **Logs** drop-down, select `rancher-audit-log`. + +#### Shipping the Audit Log + +You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Rancher Tools - Logging](../../../../pages-for-subheaders/cluster-logging.md) for details. + +## Audit Log Samples + +After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. + +### Metadata Level + +If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. + +```json +{ + "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", + "requestURI": "/v3/schemas", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "GET", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:22:43 +0800" +} +``` + +### Metadata and Request Body Level + +If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. + +The code sample below depicts an API request, with both its metadata header and body. 
+ +```json +{ + "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:28:08 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + 
"field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my description", + "volumes": [] + } +} +``` + +### Metadata, Request Body, and Response Body Level + +If you set your `AUDIT_LEVEL` to `3`, Rancher logs: + +- The metadata header and body for every API request. +- The metadata header and body for every API response. + +#### Request + +The code sample below depicts an API request, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": 
"default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my decript", + "volumes": [] + } +} +``` + +#### Response + +The code sample below depicts an API response, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "responseStatus": "200", + "stage": "ResponseComplete", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "responseBody": { + "actionLinks": { + "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", + "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", + "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" + }, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements" + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container" + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": 
"ClusterFirst", + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "links": { + "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", + "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" + }, + "name": "nginx", + "namespaceId": "default", + "paused": false, + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + } + } +} +``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md new file mode 100644 index 0000000000..8bb9a6bf3a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx.md @@ -0,0 +1,82 @@ +--- +title: NGINX +weight: 270 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nginx + - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nginx/ +--- +NGINX will be configured as Layer 4 load balancer (TCP) that forwards connections to one of your Rancher nodes. + +>**Note:** +> In this configuration, the load balancer is positioned in front of your nodes. The load balancer can be any host capable of running NGINX. +> +> One caveat: do not use one of your Rancher nodes as the load balancer. + +## Install NGINX + +Start by installing NGINX on the node you want to use as a load balancer. NGINX has packages available for all known operating systems. The versions tested are `1.14` and `1.15`. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. 
Please refer to your OS documentation on how to install and enable the NGINX `stream` module. + +## Create NGINX Configuration + +After installing NGINX, you need to update the NGINX configuration file, `nginx.conf`, with the IP addresses of your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. In `nginx.conf`, add the IP addresses of your [nodes](../../../../../../pages-for-subheaders/helm2-create-nodes-lb.md) to both `upstream` blocks, filling in the three `server` entries for port 80 and the three for port 443. + + >**Note:** See [NGINX Documentation: TCP and UDP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/) for all configuration options. + +
    **Example NGINX config**
    + ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + stream { + upstream rancher_servers_http { + least_conn; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + server :80 max_fails=3 fail_timeout=5s; + } + server { + listen 80; + proxy_pass rancher_servers_http; + } + + upstream rancher_servers_https { + least_conn; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + server :443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers_https; + } + } + ``` + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +## Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md new file mode 100644 index 0000000000..e7b5c1304a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb.md @@ -0,0 +1,178 @@ +--- +title: Amazon NLB +weight: 277 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb/nlb + - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/nlb/ +--- +## Objectives + +Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. + +1. [Create Target Groups](#create-target-groups) + + Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +2. [Register Targets](#register-targets) + + Add your Linux nodes to the target groups. + +3. [Create Your NLB](#create-your-nlb) + + Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + +> **Note:** Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ELB or ALB. + +## Create Target Groups + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX ingress controller on the nodes will make sure that port 80 gets redirected to port 443. 
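+If you prefer to script this step rather than use the console walkthrough below, the same target groups can also be created with the AWS CLI. The following is only a sketch — the VPC ID is a placeholder, it covers just the `rancher-tcp-443` group, and the health check settings should be adjusted to match the tables in this section:
+
+```
+aws elbv2 create-target-group \
+  --name rancher-tcp-443 \
+  --protocol TCP \
+  --port 443 \
+  --target-type instance \
+  --vpc-id vpc-xxxxxxxx \
+  --health-check-protocol HTTP \
+  --health-check-path /healthz \
+  --health-check-port 80
+```
+
+Repeat the command with `--name rancher-tcp-80 --port 80 --health-check-port traffic-port` to create the second target group.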
+ +Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. + +The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. + +![](/img/ha/nlb/ec2-loadbalancing.png) + +Click **Create target group** to create the first target group, regarding TCP port 443. + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-443` +Protocol | `TCP` +Port | `443` +Target type | `instance` +VPC | Choose your VPC +Protocol
    (Health Check) | `HTTP` +Path
(Health Check) | `/healthz` +Port (Advanced health check) | `override`,`80` +Healthy threshold (Advanced health) | `3` +Unhealthy threshold (Advanced) | `3` +Timeout (Advanced) | `6 seconds` +Interval (Advanced) | `10 seconds` +Success codes | `200-399` + +*** +**Screenshot Target group TCP port 443 settings**
    +![](/img/ha/nlb/create-targetgroup-443.png) + +*** +**Screenshot Target group TCP port 443 Advanced settings**
    +![](/img/ha/nlb/create-targetgroup-443-advanced.png) + +*** + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-80` +Protocol | `TCP` +Port | `80` +Target type | `instance` +VPC | Choose your VPC +Protocol
    (Health Check) | `HTTP` +Path
(Health Check) | `/healthz` +Port (Advanced health check) | `traffic port` +Healthy threshold (Advanced health) | `3` +Unhealthy threshold (Advanced) | `3` +Timeout (Advanced) | `6 seconds` +Interval (Advanced) | `10 seconds` +Success codes | `200-399` + +*** +**Screenshot Target group TCP port 80 settings**
    +![](/img/ha/nlb/create-targetgroup-80.png) + +*** +**Screenshot Target group TCP port 80 Advanced settings**
    +![](/img/ha/nlb/create-targetgroup-80-advanced.png) + +*** + +## Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +![](/img/ha/nlb/edit-targetgroup-443.png) + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
    + +![](/img/ha/nlb/add-targets-targetgroup-443.png) + +*** +**Screenshot Added targets to target group TCP port 443**
    + +![](/img/ha/nlb/added-targets-targetgroup-443.png) + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +## Create Your NLB + +Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. + +5. Complete the **Step 1: Configure Load Balancer** form. + - **Basic Configuration** + + - Name: `rancher` + - Scheme: `internal` or `internet-facing` + + The Scheme that you choose for your NLB is dependent on the configuration of your instances/VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. + - **Listeners** + + Add the **Load Balancer Protocols** and **Load Balancer Ports** below. + - `TCP`: `443` + + - **Availability Zones** + + - Select Your **VPC** and **Availability Zones**. + +6. Complete the **Step 2: Configure Routing** form. + + - From the **Target Group** drop-down, choose **Existing target group**. + + - From the **Name** drop-down, choose `rancher-tcp-443`. + + - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. + +8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. + +9. After AWS creates the NLB, click **Close**. + +## Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md new file mode 100644 index 0000000000..94749fa0c7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting.md @@ -0,0 +1,26 @@ +--- +title: Troubleshooting +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init/troubleshooting + - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/troubleshooting/ +--- + +### Helm commands show forbidden + +When Helm is initiated in the cluster without specifying the correct `ServiceAccount`, the command `helm init` will succeed but you won't be able to execute most of the other `helm` commands. 
The following error will be shown: + +``` +Error: configmaps is forbidden: User "system:serviceaccount:kube-system:default" cannot list configmaps in the namespace "kube-system" +``` + +To resolve this, the server component (`tiller`) needs to be removed and added with the correct `ServiceAccount`. You can use `helm reset --force` to remove the `tiller` from the cluster. Please check if it is removed using `helm version --server`. + +``` +helm reset --force +Tiller (the Helm server-side component) has been uninstalled from your Kubernetes Cluster. +helm version --server +Error: could not find tiller +``` + +When you have confirmed that `tiller` has been removed, please follow the steps provided in [Initialize Helm (Install tiller)](../../../../../../pages-for-subheaders/helm2-helm-init.md) to install `tiller` with the correct `ServiceAccount`. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md new file mode 100644 index 0000000000..45a156e14e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options.md @@ -0,0 +1,248 @@ +--- +title: Chart Options +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options + - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/chart-options/ +--- + +### Common Options + +| Option | Default Value | Description | +| --- | --- | --- | +| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | +| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | +| `letsEncrypt.email` | " " | `string` - Your email address | +| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | +| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | + +
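+For example, a minimal Helm 2 install that sets these common options could look like the sketch below. The chart repository name (`rancher-stable`), hostname, and email address are placeholders — substitute your own values:
+
+```plain
+helm install rancher-stable/rancher \
+  --name rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.example.com \
+  --set ingress.tls.source=letsEncrypt \
+  --set letsEncrypt.email=admin@example.com
+```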
    + +### Advanced Options + +| Option | Default Value | Description | +| --- | --- | --- | +| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | +| `addLocal` | "auto" | `string` - Have Rancher detect and import the local Rancher server cluster | +| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | +| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | +| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](../../../advanced-use-cases/enable-api-audit-log.md) level. 0 is off. [0-3] | +| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackups` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | +| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | +| `debug` | false | `bool` - set debug flag on rancher server | +| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | +| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | +| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | +| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | +| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" | `string` - comma separated list of hostnames or ip address not to use the proxy | +| `resources` | {} | `map` - rancher pod resource requests & limits | +| `rancherImage` | "rancher/rancher" | `string` - rancher image source | +| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | +| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ _Available as of v2.3.0_ | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. _Available as of v2.3.0_ + +
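+These advanced options are passed with `--set` in the same way as the common options. For example, the following sketch pins the Rancher image tag and requires pod anti-affinity; the tag shown is only an example value:
+
+```plain
+--set rancherImageTag=v2.3.2
+--set antiAffinity=required
+```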
    + +### API Audit Log + +Enabling the [API Audit Log](installation/api-auditing/). + +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools](../../../../../../pages-for-subheaders/cluster-logging.md) for the `System` Project on the Rancher server cluster. + +```plain +--set auditLog.level=1 +``` + +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools](../../../../../../pages-for-subheaders/cluster-logging.md/) for the Rancher server cluster or System Project. + +Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. + +### Setting Extra Environment Variables + +_Available as of v2.2.0_ + +You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +### TLS settings + +_Available as of v2.2.0_ + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +See [TLS settings](../../../../../../reference-guides/installation-references/tls-settings.md) for more information and options. + +### Import `local` Cluster + +By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. + +If this is a concern in your environment you can set this option to "false" on your initial install. + +> Note: This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. + +```plain +--set addLocal="false" +``` + +### Customizing your Ingress + +To customize or use a different ingress with Rancher server you can set your own Ingress annotations. + +Example on setting a custom certificate issuer: + +```plain +--set ingress.extraAnnotations.'certmanager\.k8s\.io/cluster-issuer'=ca-key-pair +``` + +_Available as of v2.0.15, v2.1.10 and v2.2.4_ + +Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. + +```plain +--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' +``` + +### HTTP Proxy + +Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. + +Add your IP exceptions to the `noProxy` list. Make sure you add the Service cluster IP range (default: 10.43.0.1/16) and any worker cluster `controlplane` nodes. 
Rancher supports CIDR notation ranges in this list. + +```plain +--set proxy="http://:@:/" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16" +``` + +### Additional Trusted CAs + +If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. + +```plain +--set additionalTrustedCAs=true +``` + +Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. + +```plain +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem +``` + +### Private Registry and Air Gap Installs + +For details on installing Rancher with a private registry, see: + +- [Air Gap: Docker Install](../../../../../../reference-guides/installation-references/tls-settings.md) +- [Air Gap: Kubernetes Install](../../../../../../pages-for-subheaders/air-gap-helm2.md) + + +### External TLS Termination + +We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. + +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. + +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](./tls-secrets.md) to add the CA cert for Rancher. + +Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. + +#### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + +#### Required Headers + +* `Host` +* `X-Forwarded-Proto` +* `X-Forwarded-Port` +* `X-Forwarded-For` + +#### Recommended Timeouts + +* Read Timeout: `1800 seconds` +* Write Timeout: `1800 seconds` +* Connect Timeout: `30 seconds` + +#### Health Checks + +Rancher will respond `200` to health checks on the `/healthz` endpoint. + + +#### Example NGINX config + +This NGINX configuration is tested on NGINX 1.14. + + >**Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +* Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. 
+* Replace both occurrences of `FQDN` to the DNS name for Rancher. +* Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server IP_NODE_1:80; + server IP_NODE_2:80; + server IP_NODE_3:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/tls-secrets.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/tls-secrets/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/tls-secrets.md diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/troubleshooting.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/helm-rancher/troubleshooting/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/troubleshooting.md diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke/troubleshooting.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/kubernetes-rke/troubleshooting/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke/troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md new file mode 100644 index 0000000000..f7f0f09c8f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing.md @@ 
-0,0 +1,57 @@ +--- +title: Enable API Auditing +weight: 300 +aliases: + - /rke/latest/en/config-options/add-ons/api-auditing/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/api-auditing + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/api-auditing/ +--- + +>**Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../resources/choose-a-rancher-version.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +If you're using RKE to install Rancher, you can use directives to enable API Auditing for your Rancher install. You can know what happened, when it happened, who initiated it, and what cluster it affected. API auditing records all requests and responses to and from the Rancher API, which includes use of the Rancher UI and any other use of the Rancher API through programmatic use. + +## In-line Arguments + +Enable API Auditing using RKE by adding arguments to your Rancher container. + +To enable API auditing: + +- Add API Auditing arguments (`args`) to your Rancher container. +- Declare a `mountPath` in the `volumeMounts` directive of the container. +- Declare a `path` in the `volumes` directive. + +For more information about each argument, its syntax, and how to view API Audit logs, see [Rancher v2.0 Documentation: API Auditing](../../enable-api-audit-log.md). + +```yaml +... +containers: + - image: rancher/rancher:latest + imagePullPolicy: Always + name: cattle-server + args: ["--audit-log-path", "/var/log/auditlog/rancher-api-audit.log", "--audit-log-maxbackup", "5", "--audit-log-maxsize", "50", "--audit-level", "2"] + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + volumeMounts: + - mountPath: /etc/rancher/ssl + name: cattle-keys-volume + readOnly: true + - mountPath: /var/log/auditlog + name: audit-log-dir + volumes: + - name: cattle-keys-volume + secret: + defaultMode: 420 + secretName: cattle-keys-server + - name: audit-log-dir + hostPath: + path: /var/log/rancher/auditlog + type: Directory +``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md new file mode 100644 index 0000000000..2998033e5e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md @@ -0,0 +1,183 @@ +--- +title: Amazon NLB Configuration +weight: 277 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha-server-install/nlb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb/nlb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/nlb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../../resources/helm-version-requirements.md). 
+> +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +## Objectives + +Configuring an Amazon NLB is a multistage process. We've broken it down into multiple tasks so that it's easy to follow. + +1. [Create Target Groups](#create-target-groups) + + Begin by creating two target groups for the **TCP** protocol, one regarding TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +2. [Register Targets](#register-targets) + + Add your Linux nodes to the target groups. + +3. [Create Your NLB](#create-your-nlb) + + Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + + +## Create Target Groups + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but its convenient to add a listener for port 80 which will be redirected to port 443 automatically. The NGINX controller on the nodes will make sure that port 80 gets redirected to port 443. + +Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started, make sure to select the **Region** where your EC2 instances (Linux nodes) are created. + +The Target Groups configuration resides in the **Load Balancing** section of the **EC2** service. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. + +![](/img/ha/nlb/ec2-loadbalancing.png) + +Click **Create target group** to create the first target group, regarding TCP port 443. + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-443` +Protocol | `TCP` +Port | `443` +Target type | `instance` +VPC | Choose your VPC +Protocol
    (Health Check) | `HTTP` +Path
(Health Check) | `/healthz` +Port (Advanced health check) | `override`,`80` +Healthy threshold (Advanced health) | `3` +Unhealthy threshold (Advanced) | `3` +Timeout (Advanced) | `6 seconds` +Interval (Advanced) | `10 seconds` +Success codes | `200-399` + +*** +**Screenshot Target group TCP port 443 settings**
    +![](/img/ha/nlb/create-targetgroup-443.png) + +*** +**Screenshot Target group TCP port 443 Advanced settings**
    +![](/img/ha/nlb/create-targetgroup-443-advanced.png) + +*** + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. Screenshots of the configuration are shown just below the table. + +Option | Setting +--------------------------------------|------------------------------------ +Target Group Name | `rancher-tcp-80` +Protocol | `TCP` +Port | `80` +Target type | `instance` +VPC | Choose your VPC +Protocol
    (Health Check) | `HTTP` +Path
(Health Check) | `/healthz` +Port (Advanced health check) | `traffic port` +Healthy threshold (Advanced health) | `3` +Unhealthy threshold (Advanced) | `3` +Timeout (Advanced) | `6 seconds` +Interval (Advanced) | `10 seconds` +Success codes | `200-399` + +*** +**Screenshot Target group TCP port 80 settings**
    +![](/img/ha/nlb/create-targetgroup-80.png) + +*** +**Screenshot Target group TCP port 80 Advanced settings**
    +![](/img/ha/nlb/create-targetgroup-80-advanced.png) + +*** + +## Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +![](/img/ha/nlb/edit-targetgroup-443.png) + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
    + +![](/img/ha/nlb/add-targets-targetgroup-443.png) + +*** +**Screenshot Added targets to target group TCP port 443**
    + +![](/img/ha/nlb/added-targets-targetgroup-443.png) + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +## Create Your NLB + +Use Amazon's Wizard to create an Network Load Balancer. As part of this process, you'll add the target groups you created in [Create Target Groups](#create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. + +5. Complete the **Step 1: Configure Load Balancer** form. + - **Basic Configuration** + + - Name: `rancher` + - Scheme: `internet-facing` + - **Listeners** + + Add the **Load Balancer Protocols** and **Load Balancer Ports** below. + - `TCP`: `443` + + - **Availability Zones** + + - Select Your **VPC** and **Availability Zones**. + +6. Complete the **Step 2: Configure Routing** form. + + - From the **Target Group** drop-down, choose **Existing target group**. + + - From the **Name** drop-down, choose `rancher-tcp-443`. + + - Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +7. Complete **Step 3: Register Targets**. Since you registered your targets earlier, all you have to do is click **Next: Review**. + +8. Complete **Step 4: Review**. Look over the load balancer details and click **Create** when you're satisfied. + +9. After AWS creates the NLB, click **Close**. + +## Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md new file mode 100644 index 0000000000..0fd61d9585 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb.md @@ -0,0 +1,105 @@ +--- +title: Amazon ALB Configuration +weight: 277 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/alb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/alb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/alb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Kubernetes Rancher. For details, see the [Kubernetes Install ](../../../../../resources/choose-a-rancher-version.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +## Objectives + +Configuring an Amazon ALB is a multistage process. 
We've broken it down into multiple tasks so that it's easy to follow. + +1. [Create Target Group](#create-target-group) + + Begin by creating one target group for the http protocol. You'll add your Linux nodes to this group. + +2. [Register Targets](#register-targets) + + Add your Linux nodes to the target group. + +3. [Create Your ALB](#create-your-alb) + + Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target groups you created in **1. Create Target Groups**. + + +## Create Target Group + +Your first ALB configuration step is to create one target group for HTTP. + +Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. + +The document below will guide you through this process. Use the data in the tables below to complete the procedure. + +[Amazon Documentation: Create a Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-target-group.html) + +### Target Group (HTTP) + +Option | Setting +----------------------------|------------------------------------ +Target Group Name | `rancher-http-80` +Protocol | `HTTP` +Port | `80` +Target type | `instance` +VPC | Choose your VPC +Protocol
    (Health Check) | `HTTP` +Path
    (Health Check) | `/healthz` + +## Register Targets + +Next, add your Linux nodes to your target group. + +[Amazon Documentation: Register Targets with Your Target Group](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-register-targets.html) + +### Create Your ALB + +Use Amazon's Wizard to create an Application Load Balancer. As part of this process, you'll add the target group you created in [Create Target Group](#create-target-group). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Application Load Balancer**. + +5. Complete the **Step 1: Configure Load Balancer** form. + - **Basic Configuration** + + - Name: `rancher-http` + - Scheme: `internet-facing` + - IP address type: `ipv4` + - **Listeners** + + Add the **Load Balancer Protocols** and **Load Balancer Ports** below. + - `HTTP`: `80` + - `HTTPS`: `443` + + - **Availability Zones** + + - Select Your **VPC** and **Availability Zones**. + +6. Complete the **Step 2: Configure Security Settings** form. + + Configure the certificate you want to use for SSL termination. + +7. Complete the **Step 3: Configure Security Groups** form. + +8. Complete the **Step 4: Configure Routing** form. + + - From the **Target Group** drop-down, choose **Existing target group**. + + - Add target group `rancher-http-80`. + +9. Complete **Step 5: Register Targets**. Since you registered your targets earlier, all you have to do it click **Next: Review**. + +10. Complete **Step 6: Review**. Look over the load balancer details and click **Create** when you're satisfied. + +11. After AWS creates the ALB, click **Close**. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md new file mode 100644 index 0000000000..0f588c569b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx.md @@ -0,0 +1,42 @@ +--- +title: NGINX Configuration +weight: 277 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/nginx/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb/nginx + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/nginx/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../../resources/choose-a-rancher-version.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +## Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. 
+ +For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +## Create NGINX Configuration + +See [Example NGINX config](../../helm-rancher/chart-options.md#example-nginx-config). + +## Run NGINX + +* Reload or restart NGINX + + ```` + # Reload NGINX + nginx -s reload + + # Restart NGINX + # Depending on your Linux distribution + service nginx restart + systemctl restart nginx + ```` + +## Browse to Rancher UI + +You should now be to able to browse to `https://FQDN`. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md new file mode 100644 index 0000000000..c4457c287b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy.md @@ -0,0 +1,72 @@ +--- +title: HTTP Proxy Configuration +weight: 277 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/proxy + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/proxy/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../resources/choose-a-rancher-version.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. + +Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. + +Environment variable | Purpose +--------------------------|--------- +HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) +HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) +NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) + +> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. + +## Kubernetes installation + +When using Kubernetes installation, the environment variables need to be added to the RKE Config File template. + +* [Kubernetes Installation with External Load Balancer (TCP/Layer 4) RKE Config File Template](../../../../../../pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md#5-download-rke-config-file-template) +* [Kubernetes Installation with External Load Balancer (HTTPS/Layer 7) RKE Config File Template](../../../../../../pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md#5-download-rke-config-file-template) + +The environment variables should be defined in the `Deployment` inside the RKE Config File Template. You only have to add the part starting with `env:` to (but not including) `ports:`. Make sure the indentation is identical to the preceding `name:`. 
Required values for `NO_PROXY` are: + +* `localhost` +* `127.0.0.1` +* `0.0.0.0` +* Configured `service_cluster_ip_range` (default: `10.43.0.0/16`) + +The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage of the proxy when accessing network range `192.168.10.0/24`, the configured `service_cluster_ip_range` (`10.43.0.0/16`) and every hostname under the domain `example.com`. If you have changed the `service_cluster_ip_range`, you have to update the value below accordingly. + +```yaml +... +--- + kind: Deployment + apiVersion: extensions/v1beta1 + metadata: + namespace: cattle-system + name: cattle + spec: + replicas: 1 + template: + metadata: + labels: + app: cattle + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:latest + imagePullPolicy: Always + name: cattle-server + env: + - name: HTTP_PROXY + value: "http://192.168.10.1:3128" + - name: HTTPS_PROXY + value: "http://192.168.10.1:3128" + - name: NO_PROXY + value: "localhost,127.0.0.1,0.0.0.0,10.43.0.0/16,192.168.10.0/24,example.com" + ports: +... +``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md new file mode 100644 index 0000000000..37ed0aa90d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md @@ -0,0 +1,51 @@ +--- +title: 404 - default backend +weight: 30 +aliases: + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/404-default-backend/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/404-default-backend + - /404-default-backend/ + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../../resources/helm-version-requirements.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. + +When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. + +### Possible causes + +The nginx ingress controller is not able to serve the configured host in `rancher-cluster.yml`. This should be the FQDN you configured to access Rancher. 
You can check if it is properly configured by viewing the ingress that is created by running the following command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress -n cattle-system -o wide +``` + +Check if the `HOSTS` column is displaying the FQDN you configured in the template, and that the used nodes are listed in the `ADDRESS` column. If that is configured correctly, we can check the logging of the nginx ingress controller. + +The logging of the nginx ingress controller will show why it cannot serve the requested host. To view the logs, you can run the following command + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx +``` + +Errors + +* `x509: certificate is valid for fqdn, not your_configured_fqdn` + +The used certificates do not contain the correct hostname. Generate new certificates that contain the chosen FQDN to access Rancher and redeploy. + +* `Port 80 is already in use. Please check the flag --http-port` + +There is a process on the node occupying port 80, this port is needed for the nginx ingress controller to route requests to Rancher. You can find the process by running the command: `netstat -plant | grep \:80`. + +Stop/kill the process and redeploy. + +* `unexpected error creating pem file: no valid PEM formatted block found` + +The base64 encoded string configured in the template is not valid. Please check if you can decode the configured string using `base64 -D STRING`, this should return the same output as the content of the file you used to generate the string. If this is correct, please check if the base64 encoded string is placed directly after the key, without any newlines before, in between or after. (For example: `tls.crt: LS01..`) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md new file mode 100644 index 0000000000..981d12bb23 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting.md @@ -0,0 +1,163 @@ +--- +title: Generic troubleshooting +weight: 5 +aliases: + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/generic-troubleshooting/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/generic-troubleshooting + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/generic-troubleshooting/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../../resources/helm-version-requirements.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +Below are steps that you can follow to determine what is wrong in your cluster. 
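+All the commands below point `kubectl` at the kubeconfig file that RKE generated. If you prefer not to repeat the `--kubeconfig` flag for every command, you can export it once per shell session instead (a convenience sketch that assumes the RKE-generated `kube_config_rancher-cluster.yml` is in your current directory):
+
+```
+# Point kubectl at the RKE-generated kubeconfig for this shell session
+export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml
+
+# The commands below can then be run without the --kubeconfig flag
+kubectl get nodes
+```
+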
+ +### Double check if all the required ports are opened in your (host) firewall + +Double check if all the [required ports](../../../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. + +### All nodes should be present and in **Ready** state + +To check, run the command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes +``` + +If a node is not shown in this output or a node is not in **Ready** state, you can check the logging of the `kubelet` container. Login to the node and run `docker logs kubelet`. + +### All pods/jobs should be in **Running**/**Completed** state + +To check, run the command: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get pods --all-namespaces +``` + +If a pod is not in **Running** state, you can dig into the root cause by running: + +#### Describe pod + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml describe pod POD_NAME -n NAMESPACE +``` + +#### Pod container logs + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs POD_NAME -n NAMESPACE +``` + +If a job is not in **Completed** state, you can dig into the root cause by running: + +#### Describe job + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml describe job JOB_NAME -n NAMESPACE +``` + +#### Logs from the containers of pods of the job + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=JOB_NAME -n NAMESPACE +``` + +### Check ingress + +Ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (address(es) it will be routed to). + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get ingress --all-namespaces +``` + +### List all Kubernetes cluster events + +Kubernetes cluster events are stored, and can be retrieved by running: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml get events --all-namespaces +``` + +### Check Rancher container logging + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=cattle -n cattle-system +``` + +### Check NGINX ingress controller logging + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l app=ingress-nginx -n ingress-nginx +``` + +### Check if overlay network is functioning correctly + +The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. + +To test the overlay network, you can launch the following `DaemonSet` definition. This will run an `alpine` container on every host, which we will use to run a `ping` test between containers on all hosts. + +1. Save the following file as `ds-alpine.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: alpine + spec: + selector: + matchLabels: + name: alpine + template: + metadata: + labels: + name: alpine + spec: + tolerations: + - effect: NoExecute + key: "node-role.kubernetes.io/etcd" + value: "true" + - effect: NoSchedule + key: "node-role.kubernetes.io/controlplane" + value: "true" + containers: + - image: alpine + imagePullPolicy: Always + name: alpine + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + ``` + +2. 
Launch it using `kubectl --kubeconfig kube_config_rancher-cluster.yml create -f ds-alpine.yml` +3. Wait until `kubectl --kubeconfig kube_config_rancher-cluster.yml rollout status ds/alpine -w` returns: `daemon set "alpine" successfully rolled out`. +4. Run the following command to let each container on every host ping each other (it's a single line command). + + ``` + echo "=> Start"; kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | while read spod shost; do kubectl --kubeconfig kube_config_rancher-cluster.yml get pods -l name=alpine -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | while read tip thost; do kubectl --kubeconfig kube_config_rancher-cluster.yml --request-timeout='10s' exec $spod -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $shost cannot reach $thost; fi; done; done; echo "=> End" + ``` + +5. When this command has finished running, the output indicating everything is correct is: + + ``` + => Start + => End + ``` + +If you see error in the output, that means that the [required ports](../../../../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened between the hosts indicated. + +Example error output of a situation where NODE1 had the UDP ports blocked. + +``` +=> Start +command terminated with exit code 1 +NODE2 cannot reach NODE1 +command terminated with exit code 1 +NODE3 cannot reach NODE1 +command terminated with exit code 1 +NODE1 cannot reach NODE2 +command terminated with exit code 1 +NODE1 cannot reach NODE3 +=> End +``` diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md new file mode 100644 index 0000000000..4a3f350ad4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md @@ -0,0 +1,64 @@ +--- +title: Failed to get job complete status +weight: 20 +aliases: + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/job-complete-status/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting/job-complete-status + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/job-complete-status/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](../../../../../resources/helm-version-requirements.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +To debug issues around this error, you will need to download the command-line tool `kubectl`. See [Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) how to download `kubectl` for your platform. 
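+A quick way to confirm that `kubectl` is installed and can reach the cluster is to list the nodes using the kubeconfig that RKE generated (assuming the default file name):
+
+```
+kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes
+```
+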
+ +When you have made changes to `rancher-cluster.yml`, you will have to run `rke remove --config rancher-cluster.yml` to clean the nodes, so it cannot conflict with previous configuration errors. + +### Failed to deploy addon execute job [rke-user-includes-addons]: Failed to get job complete status + +Something is wrong in the addons definitions, you can run the following command to get the root cause in the logging of the job: + +``` +kubectl --kubeconfig kube_config_rancher-cluster.yml logs -l job-name=rke-user-addon-deploy-job -n kube-system +``` + +#### error: error converting YAML to JSON: yaml: line 9: + +The structure of the addons definition in `rancher-cluster.yml` is wrong. In the different resources specified in the addons section, there is a error in the structure of the YAML. The pointer `yaml line 9` references to the line number of the addon that is causing issues. + +Things to check +
      +
        +
+* Is each base64 encoded certificate string placed directly after its key, for example `tls.crt: LS01...`? There should be no newline or space before, in between, or after the string.
+* Is the YAML properly formatted? Each indentation level should be 2 spaces, as shown in the template files.
+* Verify the integrity of your certificate by running `cat MyCertificate | base64 -d` on Linux or `cat MyCertificate | base64 -D` on macOS. If anything is wrong, the command output will tell you (see the sketch below).
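+
+As a quick sanity check for the last item, you can round-trip the encoded value and compare it with the original certificate file (a sketch that assumes the encoded string is stored in `MyCertificate` and the original PEM file is `cert.pem`; adjust the names to match your files):
+
+```
+# Decode the pasted base64 string and compare it byte-for-byte with the original PEM file.
+# Any difference, or a base64 decoding error, points to a copy/paste or newline problem.
+# (On macOS, use `base64 -D` instead of `base64 -d`.)
+diff <(base64 -d MyCertificate) cert.pem && echo "encoded string matches cert.pem"
+```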
      +
    + +#### Error from server (BadRequest): error when creating "/etc/config/rke-user-addon.yaml": Secret in version "v1" cannot be handled as a Secret + +The base64 string of one of the certificate strings is wrong. The log message will try to show you what part of the string is not recognized as valid base64. + +Things to check +
      +
        +
+* Check if the base64 string is valid by running one of the commands below:
+
+```
+# MacOS
+echo BASE64_CRT | base64 -D
+# Linux
+echo BASE64_CRT | base64 -d
+# Windows
+certutil -decode FILENAME.base64 FILENAME.verify
+```
+
      +
    + +#### The Ingress "cattle-ingress-http" is invalid: spec.rules[0].host: Invalid value: "IP": must be a DNS name, not an IP address + +The host value can only contain a host name, as it is needed by the ingress controller to match the hostname and pass to the correct backend. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md new file mode 100644 index 0000000000..fff3c15389 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md @@ -0,0 +1,108 @@ +--- +title: Opening Ports with firewalld +weight: 1 +--- + +> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. + +Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. + +For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: + +``` +Chain INPUT (policy ACCEPT) +target prot opt source destination +ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +ACCEPT icmp -- anywhere anywhere +ACCEPT all -- anywhere anywhere +ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain FORWARD (policy ACCEPT) +target prot opt source destination +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +``` + +You can check the default firewall rules with this command: + +``` +sudo iptables --list +``` + +This section describes how to use `firewalld` to apply the [firewall port rules](installation/references) for nodes in a high-availability Rancher server cluster. + +# Prerequisite + +Install v7.x or later ofv`firewalld`: + +``` +yum install firewalld +systemctl start firewalld +systemctl enable firewalld +``` + +# Applying Firewall Port Rules + +In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. 
If your Rancher server nodes have all three roles, run the following commands on each node: + +``` +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` +If your Rancher server nodes have separate roles, use the following commands based on the role of the node: + +``` +# For etcd nodes, run the following commands: +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp + +# For control plane nodes, run the following commands: +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp + +# For worker nodes, run the following commands: +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` + +After the `firewall-cmd` commands have been run on a node, use the following command to enable the firewall rules: + +``` +firewall-cmd --reload +``` + +**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md new file mode 100644 index 0000000000..a4d7ecd3f5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb.md @@ -0,0 +1,401 @@ +--- +title: Kubernetes Install with External Load Balancer (TCP/Layer 4) +weight: 275 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-4-lb + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-4-lb + - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-4-lb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. 
For details, see the [Kubernetes Install](../../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 4 load balancer (TCP) +- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) + +In an HA setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. + +Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at ingress controllers +![Rancher HA](/img/ha/rancher2ha.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +
    + +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). + +## 2. Configure Load Balancer + +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +>**Note:** +> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. +> +>One caveat: do not use one of your Rancher nodes as the load balancer. + +### A. Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. + +### B. Create NGINX Configuration + +After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). + + >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + + **Example NGINX config:** + ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + http { + server { + listen 80; + return 301 https://$host$request_uri; + } + } + + stream { + upstream rancher_servers { + least_conn; + server IP_NODE_1:443 max_fails=3 fail_timeout=5s; + server IP_NODE_2:443 max_fails=3 fail_timeout=5s; + server IP_NODE_3:443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers; + } + } + ``` + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +### Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` + +## 3. Configure DNS + +Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

    + +1. Log into your DNS server a create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). + +2. Validate that the `DNS A` is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: + + `nslookup HOSTNAME.DOMAIN.COM` + + **Step Result:** Terminal displays output similar to the following: + + ``` + $ nslookup rancher.yourdomain.com + Server: YOUR_HOSTNAME_IP_ADDRESS + Address: YOUR_HOSTNAME_IP_ADDRESS#53 + + Non-authoritative answer: + Name: rancher.yourdomain.com + Address: HOSTNAME.DOMAIN.COM + ``` + +
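+
+As an alternative to `nslookup`, `dig` gives a compact way to confirm that the record resolves to your load balancer (using the example FQDN from above):
+
+```
+dig +short rancher.yourdomain.com
+# Expected output: the IP address of the load balancer configured in step 2
+```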
    + +## 4. Install RKE + +RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. + +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. + +2. Confirm that RKE is now executable by running the following command: + + ``` + rke --version + ``` + +## 5. Download RKE Config File Template + +RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. + +1. Download one of following templates, depending on the SSL certificate you're using. + + - [Template for self-signed certificate
    ](../cluster-yml-templates/node-certificate.md) + - [Template for certificate signed by recognized CA
    ](../cluster-yml-templates/node-certificate-recognizedca.md) + + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +
    + Option A—Bring Your Own Certificate: Self-Signed + +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in PEM format. +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +1. In `kind: Secret` with `name: cattle-keys-ingress`: + + * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) + * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + + >**Note:** + > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-ingress + namespace: cattle-system + type: Opaque + data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ``` + +2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). + + >**Note:** + > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
+ + + **Step Result:** The file should look like the example below (the base64 encoded string should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + ``` + +
    + +
    + Option B—Bring Your Own Certificate: Signed by Recognized CA + +If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. + +In the `kind: Secret` with `name: cattle-keys-ingress`: + +* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) +* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + +>**Note:** +> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cattle-keys-ingress + namespace: cattle-system +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +``` + +
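+
+Whichever option you chose, the values pasted into the template must be single-line base64 strings with no wrapping. One way to generate them (a sketch assuming the input files are named `cert.pem` and `key.pem`; adjust the names to match yours):
+
+```
+# Produce single-line base64 strings ready to paste after tls.crt: and tls.key:
+cat cert.pem | base64 | tr -d '\n'
+cat key.pem | base64 | tr -d '\n'
+```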
    + + + +## 8. Configure FQDN + +There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). + +In the `kind: Ingress` with `name: cattle-ingress-http`: + +* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). + +After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): + +```yaml + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + tls: + - secretName: cattle-keys-ingress + hosts: + - rancher.yourdomain.com +``` + +Save the `.yml` file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. Back Up Your RKE Config File + +After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter one of the `rke up` commands listen below. + +``` +rke up --config rancher-cluster.yml +``` + +**Step Result:** The output should be similar to the snippet below: + +``` +INFO[0000] Building Kubernetes cluster +INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] +INFO[0000] [network] Deploying port listener containers +INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] +... +INFO[0101] Finished building Kubernetes cluster successfully +``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? + +You have a couple of options: + +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore](../../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md). 
+- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
    + +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md new file mode 100644 index 0000000000..5a4a54f3fa --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb.md @@ -0,0 +1,292 @@ +--- +title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha/rke-add-on/layer-7-lb + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb/ + - /rancher/v2.0-v2.4/en/installation/options/rke-add-on/layer-7-lb + - /rancher/v2.x/en/installation/resources/advanced/rke-add-on/layer-7-lb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install](../../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](../../../install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md) for details on how to move to using the helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 7 load balancer with SSL termination (HTTPS) +- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) + +In an HA setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. + +Rancher installed on a Kubernetes cluster with layer 7 load balancer, depicting SSL termination at load balancer +![Rancher HA](/img/ha/rancher2ha-l7.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements](../../../../../pages-for-subheaders/installation-requirements.md). + +## 2. Configure Load Balancer + +When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. 
By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. + +The load balancer has to be configured to support the following: + +* **WebSocket** connections +* **SPDY** / **HTTP/2** protocols +* Passing / setting the following headers: + +| Header | Value | Description | +|---------------------|----------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. | +| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer.

    **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. | +| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer. | +| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. | + +Health checks can be executed on the `/healthz` endpoint of the node, this will return HTTP 200. + +We have example configurations for the following load balancers: + +* [Amazon ELB configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) +* [NGINX configuration](../../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +## 3. Configure DNS + +Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

    + +1. Log into your DNS server a create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). + +2. Validate that the `DNS A` is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: + + `nslookup HOSTNAME.DOMAIN.COM` + + **Step Result:** Terminal displays output similar to the following: + + ``` + $ nslookup rancher.yourdomain.com + Server: YOUR_HOSTNAME_IP_ADDRESS + Address: YOUR_HOSTNAME_IP_ADDRESS#53 + + Non-authoritative answer: + Name: rancher.yourdomain.com + Address: HOSTNAME.DOMAIN.COM + ``` + +
    + +## 4. Install RKE + +RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to setup our cluster and run Rancher. + +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. + +2. Confirm that RKE is now executable by running the following command: + + ``` + rke --version + ``` + +## 5. Download RKE Config File Template + +RKE uses a YAML config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. + +1. Download one of following templates, depending on the SSL certificate you're using. + + - [Template for self-signed certificate
    `3-node-externalssl-certificate.yml`](../cluster-yml-templates/node-externalssl-certificate.md)
+    - [Template for certificate signed by recognized CA
    `3-node-externalssl-recognizedca.yml`](../cluster-yml-templates/node-externalssl-recognizedca.md) + + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +
    + Option A—Bring Your Own Certificate: Self-Signed + +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in PEM format. +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) + +>**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. + +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +
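+
+A quick way to produce that single-line string (a sketch assuming the CA file is named `ca.pem`; adjust the name to match yours):
+
+```
+# Produce a single-line base64 string ready to paste after cacerts.pem:
+cat ca.pem | base64 | tr -d '\n'
+```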
    +
    + Option B—Bring Your Own Certificate: Signed by Recognized CA + +If you are using a Certificate Signed By A Recognized Certificate Authority, you don't need to perform any step in this part. + +
    + +## 8. Configure FQDN + +There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). + +1. Open `rancher-cluster.yml`. + +2. In the `kind: Ingress` with `name: cattle-ingress-http:` + + Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ``` + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + ``` + + +3. Save the file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. Back Up Your RKE Config File + +After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter one of the `rke up` commands listen below. + + ``` + rke up --config rancher-cluster.yml + ``` + + **Step Result:** The output should be similar to the snippet below: + + ``` + INFO[0000] Building Kubernetes cluster + INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] + INFO[0000] [network] Deploying port listener containers + INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] + ... + INFO[0101] Finished building Kubernetes cluster successfully + ``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? + +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restoration](../../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) to learn how to backup your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster](tasks/clusters/creating-a-cluster/). + +
    + +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/etcd/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/etcd/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md new file mode 100644 index 0000000000..f4bb029cf6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md @@ -0,0 +1,33 @@ +--- +title: UI for Istio Virtual Services and Destination Rules +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/feature-flags/istio-virtual-service-ui +--- + +This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. + +> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../../reference-guides/installation-references/feature-flags.md) + +Environment Variable Key | Default Value | Status | Available as of +---|---|---|--- +`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 + +# About this Feature + +A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. + +When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. + +The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** + +- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) +- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) + +To see these tabs, + +1. Go to the project view in Rancher and click **Resources > Istio.** +1. 
You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md new file mode 100644 index 0000000000..776ce5b1ee --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md @@ -0,0 +1,43 @@ +--- +title: "Running on ARM64 (Experimental)" +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/arm64-platform +--- + +> **Important:** +> +> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. + +The following options are available when using an ARM64 platform: + +- Running Rancher on ARM64 based node(s) + - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) link: + + ``` + # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:vX.Y.Z + ``` +> **Note:** To check if your specific released version is compatible with the ARM64 architecture, you may navigate to your +> version's release notes in the following two ways: +> +> - Manually find your version using https://github.com/rancher/rancher/releases. +> - Go directly to your version using the tag and the specific version number. If you plan to use v2.5.8, for example, you may +> navigate to https://github.com/rancher/rancher/releases/tag/v2.5.8. + +- Create custom cluster and adding ARM64 based node(s) + - Kubernetes cluster version must be 1.12 or higher + - CNI Network Provider must be [Flannel](../../../../faq/container-network-interface-providers.md#flannel) + +- Importing clusters that contain ARM64 based nodes + - Kubernetes cluster version must be 1.12 or higher + +Please see [Cluster Options](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) for information on how to configure the cluster options. 
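If you later want to confirm which nodes in a cluster are actually running on ARM64, one option (a sketch, not part of the original instructions) is to display the architecture labels that the kubelet sets on each node:

```
# Older clusters (Kubernetes 1.12/1.13) use the beta label; newer clusters use kubernetes.io/arch
kubectl get nodes -L beta.kubernetes.io/arch -L kubernetes.io/arch
```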
+ +The following features are not tested: + +- Monitoring, alerts, notifiers, pipelines and logging +- Launching apps from the catalog diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md new file mode 100644 index 0000000000..64964ad056 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md @@ -0,0 +1,42 @@ +--- +title: Allow Unsupported Storage Drivers +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/feature-flags/enable-not-default-storage-drivers/ +--- + +This feature allows you to use types for storage providers and provisioners that are not enabled by default. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](../../../../reference-guides/installation-references/feature-flags.md) + +Environment Variable Key | Default Value | Description +---|---|--- + `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. + +### Types for Persistent Volume Plugins that are Enabled by Default +Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +### Types for StorageClass that are Enabled by Default +Below is a list of storage types for a StorageClass that are enabled by default. 
When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|-------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md new file mode 100644 index 0000000000..c18a66b06f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -0,0 +1,90 @@ +--- +title: Rollbacks +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/rollbacks + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/rollbacks + - /rancher/v2.0-v2.4/en/upgrades/ha-server-rollbacks + - /rancher/v2.0-v2.4/en/upgrades/rollbacks/ha-server-rollbacks + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks +--- + +### Rolling Back to Rancher v2.2-v2.4 + +For Rancher installed on Kubernetes, follow the procedure detailed here: [Restoring Backups for Kubernetes installs.](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) Restoring a snapshot of the Rancher Server cluster will revert Rancher to the version and state at the time of the snapshot. + +For information on how to roll back Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md) + +> Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. + +### Rolling Back to v2.0.0-v2.1.5 + +If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. + +- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. +- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. + +Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: + +1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. 
+ + **Rancher Installed with Docker** + ``` + docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json + ``` + + **Rancher Installed on a Kubernetes Cluster** + ``` + kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json + ``` + +2. After executing the command, a `tokens.json` file will be created. **Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** + +3. Roll back Rancher following the [normal instructions](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md). + +4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. + +5. Apply the backed up tokens based on how you installed Rancher. + + **Rancher Installed with Docker** + + Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. + ``` + set -e + + tokens=$(jq .[] -c tokens.json) + for token in $tokens; do + name=$(echo $token | jq -r .name) + value=$(echo $token | jq -r .token) + + docker exec $1 kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" + done + ``` + Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: + ``` + ./apply_tokens.sh + ``` + After a few moments the clusters will go from `Unavailable` back to `Available`. + + **Rancher Installed on a Kubernetes Cluster** + + Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. + ``` + set -e + + tokens=$(jq .[] -c tokens.json) + for token in $tokens; do + name=$(echo $token | jq -r .name) + value=$(echo $token | jq -r .token) + + kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" + done + ``` + Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: + ``` + ./apply_tokens.sh + ``` + After a few moments the clusters will go from `Unavailable` back to `Available`. + +6. Continue using Rancher as normal. 
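If you want to double-check that the tokens were applied, a quick spot check (a sketch that reuses the same `kubectl` and `jq` tooling as the scripts above; prefix with `docker exec` for a Docker install, as shown earlier) is to confirm that each cluster object now carries a non-empty `serviceAccountToken`:

```
kubectl get clusters -o json | jq '[.items[] | {name: .metadata.name, hasToken: ((.status.serviceAccountToken // "") != "")}]'
```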
diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/troubleshooting/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/troubleshooting/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md new file mode 100644 index 0000000000..acb3179a23 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md @@ -0,0 +1,224 @@ +--- +title: Upgrading Rancher Installed on Kubernetes with Helm 2 +weight: 1050 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha/helm2 + - /rancher/v2.0-v2.4/en/upgrades/helm2 + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2 + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha/helm2 + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/helm2 + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/helm2/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> Helm 3 has been released. If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. +> +> The [current instructions for Upgrading Rancher Installed on Kubernetes](../../../../pages-for-subheaders/upgrades.md) use Helm 3. +> +> This section provides a copy of the older instructions for upgrading Rancher with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +The following instructions will guide you through using Helm to upgrade a Rancher server that is installed on a Kubernetes cluster. + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade](./migrating-from-rke-add-on.md). 
+ +>**Notes:** +> +> - [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](../../resources/upgrade-cert-manager.md) +> - If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.](../../../../reference-guides/installation-references/helm-chart-options.md#configuring-ingress-for-external-tls-when-using-nginx-v0-25) +> - The upgrade instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section](../../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2.md) provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +# Prerequisites + +- **Review the [known upgrade issues](../../../../pages-for-subheaders/upgrades.md)** in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) +- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry](../../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [A. Back up your Kubernetes cluster that is running Rancher server](#a-back-up-your-kubernetes-cluster-that-is-running-rancher-server) +- [B. Update the Helm chart repository](#b-update-the-helm-chart-repository) +- [C. Upgrade Rancher](#c-upgrade-rancher) +- [D. Verify the Upgrade](#d-verify-the-upgrade) + +### A. Back up Your Kubernetes Cluster that is Running Rancher Server + +[Take a one-time snapshot](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#option-b-one-time-snapshots) +of your Kubernetes cluster running Rancher server. You'll use the snapshot as a restore point if something goes wrong during upgrade. + +### B. Update the Helm chart repository + +1. Update your local helm repo cache. + + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). 
+ + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../../resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + +### C. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. + + + + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. + +``` +helm get values rancher + +hostname: rancher.my.org +``` + +> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. + +If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow `Option B: Reinstalling Rancher`. Otherwise, follow `Option A: Upgrading Rancher`. + +
    + Option A: Upgrading Rancher + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`. Note: There will be many more options from the previous step that need to be appended. + +``` +helm upgrade --install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` +
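If many values were set at install time, one way to avoid retyping every `--set` flag (a sketch, not part of the original instructions) is to export the currently deployed values to a file and pass the file back in during the upgrade, using the same chart reference as above:

```
# Helm 2: dump the values the release was installed with, then reuse them
helm get values rancher > current-values.yaml

helm upgrade --install rancher rancher-/rancher \
  --namespace cattle-system \
  -f current-values.yaml
```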
    + +
    + Option B: Reinstalling Rancher chart + +If you are currently running a cert-manager version older than v0.11 and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both due to the API change in cert-manager v0.11. + +1. Uninstall Rancher + + ``` + helm delete rancher + ``` + If this results in an error stating that the release "rancher" was not found, make sure you are using the correct release name. Use `helm list` to list the Helm-deployed releases. + +2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager](../../resources/upgrade-cert-manager-helm-2.md) page. + +3. Reinstall Rancher to the latest version with all your settings. Take all the values from step 1 and append them to the command using `--set key=value`. Note: There will be many more options from step 1 that need to be appended. + + ``` + helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org + ``` + +
    + +
    + + +1. Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + Based on the choice you made during installation, complete one of the procedures below. + + Placeholder | Description + ------------|------------- + `` | The version number of the output tarball. + `` | The DNS name you pointed at your load balancer. + `` | The DNS name for your private registry. + `` | Cert-manager version running on k8s cluster. + +
    + Option A-Default Self-Signed Certificate + + ```plain +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +
    +
    + Option B: Certificates From Files using Kubernetes Secrets + +```plain +helm template ./rancher-.tgz --output-dir . \ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain +helm template ./rancher-.tgz --output-dir . \ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set privateCA=true \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +
    + +2. Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. + + Use `kubectl` to apply the rendered manifests. + + ```plain + kubectl -n cattle-system apply -R -f ./rancher + ``` + +
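After the manifests are applied, you can watch the deployment come up before moving on to verification (a sketch; this assumes the chart's default deployment name of `rancher` in the `cattle-system` namespace):

```plain
kubectl -n cattle-system rollout status deploy/rancher
kubectl -n cattle-system get pods
```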
    +
    + +### D. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking](namespace-migration.md#restoring-cluster-networking). + +## Rolling Back + +Should something go wrong, follow the [roll back](../rollbacks.md) instructions to restore the snapshot you took before you preformed the upgrade. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md new file mode 100644 index 0000000000..ffab37b01e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on.md @@ -0,0 +1,113 @@ +--- +title: Migrating from a Kubernetes Install with an RKE Add-on +weight: 1030 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/ha-server-upgrade/ + - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade/ + - /rancher/v2.0-v2.4/en/upgrades/upgrades/migrating-from-rke-add-on + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/migrating-from-rke-add-on + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/migrating-from-rke-add-on + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/migrating-from-rke-add-on/ +--- + +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>If you are currently using the RKE add-on install method, please follow these directions to migrate to the Helm install. + + +The following instructions will help guide you through migrating from the RKE Add-on install to managing Rancher with the Helm package manager. + +You will need the to have [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) installed and the kubeconfig YAML file (`kube_config_rancher-cluster.yml`) generated by RKE. + +> **Note:** This guide assumes a standard Rancher install. If you have modified any of the object names or namespaces, please adjust accordingly. + +> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps](../../../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. + +### Point kubectl at your Rancher Cluster + +Make sure `kubectl` is using the correct kubeconfig YAML file. Set the `KUBECONFIG` environmental variable to point to `kube_config_rancher-cluster.yml`: + +``` +export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml +``` + +After setting the `KUBECONFIG` environment variable, verify that it contains the correct `server` parameter. It should point directly to one of your cluster nodes on port `6443`. + +``` +kubectl config view -o=jsonpath='{.clusters[*].cluster.server}' +https://NODE:6443 +``` + +If the output from the command shows your Rancher hostname with the suffix `/k8s/clusters`, the wrong kubeconfig YAML file is configured. It should be the file that was created when you used RKE to create the cluster to run Rancher. 
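For comparison, a kubeconfig generated by Rancher for a downstream cluster routes through the Rancher API proxy rather than pointing at a node, so its `server` value looks roughly like the following (illustrative output only; the hostname and cluster ID are placeholders):

```
kubectl config view -o=jsonpath='{.clusters[*].cluster.server}'
https://rancher.yourdomain.com/k8s/clusters/c-xxxxx
```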
+ +### Save your certificates + +If you have terminated ssl on the Rancher cluster ingress, recover your certificate and key for use in the Helm install. + +Use `kubectl` to get the secret, decode the value and direct the output to a file. + +``` +kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.crt }' | base64 -d > tls.crt +kubectl -n cattle-system get secret cattle-keys-ingress -o jsonpath --template='{ .data.tls\.key }' | base64 -d > tls.key +``` + +If you specified a private CA root cert + +``` +kubectl -n cattle-system get secret cattle-keys-server -o jsonpath --template='{ .data.cacerts\.pem }' | base64 -d > cacerts.pem +``` + +### Remove previous Kubernetes objects + +Remove the Kubernetes objects created by the RKE install. + +> **Note:** Removing these Kubernetes components will not affect the Rancher configuration or database, but with any maintenance it is a good idea to create a backup of the data before hand. See [Creating Backups-Kubernetes Install](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) for details. + +``` +kubectl -n cattle-system delete ingress cattle-ingress-http +kubectl -n cattle-system delete service cattle-service +kubectl -n cattle-system delete deployment cattle +kubectl -n cattle-system delete clusterrolebinding cattle-crb +kubectl -n cattle-system delete serviceaccount cattle-admin +``` + +### Remove addons section from `rancher-cluster.yml` + +The addons section from `rancher-cluster.yml` contains all the resources needed to deploy Rancher using RKE. By switching to Helm, this part of the cluster configuration file is no longer needed. Open `rancher-cluster.yml` in your favorite text editor and remove the addons section: + +>**Important:** Make sure you only remove the addons section from the cluster configuration file. + +``` +nodes: + - address: # hostname or IP to access nodes + user: # root user (usually 'root') + role: [controlplane,etcd,worker] # K8s roles for node + ssh_key_path: # path to PEM file + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + - address: + user: + role: [controlplane,etcd,worker] + ssh_key_path: + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Remove addons section from here til end of file +addons: |- + --- + ... +# End of file +``` + +### Follow Helm and Rancher install steps + +From here follow the standard install steps. 
+ +* [3 - Initialize Helm](../../../../pages-for-subheaders/helm2-helm-init.md) +* [4 - Install Rancher](../../../../pages-for-subheaders/helm-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md new file mode 100644 index 0000000000..6681b1dc6c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md @@ -0,0 +1,196 @@ +--- +title: Upgrading to v2.0.7+ — Namespace Migration +weight: 1040 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/upgrades/namespace-migration + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/namespace-migration + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/namespace-migration + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/namespace-migration/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +>This section applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. + +In Rancher v2.0.6 and prior, system namespaces crucial for Rancher and Kubernetes operations were not assigned to any Rancher project by default. Instead, these namespaces existed independently from all Rancher projects, but you could move these namespaces into any project without affecting cluster operations. + +These namespaces include: + +- `kube-system` +- `kube-public` +- `cattle-system` +- `cattle-alerting`1 +- `cattle-logging`1 +- `cattle-pipeline`1 +- `ingress-nginx` + +>1 Only displays if this feature is enabled for the cluster. + +However, with the release of Rancher v2.0.7, the `System` project was introduced. This project, which is automatically created during the upgrade, is assigned the system namespaces above to hold these crucial components for safe keeping. + +During upgrades from Rancher v2.0.6- to Rancher v2.0.7+, all system namespaces are moved from their default location outside of all projects into the newly created `System` project. However, if you assigned any of your system namespaces to a project before upgrading, your cluster networking may encounter issues afterwards. This issue occurs because the system namespaces are not where the upgrade expects them to be during the upgrade, so it cannot move them to the `System` project. + +- To prevent this issue from occurring before the upgrade, see [Preventing Cluster Networking Issues](#preventing-cluster-networking-issues). +- To fix this issue following upgrade, see [Restoring Cluster Networking](#restoring-cluster-networking). + +> **Note:** If you are upgrading from from Rancher v2.0.13 or earlier, or v2.1.8 or earlier, and your cluster's certificates have expired, you will need to perform [additional steps](../../../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. + +## Preventing Cluster Networking Issues + +You can prevent cluster networking issues from occurring during your upgrade to v2.0.7+ by unassigning system namespaces from all of your Rancher projects. Complete this task if you've assigned any of a cluster's system namespaces into a Rancher project. + +1. 
Log into the Rancher UI before upgrade. + +1. From the context menu, open the **local** cluster (or any of your other clusters). + +1. From the main menu, select **Project/Namespaces**. + +1. Find and select the following namespaces. Click **Move** and then choose **None** to move them out of your projects. Click **Move** again. + + >**Note:** Some or all of these namespaces may already be unassigned from all projects. + + - `kube-system` + - `kube-public` + - `cattle-system` + - `cattle-alerting`1 + - `cattle-logging`1 + - `cattle-pipeline`1 + - `ingress-nginx` + + >1 Only displays if this feature is enabled for the cluster. + +
    Moving namespaces out of projects
    + ![Moving Namespaces](/img/move-namespaces.png) + +1. Repeat these steps for each cluster where you've assigned system namespaces to projects. + +**Result:** All system namespaces are moved out of Rancher projects. You can now safely begin the [upgrade](upgrades/upgrades). + +## Restoring Cluster Networking + +Reset the cluster nodes' network policies to restore connectivity. + +>**Prerequisites:** +> +>Download and setup [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + + + + +1. From **Terminal**, change directories to your kubectl file that's generated during Rancher install, `kube_config_rancher-cluster.yml`. This file is usually in the directory where you ran RKE during Rancher installation. + +1. Before repairing networking, run the following two commands to make sure that your nodes have a status of `Ready` and that your cluster components are `Healthy`. + + ``` + kubectl --kubeconfig kube_config_rancher-cluster.yml get nodes + + NAME STATUS ROLES AGE VERSION + 165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 + 165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 + 165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 + + kubectl --kubeconfig kube_config_rancher-cluster.yml get cs + + NAME STATUS MESSAGE ERROR + scheduler Healthy ok + controller-manager Healthy ok + etcd-0 Healthy {"health": "true"} + etcd-2 Healthy {"health": "true"} + etcd-1 Healthy {"health": "true"} + ``` + +1. Check the `networkPolicy` for all clusters by running the following command. + + kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" + + ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION + c-59ptz custom + local local + + +1. Disable the `networkPolicy` for all clusters, still pointing toward your `kube_config_rancher-cluster.yml`. + + kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster {} --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' + + >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): + > + >``` + kubectl --kubeconfig kube_config_rancher-cluster.yml patch cluster local --type merge -p '{"spec": {"enableNetworkPolicy": false},"status": {"appliedSpec": {"enableNetworkPolicy": false }}}' + ``` + +1. Remove annotations for network policy for all clusters + + kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o jsonpath='{range .items[*]}{@.metadata.name}{"\n"}{end}' | xargs -I {} kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster {} "networking.management.cattle.io/enable-network-policy"="false" --overwrite + + >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): + > + >``` + kubectl --kubeconfig kube_config_rancher-cluster.yml annotate cluster local "networking.management.cattle.io/enable-network-policy"="false" --overwrite + ``` + +1. 
Check the `networkPolicy` for all clusters again to make sure the policies have a status of `false `. + + kubectl --kubeconfig kube_config_rancher-cluster.yml get cluster -o=custom-columns=ID:.metadata.name,NAME:.spec.displayName,NETWORKPOLICY:.spec.enableNetworkPolicy,APPLIEDNP:.status.appliedSpec.enableNetworkPolicy,ANNOTATION:.metadata.annotations."networking\.management\.cattle\.io/enable-network-policy" + + ID NAME NETWORKPOLICY APPLIEDNP ANNOTATION + c-59ptz custom false false false + local local false false false + +1. Remove all network policies from all namespaces. Run this command for each cluster, using the kubeconfig generated by RKE. + + ``` + for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do + kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all; + done + ``` + +1. Remove all the projectnetworkpolicies created for the clusters, to make sure networkpolicies are not recreated. + + ``` + for cluster in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get clusters -o custom-columns=NAME:.metadata.name --no-headers); do + for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n $cluster -o custom-columns=NAME:.metadata.name --no-headers); do + kubectl --kubeconfig kube_config_rancher-cluster.yml delete projectnetworkpolicy -n $project --all + done + done + ``` + + >**Tip:** If you want to keep `networkPolicy` enabled for all created clusters, you can run the following command to disable `networkPolicy` for `local` cluster (i.e., your Rancher Server nodes): + > + >``` + for project in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get project -n local -o custom-columns=NAME:.metadata.name --no-headers); do + kubectl --kubeconfig kube_config_rancher-cluster.yml -n $project delete projectnetworkpolicy --all; + done + ``` + +1. Wait a few minutes and then log into the Rancher UI. + + - If you can access Rancher, you're done, so you can skip the rest of the steps. + - If you still can't access Rancher, complete the steps below. + +1. Force your pods to recreate themselves by entering the following command. + + ``` + kubectl --kubeconfig kube_config_rancher-cluster.yml delete pods -n cattle-system --all + ``` + +1. Log into the Rancher UI and view your clusters. Created clusters will show errors from attempting to contact Rancher while it was unavailable. However, these errors should resolve automatically. + + + + +If you can access Rancher, but one or more of the clusters that you launched using Rancher has no networking, you can repair them by moving them: + +- Using the cluster's [embedded kubectl shell](k8s-in-rancher/kubectl/). +- By [downloading the cluster kubeconfig file and running it](../../../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) from your workstation. 
+ + ``` + for namespace in $(kubectl --kubeconfig kube_config_rancher-cluster.yml get ns -o custom-columns=NAME:.metadata.name --no-headers); do + kubectl --kubeconfig kube_config_rancher-cluster.yml -n $namespace delete networkpolicy --all; + done + ``` + + + + + diff --git a/content/rancher/v2.0-v2.4/en/installation/requirements/installing-docker/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/install-docker.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/requirements/installing-docker/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/install-docker.md diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md new file mode 100644 index 0000000000..b76ed5362d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md @@ -0,0 +1,267 @@ +--- +title: Port Requirements +description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes +weight: 300 +--- + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. + +- [Rancher Nodes](#rancher-nodes) + - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) + - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) + - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) +- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) + - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) + - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) + - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) + - [Ports for Imported Clusters](#ports-for-imported-clusters) +- [Other Port Considerations](#other-port-considerations) + - [Commonly Used Ports](#commonly-used-ports) + - [Local Node Traffic](#local-node-traffic) + - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) + - [Opening SUSE Linux Ports](#opening-suse-linux-ports) + +# Rancher Nodes + +The following table lists the ports that need to be open to and from nodes that are running the Rancher server. + +The port requirements differ based on the Rancher server architecture. + +> **Notes:** +> +> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). +> - Kubernetes recommends TCP 30000-32767 for node port services. +> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. + +### Ports for Rancher Server Nodes on K3s + +
    + Click to expand + +The K3s server needs port 6443 to be accessible by the nodes. + +The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Server Nodes
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| TCP | 443 |
    • server nodes
    • agent nodes
    • hosted/imported Kubernetes
    • any source that needs to be able to use the Rancher UI or API
    | Rancher agent, Rancher UI/API, kubectl | +| TCP | 6443 | K3s server nodes | Kubernetes API +| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. +| TCP | 10250 | K3s server and agent nodes | kubelet + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
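As an illustration only (not part of the original requirements), on a Rancher server node that uses firewalld the inbound rules above could be opened like this:

```
# Inbound ports from the K3s tables above: UI/API, Kubernetes API, Flannel VXLAN, kubelet
firewall-cmd --permanent --add-port=80/tcp
firewall-cmd --permanent --add-port=443/tcp
firewall-cmd --permanent --add-port=6443/tcp
firewall-cmd --permanent --add-port=8472/udp   # keep this restricted to cluster nodes, per the VXLAN warning above
firewall-cmd --permanent --add-port=10250/tcp
firewall-cmd --reload
```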
    + +### Ports for Rancher Server Nodes on RKE + +
    + Click to expand + +Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. + +The following tables break down the port requirements for traffic between the Rancher nodes: + +
    Rules for traffic between Rancher nodes
    + +| Protocol | Port | Description | +|-----|-----|----------------| +| TCP | 443 | Rancher agents | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| TCP | 6443 | Kubernetes apiserver | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 10250 | kubelet | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Nodes
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | +| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | +| TCP | 443 |
    • Load Balancer/Reverse Proxy
    • IPs of all cluster nodes and other API/UI clients
    | HTTPS traffic to Rancher UI/API | +| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +|-----|-----|----------------|---| +| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | +| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | +| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | +| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | + +
    + +### Ports for Rancher Server in Docker + +
    + Click to expand + +The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: + +
    Inbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used +| TCP | 443 |
    • hosted/imported Kubernetes
    • any source that needs to be able to use the Rancher UI or API
    | Rancher agent, Rancher UI/API, kubectl + +
    Outbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
    + +# Downstream Kubernetes Cluster Nodes + +Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. + +The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +The following diagram depicts the ports that are opened for each [cluster type](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
    Port Requirements for the Rancher Management Plane
    + +![Basic Port Requirements](/img/port-communications.svg) + +>**Tip:** +> +>If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. + +### Ports for Rancher Launched Kubernetes Clusters using Node Pools + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with nodes created in an [Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +>**Note:** +>The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +{{< ports-iaas-nodes >}} + +
    + +### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with [Custom Nodes](../../../pages-for-subheaders/use-existing-nodes.md). + +{{< ports-custom-nodes >}} + +
    + +### Ports for Hosted Kubernetes Clusters + +
    + Click to expand + +The following table depicts the port requirements for [hosted clusters](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). + +{{< ports-imported-hosted >}} + +
    + +### Ports for Imported Clusters + + +
    + Click to expand + +The following table depicts the port requirements for [imported clusters](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md). + +{{< ports-imported-hosted >}} + +
    + + +# Other Port Considerations + +### Commonly Used Ports + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +import CommonPortsTable from '../../../shared-files/_common-ports-table.md'; + + + +---- + +### Local Node Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. + +### Rancher AWS EC2 Security Group + +When using the [AWS EC2 node driver](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Opening SUSE Linux Ports + +SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, + +1. SSH into the instance. +1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: + ``` + FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" + FW_SERVICES_EXT_UDP="8472 30000:32767" + FW_ROUTE=yes + ``` +1. Restart the firewall with the new ports: + ``` + SuSEfirewall2 + ``` + +**Result:** The node has the open ports required to be added to a custom cluster. 
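Once the required ports are open, a quick way to spot-check connectivity from another machine (a sketch; assumes netcat is installed, and the host and ports shown are placeholders to replace with your own values) is:

```
nc -zv rancher.yourdomain.com 443   # Rancher UI/API through the load balancer
nc -zv <node-ip> 6443               # Kubernetes API on a server node
```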
diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md new file mode 100644 index 0000000000..062dd65c9c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -0,0 +1,179 @@ +--- +title: '1. Set up Infrastructure and Private Registry' +weight: 100 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/provision-host +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). + +An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. + +The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.](../../../../pages-for-subheaders/installation-and-upgrade.md) + + + + +We recommend setting up the following infrastructure for a high-availability installation: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. + +For a high-availability K3s installation, you will need to set up one of the following external databases: + +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) + +When you install Kubernetes, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the database, refer to this [tutorial](installation/options/rds) for setting up a MySQL database on Amazon's RDS service. 
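For illustration (a sketch; the exact connection string formats are covered in the K3s documentation linked below), the database details from this step are later passed to K3s through its datastore endpoint option. A hypothetical MySQL example:

```
# Substitute your own user, password, host, and database name
k3s server \
  --datastore-endpoint='mysql://username:password@tcp(dbhost.example.com:3306)/k3s'
```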
+ +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. 
In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 5. Set up a Private Docker Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. 
+ +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 4. Set up a Private Docker Registry + +Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. 
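+
+For illustration only, a minimal TLS-enabled registry can be started with the official `registry` image roughly as follows. This is a sketch rather than a hardened setup: the certificate and data paths are placeholders, and the Docker documentation linked below covers authentication, storage back ends, and other production concerns.
+
+```
+docker run -d --restart=always --name private-registry \
+  -p 443:443 \
+  -v /opt/registry/certs:/certs \
+  -v /opt/registry/data:/var/lib/registry \
+  -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
+  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
+  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
+  registry:2
+```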
+ +In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + + +> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. +> +> For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. + +### 1. Set up a Linux Node + +This host will be disconnected from the Internet, but needs to be able to connect to your private registry. + +Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up a Private Docker Registry + +Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) + + + + +### [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md new file mode 100644 index 0000000000..fe0b6c9f2c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -0,0 +1,230 @@ +--- +title: '3. Install Kubernetes (Skip for Docker Installs)' +weight: 300 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-kube +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> Skip this section if you are installing Rancher on a single node with Docker. + +This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. + +For Rancher before v2.4, Rancher should be installed on an [RKE](https://rancher.com/docs/rke/latest/en/) (Rancher Kubernetes Engine) Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. + +In Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. 
K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. The Rancher management server can only be run on a Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. + +The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. + + + + +In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. + +### Installation Outline + +1. [Prepare Images Directory](#1-prepare-images-directory) +2. [Create Registry YAML](#2-create-registry-yaml) +3. [Install K3s](#3-install-k3s) +4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) + +### 1. Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### 2. Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. + +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note, at this time only secure registries are supported with K3s (SSL with custom CA). + +For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) + +### 3. Install K3s + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, and name it `install.sh`. 
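+
+One way to stage these artifacts from the internet-connected workstation is with `scp`. The node address, user, and paths below are placeholders; adjust them for your environment and repeat for every node:
+
+```
+scp ./k3s ./install.sh user@NODE_IP:/tmp/
+ssh user@NODE_IP 'sudo install -m 755 /tmp/k3s /usr/local/bin/k3s \
+  && install -m 755 /tmp/install.sh ~/install.sh'
+```
+
+When you run the installer below, the `INSTALL_K3S_VERSION` variable mentioned above can be set alongside `INSTALL_K3S_SKIP_DOWNLOAD=true` if you need to pin a specific K3s version.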
+ +Install K3s on each server: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +Install K3s on each agent: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` + +>**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. + +### 4. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### Note on Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if not restarted automatically by installer). + + + + +We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). 
Before you can start your Kubernetes cluster, you'll need to install RKE and create an RKE config file.
+
+### 1. Install RKE
+
+Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/)
+
+### 2. Create an RKE Config File
+
+From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`.
+
+This file is an RKE configuration file that describes the cluster you're deploying Rancher to.
+
+Replace the values in the code sample below with the help of the _RKE Options_ table. Use the IP addresses or DNS names of the [3 nodes](installation/air-gap-high-availability/provision-hosts) you created.
+
+> **Tip:** For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/).
+
    RKE Options
    + +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | +| `user` | ✓ | A user that can run Docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### 3. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### 4. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +
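+
+Before moving on, you can optionally confirm that the cluster responds using the generated kubeconfig. A minimal sketch, assuming the default file names above and a workstation with `kubectl` installed:
+
+```
+export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml
+kubectl get nodes                   # all three nodes should report Ready
+kubectl get pods --all-namespaces   # core pods should be Running or Completed
+```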
    +
    + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](installation/options/troubleshooting/) page. + +### [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md new file mode 100644 index 0000000000..954279d62d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -0,0 +1,367 @@ +--- +title: 4. Install Rancher +weight: 400 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/install-rancher + - /rancher/v2.0-v2.4/en/installation/air-gap/install-rancher + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/install-rancher/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section is about how to deploy Rancher for your air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. There are _tabs_ for either a high availability (recommended) or a Docker installation. + + + + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher in five parts: + +- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) +- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) +- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) +- [4. Install Rancher](#4-install-rancher) +- [5. For Rancher versions before v2.3.0, Configure System Charts](#5-for-rancher-versions-before-v2-3-0-configure-system-charts) + +# 1. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. 
+ ```plain + helm fetch rancher-/rancher + ``` + + If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: + ```plain + helm fetch rancher-stable/rancher --version=v2.4.8 + ``` + +# 2. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
    This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
This option must be passed when rendering the Rancher Helm template. | no |
+
+# 3. Render the Rancher Helm Template
+
+When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations.
+
+| Chart Option | Chart Value | Description |
+| ----------------------- | -------------------------------- | ---- |
+| `certmanager.version` | `` | Configure the proper Rancher TLS issuer depending on the running cert-manager version. |
+| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. |
+| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ |
+
+Based on the choice you made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below.
+
+### Option A: Default Self-Signed Certificate
+
    + Click to expand + +By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. + +> **Note:** +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](installation/options/upgrading-cert-manager/). + +1. From a system connected to the internet, add the cert-manager repo to Helm. + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v1.0.4 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + ```plain + helm template cert-manager ./cert-manager-v1.0.4.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller \ + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml + ``` + +1. Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + + Placeholder | Description + ------------|------------- + `` | The version number of the output tarball. + `` | The DNS name you pointed at your load balancer. + `` | The DNS name for your private registry. + `` | Cert-manager version running on k8s cluster. + + ```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` + +
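+
+Before copying the rendered manifests across the air gap, it can be worth confirming that every image reference points at your private registry rather than at Docker Hub or Quay. A rough sketch, assuming GNU grep on the workstation where you rendered the templates:
+
+```
+# List the unique image references in the rendered cert-manager and Rancher manifests.
+grep -rhoE 'image: *"?[^" ]+' ./cert-manager ./rancher | sort -u
+```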
    + +### Option B: Certificates From Files using Kubernetes Secrets + +
    + Click to expand + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` + +Then refer to [Adding TLS Secrets](installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. + +
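+
+For reference, here is a sketch of the kind of commands the Adding TLS Secrets page describes, using Rancher's default secret names and placeholder file names (`tls.crt`, `tls.key`, `cacerts.pem`). The secrets live in the `cattle-system` namespace, so they can only be created once that namespace exists in the next step:
+
+```
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+  --cert=tls.crt \
+  --key=tls.key
+
+# Only needed together with --set privateCA=true:
+kubectl -n cattle-system create secret generic tls-ca \
+  --from-file=cacerts.pem=./cacerts.pem
+```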
+
+# 4. Install Rancher
+
+Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation.
+
+Use `kubectl` to create namespaces and apply the rendered manifests.
+
+If you chose to use self-signed certificates in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), install cert-manager.
+
+### For Self-Signed Certificate Installs, Install Cert-manager
+
    + Click to expand + +If you are using self-signed certificates, install cert-manager: + +1. Create the namespace for cert-manager. +```plain +kubectl create namespace cert-manager +``` + +1. Create the cert-manager CustomResourceDefinitions (CRDs). +```plain +kubectl apply -f cert-manager/cert-manager-crd.yaml +``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Launch cert-manager. +```plain +kubectl apply -R -f ./cert-manager +``` + +
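+
+Once the cert-manager manifests have been applied, it is worth checking that its pods come up before continuing. A quick sketch:
+
+```
+kubectl get pods --namespace cert-manager
+# Expect the cert-manager, cainjector and webhook pods to reach the Running state.
+```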
    + +### Install Rancher with kubectl + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` +**Step Result:** If you are installing Rancher v2.3.0+, the installation is complete. + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +# 5. For Rancher versions before v2.3.0, Configure System Charts + +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../resources/local-system-charts.md). + +# Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](installation/resources/chart-options/) +- [Adding TLS secrets](installation/resources/encryption/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations](installation/options/troubleshooting/) + +
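+
+Whichever SSL option you chose, once the Rancher manifests have been applied you can watch the deployment roll out. A sketch, assuming the chart's default deployment name:
+
+```
+kubectl -n cattle-system rollout status deploy/rancher
+kubectl -n cattle-system get pods
+```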
    + + +The Docker installation is for Rancher users who want to test out Rancher. + +Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** There is no upgrade path to transition your Docker installation to a Kubernetes Installation.** Instead of running the single node installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. _Available as of v2.3.0_ | + +> **Do you want to...** +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](installation/options/custom-ca-root-certificate/). +> - Record all transactions with the Rancher API? See [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). + +- For Rancher before v2.3.0, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](../../resources/local-system-charts.md) + +Choose from the following options: + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | + + + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisite:** The certificate files must be in PEM format. + +After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to install. | + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
    + +If you are installing Rancher v2.3.0+, the installation is complete. + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. + +If you are installing Rancher versions before v2.3.0, you will not be able to use the packaged system charts. Since the Rancher system charts are hosted in Github, an air gapped installation will not be able to access these charts. Therefore, you must [configure the Rancher system charts](../../resources/local-system-charts.md). + +
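+
+To confirm that the single-node installation is up, you can tail the container logs and hit Rancher's ping endpoint from the host. This is a sketch; the container ID is a placeholder taken from `docker ps`:
+
+```
+docker ps                        # note the ID of the rancher/rancher container
+docker logs -f CONTAINER_ID      # watch the startup logs
+curl -sk https://localhost/ping  # should return: pong
+```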
    +
    diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md new file mode 100644 index 0000000000..41657d7123 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md @@ -0,0 +1,301 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/prepare-private-registry/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/prepare-private-registry/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-for-private-reg/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +By default, all images used to [provision Kubernetes clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any [tools](../../../../reference-guides/rancher-cluster-tools.md) in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. + +Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. + +The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes](../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed. + +> **Prerequisites:** +> +> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. +> +> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. + + + + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) +2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) +3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) +4. 
[Populate the private registry](#4-populate-the-private-registry) + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### 1. Find the required assets for your Rancher version + +1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### 2. Collect the cert-manager image + +> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v1.0.4 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### 4. Populate the private registry + +Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. 
+ +The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` + + + + +_Available as of v2.3.0_ + +For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +# Windows Steps + +The Windows images need to be collected and pushed from a Windows server workstation. + +1. Find the required assets for your Rancher version +2. Save the images to your Windows Server workstation +3. Prepare the Docker daemon +4. Populate the private registry + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + +| Release File | Description | +|----------------------------|------------------| +| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | +| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | +| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + ```plain + ./rancher-save-images.ps1 + ``` + + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + + + +### 3. Prepare the Docker daemon + +Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). 
Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ``` + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + + + +### 4. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. + +1. Using `powershell`, log into your private registry if required: + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.ps1 --registry + ``` + +# Linux Steps + +The Linux images needs to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. + +1. Find the required assets for your Rancher version +2. Collect all the required images +3. Save the images to your Linux workstation +4. Populate the private registry + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** + +2. From the release's **Assets** section, download the following files: + +| Release File | Description | +|----------------------------| -------------------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. 
Collect all the required images + +**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.12.0 + helm template ./cert-manager-.tgz | grep -oP '(?<=image: ").*(?=")' >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + + + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + +**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + + + +### 4. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. + +The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + +```plain +docker login +``` + +1. Make `rancher-load-images.sh` an executable: + +``` +chmod +x rancher-load-images.sh +``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + +```plain +./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry +``` + + + + +### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster](install-kubernetes.md) + +### [Next step for Docker Installs - Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md new file mode 100644 index 0000000000..406364ad1e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md @@ -0,0 +1,151 @@ +--- +title: '2. 
Install Kubernetes' +weight: 200 +--- + +Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in. + +### Installing Docker + +First, you have to install Docker and setup the HTTP proxy on all three Linux nodes. For this perform the following steps on all three nodes. + +For convenience export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell: + +``` +export proxy_host="10.0.0.5:8888" +export HTTP_PROXY=http://${proxy_host} +export HTTPS_PROXY=http://${proxy_host} +export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16 +``` + +Next configure apt to use this proxy when installing packages. If you are not using Ubuntu, you have to adapt this step accordingly: + +``` +cat <<'EOF' | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null +Acquire::http::Proxy "http://${proxy_host}/"; +Acquire::https::Proxy "http://${proxy_host}/"; +EOF +``` + +Now you can install Docker: + +``` +curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh +``` + +Then ensure that your current user is able to access the Docker daemon without sudo: + +``` +sudo usermod -aG docker YOUR_USERNAME +``` + +And configure the Docker daemon to use the proxy to pull images: + +``` +sudo mkdir -p /etc/systemd/system/docker.service.d +cat <<'EOF' | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null +[Service] +Environment="HTTP_PROXY=http://${proxy_host}" +Environment="HTTPS_PROXY=http://${proxy_host}" +Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16" +EOF +``` + +To apply the configuration, restart the Docker daemon: + +``` +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + +### Creating the RKE Cluster + +You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster: + +* [RKE CLI binary](https://rancher.com/docs/rke/latest/en/installation/#download-the-rke-binary) + +``` +sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64 +sudo chmod +x /usr/local/bin/rke +``` + +* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +``` +curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" +chmod +x ./kubectl +sudo mv ./kubectl /usr/local/bin/kubectl +``` + +* [helm](https://helm.sh/docs/intro/install/) + +``` +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 +chmod +x get_helm.sh +sudo ./get_helm.sh +``` + +Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). 
+ +``` +nodes: + - address: 10.0.1.200 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 10.0.1.201 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 10.0.1.202 + user: ubuntu + role: [controlplane,worker,etcd] + +services: + etcd: + backup_config: + interval_hours: 12 + retention: 6 +``` + +After that, you can create the Kubernetes cluster by running: + +``` +rke up --config rancher-cluster.yaml +``` + +RKE creates a state file called `rancher-cluster.rkestate`, this is needed if you want to perform updates, modify your cluster configuration or restore it from a backup. It also creates a `kube_config_rancher-cluster.yaml` file, that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system. + +To have a look at your cluster run: + +``` +export KUBECONFIG=kube_config_rancher-cluster.yaml +kubectl cluster-info +kubectl get pods --all-namespaces +``` + +You can also verify that your external load balancer works, and the DNS entry is set up correctly. If you send a request to either, you should receive HTTP 404 response from the ingress controller: + +``` +$ curl 10.0.1.100 +default backend - 404 +$ curl rancher.example.com +default backend - 404 +``` + +### Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](installation/options/troubleshooting/) page. + +### [Next: Install Rancher](install-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md new file mode 100644 index 0000000000..5e8232e060 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md @@ -0,0 +1,86 @@ +--- +title: 3. Install Rancher +weight: 300 +--- + +Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use-case you will likely use Let's Encrypt or provide your own certificate. + +> **Note:** These installation instructions assume you are using Helm 3. 
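+
+If you are not sure which Helm version is on your workstation, it is worth checking before you continue. `helm version` exists in both Helm 2 and Helm 3, and the output differs enough to tell them apart (Helm 2 also reports a Tiller server version), so this is a quick, optional sanity check:
+
+```
+helm version --short
+```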
+ +### Install cert-manager + +Add the cert-manager helm repository: + +``` +helm repo add jetstack https://charts.jetstack.io +``` + +Create a namespace for cert-manager: + +``` +kubectl create namespace cert-manager +``` + +Install the CustomResourceDefinitions of cert-manager: + +``` +kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v0.15.2/cert-manager.crds.yaml +``` + +And install it with Helm. Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers: + +``` +helm upgrade --install cert-manager jetstack/cert-manager \ + --namespace cert-manager --version v0.15.2 \ + --set http_proxy=http://${proxy_host} \ + --set https_proxy=http://${proxy_host} \ + --set no_proxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local +``` + +Now you should wait until cert-manager is finished starting up: + +``` +kubectl rollout status deployment -n cert-manager cert-manager +kubectl rollout status deployment -n cert-manager cert-manager-webhook +``` + +### Install Rancher + +Next you can install Rancher itself. First add the helm repository: + +``` +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +``` + +Create a namespace: + +``` +kubectl create namespace cattle-system +``` + +And install Rancher with Helm. Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata: + +``` +helm upgrade --install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.example.com \ + --set proxy=http://${proxy_host} +``` + +After waiting for the deployment to finish: + +``` +kubectl rollout status deployment -n cattle-system rancher +``` + +You can now navigate to `https://rancher.example.com` and start using Rancher. + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +### Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](installation/resources/chart-options/) +- [Adding TLS secrets](installation/resources/encryption/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations](installation/options/troubleshooting/) diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md new file mode 100644 index 0000000000..fcd7ad6a06 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md @@ -0,0 +1,61 @@ +--- +title: '1. Set up Infrastructure' +weight: 100 +--- + +In this section, you will provision the underlying infrastructure for your Rancher management server with internete access through a HTTP proxy. 
+ +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will connect to the internet through an HTTP proxy. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. 
Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + + +### [Next: Set up a Kubernetes cluster](install-kubernetes.md) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md rename to versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md new file mode 100644 index 0000000000..d0bfbfb1a2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md @@ -0,0 +1,85 @@ +--- +title: Rolling Back Rancher Installed with Docker +weight: 1015 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/single-node-rollbacks + - /rancher/v2.0-v2.4/en/upgrades/rollbacks/single-node-rollbacks +--- + +If a Rancher upgrade does not complete 
successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade](upgrades/upgrades/single-node-upgrade). Rolling back restores: + +- Your previous version of Rancher. +- Your data backup created before upgrade. + +## Before You Start + +During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker pull rancher/rancher: +``` + +In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <PRIOR_RANCHER_VERSION> and <RANCHER_CONTAINER_NAME>![Placeholder Reference](/img/placeholder-ref-2.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | ------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that the backup is for. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Rolling Back Rancher + +If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. + +>**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. + + For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. + + ``` + docker pull rancher/rancher: + ``` + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + You can obtain the name for your Rancher container by entering `docker ps`. + +1. Move the backup tarball that you created during completion of [Docker Upgrade](upgrades/upgrades/single-node-upgrade/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Docker Upgrade](upgrades/upgrades/single-node-upgrade/), it will have a name similar to (`rancher-data-backup--.tar.gz`). + +1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. + + ``` + docker run --volumes-from rancher-data \ + -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ + && tar zxvf /backup/rancher-data-backup--.tar.gz" + ``` + +1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. + ``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher: + ``` + + >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. + +**Result:** Rancher is rolled back to its version and data state before upgrade. 
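+
+If you prefer to watch the startup from the terminal instead of refreshing the browser, one option is to tail the logs of the container you just started. `<NEW_CONTAINER_ID>` below is an illustrative placeholder for the ID shown by `docker ps`:
+
+```
+docker logs --follow <NEW_CONTAINER_ID>
+```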
diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md new file mode 100644 index 0000000000..ec1d1343e5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md @@ -0,0 +1,363 @@ +--- +title: Upgrading Rancher Installed with Docker +weight: 1010 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/single-node-upgrade/ + - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-air-gap-upgrade + - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node + - /rancher/v2.0-v2.4/en/upgrades/upgrades/single-node-upgrade/ + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The following instructions will guide you through upgrading a Rancher server that was installed with Docker. + +# Prerequisites + +- **Review the [known upgrade issues](../../../../pages-for-subheaders/upgrades.md#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren’t supported. +- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry](../air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +# Placeholder Review + +During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). + +Here's an **example** of a command with a placeholder: + +``` +docker stop +``` + +In this command, `` is the name of your Rancher container. + +# Get Data for Upgrade Commands + +To obtain the data to replace the placeholders, run: + +``` +docker ps +``` + +Write down or copy this information before starting the upgrade. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | +| `` | `2018-12-19` | The date that the data container or backup was created. | +
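+
+If you prefer to read these values directly from the terminal, `docker ps` accepts an output template that prints only the image tag and container name. This is standard Docker formatting syntax and purely optional:
+
+```
+docker ps --format "table {{.Image}}\t{{.Names}}"
+```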
    + +You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +# Upgrade Outline + +During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: + +- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) +- [2. Create a backup tarball](#2-create-a-backup-tarball) +- [3. Pull the new Docker image](#3-pull-the-new-docker-image) +- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) +- [5. Verify the Upgrade](#5-verify-the-upgrade) +- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) + +# 1. Create a copy of the data from your Rancher server container + +1. Using a remote Terminal connection, log into the node running your Rancher server. + +1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data rancher/rancher: + ``` + +# 2. Create a backup tarball + +1. From the data container that you just created (rancher-data), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). + + This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. + + + ``` + docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** When you enter this command, a series of commands should run. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + + ``` + [rancher@ip-10-0-0-50 ~]$ ls + rancher-data-backup-v2.1.3-20181219.tar.gz + ``` + +1. Move your backup tarball to a safe location external from your Rancher server. + +# 3. Pull the New Docker Image + +Pull the image of the Rancher version that you want to upgrade to. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker pull rancher/rancher: +``` + +# 4. Start the New Rancher Server Container + +Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. + +>**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
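+
+If you no longer remember exactly which environment variables or arguments the original container was started with, you can read them back from the stopped container before starting the new one. `<RANCHER_CONTAINER_NAME>` is the placeholder from the table above, and the templates are standard `docker inspect` syntax:
+
+```
+docker inspect <RANCHER_CONTAINER_NAME> --format '{{json .Config.Env}}'
+docker inspect <RANCHER_CONTAINER_NAME> --format '{{json .Config.Cmd}}'
+```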
+ +If you used a proxy, see [HTTP Proxy Configuration.](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) + +If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) + +If you are recording all transactions with the Rancher API, see [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +To see the command to use when starting the new Rancher server container, choose from the following options: + +- Docker Upgrade +- Docker Upgrade for Air Gap Installs + + + + +Select which option you had installed Rancher server + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher: +``` + +
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. + +Placeholder | Description +------------|------------- + `` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher: +``` + + +
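+
+If the new container later fails to serve your certificate, a common cause is a certificate/key mismatch. For RSA keys, one way to confirm that the files you are mounting belong together is to compare their modulus digests, assuming `openssl` is installed on the host:
+
+```
+openssl x509 -noout -modulus -in <FULL_CHAIN.pem> | openssl md5
+openssl rsa -noout -modulus -in <PRIVATE_KEY.pem> | openssl md5
+# The two digests should be identical.
+```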
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + rancher/rancher: \ + --no-cacerts +``` + +
    + +### Option D: Let's Encrypt Certificate + +
    + Click to expand + +>**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. + +>**Reminder of the Cert Prerequisites:** +> +>- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +>- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. +`` | The domain address that you had originally started with + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher: \ + --acme-domain +``` + +
    + +
    + + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> For Rancher versions from v2.2.0 to v2.2.x, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach. Then, after Rancher is installed, you will need to configure Rancher to use that repository. For details, refer to the documentation on [setting up the system charts for Rancher before v2.3.0.](../../resources/local-system-charts.md) + +When starting the new Rancher server container, choose from the following options: + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to to upgrade to. + +``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. + + >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version](installation/resources/chart-options/) that you want to upgrade to. + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ #Available as of v2.3.0, use the packaged Rancher system charts + /rancher/rancher: +``` + +
    + +
    +
    + +**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. + +# 5. Verify the Upgrade + +Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. + +>**Having network issues in your user clusters following upgrade?** +> +> See [Restoring Cluster Networking](../../install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md#restoring-cluster-networking). + + +# 6. Clean up Your Old Rancher Server Container + +Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. + +# Rolling Back + +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback](upgrades/rollbacks/single-node-rollbacks/). diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md new file mode 100644 index 0000000000..f9b2d4a435 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/add-tls-secrets.md @@ -0,0 +1,38 @@ +--- +title: Adding TLS Secrets +weight: 2 +--- + +Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. + +Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. + +For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. +This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. + +Use `kubectl` with the `tls` secret type to create the secrets. + +``` +kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. + +# Using a Private CA Signed Certificate + +If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. + +Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. + +``` +kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem=./cacerts.pem +``` + +> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. 
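+
+One way to start new Rancher pods so that an updated `tls-ca` secret is picked up is a rolling restart of the deployment. This assumes the default deployment name `rancher` in the `cattle-system` namespace and a `kubectl` version that supports `rollout restart`:
+
+```
+kubectl -n cattle-system rollout restart deployment/rancher
+kubectl -n cattle-system rollout status deployment/rancher
+```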
+ +# Updating a Private CA Certificate + +Follow the steps on [this page](update-rancher-certificate.md) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md new file mode 100644 index 0000000000..8b8fbcab31 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md @@ -0,0 +1,105 @@ +--- +title: Choosing a Rancher Version +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/server-tags +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** + + + + +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. + +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +### Helm Chart Repositories + +Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. + +| Type | Command to Add the Repo | Description of the Repo | +| -------------- | ------------ | ----------------- | +| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | +| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | +| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. 
Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless of repository, aren't supported. |
+
+
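+After adding a repository, refreshing the local chart index ensures that the versions listed later on this page are current. This is standard Helm usage and optional:
+
+```
+helm repo update
+```
+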
    +Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). + +> **Note:** The introduction of the `rancher-latest` and `rancher-stable` Helm Chart repositories was introduced after Rancher v2.1.0, so the `rancher-stable` repository contains some Rancher versions that were never marked as `rancher/rancher:stable`. The versions of Rancher that were tagged as `rancher/rancher:stable` before v2.1.0 are v2.0.4, v2.0.6, v2.0.8. Post v2.1.0, all charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. + +### Helm Chart Versions + +Rancher Helm chart versions match the Rancher version (i.e `appVersion`). Once you've added the repo you can search it to show available versions with the following command:
    +    `helm search repo --versions` + +If you have several repos you can specify the repo name, ie. `helm search repo rancher-stable/rancher --versions`
    +For more information, see https://helm.sh/docs/helm/helm_search_repo/ + +To fetch a specific version of your chosen repo, define the `--version` parameter like in the following example:
    +    `helm fetch rancher-stable/rancher --version=2.4.8` + +For the Rancher v2.1.x versions, there were some Helm charts where the version was a build number, i.e. `yyyy.mm.`. These charts have been replaced with the equivalent Rancher version and are no longer available. + +### Switching to a Different Helm Chart Repository + +After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. + +> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. + +{{< release-channel >}} + +1. List the current Helm chart repositories. + + ```plain + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + +2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. + + ```plain + helm repo remove rancher- + ``` + +3. Add the Helm chart repository that you want to start installing Rancher from. + + ```plain + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +4. Continue to follow the steps to [upgrade Rancher](installation/upgrades-rollbacks/upgrades/ha) from the new Helm chart repository. + +
    + + +When performing [Docker installs](installation/single-node), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. + +### Server Tags + +Rancher Server is distributed as a Docker image, which have tags attached to them. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. + +| Tag | Description | +| -------------------------- | ------ | +| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | +| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | +| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | + +> **Notes:** +> +> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. +> - Want to install an alpha review for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. + + +
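+
+Because a floating tag such as `latest` or `stable` only changes on your host when you pull it explicitly, pulling before you (re)run the container is the usual way to pick up a new build. The specific version below is just an example:
+
+```
+docker pull rancher/rancher:latest
+docker pull rancher/rancher:v2.4.8
+```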
    diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md new file mode 100644 index 0000000000..892d6ae6e7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md @@ -0,0 +1,28 @@ +--- +title: About Custom CA Root Certificates +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/custom-ca-root-certificate/ + - /rancher/v2.0-v2.4/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate +--- + +If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). + +Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. + +To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. + +Examples of services that Rancher can access: + +- Catalogs +- Authentication providers +- Accessing hosting/cloud API when using Node Drivers + +## Installing with the custom CA Certificate + +For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: + +- [Docker install Custom CA certificate options](../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) + +- [Kubernetes install options for Additional Trusted CAs](../../../reference-guides/installation-references/helm-chart-options.md#additional-trusted-cas) + diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md new file mode 100644 index 0000000000..e2165e7096 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/helm-version-requirements.md @@ -0,0 +1,17 @@ +--- +title: Helm Version Requirements +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm-version + - /rancher/v2.0-v2.4/en/installation/options/helm2 + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher +--- + +This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. + +> The installation instructions have been updated for Helm 3. 
For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section](installation/options/helm2) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. +- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. +- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md new file mode 100644 index 0000000000..1778e59f1b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/local-system-charts.md @@ -0,0 +1,72 @@ +--- +title: Setting up Local System Charts for Air Gapped Installations +weight: 120 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md + - /rancher/v2.0-v2.4/en/installation/options/local-system-charts +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. + +In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag in Rancher v2.3.0, and using a Git mirror for Rancher versions before v2.3.0. + +# Using Local System Charts in Rancher v2.3.0 + +In Rancher v2.3.0, a local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. + +Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation](installation/air-gap-single-node/install-rancher) instructions and the [air gap Kubernetes installation](installation/air-gap-high-availability/install-rancher/) instructions. + +# Setting Up System Charts for Rancher Before v2.3.0 + +### A. Prepare System Charts + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository to a location in your network that Rancher can reach and configure Rancher to use that repository. 
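+
+A plain Git mirror is sufficient for this. One possible way to create it on a host inside your network is shown below; the destination URL is only an example and should be replaced with your own Git server:
+
+```
+git clone --mirror https://github.com/rancher/system-charts.git
+cd system-charts.git
+git push --mirror https://git.example.com/rancher/system-charts.git
+```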
+ +Refer to the release notes in the `system-charts` repository to see which branch corresponds to your version of Rancher. + +### B. Configure System Charts + +Rancher needs to be configured to use your Git mirror of the `system-charts` repository. You can configure the system charts repository either from the Rancher UI or from Rancher's API view. + + + + +In the catalog management page in the Rancher UI, follow these steps: + +1. Go to the **Global** view. + +1. Click **Tools > Catalogs.** + +1. The system chart is displayed under the name `system-library`. To edit the configuration of the system chart, click **⋮ > Edit.** + +1. In the **Catalog URL** field, enter the location of the Git mirror of the `system-charts` repository. + +1. Click **Save.** + +**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. + + + + +1. Log into Rancher. + +1. Open `https:///v3/catalogs/system-library` in your browser. + + ![](/img/airgap/system-charts-setting.png) + +1. Click **Edit** on the upper right corner and update the value for **url** to the location of the Git mirror of the `system-charts` repository. + + ![](/img/airgap/system-charts-update.png) + +1. Click **Show Request** + +1. Click **Send Request** + +**Result:** Rancher is configured to download all the required catalog items from your `system-charts` repository. + + + diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md new file mode 100644 index 0000000000..f6b30a398b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md @@ -0,0 +1,234 @@ +--- +title: Updating the Rancher Certificate +weight: 10 +--- + +# Updating a Private CA Certificate + +Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. + +A summary of the steps is as follows: + +1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. +2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). +3. Update the Rancher installation using the Helm CLI. +4. Reconfigure the Rancher agents to trust the new CA certificate. + +The details of these instructions are below. + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. 
Create/update the CA certificate secret resource + +If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca` secret in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). + +To create the initial secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem +``` + +To update an existing `tls-ca` secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 3. Reconfigure the Rancher deployment + +> Before proceeding, [generate an API token in the Rancher UI](../../../reference-guides/user-settings/api-keys.md#creating-an-api-key) (User > API & Keys). + +This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). + +It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. + +To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: + +``` +$ helm get values rancher -n cattle-system +``` + +Also get the version string of the currently deployed Rancher chart: + +``` +$ helm ls -A +``` + +Upgrade the Helm application instance using the original configuration values, making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade. + +If the certificate was signed by a private CA, add the `--set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates. + +``` +helm upgrade rancher rancher-stable/rancher \ + --namespace cattle-system \ + --version \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret \ + --set ... +``` + +When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written in the `tls-ca` secret earlier. + +## 4. Reconfigure Rancher agents to trust the private CA + +This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true: + +- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`) +- The root CA certificate for the new custom certificate has changed + +### Why is this step required? + +When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on the Rancher server side, the environment variable `CATTLE_CA_CHECKSUM` must be updated accordingly. + +### Which method should I choose? + +Method 1 is the easiest, but it requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (Step 3).
+ +If the clusters have lost connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.0-v2.4/en/cluster-admin/cluster-access/ace/) enabled, then go with method 2. + +Method 3 can be used as a fallback if method 1 and 2 are unfeasible. + +### Method 1: Kubectl command + +For each cluster under Rancher management (including `local`) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). + +``` +kubectl patch clusters -p '{"status":{"agentImage":"dummy"}}' --type merge +``` + +This command will cause all Agent Kubernetes resources to be reconfigured with the checksum of the new certificate. + + +### Method 2: Manually update checksum + +Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: + +``` +$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp +$ sha256sum cacert.tmp | awk '{print $1}' +``` + +Using a Kubeconfig for each downstream cluster update the environment variable for the two agent deployments. + +``` +$ kubectl edit -n cattle-system ds/cattle-node-agent +$ kubectl edit -n cattle-system deployment/cluster-agent +``` + +### Method 3: Recreate Rancher agents + +With this method you are recreating the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. + +First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 + +Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: +https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b + +# Updating from a Private CA Certificate to a Common Certificate + +>It is possible to perform the opposite procedure as shown above: you may change from a private certificate to a common, or non-private, certificate. The steps involved are outlined below. + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. Delete the CA certificate secret resource + +You will delete the `tls-ca secret` in the `cattle-system` namespace as it is no longer needed. You may also optionally save a copy of the `tls-ca secret` if desired. + +To save the existing secret: + +``` +kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml +``` + +To delete the existing `tls-ca` secret: + +``` +kubectl -n cattle-system delete secret tls-ca +``` + +## 3. 
Reconfigure the Rancher deployment + +> Before proceeding, [generate an API token in the Rancher UI](https://rancher.com/docs/rancher/v2.6/en/user-settings/api-keys/#creating-an-api-key) (User > API & Keys) and save the Bearer Token, which you might need in step 4. + +This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). + +It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. + +To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: + +``` +$ helm get values rancher -n cattle-system +``` + +Also get the version string of the currently deployed Rancher chart: + +``` +$ helm ls -A +``` + +Upgrade the Helm application instance using the original configuration values, making sure to specify the current chart version to prevent an application upgrade. + +Also make sure to read the documentation describing the initial installation using custom certificates. + +``` +helm upgrade rancher rancher-stable/rancher \ + --namespace cattle-system \ + --version \ + --set hostname=rancher.my.org \ + --set ... +``` + +On upgrade, you can either + +- remove `--set ingress.tls.source=secret \` from the Helm upgrade command, as shown above, or + +- remove the `privateCA` parameter or set it to `false` because the CA is valid: + +``` +--set privateCA=false +``` + +## 4. Reconfigure Rancher agents for the non-private/common certificate + +The `CATTLE_CA_CHECKSUM` environment variable on the downstream cluster agents should be removed or set to "" (an empty string). \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md new file mode 100644 index 0000000000..7fb9e3a047 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md @@ -0,0 +1,181 @@ +--- +title: Upgrading Cert-Manager with Helm 2 +weight: 2040 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.0-v2.4/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/helm-2-instructions/ +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. +1.
[Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) + +To address these changes, this guide will do two things: + +1. Document the procedure for upgrading cert-manager +1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data + +> **Important:** +> If you are currently running a cert-manager version older than v0.11 and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: + +> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager + +> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager API version, and therefore the upgrade will be rejected. + +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. + +## Upgrade Cert-Manager Only + +> **Note:** +> These instructions apply only if you do not plan to upgrade Rancher. + +The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in `kube-system`, use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. + +In order to upgrade cert-manager, follow these instructions: + +
    + Upgrading cert-manager with Internet access +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml + ``` + +1. Delete the existing deployment + + ```plain + helm delete --purge cert-manager + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager + ``` + +
    + +
    + Upgrading cert-manager in an airgapped environment + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry](installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace kube-system \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml + ``` + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + +1. Install cert-manager + + ```plain + kubectl -n kube-system apply -R -f ./cert-manager + ``` + +
    + + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace kube-system + +NAME READY STATUS RESTARTS AGE +cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m +cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m +cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m +``` + +If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. + +> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. Here are additional resources that explain why this is necessary: +> +> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) +> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) + +## Cert-Manager API change and data migration + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). 
diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md new file mode 100644 index 0000000000..bb9758deb5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md @@ -0,0 +1,246 @@ +--- +title: Upgrading Cert-Manager +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager + - /rancher/v2.0-v2.4/en/installation/options/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.0-v2.4/en/installation/resources/encryption/upgrading-cert-manager +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. +1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) + +To address these changes, this guide will do two things: + +1. Document the procedure for upgrading cert-manager +1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data + +> **Important:** +> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: + +> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager + +> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. + +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](installation/upgrades-rollbacks/upgrades/ha/) under the upgrade Rancher section. + +# Upgrade Cert-Manager + +The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. + +> These instructions have been updated for Helm 3. 
If you are still using Helm 2, refer to [these instructions.](installation/options/upgrading-cert-manager/helm-2-instructions) + +In order to upgrade cert-manager, follow these instructions: + +### Option A: Upgrade cert-manager with Internet Access + +
    + Click to expand +1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) + + ```plain + helm uninstall cert-manager + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed + + ```plain + kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager if needed + + ```plain + kubectl create namespace cert-manager + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
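+
+If you backed up resources from a version older than v0.11, remember the apiVersion rewrite called out in the backup step above. A minimal sketch of that rewrite is shown here, assuming GNU `sed` and the `cert-manager-backup.yaml` file created earlier; review the result by hand before restoring, since annotations that reference the old API group also need to be updated.
+
+```plain
+# Rewrite the old cert-manager API group to the new one in the backup file
+sed -i 's#certmanager.k8s.io/v1alpha1#cert-manager.io/v1alpha2#g' cert-manager-backup.yaml
+```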
    + +### Option B: Upgrade cert-manager in an Air Gap Environment + +
    + Click to expand + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry](installation/air-gap-installation/prepare-private-reg/) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + The Helm 3 command is as follows: + + ```plain + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + + The Helm 2 command is as follows: + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager (old and new) + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n cert-manager \ + delete deployment,sa,clusterrole,clusterrolebinding \ + -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f cert-manager/cert-manager-crd-old.yaml + ``` + +1. 
Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager + + ```plain + kubectl create namespace cert-manager + ``` + +1. Install cert-manager + + ```plain + kubectl -n cert-manager apply -R -f ./cert-manager + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
    + +### Verify the Deployment + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +## Cert-Manager API change and data migration + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). + diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md new file mode 100644 index 0000000000..e71a60861c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md @@ -0,0 +1,167 @@ +--- +title: Upgrading and Rolling Back Kubernetes +weight: 70 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. + +Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/). 
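+
+The upgrade strategy itself is expressed in the cluster's `cluster.yml` (see the sections on configuring the upgrade strategy below). As rough orientation only, here is a sketch of what such a block can look like; the field names follow the RKE v1.1.0+ documentation and the values are purely illustrative.
+
+```yaml
+upgrade_strategy:
+  max_unavailable_worker: 10%        # worker nodes are upgraded in batches of this size
+  max_unavailable_controlplane: "1"  # control plane nodes are upgraded one at a time
+  drain: false                       # set to true to drain (not just cordon) nodes before upgrading
+  node_drain_input:
+    ignore_daemonsets: true
+    delete_local_data: false
+    grace_period: -1                 # use each pod's own termination grace period
+    timeout: 120                     # seconds to wait for the drain before giving up
+```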
+ +This section covers the following topics: + +- [New Features](#new-features) +- [Tested Kubernetes Versions](#tested-kubernetes-versions) +- [How Upgrades Work](#how-upgrades-work) +- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) +- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) +- [Rolling Back](#rolling-back) +- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) + - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) + - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) + - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) + - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) +- [Troubleshooting](#troubleshooting) + +# New Features + +As of Rancher v2.3.0, the Kubernetes metadata feature was added, which allows Rancher to ship Kubernetes patch versions without upgrading Rancher. For details, refer to the [section on Kubernetes metadata.](upgrade-kubernetes-without-upgrading-rancher.md) + +As of Rancher v2.4.0, + +- The ability to import K3s Kubernetes clusters into Rancher was added, along with the ability to upgrade Kubernetes when editing those clusters. For details, refer to the [section on imported clusters.](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) +- New advanced options are exposed in the Rancher UI for configuring the upgrade strategy of an RKE cluster: **Maximum Worker Nodes Unavailable** and **Drain nodes.** These options leverage the new cluster upgrade process of RKE v1.1.0, in which worker nodes are upgraded in batches, so that applications can remain available during cluster upgrades, under [certain conditions.](#maintaining-availability-for-applications-during-upgrades) + +# Tested Kubernetes Versions + +Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.4.17/) + +# How Upgrades Work + +RKE v1.1.0 changed the way that clusters are upgraded. + +In this section of the [RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. + + +# Recommended Best Practice for Upgrades + + + + +When upgrading the Kubernetes version of a cluster, we recommend that you: + +1. Take a snapshot. +1. Initiate a Kubernetes upgrade. +1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade kubernetes version before restoring the etcd snapshot. + +The restore operation will work on a cluster that is not in a healthy or active state. + + + + +When upgrading the Kubernetes version of a cluster, we recommend that you: + +1. Take a snapshot. +1. Initiate a Kubernetes upgrade. +1. If the upgrade fails, restore the cluster from the etcd snapshot. 
+ +The cluster cannot be downgraded to a previous Kubernetes version. + + + + +# Upgrading the Kubernetes Version + +> **Prerequisites:** +> +> - The options below are available only for [Rancher-launched RKE Kubernetes clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) and imported/registered K3s Kubernetes clusters. +> - Before upgrading Kubernetes, [back up your cluster.](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md) + +1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. + +1. Expand **Cluster Options**. + +1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. + +1. Click **Save**. + +**Result:** Kubernetes begins upgrading for the cluster. + +# Rolling Back + +_Available as of v2.4_ + +A cluster can be restored to a backup in which the previous Kubernetes version was used. For more information, refer to the following sections: + +- [Backing up a cluster](../../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md#how-snapshots-work) +- [Restoring a cluster from backup](../../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md#restoring-a-cluster-from-a-snapshot) + +# Configuring the Upgrade Strategy + +As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability) are met. + +The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. + +### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI + +From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. + +By default, the maximum number of unavailable worker is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. + +To change the default number or percentage of worker nodes, + +1. Go to the cluster view in the Rancher UI. +1. Click **⋮ > Edit.** +1. In the **Advanced Options** section, go to the **Maxiumum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. +1. Click **Save.** + +**Result:** The cluster is updated to use the new upgrade strategy. + +### Enabling Draining Nodes During Upgrades from the Rancher UI + +By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. + +To enable draining each node during a cluster upgrade, + +1. Go to the cluster view in the Rancher UI. +1. Click **⋮ > Edit.** +1. 
In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** +1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md#aggressive-and-safe-draining-options) +1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. +1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. +1. Click **Save.** + +**Result:** The cluster is updated to use the new upgrade strategy. + +> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show state of etcd and controlplane as drained, even though they are being drained. + +### Maintaining Availability for Applications During Upgrades + +_Available as of RKE v1.1.0_ + +In [this section of the RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. + +### Configuring the Upgrade Strategy in the cluster.yml + +More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. + +For details, refer to [Configuring the Upgrade Strategy](https://rancher.com/docs/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. + +# Troubleshooting + +If a node doesn't come up after an upgrade, the `rke up` command errors out. + +No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. + +If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. + +A failed node could be in many different states: + +- Powered off +- Unavailable +- User drains a node while upgrade is in process, so there are no kubelets on the node +- The upgrade itself failed + +If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. 
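+
+When an upgrade stalls, a quick way to see which nodes RKE is still waiting on is to list node status using the cluster's kubeconfig. This is only a minimal sketch; nodes stuck in `NotReady`, or left `SchedulingDisabled` by the cordon/drain step, are the usual suspects.
+
+```
+kubectl get nodes -o wide
+```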
diff --git a/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md new file mode 100644 index 0000000000..810fa19336 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -0,0 +1,100 @@ +--- +title: Upgrading Kubernetes without Upgrading Rancher +weight: 1120 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +_Available as of v2.3.0_ + +The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. + +> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. + +Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md). Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. + +This table below describes the CRDs that are affected by the periodic data sync. + +> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. + +| Resource | Description | Rancher API URL | +|----------|-------------|-----------------| +| System Images | List of system images used to deploy Kubernetes through RKE. 
| `/v3/rkek8ssystemimages` | +| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | +| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | + +Administrators might configure the RKE metadata settings to do the following: + +- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher +- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub +- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher + +### Refresh Kubernetes Metadata + +The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) + +To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. + +You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. + +### Configuring the Metadata Synchronization + +> Only administrators can change these settings. + +The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. + +The way that the metadata is configured depends on the Rancher version. + + + + +To edit the metadata config in Rancher, + +1. Go to the **Global** view and click the **Settings** tab. +1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** +1. You can optionally fill in the following parameters: + + - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. + - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. + +If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) + +However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. + + + + +To edit the metadata config in Rancher, + +1. Go to the **Global** view and click the **Settings** tab. +1. Go to the **rke-metadata-config** section. 
Click the **⋮** and click **Edit.** +1. You can optionally fill in the following parameters: + + - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. + - `url`: This is the HTTP path that Rancher fetches data from. + - `branch`: This refers to the Git branch name if the URL is a Git URL. + +If you don't have an air gap setup, you don't need to specify the URL or Git branch where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata.git) + +However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL and Git branch in the `rke-metadata-config` settings to point to the new location of the repository. + + + + +### Air Gap Setups + +Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) + +If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. + +To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) + +After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. + +1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +1. Download the OS specific image lists for Linux or Windows. +1. Download `rancher-images.txt`. +1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. + +**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. 
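+
+To double-check what an air gap installation is currently syncing from, the setting can also be read back over the Rancher API mentioned earlier. The following is only a sketch: the hostname and API token are placeholders, and the exact fields inside `value` (such as `refresh-interval-minutes` and `url`) depend on your Rancher version as described above.
+
+```
+# Print the current RKE metadata configuration
+curl -k -s -u "token-xxxxx:<secret>" \
+  https://<RANCHER_HOSTNAME>/v3/settings/rke-metadata-config | jq -r .value
+```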
diff --git a/versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md b/versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md new file mode 100644 index 0000000000..72105ef8fa --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/introduction/overview.md @@ -0,0 +1,65 @@ +--- +title: Overview +weight: 1 +--- +Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. + +# Run Kubernetes Everywhere + +Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. + +# Meet IT requirements + +Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: + +- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. +- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. +- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. + +# Empower DevOps Teams + +Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. + +The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. + +![Platform](/img/platform.png) + +# Features of the Rancher API Server + +The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: + +### Authorization and Role-Based Access Control + +- **User management:** The Rancher API server [manages user identities](../../pages-for-subheaders/about-authentication.md) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. +- **Authorization:** The Rancher API server manages [access control](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) and [security](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) policies. + +### Working with Kubernetes + +- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on existing nodes, or perform [Kubernetes upgrades.](../installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts](catalog/) that make it easy to repeatedly deploy applications. 
+- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration](../../pages-for-subheaders/manage-projects.md) and for [managing applications within projects.](../../pages-for-subheaders/kubernetes-resources-setup.md) +- **Pipelines:** Setting up a [pipeline](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. +- **Istio:** Our [integration with Istio](../../pages-for-subheaders/istio.md) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +### Working with Cloud Infrastructure + +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) in all clusters. +- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) and [persistent storage](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in the cloud. + +### Cluster Visibility + +- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. +- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. +- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. + +# Editing Downstream Clusters with Rancher + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. 
+ +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.](../../pages-for-subheaders/cluster-configuration.md) + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../../shared-files/_cluster-capabilities-table.md'; + + diff --git a/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md b/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/introduction/what-are-divio-docs.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md new file mode 100644 index 0000000000..445cdbc80a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/cli.md @@ -0,0 +1,51 @@ +--- +title: CLI with Rancher +weight: 100 +--- + +Interact with Rancher using command line interface (CLI) tools from your workstation. + +## Rancher CLI + +Follow the steps in [rancher cli](../../pages-for-subheaders/cli-with-rancher.md). + +Ensure you can run `rancher kubectl get pods` successfully. + + +## kubectl +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + + +Configure kubectl by visiting your cluster in the Rancher Web UI then clicking on `Kubeconfig`, copying contents and putting into your `~/.kube/config` file. + +Run `kubectl cluster-info` or `kubectl get pods` successfully. + +## Authentication with kubectl and kubeconfig Tokens with TTL + +_**Available as of v2.4.6**_ + +_Requirements_ + +If admins have [enforced TTL on kubeconfig tokens](../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher cli](cli.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see error like: +`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. + +This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: + +1. Local +2. Active Directory +3. FreeIpa, OpenLdap +4. SAML providers - Ping, Okta, ADFS, Keycloak, Shibboleth + +When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. +The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid till [it expires](../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../../reference-guides/about-the-api/api-tokens.md#deleting-tokens) +Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. + +_Note_ + +As of CLI [v2.4.10](https://github.com/ranchquick-start-guide/cli/releases/tag/v2.4.10), the kubeconfig token can be cached at a chosen path with `cache-dir` flag or env var `RANCHER_CACHE_DIR`. + +_**Current Known Issues**_ + +1. 
If [authorized cluster endpoint](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) is enabled for RKE clusters to [authenticate directly with downstream cluster](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) and Rancher server goes down, all kubectl calls will fail after the kubeconfig token expires. No new kubeconfig tokens can be generated if Rancher server isn't accessible. +2. If a kubeconfig token is deleted from Rancher [API tokens]({{}}/rancher/v2.0-v2api/api-tokens/#deleting-tokens) page, and the token is still cached, cli won't ask you to login again until the token expires or is deleted. +`kubectl` calls will result into an error like `error: You must be logged in to the server (the server has asked for the client to provide credentials`. Tokens can be deleted using `rancher token delete`. diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md new file mode 100644 index 0000000000..a7b829710d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/aws.md @@ -0,0 +1,68 @@ +--- +title: Rancher AWS Quick Start Guide +description: Read this step by step Rancher AWS guide to quickly deploy a Rancher Server with a single node cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher Server on AWS with a single node cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Amazon AWS will incur charges. + +- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. +- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. +- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +1. Go into the AWS folder containing the terraform files by executing `cd quickstart/aws`. + +1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +1. Edit `terraform.tfvars` and customize the following variables: + - `aws_access_key` - Amazon AWS Access Key + - `aws_secret_key` - Amazon AWS Secret Key + - `rancher_server_admin_password` - Admin password for created Rancher server + +1. **Optional:** Modify optional variables within `terraform.tfvars`. 
+See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. +Suggestions include: + - `aws_region` - Amazon AWS region, choose the closest instead of the default + - `prefix` - Prefix for all created resources + - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget + +1. Run `terraform init`. + +1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). + +#### Result + +Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/aws` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md new file mode 100644 index 0000000000..db6e0c89eb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/azure.md @@ -0,0 +1,74 @@ +--- +title: Rancher Azure Quick Start Guide +description: Read this step by step Rancher Azure guide to quickly deploy a Rancher Server with a single node cluster attached. +weight: 100 +--- + +The following steps will quickly deploy a Rancher server on Azure in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Microsoft Azure will incur charges. + +- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. +- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. 
+- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. +- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +1. Go into the Azure folder containing the terraform files by executing `cd quickstart/azure`. + +1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +1. Edit `terraform.tfvars` and customize the following variables: + - `azure_subscription_id` - Microsoft Azure Subscription ID + - `azure_client_id` - Microsoft Azure Client ID + - `azure_client_secret` - Microsoft Azure Client Secret + - `azure_tenant_id` - Microsoft Azure Tenant ID + - `rancher_server_admin_password` - Admin password for created Rancher server + +2. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. +Suggestions include: + - `azure_location` - Microsoft Azure region, choose the closest instead of the default + - `prefix` - Prefix for all created resources + - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget + - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) + +1. Run `terraform init`. + +1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). + +#### Result + +Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/azure` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. 
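As a reference for the `terraform.tfvars` edits in the Getting Started steps above, a minimal sketch might look like the following. All values are illustrative placeholders; see the Azure Quickstart Readme for the authoritative variable list:

```
azure_subscription_id         = "00000000-0000-0000-0000-000000000000"
azure_client_id               = "00000000-0000-0000-0000-000000000000"
azure_client_secret           = "<client-secret>"
azure_tenant_id               = "00000000-0000-0000-0000-000000000000"
rancher_server_admin_password = "<a-strong-password>"

# Optional overrides
azure_location = "West Europe"
prefix         = "quickstart"
```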
diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md new file mode 100644 index 0000000000..9dc4553dcc --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md @@ -0,0 +1,68 @@ +--- +title: Rancher DigitalOcean Quick Start Guide +description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher Server with a single node cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher Server on DigitalOcean with a single node cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to DigitalOcean will incur charges. + +- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. +- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +1. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/do`. + +1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +1. Edit `terraform.tfvars` and customize the following variables: + - `do_token` - DigitalOcean access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +1. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. +Suggestions include: + - `do_region` - DigitalOcean region, choose the closest instead of the default + - `prefix` - Prefix for all created resources + - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget + - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) + +1. Run `terraform init`. + +1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +1. Paste the `rancher_server_url` from the output above into the browser. 
Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). + +#### Result + +Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/do` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md new file mode 100644 index 0000000000..2e8838ced5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md @@ -0,0 +1,69 @@ +--- +title: Rancher GCP Quick Start Guide +description: Read this step by step Rancher GCP guide to quickly deploy a Rancher Server with a single node cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on GCP in a single-node RKE Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Google GCP will incur charges. + +- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. +- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. +- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +1. Go into the GCP folder containing the terraform files by executing `cd quickstart/gcp`. + +1. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +1. Edit `terraform.tfvars` and customize the following variables: + - `gcp_account_json` - GCP service account file path and file name + - `rancher_server_admin_password` - Admin password for created Rancher server + +1. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. 
+Suggestions include: + - `gcp_region` - Google GCP region, choose the closest instead of the default + - `prefix` - Prefix for all created resources + - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget + - `ssh_key_file_name` - Use a specific SSH key instead of `~/.ssh/id_rsa` (public key is assumed to be `${ssh_key_file_name}.pub`) + +1. Run `terraform init`. + +1. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +1. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). + +#### Result + +Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/gcp` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md new file mode 100644 index 0000000000..ad92057697 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md @@ -0,0 +1,118 @@ +--- +title: Manual Quick Start +weight: 300 +--- +Howdy Partner! This tutorial walks you through: + +- Installation of Rancher 2.x +- Creation of your first cluster +- Deployment of an application, Nginx + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Quick Start Outline + +This Quick Start Guide is divided into different tasks for easier consumption. + + + + +1. [Provision a Linux Host](#1-provision-a-linux-host) + +1. [Install Rancher](#2-install-rancher) + +1. [Log In](#3-log-in) + +1. [Create the Cluster](#4-create-the-cluster) + + +
    +### 1. Provision a Linux Host + + Begin creation of a custom cluster by provisioning a Linux host. Your host can be: + +- A cloud-host virtual machine (VM) +- An on-prem VM +- A bare-metal server + + >**Note:** + > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration. + > + > For a full list of port requirements, refer to [Docker Installation](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md). + + Provision the host according to our [Requirements](../../../pages-for-subheaders/installation-requirements.md). + +### 2. Install Rancher + +To install Rancher on your host, connect to it and then use a shell to install. + +1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. + +2. From your shell, enter the following command: + + ``` + sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher + ``` + +**Result:** Rancher is installed. + +### 3. Log In + +Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations. + +1. Open a web browser and enter the IP address of your host: `https://`. + + Replace `` with your host IP address. + +2. When prompted, create a password for the default `admin` account there cowpoke! + +3. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.

    If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster. +
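    A quick way to verify this from a node you plan to add (the hostname below is a placeholder for your own Rancher Server URL):

    ```
    # Confirm the Rancher hostname resolves on this node
    nslookup rancher.example.com

    # Confirm the node can reach the Rancher server over HTTPS
    curl -kI https://rancher.example.com
    ```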
    + +### 4. Create the Cluster + +Welcome to Rancher! You are now able to create your first Kubernetes cluster. + +In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. + +1. From the **Clusters** page, click **Add Cluster**. + +2. Choose **Custom**. + +3. Enter a **Cluster Name**. + +4. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. + +5. Click **Next**. + +6. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. + +7. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. + +8. Skip the **Labels** stuff. It's not important for now. + +9. Copy the command displayed on screen to your clipboard. + +10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. + +11. When you finish running the command on your Linux host, click **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +#### Finished + +Congratulations! You have created your first cluster. + +#### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md new file mode 100644 index 0000000000..b6e1b1d2c8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md @@ -0,0 +1,47 @@ +--- +title: Vagrant Quick Start +weight: 200 +--- +The following steps quickly deploy a Rancher Server with a single node cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. +- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. +- At least 4GB of free RAM. + +### Note +- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: + + `vagrant plugin install vagrant-vboxmanage` + + `vagrant plugin install vagrant-vbguest` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. 
Go into the folder containing the Vagrantfile by executing `cd quickstart/vagrant`. + +3. **Optional:** Edit `config.yaml` to: + + - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) + - Change the password of the `admin` user for logging into Rancher. (`default_password`) + +4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. + +5. Once provisioning finishes, go to `https://172.22.101.101` in the browser. The default user/password is `admin/admin`. + +**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/vagrant` folder execute `vagrant destroy -f`. + +2. Wait for the confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md new file mode 100644 index 0000000000..080f324f05 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/nodeports.md @@ -0,0 +1,156 @@ +--- +title: Workload with NodePort Quick Start +weight: 200 +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. + +3. Open the **Project: Default** project. + +4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** + +5. Click **Deploy**. + + **Step Result:** The **Deploy Workload** page opens. + +6. Enter a **Name** for your workload. + +7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. + +8. From **Port Mapping**, click **Add Port**. + +9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. + + ![As a dropdown, NodePort (On every node selected)](/img/nodeport-dropdown.png) + +10. From the **On Listening Port** field, leave the **Random** value in place. + + ![On Listening Port, Random selected](/img/listening-port-field.png) + +11. From the **Publish the container port** field, enter port `80`. + + ![Publish the container port, 80 entered](/img/container-port-field.png) + +12. Leave the remaining options on their default setting. We'll tell you about them later. + +13. Click **Launch**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. + +
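If you prefer to double-check from the command line, and assuming `kubectl` is already configured for this cluster, the deployment and the NodePort assigned to it can be listed directly (names will match whatever you entered above):

```
# List workloads and their status in the default namespace
kubectl -n default get deployments

# Show the NodePort that was assigned to the service
kubectl -n default get services
```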
    + +### 2. Viewing Your Application + +From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens. + +### Attention: Cloud-Hosted Sandboxes + +When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test Nginx in an ssh session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. + +```sh +gettingstarted@rancher:~$ curl http://localhost:31568 + + + + Rancher + + + + + +

    Hello world!

    My hostname is hello-world-66b4b9d88b-78bhx

    k8s services found 2

    INGRESS_D1E1A394F61C108633C4BD37AEDDE757 tcp://10.43.203.31:80
    KUBERNETES tcp://10.43.0.1:443
    + + + +gettingstarted@rancher:~$ + +``` + +### Finished + +Congratulations! You have successfully deployed a workload exposed via a NodePort. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md new file mode 100644 index 0000000000..e287a0d594 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md @@ -0,0 +1,82 @@ +--- +title: Workload with Ingress Quick Start +weight: 100 +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. + +3. Open the **Project: Default** project. + +4. Click **Resources > Workloads.** In versions before v2.3.0, click **Workloads > Workloads.** + +5. Click **Deploy**. + + **Step Result:** The **Deploy Workload** page opens. + +6. Enter a **Name** for your workload. + +7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. + +8. Leave the remaining options on their default setting. We'll tell you about them later. + +9. Click **Launch**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. + +
    +### 2. Expose The Application Via An Ingress + +Now that the application is up and running it needs to be exposed so that other services can connect. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects**. + +3. Open the **Default** project. + +4. Click **Resources > Workloads > Load Balancing.** In versions before v2.3.0, click the **Workloads** tab. Click on the **Load Balancing** tab. + +5. Click **Add Ingress**. + +6. Enter a name i.e. **hello**. + +7. In the **Target** field, drop down the list and choose the name that you set for your service. + +8. Enter `80` in the **Port** field. + +9. Leave everything else as default and click **Save**. + +**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. + +### View Your Application + +From the **Load Balancing** page, click the target link, which will look something like `hello.default.xxx.xxx.xxx.xxx.sslip.io > hello-world`. + +Your application will open in a separate window. + +#### Finished + +Congratulations! You have successfully deployed a workload exposed via an ingress. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides.md b/versioned_docs/version-2.0-2.4/how-to-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md new file mode 100644 index 0000000000..687c161585 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md @@ -0,0 +1,199 @@ +--- +title: Configuring Active Directory (AD) +weight: 1112 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/active-directory/ +--- + +If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. + +Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication](../../../../../pages-for-subheaders/configure-openldap.md) integration. 
+ +> **Note:** +> +> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +## Prerequisites + +You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. + +Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. + +Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. + +> **Using TLS?** +> +> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +## Configuration Steps +### Open Active Directory Configuration + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **Active Directory**. The **Configure an AD server** form will be displayed. + +### Configure Active Directory Server Settings + +In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. + +> **Note:** +> +> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). + +**Table 1: AD Server parameters** + +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the AD server | +| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | +| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | +| Service Account Password | The password for the service account. | +| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. 
| +| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| + +--- + +### Configure User/Group Schema + +In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. + +Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. + +> **Note:** +> +> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. + +#### User Schema + +The table below details the parameters for the user schema section configuration. + +**Table 2: User schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | +| User Member Attribute | The attribute containing the groups that a user is a member of. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | +| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | +| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. 
| +| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | + +--- + +#### Group Schema + +The table below details the parameters for the group schema configuration. + +**Table 3: Group schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | +| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing a the user's memberships. See `User Member Attribute`. | +| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organisation makes use of these nested memberships (ie. you have groups that contain other groups as members. We advise avoiding nested groups when possible). | + +--- + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly with the account you test with set as admin. + +> **Note:** +> +> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. + +1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. +2. Click **Authenticate with Active Directory** to finalise the setup. + +**Result:** + +- Active Directory authentication has been enabled. +- You have been signed into Rancher as administrator using the provided AD credentials. + +> **Note:** +> +> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. 
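If you enabled TLS and still need the CA certificate chain mentioned in the Prerequisites, one way to inspect what the AD server presents is with `openssl` (hostname and port are placeholders for your LDAPS endpoint):

```
# Display the certificate chain offered by the AD server over LDAPS
openssl s_client -connect ad.acme.com:636 -showcerts </dev/null
```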
+ +## Annex: Identify Search Base and Schema using ldapsearch + +In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server. + +The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects. + +For the purpose of the example commands provided below we will assume: + +- The Active Directory server has a hostname of `ad.acme.com` +- The server is listening for unencrypted connections on port `389` +- The Active Directory domain is `acme` +- You have a valid AD account with the username `jdoe` and password `secret` + +### Identify Search Base + +First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups: + +``` +$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ +-h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe" +``` + +This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountNam=jdoe`), returning the attributes for said user: + +![](/img/ldapsearch-user.png) + +Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`. + +Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, ie. `OU=Groups,DC=acme,DC=com`. + +### Identify User Schema + +The output of the above `ldapsearch` query also allows to determine the correct values to use in the user schema configuration: + +- `Object Class`: **person** [1] +- `Username Attribute`: **name** [2] +- `Login Attribute`: **sAMAccountName** [3] +- `User Member Attribute`: **memberOf** [4] + +> **Note:** +> +> If the AD users in our organisation were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead. + +We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name. + +### Identify Group Schema + +Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`: + +``` +$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \ +-h ad.acme.com -b "ou=groups,dc=acme,dc=com" \ +-s sub "CN=examplegroup" +``` + +This command will inform us on the attributes used for group objects: + +![](/img/ldapsearch-group.png) + +Again, this allows us to determine the correct values to enter in the group schema configuration: + +- `Object Class`: **group** [1] +- `Name Attribute`: **name** [2] +- `Group Member Mapping Attribute`: **member** [3] +- `Search Attribute`: **sAMAccountName** [4] + +Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly will have to set the value of the `Group Member User Attribute` parameter to this attribute. 
+ +In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md new file mode 100644 index 0000000000..4463db7b0d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md @@ -0,0 +1,209 @@ +--- +title: Configuring Azure AD +weight: 1115 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/azure-ad/ +--- + +_Available as of v2.0.3_ + +If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. + +>**Note:** Azure AD integration only supports Service Provider initiated logins. + +>**Prerequisite:** Have an instance of Azure AD configured. + +>**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). + +## Azure Active Directory Configuration Outline + +Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. + + + +>**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. + + + +- [1. Register Rancher with Azure](#1-register-rancher-with-azure) +- [2. Create a new client secret](#2-create-a-new-client-secret) +- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) +- [4. Add a Reply URL](#4-add-a-reply-url) +- [5. Copy Azure Application Data](#5-copy-azure-application-data) +- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) + + + +### 1. Register Rancher with Azure + +Before enabling Azure AD within Rancher, you must register Rancher with Azure. + +1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. + +1. Use search to open the **App registrations** service. + + ![Open App Registrations](/img/search-app-registrations.png) + +1. Click **New registrations** and complete the **Create** form. + + ![New App Registration](/img/new-app-registration.png) + + 1. 
Enter a **Name** (something like `Rancher`). + + 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. + + 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. + + >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + + 1. Click **Register**. + +>**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +### 2. Create a new client secret + +From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. + +1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. + + ![Open Rancher Registration](/img/open-rancher-app.png) + +1. From the navigation pane on left, click **Certificates and Secrets**. + +1. Click **New client secret**. + + ![Create new client secret](/img/select-client-secret.png) + + 1. Enter a **Description** (something like `Rancher`). + + 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. + + 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). + + +1. Copy the key value and save it to an [empty text file](#tip). + + You'll enter this key into the Rancher UI later as your **Application Secret**. + + You won't be able to access the key value again within the Azure UI. + +### 3. Set Required Permissions for Rancher + +Next, set API permissions for Rancher within Azure. + +1. From the navigation pane on left, select **API permissions**. + + ![Open Required Permissions](/img/select-required-permissions.png) + +1. Click **Add a permission**. + +1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: + + ![Select API Permissions](/img/select-required-permissions-2.png) + +
    +
    + - **Access the directory as the signed-in user** + - **Read directory data** + - **Read all groups** + - **Read all users' full profiles** + - **Read all users' basic profiles** + - **Sign in and read user profile** + +1. Click **Add permissions**. + +1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. + + >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. + + +### 4. Add a Reply URL + +To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. + + +1. From the **Setting** blade, select **Reply URLs**. + + ![Azure: Enter Reply URL](/img/enter-azure-reply-url.png) + +1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. + + >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + +1. Click **Save**. + +**Result:** Your reply URL is saved. + +>**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +### 5. Copy Azure Application Data + +As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. + +1. Obtain your Rancher **Tenant ID**. + + 1. Use search to open the **Azure Active Directory** service. + + ![Open Azure Active Directory](/img/search-azure-ad.png) + + 1. From the left navigation pane, open **Overview**. + + 2. Copy the **Directory ID** and paste it into your [text file](#tip). + + You'll paste this value into Rancher as your **Tenant ID**. + +1. Obtain your Rancher **Application ID**. + + 1. Use search to open **App registrations**. + + ![Open App Registrations](/img/search-app-registrations.png) + + 1. Find the entry you created for Rancher. + + 1. Copy the **Application ID** and paste it to your [text file](#tip). + +1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. + + 1. From **App registrations**, click **Endpoints**. + + ![Click Endpoints](/img/click-endpoints.png) + + 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). + + - **Microsoft Graph API endpoint** (Graph Endpoint) + - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) + - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) + +>**Note:** Copy the v1 version of the endpoints + +### 6. Configure Azure AD in Rancher + +From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. + +Enter the values that you copied to your [text file](#tip). + +1. Log into Rancher. From the **Global** view, select **Security > Authentication**. + +1. Select **Azure AD**. + +1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). + + >**Important:** When entering your Graph Endpoint, remove the tenant ID from the URL, like below. + > + >https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c + + The following table maps the values you copied in the Azure portal to the fields in Rancher. 
+ + | Rancher Field | Azure Value | + | ------------------ | ------------------------------------- | + | Tenant ID | Directory ID | + | Application ID | Application ID | + | Application Secret | Key Value | + | Endpoint | https://login.microsoftonline.com/ | + | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | + | Token Endpoint | OAuth 2.0 Token Endpoint | + | Auth Endpoint | OAuth 2.0 Authorization Endpoint | + +1. Click **Authenticate with Azure**. + +**Result:** Azure Active Directory authentication is configured. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md new file mode 100644 index 0000000000..0e9f006970 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md @@ -0,0 +1,56 @@ +--- +title: Configuring FreeIPA +weight: 1114 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/freeipa/ +--- + +_Available as of v2.0.5_ + +If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. + +>**Prerequisites:** +> +>- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. +>- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. +>- Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). + +2. From the **Global** view, select **Security > Authentication** from the main menu. + +3. Select **FreeIPA**. + +4. Complete the **Configure an FreeIPA server** form. + + You may need to log in to your domain controller to find the information requested in the form. + + >**Using TLS?** + >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. +
    +
    + >**User Search Base vs. Group Search Base** + > + >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. + > + >* If your users and groups are in the same search base, complete only the User Search Base. + >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. + +5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. + + >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. + > + >The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. + > + > * `uid`: User ID + > * `sn`: Last Name + > * `givenName`: First Name + > + > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. + +6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. + +**Result:** + +- FreeIPA authentication is configured. +- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md new file mode 100644 index 0000000000..6ba4fd5f04 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md @@ -0,0 +1,53 @@ +--- +title: Configuring GitHub +weight: 1116 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/github/ +--- + +In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. + +>**Prerequisites:** Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). + +2. From the **Global** view, select **Security > Authentication** from the main menu. + +3. Select **GitHub**. + +4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. + + >**What's an Authorization Callback URL?** + > + >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). + + >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). 
After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. + +5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. + + >**Where do I find the Client ID and Client Secret?** + > + >From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. + +6. Click **Authenticate with GitHub**. + +7. Use the **Site Access** options to configure the scope of user authorization. + + - **Allow any valid Users** + + _Any_ GitHub user can access Rancher. We generally discourage use of this setting! + + - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** + + Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. + + - **Restrict access to only Authorized Users and Organizations** + + Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. +
    +8. Click **Save**. + +**Result:** + +- GitHub authentication is configured. +- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md new file mode 100644 index 0000000000..23a8381545 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md @@ -0,0 +1,106 @@ +--- +title: Configuring Google OAuth +--- +_Available as of v2.3.0_ + +If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. + +Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. + +Within Rancher, only administrators or users with the **Manage Authentication** [global role](../../manage-role-based-access-control-rbac/global-permissions.md) can configure authentication. + +# Prerequisites +- You must have a [G Suite admin account](https://admin.google.com) configured. +- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. +- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) + +After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: +![Enable Admin APIs](/img/Google-Enable-APIs-Screen.png) + +# Setting up G Suite for OAuth with Rancher +Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: + +1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) +1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) +1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) +1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) + +### 1. Adding Rancher as an Authorized Domain +1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. +1. Select your project and click **OAuth consent screen.** +![OAuth Consent Screen](/img/Google-OAuth-consent-screen-tab.png) +1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. 
For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) +1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. + +**Result:** Rancher has been added as an authorized domain for the Admin SDK API. + +### 2. Creating OAuth2 Credentials for the Rancher Server +1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) +![Credentials](/img/Google-Credentials-tab.png) +1. On the **Create Credentials** dropdown, select **OAuth client ID.** +1. Click **Web application.** +1. Provide a name. +1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. + - Under **Authorized JavaScript origins,** enter your Rancher server URL. + - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. +1. Click on **Create.** +1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. + +**Result:** Your OAuth credentials have been successfully created. + +### 3. Creating Service Account Credentials +Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. + +Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. + +As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. + +This section describes how to: + +- Create a service account +- Create a key for the service account and download the credentials as JSON + +1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. +1. Click on **Create Service Account.** +1. Enter a name and click **Create.** +![Service account creation Step 1](/img/Google-svc-acc-step1.png) +1. Don't provide any roles on the **Service account permissions** page and click **Continue** +![Service account creation Step 2](/img/Google-svc-acc-step2.png) +1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. +![Service account creation Step 3](/img/Google-svc-acc-step3-key-creation.png) + +**Result:** Your service account is created. + +### 4. Register the Service Account Key as an OAuth Client + +You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. 
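The registration steps below use the numeric Unique ID of the service account. If you kept the key JSON you downloaded in the previous section, you can usually read that number directly from the file instead of looking it up in the console. This is only a sketch; it assumes the key was saved as `service-account-key.json` and that the `jq` utility is installed:

```
# Print the numeric client ID from the downloaded key file.
# Assumes the file is named service-account-key.json and that jq is installed.
$ jq -r '.client_id' service-account-key.json
```

The printed value should match the Unique ID shown in the console and is the value to register as an OAuth client in the steps that follow.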
Using the Unique ID of the service account key, register it as an OAuth client using the following steps:

1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.**

    ![Service account Unique ID](/img/Google-Select-UniqueID-column.png)
1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients)
1. Add the Unique ID obtained in the previous step to the **Client Name** field.
1. In the **One or More API Scopes** field, add the following scopes:
    ```
    openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly
    ```
1. Click **Authorize.**

**Result:** The service account is registered as an OAuth client in your G Suite account.

# Configuring Google OAuth in Rancher
1. Sign into Rancher using a local user assigned the [administrator](../../manage-role-based-access-control-rbac/global-permissions.md) role. This user is also called the local principal.
1. From the **Global** view, click **Security > Authentication** from the main menu.
1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth.
    1. Admin Email: Provide the email of an administrator account from your G Suite setup. In order to perform user and group lookups, the Google APIs require an administrator's email in conjunction with the service account key.
    1. Domain: Provide the domain on which you have configured G Suite. Provide the exact domain and not any aliases.
    1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring authentication.
    - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain)
    - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field.
    - For **Step Three,** provide the service account credentials JSON that you downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account.
1. Click **Authenticate with Google**.
1. Click **Save**.

**Result:** Google authentication is successfully configured.
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md new file mode 100644 index 0000000000..abc22cb502 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md @@ -0,0 +1,126 @@ +--- +title: Configuring Keycloak (SAML) +description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins +weight: 1200 +--- +_Available as of v2.1.0_ + +If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +## Prerequisites + +- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. +- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. + + Setting | Value + ------------|------------ + `Sign Documents` | `ON` 1 + `Sign Assertions` | `ON` 1 + All other `ON/OFF` Settings | `OFF` + `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 + `Client Name` | (e.g. `rancher`) + `Client Protocol` | `SAML` + `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` + + >1: Optionally, you can enable either one or both of these settings. + >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. + + ![](/img/keycloak/keycloak-saml-client-configuration.png) + +- In the new SAML client, create Mappers to expose the users fields + - Add all "Builtin Protocol Mappers" + ![](/img/keycloak/keycloak-saml-client-builtin-mappers.png) + - Create a new "Group list" mapper to map the member attribute to a user's groups + ![](/img/keycloak/keycloak-saml-client-group-mapper.png) +- Export a `metadata.xml` file from your Keycloak client: + From the `Installation` tab, choose the `SAML Metadata IDPSSODescriptor` format option and download your file. + + >**Note** + > Keycloak versions 6.0.0 and up no longer provide the IDP metadata under the `Installation` tab. + > You can still get the XML from the following url: + > + > `https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor` + > + > The XML obtained from this URL contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: + > + > * Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. + > * Remove the `` tag from the beginning. + > * Remove the `` from the end of the xml. 
>
> You are left with something similar to the example below:
>
> ```
> <EntityDescriptor xmlns="urn:oasis:names:tc:SAML:2.0:metadata" entityID="https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}">
>   ....
> </EntityDescriptor>
> ```

## Configuring Keycloak in Rancher

1. From the **Global** view, select **Security > Authentication** from the main menu.

1. Select **Keycloak**.

1. Complete the **Configure Keycloak Account** form.

| Field | Description |
| ------------------------- | ------------------------------------------------------------------------------ |
| Display Name Field | The attribute that contains the display name of users. <br/><br/> Example: `givenName` |
| User Name Field | The attribute that contains the user name/given name. <br/><br/> Example: `email` |
| UID Field | An attribute that is unique to every user. <br/><br/> Example: `email` |
| Groups Field | Make entries for managing group memberships. <br/><br/> Example: `member` |
| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client. <br/><br/>
    Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | + | Rancher API Host | The URL for your Rancher Server. | + | Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | + | IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | + + >**Tip:** You can generate a key/certificate pair using an openssl command. For example: + > + > openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert + + +1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. + + >**Note:** You may have to disable your popup blocker to see the IdP login page. + +**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. + +{{< saml_caveats >}} + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. + +### You are not redirected to Keycloak + +When you click on **Authenticate with Keycloak**, your are not redirected to your IdP. + + * Verify your Keycloak client configuration. + * Make sure `Force Post Binding` set to `OFF`. + + +### Forbidden message displayed after IdP login + +You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. + + * Check the Rancher debug log. + * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. + +### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata + +This is usually due to the metadata not being created until a SAML provider is configured. +Try configuring and saving keycloak as your SAML provider and then accessing the metadata. + +### Keycloak Error: "We're sorry, failed to process response" + + * Check your Keycloak log. + * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. + +### Keycloak Error: "We're sorry, invalid requester" + + * Check your Keycloak log. + * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. 
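More generally, when Rancher rejects or fails to parse the IdP metadata, it can help to look at the descriptor Keycloak actually serves. The following sketch assumes `curl` is available and uses the same `{KEYCLOAK-URL}` and `{REALM-NAME}` placeholders as the note earlier on this page:

```
# Fetch the realm's SAML descriptor and print the beginning of the document,
# which shows the root element that Rancher will receive.
$ curl -s "https://{KEYCLOAK-URL}/auth/realms/{REALM-NAME}/protocol/saml/descriptor" | head -c 300
```

If the output begins with `EntitiesDescriptor`, convert the document to a single `EntityDescriptor` root element as described in the note above before providing it to Rancher.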
diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/okta/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/admin-settings/authentication/okta/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/ping-federate/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/admin-settings/authentication/ping-federate/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md diff --git a/content/rancher/v2.0-v2.4/en/admin-settings/authentication/local/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/admin-settings/authentication/local/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md new file mode 100644 index 0000000000..3c01a373a5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md @@ -0,0 +1,64 @@ +--- +title: Users and Groups +weight: 1 +--- + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. + +Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control](../../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md). + +## Managing Members + +When adding a user or group to a resource, you can search for users or groups by beginning to type their name. 
The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local](create-local-users.md) user account, you will not be able to search for GitHub users or groups. + +All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. + +{{< saml_caveats >}} + +## User Information + +Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. + +Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. + +### Automatically Refreshing User Information + +_Available as of v2.2.0_ + +Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: + +- **`auth-user-info-max-age-seconds`** + + This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. + +- **`auth-user-info-resync-cron`** + + This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. + + +> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. + +### Manually Refreshing User Information + +If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. + +1. From the **Global** view, click on **Users** in the navigation bar. + +1. Click on **Refresh Group Memberships**. + +**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. + +>**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. + + +## Session Length + +_Available as of v2.3.0_ + +The default length (TTL) of each user session is adjustable. The default session length is 16 hours. + +1. From the **Global** view, click on **Settings**. +1. 
In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** +1. Enter the amount of time in minutes a session length should last and click **Save.** + +**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md new file mode 100644 index 0000000000..13c946a666 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md @@ -0,0 +1,82 @@ +--- +title: 1. Configuring Microsoft AD FS for Rancher +weight: 1205 +--- + +Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. + +1. Log into your AD server as an administrative user. + +1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. + + ![](/img/adfs/adfs-overview.png) + +1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. + + ![](/img/adfs/adfs-add-rpt-2.png) + +1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. + + ![](/img/adfs/adfs-add-rpt-3.png) + +1. Select **AD FS profile** as the configuration profile for your relying party trust. + + ![](/img/adfs/adfs-add-rpt-4.png) + +1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. + + ![](/img/adfs/adfs-add-rpt-5.png) + +1. Select **Enable support for the SAML 2.0 WebSSO protocol** + and enter `https:///v1-saml/adfs/saml/acs` for the service URL. + + ![](/img/adfs/adfs-add-rpt-6.png) + +1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. + + ![](/img/adfs/adfs-add-rpt-7.png) + +1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. + + ![](/img/adfs/adfs-add-rpt-8.png) + +1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. + + ![](/img/adfs/adfs-add-rpt-9.png) + +1. After reviewing your settings, select **Next** to add the relying party trust. + + ![](/img/adfs/adfs-add-rpt-10.png) + + +1. Select **Open the Edit Claim Rules...** and click **Close**. + + ![](/img/adfs/adfs-add-rpt-11.png) + +1. On the **Issuance Transform Rules** tab, click **Add Rule...**. + + ![](/img/adfs/adfs-edit-cr.png) + +1. Select **Send LDAP Attributes as Claims** as the **Claim rule template**. + + ![](/img/adfs/adfs-add-tcr-1.png) + +1. 
Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: + + | LDAP Attribute | Outgoing Claim Type | + | -------------------------------------------- | ------------------- | + | Given-Name | Given Name | + | User-Principal-Name | UPN | + | Token-Groups - Qualified by Long Domain Name | Group | + | SAM-Account-Name | Name | +
    + ![](/img/adfs/adfs-add-tcr-2.png) + +1. Download the `federationmetadata.xml` from your AD server at: +``` +https:///federationmetadata/2007-06/federationmetadata.xml +``` + +**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. + +### [Next: Configuring Rancher for Microsoft AD FS](configure-rancher-for-ms-adfs.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md new file mode 100644 index 0000000000..11deb4d2de --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md @@ -0,0 +1,56 @@ +--- +title: 2. Configuring Rancher for Microsoft AD FS +weight: 1205 +--- +_Available as of v2.0.7_ + +After you complete [Configuring Microsoft AD FS for Rancher](configure-ms-adfs-for-rancher.md), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. + +>**Important Notes For Configuring Your AD FS Server:** +> +>- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` +>- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` +>- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` + + +1. From the **Global** view, select **Security > Authentication** from the main menu. + +1. Select **Microsoft Active Directory Federation Services**. + +1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. + + + + + + + + +1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. + + Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. + + >**Note:** You may have to disable your popup blocker to see the AD FS login page. + +**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. + +# Configuration + +| Field | Description | +|---------------------------|-----------------| +| Display Name Field | The AD attribute that contains the display name of users.
<br/><br/> Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` |
| User Name Field | The AD attribute that contains the user name/given name. <br/><br/> Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` |
| UID Field | An AD attribute that is unique to every user. <br/><br/> Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
| Groups Field | Make entries for managing group memberships. <br/><br/> Example: `http://schemas.xmlsoap.org/claims/Group` |
| Rancher API Host | The URL for your Rancher Server. |
| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL. <br/><br/> [Certificate creation command](#cert-command) |
| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server. <br/><br/>
    You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | + + + + +**Tip:** You can generate a certificate using an openssl command. For example: + +``` +openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md new file mode 100644 index 0000000000..e1bca2e6d0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md @@ -0,0 +1,34 @@ +--- +title: Group Permissions with Shibboleth and OpenLDAP +weight: 1 +--- + +_Available as of Rancher v2.4_ + +This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. + +Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. + +One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. + +### Terminology + +- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships. +- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. +- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. +- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. + +### Adding OpenLDAP Group Permissions to Rancher Resources + +The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. + +For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions view most cluster level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. + +In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning persmissions. Without OpenLDAP, the functionality to search for groups would not be supported. + +When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password. 
+ +Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. + +![Adding OpenLDAP Group Permissions to Rancher Resources](/img/shibboleth-with-openldap-groups.svg) + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md new file mode 100644 index 0000000000..e0a4f92032 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md @@ -0,0 +1,44 @@ +--- +title: Cluster Drivers +weight: 1 +--- + +_Available as of v2.2.0_ + +Cluster drivers are used to create clusters in a [hosted Kubernetes provider](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. + +If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. + +### Managing Cluster Drivers + +>**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Cluster Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +## Activating/Deactivating Cluster Drivers + +By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. + +2. From the **Drivers** page, select the **Cluster Drivers** tab. + +3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. + +## Adding Custom Cluster Drivers + +If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. + +2. From the **Drivers** page select the **Cluster Drivers** tab. + +3. Click **Add Cluster Driver**. + +4. Complete the **Add Cluster Driver** form. Then click **Create**. 
+ + +### Developing your own Cluster Driver + +In order to develop cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md new file mode 100644 index 0000000000..c47ddbc51b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md @@ -0,0 +1,40 @@ +--- +title: Node Drivers +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/node-drivers/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/node-drivers/ +--- + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. + +#### Managing Node Drivers + +>**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Node Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +## Activating/Deactivating Node Drivers + +By default, Rancher only activates drivers for the most popular cloud providers, Amazon EC2, Azure, DigitalOcean and vSphere. If you want to show or hide any node driver, you can change its status. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. + +2. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. + +## Adding Custom Node Drivers + +If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver in order to start using them to create node templates and eventually node pools for your Kubernetes cluster. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. In version before v2.2.0, you can select **Node Drivers** directly in the navigation bar. + +2. Click **Add Node Driver**. + +3. Complete the **Add Node Driver** form. Then click **Create**. 
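The form asks for a download URL pointing at the driver binary, and depending on your Rancher version it may also accept an optional checksum used to verify the downloaded file. If you build or host the binary yourself, you can compute that value locally. The sketch below uses `docker-machine-driver-example` as a purely hypothetical binary name:

```
# Compute the SHA-256 checksum of a locally built Docker Machine driver binary.
# "docker-machine-driver-example" is a hypothetical name used for illustration only.
$ sha256sum docker-machine-driver-example
```

Publish the binary at a URL that Rancher can reach, and paste that URL (and the checksum, if your Rancher version exposes the field) into the form.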
+ +### Developing your own node driver + +Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md new file mode 100644 index 0000000000..aa435277b7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md @@ -0,0 +1,61 @@ +--- +title: Access and Sharing +weight: 31 +--- + +If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. + +Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. + +When you share a template, each user can have one of two access levels: + +- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. +- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. + +If you create a template, you automatically become an owner of that template. + +If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.](manage-rke1-templates.md) + +There are several ways to share templates: + +- Add users to a new RKE template during template creation +- Add users to an existing RKE template +- Make the RKE template public, sharing it with all users in the Rancher setup +- Share template ownership with users who are trusted to modify the template + +### Sharing Templates with Specific Users or Groups + +To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **⋮ > Edit.** +1. In the **Share Template** section, click on **Add Member**. +1. Search in the **Name** field for the user or group you want to share the template with. +1. Choose the **User** access type. +1. Click **Save.** + +**Result:** The user or group can create clusters using the template. + +### Sharing Templates with All Users + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **⋮ > Edit.** +1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** + +**Result:** All users in the Rancher setup can create clusters using the template. + +### Sharing Ownership of Templates + +If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. + +In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. 
+ +To give Owner access to a user or group, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to share and click the **⋮ > Edit.** +1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. +1. In the **Access Type** field, click **Owner.** +1. Click **Save.** + +**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md new file mode 100644 index 0000000000..6d59d6af2a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md @@ -0,0 +1,63 @@ +--- +title: Applying Templates +weight: 50 +--- + +You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.](access-or-share-templates.md) + +RKE templates can be applied to new clusters. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +This section covers the following topics: + +- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) +- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) +- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) + +### Creating a Cluster from an RKE Template + +To add a cluster [hosted by an infrastructure provider](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using an RKE template, use these steps: + +1. From the **Global** view, go to the **Clusters** tab. +1. Click **Add Cluster** and choose the infrastructure provider. +1. Provide the cluster name and node template details as usual. +1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** +1. Choose an existing template and revision from the dropdown menu. +1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. +1. Click **Save** to launch the cluster. + +### Updating a Cluster Created with an RKE Template + +When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. 
+ +- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.](../../../../pages-for-subheaders/cluster-configuration.md) +- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. + +If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. + +As of Rancher v2.3.3, an existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. + +> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +### Converting an Existing Cluster to Use an RKE Template + +_Available as of v2.3.3_ + +This section describes how to create an RKE template from an existing cluster. + +RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md new file mode 100644 index 0000000000..75dbd23828 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md @@ -0,0 +1,50 @@ +--- +title: Template Creator Permissions +weight: 10 +--- + +Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. + +For more information on administrator permissions, refer to the [documentation on global permissions](../manage-role-based-access-control-rbac/global-permissions.md). + +# Giving Users Permission to Create Templates + +Templates can only be created by users who have the global permission **Create RKE Templates.** + +Administrators have the global permission to create templates, and only administrators can give that permission to other users. 
+ +For information on allowing users to modify existing templates, refer to [Sharing Templates.](access-or-share-templates.md) + +Administrators can give users permission to create RKE templates in two ways: + +- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) +- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) + +### Allowing a User to Create Templates + +An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: + +1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** +1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** + +**Result:** The user has permission to create RKE templates. + +### Allowing New Users to Create Templates by Default + +Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. + +1. From the **Global** view, click **Security > Roles.** +1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. +1. Select the option **Yes: Default role for new users** and click **Save.** + +**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. + +### Revoking Permission to Create Templates + +Administrators can remove a user's permission to create templates with the following steps: + +1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** +1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. +1. Click **Save.** + +**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md new file mode 100644 index 0000000000..3d6fb98766 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md @@ -0,0 +1,38 @@ +--- +title: Template Enforcement +weight: 32 +--- + +This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. + +By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, + +- Only an administrator has the ability to create clusters without a template. +- All standard users must use an RKE template to create a new cluster. +- Standard users cannot create a cluster without using a template. + +Users can only create new templates if the administrator [gives them permission.](creator-permissions.md#allowing-a-user-to-create-templates) + +After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. 
The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision](apply-templates.md#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.](manage-rke1-templates.md#updating-a-template) + +# Requiring New Clusters to Use an RKE Template + +You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user](../manage-role-based-access-control-rbac/global-permissions.md) will use the Kubernetes and/or Rancher settings that are vetted by administrators. + +To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** +1. Set the value to **True** and click **Save.** + +**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. + +# Disabling RKE Template Enforcement + +To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** +1. Set the value to **False** and click **Save.** + +**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md new file mode 100644 index 0000000000..850fe6ce2f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md @@ -0,0 +1,71 @@ +--- +title: Example Scenarios +weight: 5 +--- + +These example scenarios describe how an organization could use templates to standardize cluster creation. + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. 
+- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) + + +# Enforcing a Template Setting for Everyone + +Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. + +1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. +1. The administrator makes the template public. +1. The administrator turns on template enforcement. + +**Results:** + +- All Rancher users in the organization have access to the template. +- All new clusters created by [standard users](../manage-role-based-access-control-rbac/global-permissions.md) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. +- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. + +In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. + +# Templates for Basic and Advanced Users + +Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. + +1. First, an administrator turns on [RKE template enforcement.](enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user](../manage-role-based-access-control-rbac/global-permissions.md) in Rancher will need to use an RKE template when they create a cluster. +1. The administrator then creates two templates: + + - One template for basic users, with almost every option specified except for access keys + - One template for advanced users, which has most or all options has **Allow User Override** turned on + +1. The administrator shares the advanced template with only the advanced users. +1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. + +**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. + +# Updating Templates and Clusters Created with Them + +Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. + +In this organization, many clusters were created with a template that requires Kubernetes v1.14. Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. 
+ +The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: + +- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. +- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. +- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but major version upgrades will not be allowed. + +# Allowing Other Users to Control and Share a Template + +Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. + +Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. + +To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.](access-or-share-templates.md#sharing-ownership-of-templates) + +The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: + +- [Revise the template](manage-rke1-templates.md#updating-a-template) when the best practices change +- [Disable outdated revisions](manage-rke1-templates.md#disabling-a-template-revision) of the template so that no new clusters can be created with it +- [Delete the whole template](manage-rke1-templates.md#deleting-a-template) if the organization wants to go in a different direction +- [Set a certain revision as default](manage-rke1-templates.md#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. +- [Share the template](access-or-share-templates.md) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md new file mode 100644 index 0000000000..38524924ff --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md @@ -0,0 +1,70 @@ +--- +title: RKE Templates and Infrastructure +weight: 90 +--- + +In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. 
+ +Therefore, even if RKE template enforcement is turned on, the end user still has flexibility in picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. + +If you want to standardize the hardware in your clusters, use RKE templates in conjunction with node templates or with a server provisioning tool such as Terraform. + +### Node Templates + +[Node templates](../../../../reference-guides/user-settings/manage-node-templates.md) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. + +### Terraform + +Terraform is a server provisioning tool. It uses an infrastructure-as-code approach that lets you define almost every aspect of your infrastructure in Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. + +This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option for standardizing the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. + +Terraform allows you to: + +- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates +- Leverage catalog apps and multi-cluster apps +- Codify infrastructure across many platforms, including Rancher and major cloud providers +- Commit infrastructure-as-code to version control +- Easily repeat configuration and setup of infrastructure +- Incorporate infrastructure changes into standard development practices +- Prevent configuration drift, in which some servers become configured differently than others + +# How Does Terraform Work? + +Terraform configuration is written in files with the extension `.tf`, using HashiCorp Configuration Language, a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. Terraform then makes API calls to the provider in order to efficiently create that infrastructure. + +To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. + +Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. + +When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files.
Then those files can be committed to version control, validated, and reviewed as necessary. Then when you run `terraform apply`, the changes would be deployed. + +# Tips for Working with Terraform + +- There are examples of how to provide most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) + +- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. + +- You can also modify auth in the Terraform provider. + +- You can reverse engineer how to do define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. + +- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. + +# Tip for Creating CIS Benchmark Compliant Clusters + +This section describes one way that you can make security and compliance-related config files standard in your clusters. + +When you create a [CIS benchmark compliant cluster,](../../../../pages-for-subheaders/rancher-security.md) you have an encryption config file and an audit log config file. + +Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. + +Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. + +In this way, you can create flags that comply with the CIS benchmark. + +# Resources + +- [Terraform documentation](https://www.terraform.io/docs/) +- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) +- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md new file mode 100644 index 0000000000..6b394eeeb9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md @@ -0,0 +1,162 @@ +--- +title: Creating and Revising Templates +weight: 32 +--- + +This section describes how to manage RKE templates and revisions. 
You can create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** + +Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. + +Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. + +The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. + +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a template](#creating-a-template) +- [Updating a template](#updating-a-template) +- [Deleting a template](#deleting-a-template) +- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) +- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) +- [Disabling a template revision](#disabling-a-template-revision) +- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) +- [Setting a template revision as default](#setting-a-template-revision-as-default) +- [Deleting a template revision](#deleting-a-template-revision) +- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) +- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) + +### Prerequisites + +You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.](creator-permissions.md) + +You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.](access-or-share-templates.md#sharing-ownership-of-templates) + +### Creating a Template + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Click **Add Template.** +1. Provide a name for the template. An auto-generated name is already provided for the template's first version, which is created along with this template. +1. Optional: Share the template with other users or groups by [adding them as members.](access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. +1. Follow the on-screen form to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. + +**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md).
After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. + +### Updating a Template + +When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. + +You can't edit individual revisions. Since you can't edit individual revisions of a template, in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) + +When new template revisions are created, clusters using an older revision of the template are unaffected. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to edit and click the **⋮ > Edit.** +1. Edit the required information and click **Save.** +1. Optional: You can change the default revision of this template and also change who it is shared with. + +**Result:** The template is updated. To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) + +### Deleting a Template + +When you no longer use an RKE template for any of your clusters, you can delete it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** +1. Confirm the deletion when prompted. + +**Result:** The template is deleted. + +### Creating a Revision Based on the Default Revision + +You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** +1. Complete the rest of the form to create a new revision. + +**Result:** The RKE template revision is cloned and configured. + +### Creating a Revision Based on a Cloned Revision + +When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** +1. Complete the rest of the form. + +**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. + +### Disabling a Template Revision + +When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. + +You can disable the revision if it is not being used by any cluster. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to disable. Then select **⋮ > Disable.** + +**Result:** The RKE template revision cannot be used to create a new cluster. 
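If you need to automate this, the same operation is also exposed through the Rancher API. The sketch below assumes a v3 `clustertemplaterevisions` collection with `disable` and `enable` actions; the endpoint, action names, token, and revision ID are all illustrative assumptions, so confirm them through the **View in API** option in your Rancher UI before relying on them.

```bash
# Hypothetical sketch: disabling (and later re-enabling) an RKE template revision
# through the Rancher v3 API. The endpoint and action names are assumptions --
# verify them via the "View in API" option for a template revision in your setup.
RANCHER_URL="https://rancher.example.com"   # your Rancher server
API_TOKEN="token-xxxxx:yyyyyyyy"            # an API bearer token
REVISION_ID="cattle-global-data:ctr-abcde"  # placeholder template revision ID

# Disable the revision so it can no longer be used to create new clusters
curl -s -X POST "${RANCHER_URL}/v3/clustertemplaterevisions/${REVISION_ID}?action=disable" \
  -H "Authorization: Bearer ${API_TOKEN}"

# Re-enable the revision later if it should become usable again
curl -s -X POST "${RANCHER_URL}/v3/clustertemplaterevisions/${REVISION_ID}?action=enable" \
  -H "Authorization: Bearer ${API_TOKEN}"
```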
+ +### Re-enabling a Disabled Template Revision + +If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** + +**Result:** The RKE template revision can be used to create a new cluster. + +### Setting a Template Revision as Default + +When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. + +To set an RKE template revision as default, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** + +**Result:** The RKE template revision will be used as the default option when clusters are created with the template. + +### Deleting a Template Revision + +You can delete all revisions of a template except for the default revision. + +To permanently delete a revision, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** + +**Result:** The RKE template revision is deleted. + +### Upgrading a Cluster to Use a New Template Revision + +> This section assumes that you already have a cluster that [has an RKE template applied.](apply-templates.md) +> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. + +To upgrade a cluster to use a new template revision, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** +1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. +1. Click **Save.** + +**Result:** The cluster is upgraded to use the settings defined in the new template revision. + +### Exporting a Running Cluster to a New RKE Template and Revision + +You can save an existing cluster's settings as an RKE template. + +This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. 
+- New clusters can be [created from the new template and revision.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md new file mode 100644 index 0000000000..4fa8a4dbca --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md @@ -0,0 +1,15 @@ +--- +title: Overriding Template Settings +weight: 33 +--- + +When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** + +After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision](manage-rke1-templates.md) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. + +When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. + +The **Allow User Override** model of the RKE template is useful for situations such as: + +- Administrators know that some settings will need the flexibility to be frequently updated over time +- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md new file mode 100644 index 0000000000..2b8cf9fe43 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md @@ -0,0 +1,89 @@ +--- +title: Pod Security Policies +weight: 1135 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/pod-security-policies/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/pod-security-policies/ + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-a-pod-security-policy/ +--- + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). + +If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. 
+ +- [How PSPs Work](#how-psps-work) +- [Default PSPs](#default-psps) + - [Restricted](#restricted) + - [Unrestricted](#unrestricted) +- [Creating PSPs](#creating-psps) + - [Requirements](#requirements) + - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) +- [Configuration](#configuration) + +# How PSPs Work + +You can assign PSPs at the cluster or project level. + +PSPs work through inheritance: + +- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. +- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. +- You can override the default PSP by assigning a different PSP directly to the project. + +Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked for compliance with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. + +Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). + +# Default PSPs + +_Available as of v2.0.7_ + +Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. + +### Restricted + +This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: + +- Prevents pods from running as a privileged user and prevents escalation of privileges. +- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added). + +### Unrestricted + +This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. + +# Creating PSPs + +Using Rancher, you can create a Pod Security Policy using our GUI rather than creating a YAML file. + +### Requirements + +Rancher can only assign PSPs for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.](../../../pages-for-subheaders/cluster-configuration.md) + +It is a best practice to set PSPs at the cluster level. + +We recommend adding PSPs during cluster and project creation instead of adding them to an existing cluster or project. + +### Creating PSPs in the Rancher UI + +1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. + + **Step Result:** The **Add Policy** form opens. + +2. Name the policy. + +3. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each setting does.
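If you would rather manage policies as code, an equivalent object can also be applied directly with `kubectl`. The following is a minimal sketch modeled on the upstream restricted example referenced above; the policy name and the specific rules are illustrative only, so adjust them to your own security requirements.

```bash
# Minimal sketch of a restricted-style PodSecurityPolicy applied from YAML.
# Modeled on the upstream Kubernetes "restricted" example; tune the rules
# (volumes, user/group ranges, capabilities) to your own requirements.
kubectl apply -f - <<'EOF'
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-restricted
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  volumes:
    - configMap
    - emptyDir
    - projected
    - secret
    - downwardAPI
    - persistentVolumeClaim
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  fsGroup:
    rule: MustRunAs
    ranges:
      - min: 1
        max: 65535
  readOnlyRootFilesystem: false
EOF
```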
+ + +# Configuration + +The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) + + + + + +[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems +[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces +[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md new file mode 100644 index 0000000000..0e5375d0ca --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md @@ -0,0 +1,44 @@ +--- +title: Configuring a Global Default Private Registry +weight: 400 +aliases: +--- + +You might want to use a private container registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the container images that are used in your clusters. + +There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. + +For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Docker installation](installation/air-gap-single-node) or [air gapped Kubernetes installation](installation/air-gap-high-availability) instructions. + +If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. + +# Setting a Private Registry with No Credentials as the Default Registry + +1. Log into Rancher and configure the default administrator password. + +1. Go into the **Settings** view. + + ![](/img/airgap/settings.png) + +1. Look for the setting called `system-default-registry` and choose **Edit**. + + ![](/img/airgap/edit-system-default-registry.png) + +1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. 
+ + ![](/img/airgap/enter-system-default-registry.png) + +**Result:** Rancher will use your private registry to pull system images. + +# Setting a Private Registry with Credentials when Deploying a Cluster + +You can follow these steps to configure a private registry when you provision a cluster with Rancher: + +1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** +1. In the Enable Private Registries section, click **Enabled.** +1. Enter the registry URL and credentials. +1. Click **Save.** + +**Result:** The new cluster will be able to pull images from the private registry. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md new file mode 100644 index 0000000000..3067a8c032 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md @@ -0,0 +1,194 @@ +--- +title: Cluster and Project Roles +weight: 1127 +--- + +Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. + +### Membership and Role Assignment + +The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. + +When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. + +> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. + +### Cluster Roles + +_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. + +- **Cluster Owner:** + + These users have full control over the cluster and all resources in it. + +- **Cluster Member:** + + These users can view most cluster level resources and create new projects. + +#### Custom Cluster Roles + +Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. + +#### Cluster Role Reference + +The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. 
+ +| Built-in Cluster Role | Owner | Member | +| ---------------------------------- | ------------- | --------------------------------- | +| Create Projects | ✓ | ✓ | +| Manage Cluster Backups             | ✓ | | +| Manage Cluster Catalogs | ✓ | | +| Manage Cluster Members | ✓ | | +| Manage Nodes | ✓ | | +| Manage Storage | ✓ | | +| View All Projects | ✓ | | +| View Cluster Catalogs | ✓ | ✓ | +| View Cluster Members | ✓ | ✓ | +| View Nodes | ✓ | ✓ | + +For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. + +> **Note:** +>When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Giving a Custom Cluster Role to a Cluster Member + +After an administrator [sets up a custom cluster role,](custom-roles.md) cluster owners and admins can then assign those roles to cluster members. + +To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. + +To assign the role to a new cluster member, + +1. Go to the **Cluster** view, then go to the **Members** tab. +1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create.** + +**Result:** The member has the assigned role. + +To assign any custom role to an existing cluster member, + +1. Go to the member you want to give the role to. Click the **⋮ > View in API.** +1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** + +**Result:** The member has the assigned role. + +### Project Roles + +_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. + +- **Project Owner:** + + These users have full control over the project and all resources in it. + +- **Project Member:** + + These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. + + >**Note:** + > + >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + +- **Read Only:** + + These users can view everything in the project but cannot create, update, or delete anything. + + >**Caveat:** + > + >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. 
Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + + +#### Custom Project Roles + +Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. + +#### Project Role Reference + +The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. + +| Built-in Project Role | Owner | Member | Read Only | +| ---------------------------------- | ------------- | ----------------------------- | ------------- | +| Manage Project Members | ✓ | | | +| Create Namespaces | ✓ | ✓ | | +| Manage Config Maps | ✓ | ✓ | | +| Manage Ingress | ✓ | ✓ | | +| Manage Project Catalogs | ✓ | | | +| Manage Secrets | ✓ | ✓ | | +| Manage Service Accounts | ✓ | ✓ | | +| Manage Services | ✓ | ✓ | | +| Manage Volumes | ✓ | ✓ | | +| Manage Workloads | ✓ | ✓ | | +| View Secrets | ✓ | ✓ | | +| View Config Maps | ✓ | ✓ | ✓ | +| View Ingress | ✓ | ✓ | ✓ | +| View Project Members | ✓ | ✓ | ✓ | +| View Project Catalogs | ✓ | ✓ | ✓ | +| View Service Accounts | ✓ | ✓ | ✓ | +| View Services | ✓ | ✓ | ✓ | +| View Volumes | ✓ | ✓ | ✓ | +| View Workloads | ✓ | ✓ | ✓ | + +> **Notes:** +> +>- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. +>- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. +>- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. + +### Defining Custom Roles +As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. + +When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. + +### Default Cluster and Project Roles + +By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. 
+ +There are two methods for changing default cluster/project roles: + +- **Assign Custom Roles**: Create a [custom role](custom-roles.md) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. + +- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. + + For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). + +>**Note:** +> +>- Although you can [lock](locked-roles.md) a default role, the system still assigns the role to users who create a cluster/project. +>- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. + +### Configuring Default Roles for Cluster and Project Creators + +You can change the cluster or project role(s) that are automatically assigned to the creating user. + +1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. + +1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. + +1. Enable the role as default. + +
    + For Clusters + + 1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. + 1. Click **Save**. + +
    +
    + For Projects + + 1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. + 1. Click **Save**. + +
    + +1. If you want to remove a default role, edit the permission and select **No** from the default roles option. + +**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. + +### Cluster Membership Revocation Behavior + +When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#cluster-roles) but [retains their project roles](#project-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: + +- Access the projects they hold membership in. +- Exercise any [individual project roles](#project-role-reference) they are assigned. + +If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md new file mode 100644 index 0000000000..71cc0ba754 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md @@ -0,0 +1,179 @@ +--- +title: Custom Roles +weight: 1128 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/roles/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Within Rancher, _roles_ determine what actions a user can take within a cluster or project. + +Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. + +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) +- [Creating a custom global role](#creating-a-custom-global-role) +- [Deleting a custom global role](#deleting-a-custom-global-role) +- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) + +## Prerequisites + +To complete the tasks on this page, one of the following permissions is required: + + - [Administrator Global Permissions](global-permissions.md). + - [Custom Global Permissions](global-permissions.md#custom-global-permissions) with the [Manage Roles](global-permissions.md) role assigned. + +## Creating a Custom Role for a Cluster or Project + +While Rancher comes out-of-the-box with a set of default user roles, you can also create custom roles to provide users with very specific permissions within Rancher. + +The steps to add custom roles differ depending on the version of Rancher. + + + + +1. From the **Global** view, select **Security > Roles** from the main menu. + +1. Select a tab to determine the scope of the roles you're adding. The tabs are: + + - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. + - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. + +1. Click **Add Cluster/Project Role.** + +1. **Name** the role. + +1. 
Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. + + > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. + +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. + + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + +1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. + +1. Click **Create**. + + + + +1. From the **Global** view, select **Security > Roles** from the main menu. + +1. Click **Add Role**. + +1. **Name** the role. + +1. Choose whether to set the role to a status of [locked](locked-roles.md). + + > **Note:** Locked roles cannot be assigned to users. + +1. In the **Context** dropdown menu, choose the scope of the role assigned to the user. The contexts are: + + - **All:** The user can use their assigned role regardless of context. This role is valid for assignment when adding/managing members to clusters or projects. + + - **Cluster:** This role is valid for assignment when adding/managing members to _only_ clusters. + + - **Project:** This role is valid for assignment when adding/managing members to _only_ projects. + +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. + + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + +1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. + +1. Click **Create**. 
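To tie the **Grant Resources** and **Inherit from a Role** options together, the sketch below shows roughly how such a custom role could be represented as a `RoleTemplate` resource. The API group, field names, role name, and the inherited role ID are assumptions for illustration, not an exact export from Rancher:

```yaml
# Hypothetical example: a custom project role that can view and manage ConfigMaps,
# and additionally inherits everything granted by the built-in read-only project role.
apiVersion: management.cattle.io/v3
kind: RoleTemplate
metadata:
  name: configmap-manager          # made-up role ID
displayName: ConfigMap Manager
context: project                   # valid for assignment on projects only
rules:                             # "Grant Resources" in the UI
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - delete
roleTemplateNames:                 # "Inherit from a Role" in the UI
- read-only                        # assumed ID of the built-in Read Only project role
```

Because grants are additive, the explicit ConfigMap rule and the rules inherited from the read-only role are combined; the narrower rule does not restrict anything granted by the inherited role.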
+ + + + +## Creating a Custom Global Role + +_Available as of v2.4.0_ + +### Creating a Custom Global Role that Copies Rules from an Existing Role + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. + +The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. + +To create a custom global role based on an existing role, + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** +1. Enter a name for the role. +1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + +1. Click **Save.** + +### Creating a Custom Global Role that Does Not Copy Rules from Another Role + +Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, click **Add Global Role.** +1. Enter a name for the role. +1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + +1. Click **Save.** + +## Deleting a Custom Global Role + +_Available as of v2.4.0_ + +When deleting a custom global role, all global role bindings with this custom role are deleted. + +If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. + +Custom global roles can be deleted, but built-in roles cannot be deleted. + +To delete a custom global role, + +1. Go to the **Global** view and click **Security > Roles.** +2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** +3. Click **Delete.** + +## Assigning a Custom Global Role to a Group + +_Available as of v2.4.0_ + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. 
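For illustration, the same outcome can be sketched declaratively. The example below assumes `GlobalRole` and `GlobalRoleBinding` resources in the `management.cattle.io/v3` API group; the role name, resource list, and group principal are made up:

```yaml
# Hypothetical custom global role that only allows managing catalogs.
apiVersion: management.cattle.io/v3
kind: GlobalRole
metadata:
  name: catalog-admins             # made-up role name
displayName: Catalog Admins
rules:
- apiGroups:
  - management.cattle.io
  resources:
  - catalogs
  - templates
  - templateversions
  verbs:
  - '*'
---
# Binds the role above to every member of an external authentication group.
apiVersion: management.cattle.io/v3
kind: GlobalRoleBinding
metadata:
  name: catalog-admins-binding     # made-up binding name
globalRoleName: catalog-admins
groupPrincipalName: "activedirectory_group://CN=catalog-admins,OU=groups,DC=example,DC=com"  # made-up principal
```

The binding associates the role with a group principal from the external authentication provider rather than with an individual user.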
+ +When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Custom** section, choose any custom global role that will be assigned to the group. +1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md new file mode 100644 index 0000000000..b9dd90f3d7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md @@ -0,0 +1,174 @@ +--- +title: Global Permissions +weight: 1126 +--- + +_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. + +Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. + +- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. + +- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. + +- **User-Base:** User-Base users have login-access only. + +You cannot update or delete the built-in Global Permissions. 
+ +This section covers the following topics: + +- [Global permission assignment](#global-permission-assignment) + - [Global permissions for new local users](#global-permissions-for-new-local-users) + - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) +- [Custom global permissions](#custom-global-permissions) + - [Custom global permissions reference](#custom-global-permissions-reference) + - [Configuring default global permissions for new users](#configuring-default-global-permissions) + - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) + - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + - [Refreshing group memberships](#refreshing-group-memberships) + +# Global Permission Assignment + +Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. + +### Global Permissions for New Local Users + +When you create a new local user, you assign them a global permission as you complete the **Add User** form. + +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) + +### Global Permissions for Users with External Authentication + +When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. + +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) + +Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) + +As of Rancher v2.4.0, you can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. + +# Custom Global Permissions + +Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. + +When a user from an [external authentication source](../../../../pages-for-subheaders/about-authentication.md) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. + +However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. + +The default roles, Administrator and Standard User, each come with multiple global permissions built into them. 
The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. + +Administrators can enforce custom global permissions in multiple ways: + +- [Changing the default permissions for new users](#configuring-default-global-permissions) +- [Configuring global permissions for individual users](#configuring-global-permissions-for-individual-users) +- [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + +### Custom Global Permissions Reference + +The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. + +| Custom Global Permission | Administrator | Standard User | User-Base | +| ---------------------------------- | ------------- | ------------- |-----------| +| Create Clusters | ✓ | ✓ | | +| Create RKE Templates | ✓ | ✓ | | +| Manage Authentication | ✓ | | | +| Manage Catalogs | ✓ | | | +| Manage Cluster Drivers | ✓ | | | +| Manage Node Drivers | ✓ | | | +| Manage PodSecurityPolicy Templates | ✓ | | | +| Manage Roles | ✓ | | | +| Manage Settings | ✓ | | | +| Manage Users | ✓ | | | +| Use Catalog Templates | ✓ | ✓ | | +| User Base\* (Basic log-in access) | ✓ | ✓ | | + +> \*This role has two names: +> +> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. +> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. + +For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. + +> **Notes:** +> +> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. +> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Configuring Default Global Permissions + +If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. + +> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. 
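As far as we can tell, the **New User Default** column corresponds to a boolean on each underlying `GlobalRole` resource. A minimal sketch, assuming the `management.cattle.io/v3` API group and the `user-base` role ID:

```yaml
# Hypothetical sketch: marking a built-in global permission as a default for new users.
# Confirm the role ID and field names in your own installation before relying on them.
apiVersion: management.cattle.io/v3
kind: GlobalRole
metadata:
  name: user-base
displayName: User Base
newUserDefault: true    # corresponds to "Yes: Default role for new users" in the UI
builtin: true           # built-in roles cannot be deleted, only marked or unmarked as default
```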
+ +To change the default global permissions that are assigned to external users upon their first log in, follow these steps: + +1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. + +1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. + +1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. + +1. If you want to remove a default permission, edit the permission and select **No** from **New User Default**. + +**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. + +### Configuring Global Permissions for Individual Users + +To configure permission for a user, + +1. Go to the **Users** tab. + +1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** + +1. In the **Global Permissions** section, click **Custom.** + +1. Check the boxes for each subset of permissions you want the user to have access to. + +1. Click **Save.** + +> **Result:** The user's global permissions have been updated. + +### Configuring Global Permissions for Groups + +_Available as of v2.4.0_ + +If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. + +After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. + +For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) + +For new users, the new permissions take effect when the users log in to Rancher for the first time. New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. 
In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. + +### Refreshing Group Memberships + +When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. + +To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. + +An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. + +To refresh group memberships, + +1. From the **Global** view, click **Security > Users.** +1. Click **Refresh Group Memberships.** + +**Result:** Any changes to the group members' permissions will take effect. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md new file mode 100644 index 0000000000..0929bca9c0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md @@ -0,0 +1,37 @@ +--- +title: Locked Roles +weight: 1129 +--- + +You can set roles to a status of `locked`. Locking a role prevents it from being assigned to users in the future. + +Locked roles: + +- Cannot be assigned to users who don't already have it assigned. +- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. +- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. + + **Example:** Let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. + + To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. You could then create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects, and use this new custom role when adding users to a cluster. + +Roles can be locked by the following users: + +- Any user assigned the `Administrator` global permission. +- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. + + +## Locking/Unlocking Roles + +If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. + +You can lock roles in two contexts: + +- When you're [adding a custom role](custom-roles.md). +- When you're editing an existing role (see below). + +1. From the **Global** view, select **Security** > **Roles**. + +2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. + +3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. 
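For reference, the **Locked** option appears to map to a single boolean on the role's underlying `RoleTemplate` resource. A minimal, hypothetical sketch (the role name and field names are assumptions, not an exact Rancher export):

```yaml
# Hypothetical sketch: locking a custom cluster role so it can no longer be assigned.
apiVersion: management.cattle.io/v3
kind: RoleTemplate
metadata:
  name: custom-cluster-member    # made-up role ID
displayName: Custom Cluster Member
context: cluster
locked: true                     # existing assignments are unaffected; new assignments are blocked
```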
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md 
b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md new file mode 100644 index 0000000000..e7fb1c63f9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp.md @@ -0,0 +1,53 @@ +--- +title: Enable Istio with Pod Security Policies +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/enable-istio-with-psp/ +--- + + >**Note:** The following guide is only for RKE provisioned clusters. + +If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. + +The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). + +- 1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) +- 2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) +- 3. [Install Istio.](#3-install-istio) + +### 1. Configure the System Project Policy to allow Istio install + +1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. +1. Find the **Project: System** project and select the **⋮ > Edit**. +1. Change the Pod Security Policy option to be unrestricted, then click Save. + + +### 2. Install the CNI Plugin in the System Project + +1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. +1. Select the **Project: System** project. +1. Choose **Tools > Catalogs** in the navigation bar. +1. Add a catalog with the following: + 1. Name: istio-cni + 1. Catalog URL: https://github.com/istio/cni + 1. Branch: The branch that matches your current release, for example: `release-1.4`. +1. 
From the main menu select **Apps** +1. Click Launch and select istio-cni +1. Update the namespace to be "kube-system" +1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: + +``` +--- + logLevel: "info" + excludeNamespaces: + - "istio-system" + - "kube-system" +``` + +### 3. Install Istio + +Follow the [primary instructions](enable-istio-in-cluster.md), adding a custom answer: `istio_cni.enabled: true`. + +After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md new file mode 100644 index 0000000000..463cf794df --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -0,0 +1,39 @@ +--- +title: 1. Enable Istio in the Cluster +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-cluster + - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-cluster + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-cluster/ +--- + +This cluster uses the default Nginx controller to allow traffic into the cluster. + +A Rancher [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) can configure Rancher to deploy Istio in a Kubernetes cluster. + +# Prerequisites + +This guide assumes you have already [installed Rancher,](../../../pages-for-subheaders/installation-and-upgrade.md) and you have already [provisioned a separate Kubernetes cluster](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on which you will install Istio. + +The nodes in your cluster must meet the [CPU and memory requirements.](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) + +The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) + +> If the cluster has a Pod Security Policy enabled there are [additional prerequisites steps](enable-istio-in-cluster-with-psp.md) + +# Enable Istio in the Cluster + +1. From the **Global** view, navigate to the **cluster** where you want to enable Istio. +1. Click **Tools > Istio.** +1. Optional: Configure member access and [resource limits](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. +1. Click **Enable**. +1. Click **Save**. + +**Result:** Istio is enabled at the cluster level. + +The Istio application, `cluster-istio`, is added as an application to the cluster's `system` project. + +When Istio is enabled in the cluster, the label for Istio sidecar auto injection,`istio-injection=enabled`, will be automatically added to each new namespace in this cluster. 
This automatically enables Istio sidecar injection in all new workloads that are deployed in those namespaces. You will need to manually enable Istio in preexisting namespaces and workloads. + +### [Next: Enable Istio in a Namespace](enable-istio-in-namespace.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md new file mode 100644 index 0000000000..ddc35756ce --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -0,0 +1,53 @@ +--- +title: 2. Enable Istio in a Namespace +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/enable-istio-in-namespace + - /rancher/v2.0-v2.4/en/istio/legacy/setup/enable-istio-in-namespace + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/ +--- + +You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. + +This namespace setting will only affect new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage the sidecar auto injection. + +> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio enabled. + +1. In the Rancher UI, go to the cluster view. Click the **Projects/Namespaces** tab. +1. Go to the namespace where you want to enable the Istio sidecar auto injection and click the **⋮.** +1. Click **Edit.** +1. In the **Istio sidecar auto injection** section, click **Enable.** +1. Click **Save.** + +**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. + +### Verifying that Automatic Istio Sidecar Injection is Enabled + +To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. + +### Excluding Workloads from Being Injected with the Istio Sidecar + +If you need to exclude a workload from getting injected with the Istio sidecar, use the following annotation on the workload: + +``` +sidecar.istio.io/inject: "false" +``` + +To add the annotation to a workload, + +1. From the **Global** view, open the project that has the workload that should not have the sidecar. +1. Click **Resources > Workloads.** +1. Go to the workload that should not have the sidecar and click **⋮ > Edit.** +1. Click **Show Advanced Options.** Then expand the **Labels & Annotations** section. +1. Click **Add Annotation.** +1. In the **Key** field, enter `sidecar.istio.io/inject`. +1. In the **Value** field, enter `false`. +1. Click **Save.** + +**Result:** The Istio sidecar will not be injected into the workload. + +> **NOTE:** If you are having issues with a Job you deployed not completing, you will need to add this annotation to your pod using the provided steps. Since Istio sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. 
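If you manage workloads as YAML rather than through the UI, the same exclusion can be expressed directly on the pod template. Below is a minimal sketch with a made-up Deployment name; the annotation value must be the quoted string `"false"`, and it belongs on the pod template metadata, where the injection webhook reads it:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: batch-job-runner                 # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: batch-job-runner
  template:
    metadata:
      labels:
        app: batch-job-runner
      annotations:
        sidecar.istio.io/inject: "false"   # pod-level annotation that disables injection
    spec:
      containers:
      - name: runner
        image: busybox
        command: ["sh", "-c", "echo done && sleep 3600"]
```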
+ + +### [Next: Select the Nodes ](node-selectors.md) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/view-traffic/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md new file mode 100644 index 0000000000..de315a961c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md @@ -0,0 +1,43 @@ +--- +title: 3. Select the Nodes Where Istio Components Will be Deployed +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/node-selectors + - /rancher/v2.0-v2.4/en/istio/legacy/setup/node-selectors + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/node-selectors + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/node-selectors/ +--- + +> **Prerequisite:** Your cluster needs a worker node that can designated for Istio. The worker node should meet the [resource requirements.](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) + +This section describes how use node selectors to configure Istio components to be deployed on a designated node. + +In larger deployments, it is strongly advised that Istio's infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. + +# Adding a Label to the Istio Node + +First, add a label to the node where Istio components should be deployed. This label can have any key-value pair. For this example, we will use the key `istio` and the value `enabled`. + +1. From the cluster view, go to the **Nodes** tab. +1. Go to a worker node that will host the Istio components and click **⋮ > Edit.** +1. Expand the **Labels & Annotations** section. +1. Click **Add Label.** +1. In the fields that appear, enter `istio` for the key and `enabled` for the value. +1. Click **Save.** + +**Result:** A worker node has the label that will allow you to designate it for Istio components. + +# Configuring Istio Components to Use the Labeled Node + +Configure each Istio component to be deployed to the node with the Istio label. Each Istio component can be configured individually, but in this tutorial, we will configure all of the components to be scheduled on the same node for the sake of simplicity. + +For larger deployments, it is recommended to schedule each component of Istio onto separate nodes. + +1. From the cluster view, click **Tools > Istio.** +1. Expand the **Pilot** section and click **Add Selector** in the form that appears. Enter the node selector label that you added to the Istio node. In our case, we are using the key `istio` and the value `enabled.` +1. Repeat the previous step for the **Mixer** and **Tracing** sections. +1. Click **Save.** + +**Result:** The Istio components will be deployed on the Istio node. 
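Conceptually, the label and selectors configured above behave like any other Kubernetes node selector: pods for the Istio components are only scheduled onto nodes that carry the label. The following is a simplified illustration of the resulting scheduling constraint, not an actual Rancher-generated manifest:

```yaml
# Simplified illustration of the effect of the node selector configured in the UI.
apiVersion: v1
kind: Pod
metadata:
  name: istio-component-example      # hypothetical; the real pods are managed by the Istio app
spec:
  nodeSelector:
    istio: enabled                   # matches the label added to the designated worker node
  containers:
  - name: component
    image: example/istio-component:placeholder   # placeholder image reference
```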
+ +### [Next: Add Deployments and Services](use-istio-sidecar.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md new file mode 100644 index 0000000000..1526f5a4d7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -0,0 +1,135 @@ +--- +title: 5. Set up the Istio Gateway +weight: 5 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/gateway + - /rancher/v2.0-v2.4/en/istio/legacy/setup/gateway + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/gateway + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/gateway/ +--- + +The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. + +You can use the NGINX ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. + +To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two ingresses. + +You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. + +You can route traffic into the service mesh with a load balancer or just Istio's NodePort gateway. This section describes how to set up the NodePort gateway. + +For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) + +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg) + +# Enable the Istio Gateway + +The ingress gateway is a Kubernetes service that will be deployed in your cluster. There is only one Istio gateway per cluster. + +1. Go to the cluster where you want to allow outside traffic into Istio. +1. Click **Tools > Istio.** +1. Expand the **Ingress Gateway** section. +1. Under **Enable Ingress Gateway,** click **True.** The default type of service for the Istio gateway is NodePort. You can also configure it as a [load balancer.](../../new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md) +1. Optionally, configure the ports, service types, node selectors and tolerations, and resource requests and limits for this service. The default resource requests for CPU and memory are the minimum recommended resources. +1. Click **Save.** + +**Result:** The gateway is deployed, which allows Istio to receive traffic from outside the cluster. + +# Add a Kubernetes Gateway that Points to the Istio Gateway + +To allow traffic to reach Ingress, you will also need to provide a Kubernetes gateway resource in your YAML that points to Istio's implementation of the ingress gateway to the cluster. + +1. Go to the namespace where you want to deploy the Kubernetes gateway and click **Import YAML.** +1. 
Upload the gateway YAML as a file or paste it into the form. An example gateway YAML is provided below. +1. Click **Import.** + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 +``` + +**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. + +Confirm that the resource exists by running: +``` +kubectl get gateway -A +``` + +The result should be something like this: +``` +NAME AGE +bookinfo-gateway 64m +``` + +### Access the ProductPage Service from a Web Browser + +To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: + +`http://:/productpage` + +To get the ingress gateway URL and port, + +1. Go to the `System` project in your cluster. +1. Within the `System` project, go to `Resources` > `Workloads` then scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. +1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. + +**Result:** You should see the BookInfo app in the web browser. + +For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) + +# Troubleshooting + +The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. + +### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller + +You can try the steps in this section to make sure the Kubernetes gateway is configured properly. + +In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: + +1. Go to the `System` project in your cluster. +1. Within the `System` project, go to the namespace `istio-system`. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. +1. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. 
+ +### [Next: Set up Istio's Components for Traffic Management](set-up-traffic-management.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md new file mode 100644 index 0000000000..be98965cf9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -0,0 +1,66 @@ +--- +title: 6. Set up Istio's Components for Traffic Management +weight: 6 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/set-up-traffic-management + - /rancher/v2.0-v2.4/en/istio/legacy/setup/set-up-traffic-management + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/set-up-traffic-management/ +--- + +A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. + +- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. +- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. + +This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. + +In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. + +After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. + +To deploy the virtual service and destination rules for the `reviews` service, + +1. Go to the project view and click **Import YAML.** +1. Copy resources below into the form. +1. Click **Import.** + +``` +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: reviews +spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + subset: v1 + weight: 50 + - destination: + host: reviews + subset: v3 + weight: 50 +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: reviews +spec: + host: reviews + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 +``` +**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. 
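Once the even split between `v1` and `v3` is working, the same mechanism supports canary-style rollouts: adjusting the weights shifts traffic gradually between subsets. A small variation of the virtual service above (same host and subsets, only the weights changed) might look like this:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v1
      weight: 90                 # keep most traffic on the stable version
    - destination:
        host: reviews
        subset: v3
      weight: 10                 # send a small share to the canary
```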
+ +### [Next: Generate and View Traffic](generate-and-view-traffic.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md new file mode 100644 index 0000000000..68d1a60df1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -0,0 +1,327 @@ +--- +title: 4. Add Deployments and Services with the Istio Sidecar +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup/deploy-workloads + - /rancher/v2.0-v2.4/en/istio/legacy/setup/deploy-workloads + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/deploy-workloads/ +--- + +> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have Istio enabled. + +Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. + +To inject the Istio sidecar on an existing workload in the namespace, go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. + +Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see istio-init and istio-proxy alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. + +### 3. Add Deployments and Services + +Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. + +1. Go to the project inside the cluster you want to deploy the workload on. +1. In Workloads, click **Import YAML.** +1. Copy the below resources into the form. +1. Click **Import.** + +This will set up the following sample resources from Istio's example BookInfo app: + +Details service and deployment: + +- A `details` Service +- A ServiceAccount for `bookinfo-details` +- A `details-v1` Deployment + +Ratings service and deployment: + +- A `ratings` Service +- A ServiceAccount for `bookinfo-ratings` +- A `ratings-v1` Deployment + +Reviews service and deployments (three versions): + +- A `reviews` Service +- A ServiceAccount for `bookinfo-reviews` +- A `reviews-v1` Deployment +- A `reviews-v2` Deployment +- A `reviews-v3` Deployment + +Productpage service and deployment: + +This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. + +- A `productpage` service +- A ServiceAccount for `bookinfo-productpage` +- A `productpage-v1` Deployment + +### Resource YAML + +```yaml +# Copyright 2017 Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + 
template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +``` + +### [Next: Set up the Istio Gateway](set-up-istio-gateway.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md new file mode 100644 index 0000000000..5824ed96cf --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md @@ -0,0 +1,57 @@ +--- +title: Adding Users to Clusters +weight: 2020 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-managing-cluster-members/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/cluster-members/ + - /rancher/v2.0-v2.4/en/cluster-admin/cluster-members +--- + +If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. + +>**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members](k8s-in-rancher/projects-and-namespaces/project-members/) instead. + +There are two contexts where you can add cluster members: + +- Adding Members to a New Cluster + + You can add members to a cluster as you create it (recommended if possible). + +- [Adding Members to an Existing Cluster](#editing-cluster-membership) + + You can always add members to a cluster after a cluster is provisioned. + +## Editing Cluster Membership + +Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. + +1. From the **Global** view, open the cluster that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the cluster. + + If external authentication is configured: + + - Rancher returns users from your [external authentication](../../../../pages-for-subheaders/about-authentication.md) source as you type. + + >**Using AD but can't find your users?** + >There may be an issue with your search attribute configuration. 
See [Configuring Active Directory Authentication: Step 5](../../authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md). + + - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users](../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +4. Assign the user or group **Cluster** roles. + + [What are Cluster Roles?](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + > - To remove roles from the list, [Lock/Unlock Roles](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + +**Result:** The chosen users are added to the cluster. + +- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md new file mode 100644 index 0000000000..9da68ae0ea --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -0,0 +1,48 @@ +--- +title: How the Authorized Cluster Endpoint Works +weight: 2015 +--- + +This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +### About the kubeconfig File + +The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). + +This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. + +After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. 
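+
+For example, after saving the downloaded file locally, you can point `kubectl` at it to list the contexts it contains and run commands against the cluster. This is a sketch; the file path is an assumption, so use whatever location you saved the kubeconfig to:
+
+```
+# List the contexts Rancher generated in the downloaded kubeconfig
+kubectl config get-contexts --kubeconfig ~/Downloads/my-cluster-kubeconfig.yaml
+
+# Run a command against the cluster using that file
+kubectl --kubeconfig ~/Downloads/my-cluster-kubeconfig.yaml get nodes
+```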
+ +_Available as of v2.4.6_ + +If admins have [enforced TTL on kubeconfig tokens](../../../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](cluster-admin/cluster-access/cli) to be present in your PATH. + + +### Two Authentication Methods for RKE Clusters + +If the cluster is not an [RKE cluster,](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. + +For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: + +- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. +- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. + +This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. + +To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page](../../../../pages-for-subheaders/rancher-manager-architecture.md#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. + +### About the kube-api-auth Authentication Webhook + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) which is only available for [RKE clusters.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. 
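+
+For reference, authentication webhook configuration files use the standard kubeconfig-style format that Kubernetes expects for `--authentication-token-webhook-config-file`. The sketch below is illustrative only; the exact file RKE writes to `/etc/kubernetes/kube-api-authn-webhook.yaml` may differ:
+
+```yaml
+apiVersion: v1
+kind: Config
+clusters:
+- name: Default
+  cluster:
+    insecure-skip-tls-verify: true
+    # The kube-api-auth webhook endpoint referenced above
+    server: http://127.0.0.1:6440/v1/authenticate
+users:
+- name: Default
+  user:
+    insecure-skip-tls-verify: true
+contexts:
+- name: Default
+  context:
+    user: Default
+    cluster: Default
+current-context: Default
+```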
+ +The scheduling rules for `kube-api-auth` are listed below: + +_Applies to v2.3.0 and higher_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
    `node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md new file mode 100644 index 0000000000..6c815abefd --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -0,0 +1,109 @@ +--- +title: "Access a Cluster with Kubectl and kubeconfig" +description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." +weight: 2010 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/kubectl/ + - /rancher/v2.0-v2.4/en/cluster-admin/kubectl + - /rancher/v2.0-v2.4/en/concepts/clusters/kubeconfig-files/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/kubeconfig/ + - /rancher/2.x/en/cluster-admin/kubeconfig +--- + +This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. + +For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). + +- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) +- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) +- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) +- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) + - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) + - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) + + +### Accessing Clusters with kubectl Shell in the Rancher UI + +You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. + +1. From the **Global** view, open the cluster that you want to access with kubectl. + +2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. + +### Accessing Clusters with kubectl from Your Workstation + +This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. + +This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. + +> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. +1. Click **Kubeconfig File**. +1. Copy the contents displayed to your clipboard. +1. Paste the contents into a new file on your local computer. 
Move the file to `~/.kube/config`. Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: + ``` + kubectl --kubeconfig /custom/path/kube.config get pods + ``` +1. From your workstation, launch kubectl. Use it to interact with your kubernetes cluster. + + +### Note on Resources Created Using kubectl + +Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. + +# Authenticating Directly with a Downstream Cluster + +This section intended to help you set up an alternative method to access an [RKE cluster.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +This method is only available for RKE clusters that have the [authorized cluster endpoint](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](authorized-cluster-endpoint.md) + +We recommend that as a best practice, you should set up this method to access your RKE cluster, so that just in case you can’t connect to Rancher, you can still access the cluster. + +> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) + +To find the name of the context(s) in your downloaded kubeconfig file, run: + +``` +kubectl config get-contexts --kubeconfig /custom/path/kube.config +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* my-cluster my-cluster user-46tmn + my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn +``` + +In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. + +With the second context, `my-cluster-controlplane-1`, you would authenticate with the authorized cluster endpoint, communicating with an downstream RKE cluster directly. + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the name of the context in as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. + +When `kubectl` works normally, it confirms that you can access your cluster while bypassing Rancher's authentication proxy. 
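+
+If you prefer not to pass `--context` on every invocation, you can make the direct context the default for that kubeconfig file (a sketch using the example context name from the output above):
+
+```
+# Make the authorized cluster endpoint context the default
+kubectl config use-context my-cluster-controlplane-1 --kubeconfig /custom/path/kube.config
+
+# Subsequent commands with this kubeconfig now bypass the Rancher authentication proxy
+kubectl --kubeconfig /custom/path/kube.config get nodes
+```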
+
+### Connecting Directly to Clusters with FQDN Defined
+
+If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `<CLUSTER_NAME>-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context.
+
+Assuming the kubeconfig file is located at `~/.kube/config`:
+
+```
+kubectl --context <CLUSTER_NAME>-fqdn get nodes
+```
+Directly referencing the location of the kubeconfig file:
+```
+kubectl --kubeconfig /custom/path/kube.config --context <CLUSTER_NAME>-fqdn get pods
+```
+
+### Connecting Directly to Clusters without FQDN Defined
+
+If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `<CLUSTER_NAME>-<NODE_NAME>`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use one of these contexts.
+
+Assuming the kubeconfig file is located at `~/.kube/config`:
+```
+kubectl --context <CLUSTER_NAME>-<NODE_NAME> get nodes
+```
+Directly referencing the location of the kubeconfig file:
+```
+kubectl --kubeconfig /custom/path/kube.config --context <CLUSTER_NAME>-<NODE_NAME> get pods
+```
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md
new file mode 100644
index 0000000000..874cb531b7
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md
@@ -0,0 +1,30 @@
+---
+title: Adding a Pod Security Policy
+weight: 80
+---
+
+> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md)
+
+When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy](../authentication-permissions-and-global-configuration/create-pod-security-policies.md), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running.
+
+You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster.
+
+1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**.
+
+2. Expand **Cluster Options**.
+
+3. From **Pod Security Policy Support**, select **Enabled**.
+
+   >**Note:** This option is only available for clusters [provisioned by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md).
+
+4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster.
+
+   Rancher ships with [policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) as well.
+
+5. Click **Save**.
+
+**Result:** The pod security policy is applied to the cluster and any projects within the cluster.
+
+>**Note:** Workloads already running before assignment of a pod security policy are grandfathered in. Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run.
+> +>To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md new file mode 100644 index 0000000000..a3a955ed02 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md @@ -0,0 +1,19 @@ +--- +title: Assigning Pod Security Policies +weight: 2260 +--- + +_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). + +## Adding a Default Pod Security Policy + +When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. + +>**Prerequisite:** +>Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). +>**Note:** +>For security purposes, we recommend assigning a PSP as you create your clusters. + +To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. + +When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md new file mode 100644 index 0000000000..ec5122ad08 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md @@ -0,0 +1,225 @@ +--- +title: Backing up a Cluster +weight: 2045 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +_Available as of v2.2.0_ + +In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. + +Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. + +Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
+ +This section covers the following topics: + +- [How snapshots work](#how-snapshots-work) +- [Configuring recurring snapshots](#configuring-recurring-snapshots) +- [One-time snapshots](#one-time-snapshots) +- [Snapshot backup targets](#snapshot-backup-targets) + - [Local backup target](#local-backup-target) + - [S3 backup target](#s3-backup-target) + - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) + - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) +- [Viewing available snapshots](#viewing-available-snapshots) +- [Safe timestamps](#safe-timestamps) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +# How Snapshots Work + + + + +### Snapshot Components + +When Rancher creates a snapshot, it includes three components: + +- The cluster data in etcd +- The Kubernetes version +- The cluster configuration in the form of the `cluster.yml` + +Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. + +The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. + +It's always recommended to take a new snapshot before any upgrades. + +### Generating the Snapshot from etcd Nodes + +For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. + +The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. + +In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. + +### Snapshot Naming Conventions + +The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. + +When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: + +- `m` stands for manual +- `r` stands for recurring +- `l` stands for local +- `s` stands for S3 + +Some example snapshot names are: + +- c-9dmxz-rl-8b2cx +- c-9dmxz-ml-kr56m +- c-9dmxz-ms-t6bjb +- c-9dmxz-rs-8gxc8 + +### How Restoring from a Snapshot Works + +On restore, the following process is used: + +1. The snapshot is retrieved from S3, if S3 is configured. +2. The snapshot is unzipped (if zipped). +3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. +4. 
The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. +5. The cluster is restored and post-restore actions will be done in the cluster. + + + + +When Rancher creates a snapshot, only the etcd data is included in the snapshot. + +Because the Kubernetes version is not included in the snapshot, there is no option to restore a cluster to a different Kubernetes version. + +It's always recommended to take a new snapshot before any upgrades. + +### Generating the Snapshot from etcd Nodes + +For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. + +The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. + +In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. + +### Snapshot Naming Conventions + +The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. + +When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: + +- `m` stands for manual +- `r` stands for recurring +- `l` stands for local +- `s` stands for S3 + +Some example snapshot names are: + +- c-9dmxz-rl-8b2cx +- c-9dmxz-ml-kr56m +- c-9dmxz-ms-t6bjb +- c-9dmxz-rs-8gxc8 + +### How Restoring from a Snapshot Works + +On restore, the following process is used: + +1. The snapshot is retrieved from S3, if S3 is configured. +2. The snapshot is unzipped (if zipped). +3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. +4. The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. +5. The cluster is restored and post-restore actions will be done in the cluster. + + + + + +# Configuring Recurring Snapshots + +Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. + +By default, [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. + +During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. + +In the **Advanced Cluster Options** section, there are several options available to configure: + +| Option | Description | Default Value| +| --- | ---| --- | +| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. 
Options are either local or in S3 | local| +|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| +| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | +| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | + +# One-Time Snapshots + +In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. + +1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot. + +2. Click the **⋮ > Snapshot Now**. + +**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. + +# Snapshot Backup Targets + +Rancher supports two different backup targets: + +* [Local Target](#local-backup-target) +* [S3 Target](#s3-backup-target) + +### Local Backup Target + +By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. + +### S3 Backup Target + +The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. + +| Option | Description | Required| +|---|---|---| +|S3 Bucket Name| S3 bucket name where backups will be stored| *| +|S3 Region|S3 region for the backup bucket| | +|S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | +|S3 Access Key|S3 access key with permission to access the backup bucket|*| +|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| +| Custom CA Certificate | A custom certificate used to access private S3 backends _Available as of v2.2.5_ || + +### Using a custom CA certificate for S3 + +_Available as of v2.2.5_ + +The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. + +### IAM Support for Storing Snapshots in S3 + +The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: + + - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. + - The cluster etcd nodes must have network access to the specified S3 endpoint. + - The Rancher Server worker node(s) must have an instance role that has read/write to the designated backup bucket. 
+ - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. + + To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +# Viewing Available Snapshots + +The list of all available snapshots for the cluster is available in the Rancher UI. + +1. In the **Global** view, navigate to the cluster that you want to view snapshots. + +2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. + +# Safe Timestamps + +_Available as of v2.3.0_ + +As of v2.2.6, snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. As of Rancher v2.3.0, the option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. + +This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. + +# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 + +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restoring-etcd.md). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md new file mode 100644 index 0000000000..a8e880a3c4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md @@ -0,0 +1,284 @@ +--- +title: Removing Kubernetes Components from Nodes +description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually +weight: 2055 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. + +When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. + +When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, we describe the steps that need to be executed before the node can be added to a cluster again. + +## What Gets Removed? + +When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. 
+ +| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Imported Nodes][4] | +| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | +| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | +| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | +| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | +| Rancher Deployment | ✓ | ✓ | ✓ | | +| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | +| All resources create under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | +| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | + +[1]: cluster-provisioning/rke-clusters/node-pools/ +[2]: cluster-provisioning/rke-clusters/custom-nodes/ +[3]: cluster-provisioning/hosted-kubernetes-clusters/ +[4]: cluster-provisioning/imported-clusters/ + +## Removing a Node from a Cluster by Rancher UI + +When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +## Removing Rancher Components from a Cluster Manually + +When a node is unreachable and removed from the cluster, the automatic cleaning process can't be triggered because the node is unreachable. Please follow the steps below to manually remove the Rancher components. + +>**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. + +### Removing Rancher Components from Imported Clusters + +For imported clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or your can run a script that removes Rancher components from the nodes. Both options make the same deletions. + +After the imported cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was imported into Rancher. + + + + +>**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. + +After you initiate the removal of an imported cluster using the Rancher UI (or API), the following events occur. + +1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. + +1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. 
This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. + +1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. + +**Result:** All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +Rather than cleaning imported cluster nodes using the Rancher UI, you can run a script instead. This functionality is available since `v2.1.0`. + +>**Prerequisite:** +> +>Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. + +1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: + + ``` + chmod +x user-cluster.sh + ``` + +1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. + + If you don't have an air gap environment, skip this step. + +1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): + + >**Tip:** + > + >Add the `-dry-run` flag to preview the script's outcome without making changes. + ``` + ./user-cluster.sh rancher/rancher-agent: + ``` + +**Result:** The script runs. All components listed for imported clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +### Windows Nodes + +To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. + +To run the script, you can use this command in the PowerShell: + +``` +pushd c:\etc\rancher +.\cleanup.ps1 +popd +``` + +**Result:** The node is reset and can be re-added to a Kubernetes cluster. + +### Docker Containers, Images, and Volumes + +Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) + +**To clean all Docker containers, images and volumes:** + +``` +docker rm -f $(docker ps -qa) +docker rmi -f $(docker images -q) +docker volume rm $(docker volume ls -q) +``` + +### Mounts + +Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. + +Mounts | +--------| +`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | +`/var/lib/kubelet` | +`/var/lib/rancher` | + +**To unmount all mounts:** + +``` +for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done +``` + +### Directories and Files + +The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. + +>**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. 
+ +Directories | +--------| +`/etc/ceph` | +`/etc/cni` | +`/etc/kubernetes` | +`/opt/cni` | +`/opt/rke` | +`/run/secrets/kubernetes.io` | +`/run/calico` | +`/run/flannel` | +`/var/lib/calico` | +`/var/lib/etcd` | +`/var/lib/cni` | +`/var/lib/kubelet` | +`/var/lib/rancher/rke/log` | +`/var/log/containers` | +`/var/log/kube-audit` | +`/var/log/pods` | +`/var/run/calico` | + +**To clean the directories:** + +``` +rm -rf /etc/ceph \ + /etc/cni \ + /etc/kubernetes \ + /opt/cni \ + /opt/rke \ + /run/secrets/kubernetes.io \ + /run/calico \ + /run/flannel \ + /var/lib/calico \ + /var/lib/etcd \ + /var/lib/cni \ + /var/lib/kubelet \ + /var/lib/rancher/rke/log \ + /var/log/containers \ + /var/log/kube-audit \ + /var/log/pods \ + /var/run/calico +``` + +### Network Interfaces and Iptables + +The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. + +### Network Interfaces + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. + +Interfaces | +--------| +`flannel.1` | +`cni0` | +`tunl0` | +`caliXXXXXXXXXXX` (random interface names) | +`vethXXXXXXXX` (random interface names) | + +**To list all interfaces:** + +``` +# Using ip +ip address show + +# Using ifconfig +ifconfig -a +``` + +**To remove an interface:** + +``` +ip link delete interface_name +``` + +### Iptables + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. + +Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
+ +Chains | +--------| +`cali-failsafe-in` | +`cali-failsafe-out` | +`cali-fip-dnat` | +`cali-fip-snat` | +`cali-from-hep-forward` | +`cali-from-host-endpoint` | +`cali-from-wl-dispatch` | +`cali-fw-caliXXXXXXXXXXX` (random chain names) | +`cali-nat-outgoing` | +`cali-pri-kns.NAMESPACE` (chain per namespace) | +`cali-pro-kns.NAMESPACE` (chain per namespace) | +`cali-to-hep-forward` | +`cali-to-host-endpoint` | +`cali-to-wl-dispatch` | +`cali-tw-caliXXXXXXXXXXX` (random chain names) | +`cali-wl-to-host` | +`KUBE-EXTERNAL-SERVICES` | +`KUBE-FIREWALL` | +`KUBE-MARK-DROP` | +`KUBE-MARK-MASQ` | +`KUBE-NODEPORTS` | +`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | +`KUBE-SERVICES` | +`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | + +**To list all iptables rules:** + +``` +iptables -L -t nat +iptables -L -t mangle +iptables -L +``` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md new file mode 100644 index 0000000000..e787c544fd --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md @@ -0,0 +1,101 @@ +--- +title: Cloning Clusters +weight: 2035 +aliases: + - /rancher/v2.0-v2.4/en/cluster-provisioning/cloning-clusters/ +--- + +If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. + +Duplication of imported clusters is not supported. + +| Cluster Type | Cloneable? | +|----------------------------------|---------------| +| [Nodes Hosted by Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) | ✓ | +| [Hosted Kubernetes Providers](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | ✓ | +| [Custom Cluster](../../../pages-for-subheaders/use-existing-nodes.md) | ✓ | +| [Imported Cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) | | + +> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. + +## Prerequisites + +Download and install [Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md). Remember to [create an API bearer token](../../../reference-guides/user-settings/api-keys.md) if necessary. + + +## 1. Export Cluster Config + +Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. + +1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. + +1. Enter the following command to list the clusters managed by Rancher. + + + ./rancher cluster ls + + +1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. + +1. Enter the following command to export the configuration for your cluster. 
+ + + ./rancher clusters export + + + **Step Result:** The YAML for a cloned cluster prints to Terminal. + +1. Copy the YAML to your clipboard and paste it in a new file. Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). + +## 2. Modify Cluster Config + +Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. + +> **Note:** As of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) + +1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. + + >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. + + +1. As depicted in the example below, at the `` placeholder, replace your original cluster's name with a unique name (``). If your cloned cluster has a duplicate name, the cluster will not provision successfully. + + ```yml + Version: v3 + clusters: + : # ENTER UNIQUE NAME + dockerRootDir: /var/lib/docker + enableNetworkPolicy: false + rancherKubernetesEngineConfig: + addonJobTimeout: 30 + authentication: + strategy: x509 + authorization: {} + bastionHost: {} + cloudProvider: {} + ignoreDockerVersion: true + ``` + +1. For each `nodePools` section, replace the original nodepool name with a unique name at the `` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. + + ```yml + nodePools: + : + clusterId: do + controlPlane: true + etcd: true + hostnamePrefix: mark-do + nodeTemplateId: do + quantity: 1 + worker: true + ``` + +1. When you're done, save and close the configuration. + +## 3. Launch Cloned Cluster + +Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: + + ./rancher up --file cluster-template.yml + +**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md new file mode 100644 index 0000000000..fbafbeffe4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md @@ -0,0 +1,32 @@ +--- +title: GlusterFS Volumes +weight: 5000 +--- + +> This section only applies to [RKE clusters.](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The logging of the `kubelet` will show: `transport endpoint is not connected`. 
To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. There are two requirements before you can change the cluster configuration: + +- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) +- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) + +``` +docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version +``` + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/usr/bin/systemd-run:/usr/bin/systemd-run" +``` + +After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: + +``` +Detected OS with systemd +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md new file mode 100644 index 0000000000..db37ec5d8c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md @@ -0,0 +1,78 @@ +--- +title: How Persistent Storage Works +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/tasks/workloads/add-persistent-volume-claim +--- + +A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. + +There are two ways to use persistent storage in Kubernetes: + +- Use an existing persistent volume +- Dynamically provision new persistent volumes + +To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. + +For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. 
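+
+For reference, a PVC that requests dynamically provisioned storage might look like the following minimal sketch. This is not taken from Rancher; the names `example-pvc` and `example-storageclass` are placeholders for your own resources:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-pvc            # placeholder name
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi            # minimum capacity the claim asks for
+  # Reference a storage class for dynamic provisioning.
+  # Set storageClassName: "" instead to bind only to a pre-existing PV.
+  storageClassName: example-storageclass
+```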
+
+![Setting Up New and Existing Persistent Storage](/img/rancher-storage.svg)
+
+For more information, refer to the [official Kubernetes documentation on storage.](https://kubernetes.io/docs/concepts/storage/volumes/)
+
+This section covers the following topics:
+
+- [About persistent volume claims](#about-persistent-volume-claims)
+  - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage)
+- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv)
+  - [Binding PVs to PVCs](#binding-pvs-to-pvcs)
+- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class)
+
+# About Persistent Volume Claims
+
+Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workload as a volume so that the workload can claim its specified share of the persistent storage.
+
+To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployed application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred.
+
+Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** (In versions before v2.3.0, the PVCs are in the **Volumes** tab.) You can reuse these PVCs when creating deployments in the future.
+
+### PVCs are Required for Both New and Existing Persistent Storage
+
+A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand.
+
+If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure.
+
+If a workload needs to request new storage, the workload mounts a PVC, which refers to a storage class, which can create a new PV along with its underlying storage infrastructure.
+
+Rancher lets you create as many PVCs within a project as you'd like.
+
+You can mount PVCs to a deployment as you create it, or later, after the deployment is running.
+
+# Setting up Existing Storage with a PVC and PV
+
+Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss.
+
+PVs can represent a physical disk or file system that you host on premises, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk.
+
+Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned.
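+
+As an illustration, a PV that maps to storage you have already provisioned (an NFS export in this sketch) could look like the following. The server address and path are placeholders:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: example-pv             # placeholder name
+spec:
+  capacity:
+    storage: 10Gi              # size of the existing volume
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  nfs:
+    server: nfs.example.com    # placeholder; your existing NFS server
+    path: /nfs                 # placeholder; an export that already exists
+```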
+ +> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. + +### Binding PVs to PVCs + +When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. + +In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PVs that has at least the amount of disk space required by the PVC. + +To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. + +# Provisioning New Storage with a PVC and Storage Class + +Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. + +For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. + +The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. + diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md new file mode 100644 index 0000000000..9d9073b1a1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md @@ -0,0 +1,113 @@ +--- +title: Dynamically Provisioning New Storage in Rancher +weight: 2 +--- + +This section describes how to provision new persistent storage for workloads in Rancher. + +This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. + +To provision new storage for your workloads, follow these steps: + +1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) +2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) +3. 
[Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) + +### Prerequisites + +- To set up persistent storage, the `Manage Volumes` [role](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. +- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](cluster-provisioning/rke-clusters/options/cloud-providers/) +- Make sure your storage provisioner is available to be enabled. + +The following storage provisioners are enabled by default: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.](installation/options/feature-flags/enable-not-default-storage-drivers/) + +### 1. Add a storage class and configure it to use your storage + +These steps describe how to set up a storage class at the cluster level. + +1. Go to the cluster for which you want to dynamically provision persistent storage volumes. + +1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. + +1. Enter a `Name` for your storage class. + +1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. + +1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. + +1. Click `Save`. + +**Result:** The storage class is available to be consumed by a PVC. + +For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). + +### 2. Add a persistent volume claim that refers to the storage class + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the project containing a workload that you want to add a PVC to. + +1. From the main navigation bar, choose **Resources > Workloads.** (In versions before v2.3.0, choose **Workloads** on the main navigation bar.) Then select the **Volumes** tab. Click **Add Volume**. + +1. Enter a **Name** for the volume claim. + +1. Select the namespace of the volume claim. + +1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** + +1. Go to the **Storage Class** drop-down and select the storage class that you created. + +1. Enter a volume **Capacity**. + +1. Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. 
Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 3. Mount the persistent volume claim as a volume for your workload + +Mount PVCs to workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation. + +To attach the PVC to a new workload, + +1. Create a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). +1. For **Workload Type**, select **Stateful set of 1 pod**. +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +To attach the PVC to an existing workload, + +1. Go to the project that has the workload that will have the PVC attached. +1. Go to the workload that will have persistent storage and click **⋮ > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md new file mode 100644 index 0000000000..2c82e06547 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md @@ -0,0 +1,30 @@ +--- +title: iSCSI Volumes +weight: 6000 +--- + +In [Rancher Launched Kubernetes clusters](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. + +Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. 
From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. + +If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: + +| Platform | Package Name | Install Command | +| ------------- | ----------------------- | -------------------------------------- | +| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | +| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | + + +After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/etc/iscsi:/etc/iscsi" + - "/sbin/iscsiadm:/sbin/iscsiadm" +``` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md new file mode 100644 index 0000000000..c21fb887ce --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md @@ -0,0 +1,104 @@ +--- +title: Setting up Existing Storage +weight: 1 +--- + +This section describes how to set up existing persistent storage for workloads in Rancher. + +> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +To set up storage, follow these steps: + +1. [Set up persistent storage.](#1-set-up-persistent-storage) +2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) +3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) +4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-volume-claim-as-a-volume-in-your-workload) + +### Prerequisites + +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +### 1. 
Set up persistent storage + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../provisioning-storage-examples/vsphere-storage.md) [NFS,](../provisioning-storage-examples/nfs-storage.md) or Amazon's [EBS.](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. + +### 2. Add a persistent volume that refers to the persistent storage + +These steps describe how to set up a persistent volume at the cluster level in Kubernetes. + +1. From the cluster view, select **Storage > Persistent Volumes**. + +1. Click **Add Volume**. + +1. Enter a **Name** for the persistent volume. + +1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. + +1. Enter the **Capacity** of your volume in gigabytes. + +1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. + +1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. + +1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. + +1. Click **Save**. + +**Result:** Your new persistent volume is created. + +### 3. Add a persistent volume claim that refers to the persistent volume + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the project containing a workload that you want to add a persistent volume claim to. + +1. Then click the **Volumes** tab and click **Add Volume**. (In versions before v2.3.0, click **Workloads** on the main navigation bar, then **Volumes.**) + +1. Enter a **Name** for the volume claim. + +1. Select the namespace of the workload that you want to add the persistent storage to. + +1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. + +1. 
**Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 4. Mount the persistent volume claim as a volume in your workload + +Mount PVCs to stateful workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation. + +The following steps describe how to assign existing storage to a new workload that is a stateful set: + +1. From the **Project** view, go to the **Workloads** tab. +1. Click **Deploy.** +1. Enter a name for the workload. +1. Next to the **Workload Type** field, click **More Options.** +1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. +1. Choose the namespace where the workload will be deployed. +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +The following steps describe how to assign persistent storage to an existing workload: + +1. From the **Project** view, go to the **Workloads** tab. +1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md new file mode 100644 index 0000000000..fa96276633 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md @@ -0,0 +1,68 @@ +--- +title: NFS Storage +weight: 3054 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ +--- + +Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. + +>**Note:** +> +>- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. 
Instead, skip the rest of this procedure and complete [adding storage](../../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md). +> +>- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. + +>**Recommended:** To simplify the process of managing firewall rules, use NFSv4. + +1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. + +1. Enter the following command: + + ``` + sudo apt-get install nfs-kernel-server + ``` + +1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. + + ``` + mkdir -p /nfs && chown nobody:nogroup /nfs + ``` + - The `-p /nfs` parameter creates a directory named `nfs` at root. + - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. + +1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. + + 1. Open `/etc/exports` using your text editor of choice. + 1. Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster. Follow each address and its accompanying parameters with a single space that is a delimiter. + + ``` + /nfs (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) (rw,sync,no_subtree_check) + ``` + + **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24` + + 1. Update the NFS table by entering the following command: + + ``` + exportfs -ra + ``` + +1. Open the ports used by NFS. + + 1. To find out what ports NFS is using, enter the following command: + + ``` + rpcinfo -p | grep nfs + ``` + 2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049: + + ``` + sudo ufw allow 2049 + ``` + +**Result:** Your NFS server is configured to be used for storage with your Rancher nodes. + +## What's Next? + +Within Rancher, add the NFS server as a storage volume and/or storage class. After adding the server, you can use it for storage for your deployments. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md new file mode 100644 index 0000000000..b5a5788c3c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md @@ -0,0 +1,16 @@ +--- +title: Creating Persistent Storage in Amazon's EBS +weight: 3053 +--- + +This section describes how to set up Amazon's Elastic Block Store in EC2. + +1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.** +1. Click **Create Volume.** +1. 
Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to. +1. Click **Create Volume.** +1. Click **Close.** + +**Result:** Persistent storage has been created. + +For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.](../manage-persistent-storage/set-up-existing-storage.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md new file mode 100644 index 0000000000..84af3dd43c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md @@ -0,0 +1,78 @@ +--- +title: vSphere Storage +weight: 3055 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ +--- + +To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a [persistent volume claim](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/). + +In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) + +- [Prerequisites](#prerequisites) +- [Creating a StorageClass](#creating-a-storageclass) +- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) +- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) +- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) + +### Prerequisites + +In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), the [vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md). + +### Creating a StorageClass + +> **Note:** +> +> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. + +1. From the Global view, open the cluster where you want to provide vSphere storage. +2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. +3. Enter a **Name** for the class. +4. Under **Provisioner**, select **VMWare vSphere Volume**. + + ![](/img/vsphere-storage-class.png) + +5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. +5. Click **Save**. 
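+
+As the note above mentions, the same storage class can also be created with `kubectl`. A minimal sketch might look like this; the class name is a placeholder, and `diskformat` is one of the optional parameters described in the vSphere storage documentation linked above:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: vsphere-example        # placeholder name
+provisioner: kubernetes.io/vsphere-volume
+parameters:
+  diskformat: thin             # optional; thin-provision the backing VMDK
+```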
+ +### Creating a Workload with a vSphere Volume + +1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). +2. For **Workload Type**, select **Stateful set of 1 pod**. +3. Expand the **Volumes** section and click **Add Volume**. +4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload. +5. Assign a **Name** for the claim, ie. `test-volume` and select the vSphere storage class created in the previous step. +6. Enter the required **Capacity** for the volume. Then click **Define**. + + ![](/img/workload-add-volume.png) + +7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`. +8. Click **Launch** to create the workload. + +### Verifying Persistence of the Volume + +1. From the context menu of the workload you just created, click **Execute Shell**. +2. Note the directory at root where the volume has been mounted to (in this case `/persistent`). +3. Create a file in the volume by executing the command `touch //data.txt`. +4. **Close** the shell window. +5. Click on the name of the workload to reveal detail information. +6. Open the context menu next to the Pod in the *Running* state. +7. Delete the Pod by selecting **Delete**. +8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod. +9. Once the replacement pod is running, click **Execute Shell**. +10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /`. Note that the file you created earlier is still present. + + ![workload-persistent-data](/img/workload-persistent-data.png) + +### Why to Use StatefulSets Instead of Deployments + +You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat. + +Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes. + +Even using a deployment resource with just a single replica may result in a deadlock situation while updating the deployment. If the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node. 
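+
+As a rough sketch of the recommended pattern, a StatefulSet can request its own vSphere-backed volume through `volumeClaimTemplates`. The names, image, storage class, and size below are placeholders:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: example-statefulset
+spec:
+  serviceName: example          # placeholder headless service name
+  replicas: 1
+  selector:
+    matchLabels:
+      app: example
+  template:
+    metadata:
+      labels:
+        app: example
+    spec:
+      containers:
+        - name: app
+          image: nginx          # placeholder image
+          volumeMounts:
+            - name: data
+              mountPath: /persistent
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: ["ReadWriteOnce"]       # VMDK-backed volumes only support RWO
+        storageClassName: vsphere-example    # placeholder; your vSphere storage class
+        resources:
+          requests:
+            storage: 5Gi
+```
+
+Because each replica gets its own claim, scaling the StatefulSet does not require multiple pods to share a single `ReadWriteOnce` volume.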
+ +### Related Links + +- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) +- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md new file mode 100644 index 0000000000..06cd2b0ac0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md @@ -0,0 +1,580 @@ +--- +title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups +weight: 1 +--- + +This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. + +- [Prerequisites](#prerequisites) +- [1. Create a Custom Cluster](#1-create-a-custom-cluster) +- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) +- [3. Deploy Nodes](#3-deploy-nodes) +- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) + - [Parameters](#parameters) + - [Deployment](#deployment) +- [Testing](#testing) + - [Generating Load](#generating-load) + - [Checking Scale](#checking-scale) + +# Prerequisites + +These elements are required to follow this guide: + +* The Rancher server is up and running +* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles + +### 1. Create a Custom Cluster + +On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: + +* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag +* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag +* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster + + ```sh + sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum + ``` + +### 2. Configure the Cloud Provider + +On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. + +1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. + * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:DescribeTags", + "autoscaling:DescribeLaunchConfigurations", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. + * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + + * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` + * Security group: 
`K8sMasterSg` More info at[RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--etcd --controlplane" + + sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. + * IAM profile: Provides cloud_provider worker integration. + This profile is called `K8sWorkerProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } + ] + } + ``` + + * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` + * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--worker" + + sudo docker run -d --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +More info is at [RKE clusters on AWS](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) + +### 3. Deploy Nodes + +Once we've configured AWS, let's create VMs to bootstrap our cluster: + +* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md) + * IAM role: `K8sMasterRole` + * Security group: `K8sMasterSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` + +* worker: Define an ASG on EC2 with the following settings: + * Name: `K8sWorkerAsg` + * IAM role: `K8sWorkerRole` + * Security group: `K8sWorkerSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` + * Instances: + * minimum: 2 + * desired: 2 + * maximum: 10 + +Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. + +### 4. Install Cluster-autoscaler + +At this point, we should have rancher cluster up and running. 
We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. + +#### Parameters + +This table shows cluster-autoscaler parameters for fine tuning: + +| Parameter | Default | Description | +|---|---|---| +|cluster-name|-|Autoscaled cluster name, if available| +|address|:8085|The address to expose Prometheus metrics| +|kubernetes|-|Kubernetes master location. Leave blank for default| +|kubeconfig|-|Path to kubeconfig file with authorization and master location information| +|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| +|namespace|"kube-system"|Namespace in which cluster-autoscaler run| +|scale-down-enabled|true|Should CA scale down the cluster| +|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| +|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| +|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| +|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| +|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| +|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| +|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| +|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| +|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| +|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| +|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| +|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +cloud-provider|-|Cloud provider type| +|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. 
Set to 0 to turn off such tainting| +|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| +|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| +|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| +|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| +|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| +|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| +|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| +|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: `::`| +|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`| +|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| +|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| +|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| +|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| +|write-status-configmap|true|Should CA write status information to a configmap| +|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| +|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| +|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| +|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| +|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| +|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| +|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| +|regional|false|Cluster is regional| +|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| +|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| +|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| +|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| +|profiling|false|Is debug/pprof endpoint enabled| + +#### Deployment + +Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: + + +```yml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list", "get", "update"] + - apiGroups: [""] + resources: + - "pods" + - "services" + - "replicationcontrollers" + - "persistentvolumeclaims" + - "persistentvolumes" + verbs: ["watch", "list", "get"] + - apiGroups: ["extensions"] + resources: ["replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch", "list"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resourceNames: ["cluster-autoscaler"] + resources: ["leases"] + verbs: ["get", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create","list","watch"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] + verbs: ["delete", "get", "update", "watch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler 
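+# The subjects below bind this Role to the cluster-autoscaler ServiceAccount in kube-system.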
+subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8085' + spec: + serviceAccountName: cluster-autoscaler + tolerations: + - effect: NoSchedule + operator: "Equal" + value: "true" + key: node-role.kubernetes.io/controlplane + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + containers: + - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + imagePullPolicy: "Always" + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" + +``` + +Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): + +```sh +kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml +``` + +**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) + +# Testing + +At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. + +### Generating Load + +We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or the replica count to be sure you exhaust the Kubernetes cluster's resources:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: hello-world
+  name: hello-world
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: hello-world
+  strategy:
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 0
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: hello-world
+    spec:
+      containers:
+      - image: rancher/hello-world
+        imagePullPolicy: Always
+        name: hello-world
+        ports:
+        - containerPort: 80
+          protocol: TCP
+        resources:
+          limits:
+            cpu: 1000m
+            memory: 1024Mi
+          requests:
+            cpu: 1000m
+            memory: 1024Mi
+```
+
+Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead):
+
+```
+kubectl -n default apply -f test-deployment.yaml
+```
+
+### Checking Scale
+
+Once the Kubernetes cluster's resources are exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should continue scaling up until all pods are scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod.
+
+Once scale-up is verified, check scale-down. To do so, reduce the replica count on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod.
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md
new file mode 100644
index 0000000000..a4951bf68a
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md
@@ -0,0 +1,232 @@
+---
+title: Nodes and Node Pools
+weight: 2030
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available.
+
+> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters](../../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml). 
+ +This section covers the following topics: + +- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) + - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) + - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) + - [Imported nodes](#imported-nodes) +- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) +- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) +- [Deleting a node](#deleting-a-node) +- [Scaling nodes](#scaling-nodes) +- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) +- [Cordoning a node](#cordoning-a-node) +- [Draining a node](#draining-a-node) + - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) + - [Grace period](#grace-period) + - [Timeout](#timeout) + - [Drained and cordoned state](#drained-and-cordoned-state) +- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) + +# Node Options Available for Each Cluster Creation Option + +The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. + +| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Imported Nodes][4] | Description | +| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | ------------------------------------------------------------------ | +| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable. | +| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | | Marks the node as unschedulable _and_ evicts all pods. | +| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | | Enter a custom name, description, label, or taints for a node. | +| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | | View API data. | +| [Delete](#deleting-a-node) | ✓ | ✓ | | | Deletes defective nodes from the cluster. | +| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | Download SSH key for in order to SSH into the node. | +| [Node Scaling](#scaling-nodes) | ✓ | | | | Scale the number of nodes in the node pool up or down. | + +[1]: cluster-provisioning/rke-clusters/node-pools/ +[2]: cluster-provisioning/rke-clusters/custom-nodes/ +[3]: cluster-provisioning/hosted-kubernetes-clusters/ +[4]: cluster-provisioning/imported-clusters/ + +### Nodes Hosted by an Infrastructure Provider + +Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +Clusters provisioned using [one of the node pool options](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) can be scaled up or down if the node pool is edited. + +A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. 
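+
+As an illustration only, a node pool's scale can also be adjusted outside the UI through Rancher's API. The endpoint path, token format and `quantity` field in the sketch below are assumptions about the v3 API rather than part of this guide, and the IDs are placeholders; editing the node pool in the Rancher UI is the documented way to scale it.
+
+```
+# Hypothetical sketch: scale a node pool to 5 nodes through the v3 API.
+RANCHER_URL=https://rancher.example.com   # placeholder server URL
+TOKEN="token-xxxxx:secret"                 # placeholder API bearer token
+
+# Fetch the node pool object, set its quantity, and send it back.
+curl -sk -H "Authorization: Bearer $TOKEN" "$RANCHER_URL/v3/nodePools/<nodepool-id>" \
+  | jq '.quantity = 5' \
+  | curl -sk -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+      -d @- "$RANCHER_URL/v3/nodePools/<nodepool-id>"
+```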
+ +Rancher uses [node templates](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. + +### Nodes Provisioned by Hosted Kubernetes Providers + +Options for managing nodes [hosted by a Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. + +### Imported Nodes + +Although you can deploy workloads to an [imported cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. + +# Managing and Editing Individual Nodes + +Editing a node lets you: + +* Change its name +* Change its description +* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + +To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). + +# Viewing a Node in the Rancher API + +Select this option to view the node's [API endpoints](../../../pages-for-subheaders/about-the-api.md). + +# Deleting a Node + +Use **Delete** to remove defective nodes from the cloud provider. + +When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) + +>**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. + +# Scaling Nodes + +For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) by using the scale controls. This option isn't available for other cluster types. + +# SSH into a Node Hosted by an Infrastructure Provider + +For [nodes hosted by an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. + +1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. + +1. Find the node that you want to remote into. Select **⋮ > Download Keys**. + + **Step Result:** A ZIP file containing files used for SSH is downloaded. + +1. Extract the ZIP file to any location. + +1. Open Terminal. Change your location to the extracted ZIP file. + +1. Enter the following command: + + ``` + ssh -i id_rsa root@ + ``` + +# Cordoning a Node + +_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power back on and make the node schedulable again by uncordoning it. 
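+
+For reference, cordoning and uncordoning through the Rancher UI correspond to the standard `kubectl` commands below (shown as a sketch; the node name is a placeholder):
+
+```
+# Mark the node unschedulable before maintenance.
+kubectl cordon <node-name>
+
+# Make the node schedulable again once maintenance is done.
+kubectl uncordon <node-name>
+```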
+ +# Draining a Node + +_Draining_ is the process of first cordoning the node, and then evicting all its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption. + +- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. + +- For pods with no replica set, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. + +You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, the nodes are evaluated for conditions they must meet to be drained, and then (if it meets the conditions) the node evicts its pods. + +However, you can override the conditions draining when you initiate the drain. You're also given an opportunity to set a grace period and timeout value. + +### Aggressive and Safe Draining Options + +The node draining options are different based on your version of Rancher. + + + + +There are two drain modes: aggressive and safe. + +- **Aggressive Mode** + + In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. + + Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods. + +- **Safe Mode** + + If a node has standalone pods or ephemeral data it will be cordoned but not drained. + + + + +The following list describes each drain option: + +- **Even if there are pods not managed by a ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet** + + These types of pods won't get rescheduled to a new node, since they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods. Kubernetes forces you to choose this option (which will delete/evict these pods) or drain won't proceed. + +- **Even if there are DaemonSet-managed pods** + + Similar to above, if you have any daemonsets, drain would proceed only if this option is selected. Even when this option is on, pods won't be deleted since they'll immediately be replaced. On startup, Rancher currently has a few daemonsets running by default in the system, so this option is turned on by default. + +- **Even if there are pods using emptyDir** + + If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Similar to the first option, Kubernetes expects the implementation to decide what to do with these pods. Choosing this option will delete these pods. + + + + +### Grace Period + +The timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. For example, when pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If negative, the default value specified in the pod will be used. + +### Timeout + +The amount of time drain should continue to wait before giving up. 
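+
+For comparison, the drain options described above map roughly onto the flags of the standard `kubectl drain` command. The sketch below is illustrative only; the node name and values are placeholders:
+
+```
+# Sketch: cordon the node and evict its pods, allowing unmanaged pods (--force),
+# DaemonSet-managed pods (--ignore-daemonsets) and pods using emptyDir
+# (--delete-local-data), with a 30-second grace period and a 2-minute timeout.
+kubectl drain <node-name> \
+  --force \
+  --ignore-daemonsets \
+  --delete-local-data \
+  --grace-period=30 \
+  --timeout=2m
+```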
+ +>**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12. + +### Drained and Cordoned State + +If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node. + +If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain when the node is in this state, which will stop the drain process and change the node's state to `cordoned`. + +Once drain successfully completes, the node will be in a state of `drained`. You can then power off or delete the node. + +>**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). + +# Labeling a Node to be Ignored by Rancher + +_Available as of 2.3.3_ + +Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. + +Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. + +In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. + +You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. + +> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. + +### Labeling Nodes to be Ignored with the Rancher UI + +To add a node that is ignored by Rancher, + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `ignore-node-name` setting and click **⋮ > Edit.** +1. Enter a name that Rancher will use to ignore nodes. All nodes with this name will be ignored. +1. Click **Save.** + +**Result:** Rancher will not wait to register nodes with this name. In the UI, the node will displayed with a grayed-out status. The node is still part of the cluster and can be listed with `kubectl`. + +If the setting is changed afterward, the ignored nodes will continue to be hidden. + +### Labeling Nodes to be Ignored with kubectl + +To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: + +``` +cattle.rancher.io/node-status: ignore +``` + +**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. + +If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. + +If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. + +If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings under `ignore-node-name`. 
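+
+For example, the label from this section can be applied to an existing node with `kubectl` (a sketch; the node name is a placeholder):
+
+```
+# Add the ignore label to an existing node.
+kubectl label node <node-name> cattle.rancher.io/node-status=ignore
+
+# Verify that the label was applied.
+kubectl get node <node-name> --show-labels
+```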
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md new file mode 100644 index 0000000000..75a1d71b2f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md @@ -0,0 +1,206 @@ +--- +title: Projects and Kubernetes Namespaces with Rancher +description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces +weight: 2032 +aliases: + - /rancher/v2.0-v2.4/en/concepts/projects/ + - /rancher/v2.0-v2.4/en/tasks/projects/ + - /rancher/v2.0-v2.4/en/tasks/projects/create-project/ + - /rancher/v2.0-v2.4/en/tasks/projects/create-project/ +--- + +A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. + +A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +This section describes how projects and namespaces work with Rancher. It covers the following topics: + +- [About namespaces](#about-namespaces) +- [About projects](#about-projects) + - [The cluster's default project](#the-cluster-s-default-project) + - [The system project](#the-system-project) +- [Project authorization](#project-authorization) +- [Pod security policies](#pod-security-policies) +- [Creating projects](#creating-projects) +- [Switching between clusters and projects](#switching-between-clusters-and-projects) + +# About Namespaces + +A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) + +> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. + +Namespaces provide the following functionality: + +- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. +- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. + +You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. 
+ +You can assign the following resources directly to namespaces: + +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) + +To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. + +For more information on creating and moving namespaces, see [Namespaces](../manage-projects/manage-namespaces.md). + +### Role-based access control issues with namespaces and kubectl + +Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. + +This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. + +If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](../manage-projects/manage-namespaces.md) to ensure that you will have permission to access the namespace. + +If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. + +# About Projects + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. + +You can use projects to perform actions such as: + +- Assign users to a group of namespaces (i.e., [project membership](k8s-in-rancher/projects-and-namespaces/project-members)). +- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). +- Assign resources to the project. +- Assign Pod Security Policies. + +When you create a cluster, two projects are automatically created within it: + +- [Default Project](#the-cluster-s-default-project) +- [System Project](#the-system-project) + +### The Cluster's Default Project + +When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. 
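+
+As background, Rancher records the project that a namespace belongs to in an annotation on the namespace itself. The annotation key and ID format in the sketch below are assumptions based on Rancher's conventions, not an official interface; the namespace name is a placeholder:
+
+```
+# Sketch: show which project a namespace is assigned to (format: <cluster-id>:<project-id>).
+kubectl get namespace <namespace-name> \
+  -o jsonpath='{.metadata.annotations.field\.cattle\.io/projectId}'
+```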
This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. + +If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. + +If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. + +### The System Project + +_Available as of v2.0.7_ + +When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. + +To open it, open the **Global** menu, and then select the `system` project for your cluster. + +The `system` project: + +- Is automatically created when you provision a cluster. +- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. +- Allows you to add more namespaces or move its namespaces to other projects. +- Cannot be deleted because it's required for cluster operations. + +>**Note:** In clusters where both: +> +> - The Canal network plug-in is in use. +> - The Project Network Isolation option is enabled. +> +>The `system` project overrides the Project Network Isolation option so that it can communicate with other projects, collect logs, and check health. + +# Project Authorization + +Standard users are only authorized for project access in two situations: + +- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. +- Standard users can access projects that they create themselves. + +# Pod Security Policies + +Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policluster-admin/pod-security-policy/) at the [project level](../manage-projects/manage-pod-security-policies.md) in addition to the [cluster level.](../pod-security-policy) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. + +# Creating Projects + +This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. + +1. [Name a new project.](#1-name-a-new-project) +2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) +3. [Recommended: Add project members.](#3-recommended-add-project-members) +4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) + +### 1. Name a New Project + +1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. + +1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. + +1. Enter a **Project Name**. + +### 2. Optional: Select a Pod Security Policy + +This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +Assigning a PSP to a project will: + +- Override the cluster's default PSP. +- Apply the PSP to the project. +- Apply the PSP to any namespaces you add to the project later. + +### 3. Recommended: Add Project Members + +Use the **Members** section to provide other users with project access and roles. 
+ +By default, your user is added as the project `Owner`. + +>**Notes on Permissions:** +> +>- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. +> +>- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. +> +>- Choose `Custom` to create a custom role on the fly: [Custom Project Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#custom-project-roles). + +To add members: + +1. Click **Add Member**. +1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. +1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + +### 4. Optional: Add Resource Quotas + +_Available as of v2.1.0_ + +Resource quotas limit the resources that a project (and its namespaces) can consume. For more information, see [Resource Quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas). + +To add a resource quota, + +1. Click **Add Quota**. +1. Select a Resource Type. For more information, see [Resource Quotas.](k8s-in-rancher/projects-and-namespaces/resource-quotas/). +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. +1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit](../../../pages-for-subheaders/manage-project-resource-quotas.md) Note: This option is available as of v2.2.0. +1. Click **Create**. + +**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. + +| Field | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------- | +| Project Limit | The overall resource limit for the project. | +| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. | + +# Switching between Clusters and Projects + +To switch between clusters and projects, use the **Global** drop-down available in the main menu. + +![Global Menu](/img/global-menu.png) + +Alternatively, you can switch between projects and clusters using the main menu. 
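+
+As an illustration, a **Namespace Default Limit** behaves like a native Kubernetes `ResourceQuota` applied to each namespace in the project. The manifest below is a hedged sketch of that equivalent object with placeholder names and values, not the exact resource Rancher generates:
+
+```yaml
+# Sketch: the namespace-level effect of a project quota limiting CPU and memory.
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: default-quota        # placeholder name
+  namespace: my-namespace    # one namespace in the project
+spec:
+  hard:
+    limits.cpu: "500m"       # Namespace Default Limit for CPU
+    limits.memory: 512Mi     # Namespace Default Limit for memory
+```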
+ +- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. +- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md new file mode 100644 index 0000000000..7aeb7f26a9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md @@ -0,0 +1,116 @@ +--- +title: Restoring a Cluster from Backup +weight: 2050 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +_Available as of v2.2.0_ + +etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. + +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](backing-up-etcd.md#configuring-recurring-snapshots), but [one-time snapshots](backing-up-etcd.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). + +As of Rancher v2.4.0, clusters can also be restored to a prior Kubernetes version and cluster configuration. + +This section covers the following topics: + +- [Viewing Available Snapshots](#viewing-available-snapshots) +- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) +- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +## Viewing Available Snapshots + +The list of all available snapshots for the cluster is available. + +1. In the **Global** view, navigate to the cluster that you want to view snapshots. + +2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. + +## Restoring a Cluster from a Snapshot + +If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. + +Restores changed in Rancher v2.4.0. + + + + +Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. 
+ +When rolling back to a prior Kubernetes version, the [upgrade strategy options](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. + +> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](backing-up-etcd.md#configuring-recurring-snapshots) + +1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. + +2. Click the **⋮ > Restore Snapshot**. + +3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. + +4. In the **Restoration Type** field, choose one of the restore options described above. + +5. Click **Save**. + +**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. + + + + +> **Prerequisites:** +> +> - Make sure your etcd nodes are healthy. If you are restoring a cluster with unavailable etcd nodes, it's recommended that all etcd nodes are removed from Rancher before attempting to restore. For clusters in which Rancher used node pools to provision [nodes in an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), new etcd nodes will automatically be created. For [custom clusters](../../../pages-for-subheaders/use-existing-nodes.md), please ensure that you add new etcd nodes to the cluster. +> - To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](backing-up-etcd.md#configuring-recurring-snapshots) + +1. In the **Global** view, navigate to the cluster that you want to restore from a snapshot. + +2. Click the **⋮ > Restore Snapshot**. + +3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. + +4. Click **Save**. + +**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. + + + + +## Recovering etcd without a Snapshot + +If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: + +1. Keep only one etcd node in the cluster by removing all other etcd nodes. + +2. On the single remaining etcd node, run the following command: + + ``` + $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd + ``` + + This command outputs the running command for etcd, save this command to use later. + +3. Stop the etcd container that you launched in the previous step and rename it to `etcd-old`. + + ``` + $ docker stop etcd + $ docker rename etcd etcd-old + ``` + +4. Take the saved command from Step 2 and revise it: + + - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains. + - Add `--force-new-cluster` to the end of the command. + +5. Run the revised command. + +6. 
After the single nodes is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster](../../../pages-for-subheaders/use-existing-nodes.md) and you want to reuse an old node, you are required to [clean up the nodes](faq/cleaning-cluster-nodes/) before attempting to add them back into a cluster. + +# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 + +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restoring-etcd.md). diff --git a/content/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-admin/certificate-rotation/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md new file mode 100644 index 0000000000..924c39d151 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md @@ -0,0 +1,55 @@ +--- +title: Adding Users to Projects +weight: 2505 +aliases: + - /rancher/v2.0-v2.4/en/tasks/projects/add-project-members/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/project-members/ +--- + +If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. + +You can add members to a project as it is created, or add them to an existing project. + +>**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members](cluster-provisioning/cluster-members/) instead. + +### Adding Members to a New Project + +You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.](k8s-in-rancher/projects-and-namespaces/) + +### Adding Members to an Existing Project + +Following project creation, you can add users as project members so that they can access its resources. + +1. From the **Global** view, open the project that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the project. + + If external authentication is configured: + + - Rancher returns users from your external authentication source as you type. + + - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. + +1. Assign the user or group **Project** roles. 
+ + [What are Project Roles?](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + >**Notes:** + > + >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + > + >- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + > + >- For `Custom` roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + > - To remove roles from the list, [Lock/Unlock Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + +**Result:** The chosen users are added to the project. + +- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md new file mode 100644 index 0000000000..34b2d14a61 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md @@ -0,0 +1,20 @@ +--- +title: Rancher's CI/CD Pipelines +description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users +weight: 4000 +aliases: + - /rancher/v2.0-v2.4/en/concepts/ci-cd-pipelines/ + - /rancher/v2.0-v2.4/en/tasks/pipelines/ + - /rancher/v2.0-v2.4/en/tools/pipelines/configurations/ +--- +Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +For details, refer to the [pipelines](k8s-in-rancher/pipelines) section. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md new file mode 100644 index 0000000000..a8c6b494af --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -0,0 +1,68 @@ +--- +title: Namespaces +weight: 2520 +--- + +Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. + +Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. + +Resources that you can assign directly to namespaces include: + +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](k8s-in-rancher/volumes-and-storage/persistent-volume-claims/) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) + +To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. + +> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace. + + +### Creating Namespaces + +Create a new namespace to isolate apps and resources in a project. + +>**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads](../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md), [certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md), [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md), etc.) you can create a namespace on the fly. + +1. From the **Global** view, open the project where you want to create a namespace. + + >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well. + +1. From the main menu, select **Namespace**. The click **Add Namespace**. + +1. 
**Optional:** If your project has [Resource Quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas) in effect, you can override the default resource **Limits** (which place a cap on the resources that the namespace can consume).
+
+1. Enter a **Name** and then click **Create**.
+
+**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace.
+
+### Moving Namespaces to Another Project
+
+Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application.
+
+1. From the **Global** view, open the cluster that contains the namespace you want to move.
+
+1. From the main menu, select **Projects/Namespaces**.
+
+1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at once.
+
+   >**Notes:**
+   >
+   >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking.
+   >- You cannot move a namespace into a project that already has a [resource quota](k8s-in-rancher/projects-and-namespaces/resource-quotas/) configured.
+   >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace.
+
+1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**.
+
+**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attaches resources from the new project.
+
+### Editing Namespace Resource Quotas
+
+You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources.
+
+For more information, see how to [edit namespace resource quotas](project-admin//resource-quotas/override-namespace-default/).
\ No newline at end of file
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
new file mode 100644
index 0000000000..1eb528cbbe
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md
@@ -0,0 +1,31 @@
+---
+title: Pod Security Policies
+weight: 5600
+---
+
+> These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md).
+
+You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation.
+
+### Prerequisites
+
+- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md).
+- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster](../manage-clusters/add-a-pod-security-policy.md).
+
+### Applying a Pod Security Policy
+
+1. 
From the **Global** view, find the cluster containing the project you want to apply a PSP to. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. +1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. + Assigning a PSP to a project will: + + - Override the cluster's default PSP. + - Apply the PSP to the project. + - Apply the PSP to any namespaces you add to the project later. + +1. Click **Save**. + +**Result:** The PSP is applied to the project and any namespaces added to the project. + +>**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md new file mode 100644 index 0000000000..ff0fa52c3f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -0,0 +1,41 @@ +--- +title: How Resource Quotas Work in Rancher Projects +weight: 1 +--- + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. + +In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. + +In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. + +Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace +![Native Kubernetes Resource Quota Implementation](/img/kubernetes-resource-quota.svg) + +Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. + +The resource quota includes two limits, which you set while creating or editing a project: + + +- **Project Limits:** + + This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. 
+ +- **Namespace Default Limits:** + + This value is the default resource limit available for each namespace. When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you override it. + +In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. + +Rancher: Resource Quotas Propagating to Each Namespace +![Rancher Resource Quota Implementation](/img/rancher-resource-quota.png) + +Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. + +The following table explains the key differences between the two quota types. + +| Rancher Resource Quotas | Kubernetes Resource Quotas | +| ---------------------------------------------------------- | -------------------------------------------------------- | +| Applies to projects and namespace. | Applies to namespaces only. | +| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | +| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md new file mode 100644 index 0000000000..41cc2304e2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -0,0 +1,34 @@ +--- +title: Overriding the Default Limit for a Namespace +weight: 2 +--- + +Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. + +In the diagram below, the Rancher administrator has a resource quota in effect for their project. However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. 
Therefore, the administrator [raises the namespace limits](k8s-in-rancher/projects-and-namespaces/) for `Namespace 3` so that the namespace can access more resources. + +Namespace Default Limit Override +![Namespace Default Limit Override](/img/rancher-resource-quota-override.svg) + +How to: [Editing Namespace Resource Quotas](k8s-in-rancher/projects-and-namespaces/) + +### Editing Namespace Resource Quotas + +If there is a [resource quota](k8s-in-rancher/projects-and-namespaces/resource-quotas) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. + +1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. + +1. From the main menu, select **Projects/Namespaces**. + +1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. + +1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. + + For more information about each **Resource Type**, see [Resource Quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas/). + + >**Note:** + > + >- If a resource quota is not configured for the project, these options will not be available. + >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. + +**Result:** Your override is applied to the namespace's resource quota. diff --git a/content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/quota-type-reference/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/project-admin/resource-quotas/quota-type-reference/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md new file mode 100644 index 0000000000..021065930c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -0,0 +1,43 @@ +--- +title: Setting Container Default Resource Limits +weight: 3 +--- + +_Available as of v2.2.0_ + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. + +To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
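+
+Rancher manages this setting through the UI, but conceptually a namespace-level container default corresponds to a Kubernetes [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/) object. The sketch below is illustrative only; the namespace name and values are placeholders, not output generated by Rancher.
+
+```
+# Illustrative sketch: a container default resource limit expressed as a LimitRange
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: container-default-limit
+  namespace: mynamespace      # placeholder namespace
+spec:
+  limits:
+  - type: Container
+    default:                  # maps to CPU Limit / Memory Limit
+      cpu: 500m
+      memory: 256Mi
+    defaultRequest:           # maps to CPU Reservation / Memory Reservation
+      cpu: 100m
+      memory: 128Mi
+```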
+ +### Editing the Container Default Resource Limit + +_Available as of v2.2.0_ + +Edit [container default resource limit](k8s-in-rancher/projects-and-namespaces/resource-quotas/) when: + +- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. +- You want to edit the default container resource limit. + +1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to edit the container default resource limit. From that project, select **⋮ > Edit**. +1. Expand **Container Default Resource Limit** and edit the values. + +### Resource Limit Propagation + +When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. + +> **Note:** Before v2.2.0, you could not launch catalog applications that did not have any limits set. With v2.2.0, you can set a default container resource limit on a project and launch any catalog applications. + +Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. + +### Container Resource Quota Types + +The following resource limits can be configured: + +| Resource Type | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| +| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | +| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | +| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. 
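+
+If the defaults are stored on the namespace as a LimitRange (as sketched earlier on this page), you can confirm what is in effect with `kubectl`; the namespace name below is a placeholder:
+
+```
+# Show the default limits and reservations applied to containers in the namespace
+kubectl -n mynamespace describe limitrange
+```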
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md new file mode 100644 index 0000000000..6e01afb9c0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md @@ -0,0 +1,90 @@ +--- +title: Backing up Rancher Installed with Docker +shortTitle: Docker Installs +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/backups/backups/single-node-backups/ + - /rancher/v2.0-v2.4/en/backups/legacy/backup/single-node-backups/ + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/docker-backups + - /rancher/v2.0-v2.4/en/installation/backups-and-restoration/single-node-backup-and-restoration/ + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/docker-backups/ +--- + + +After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. + +### How to Read Placeholders + +During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run \ + --volumes-from rancher-data- \ + -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher +``` + +In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. + +### Obtaining Placeholder Data + +Get the placeholder data by running: + +``` +docker ps +``` + +Write down or copy this information before starting the [procedure below](#creating-a-backup). + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
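+
+For instance, substituting the example values from this table into the sample command shown at the top of this page (the angle-bracket placeholders become `9-27-18` and `v2.0.5`) gives the following purely illustrative command:
+
+```
+docker run \
+  --volumes-from rancher-data-9-27-18 \
+  -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz /var/lib/rancher
+```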
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. + +### Creating a Backup + +This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. + + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the [name of your Rancher container](#how-to-read-placeholders). + + ``` + docker stop + ``` +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data- rancher/rancher: + ``` + +1. From the data container that you just created (rancher-data-<DATE>), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). Use the following command, replacing each placeholder. + + ``` + docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** A stream of commands runs on the screen. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. + +1. Restart Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker start + ``` + +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs](backups/restorations/single-node-restoration) if you need to restore backup data. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md new file mode 100644 index 0000000000..63856627d6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher.md @@ -0,0 +1,34 @@ +--- +title: Backing up Rancher Installed on a K3s Kubernetes Cluster +shortTitle: K3s Installs +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/backups/backups/k3s-backups + - /rancher/v2.0-v2.4/en/backups/backups/k8s-backups/k3s-backups + - /rancher/v2.0-v2.4/en/backups/legacy/backup/k8s-backups/k3s-backups/ + - /rancher/v2.0-v2.4/en/backups/legacy/backups/k3s-backups + - /rancher/v2.0-v2.4/en/backups/legacy/backup/k3s-backups + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/k3s-backups + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/k3s-backups/ +--- + +When Rancher is installed on a high-availability Kubernetes cluster, we recommend using an external database to store the cluster data. + +The database administrator will need to back up the external database, or restore it from a snapshot or dump. + +We recommend configuring the database to take recurring snapshots. + +### K3s Kubernetes Cluster Data + +One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. + +
+Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
    +![Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) + +### Creating Snapshots and Restoring Databases from Snapshots + +For details on taking database snapshots and restoring your database from them, refer to the official database documentation: + +- [Official MySQL documentation](https://dev.mysql.com/doc/refman/8.0/en/replication-snapshot-method.html) +- [Official PostgreSQL documentation](https://www.postgresql.org/docs/8.3/backup-dump.html) +- [Official etcd documentation](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/recovery.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md new file mode 100644 index 0000000000..700f0ccc06 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -0,0 +1,188 @@ +--- +title: Backing up Rancher Installed on an RKE Kubernetes Cluster +shortTitle: RKE Installs +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/after-installation/k8s-install-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/installation/backups-and-restoration/ha-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/backups/backups/ha-backups + - /rancher/v2.0-v2.4/en/backups/backups/k8s-backups/ha-backups + - /rancher/v2.0-v2.4/en/backups/legacy/backup/k8s-backups/ha-backups/ + - /rancher/v2.0-v2.4/en/backups/legacy/backups/ha-backups + - /rancher/v2.0-v2.4/en/backups/legacy/backup/ha-backups + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/backup/rke-backups + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/backup/rke-backups/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to create backups of your high-availability Rancher install. + +In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. + +
+Cluster Data within an RKE Kubernetes Cluster Running the Rancher Management Server
    +![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) + +# Requirements + +### RKE Version + +The commands for taking `etcd` snapshots are only available in RKE v0.1.7 and later. + +### RKE Config File + +You'll need the RKE config file that you used for Rancher install, `rancher-cluster.yml`. You created this file during your initial install. Place this file in same directory as the RKE binary. + + +# Backup Outline + + +Backing up your high-availability Rancher cluster is process that involves completing multiple tasks. + +1. [Take Snapshots of the `etcd` Database](#1-take-snapshots-of-the-etcd-database) + + Take snapshots of your current `etcd` database using Rancher Kubernetes Engine (RKE). + +1. [Store Snapshot(s) Externally](#2-back-up-local-snapshots-to-a-safe-location) + + After taking your snapshots, export them to a safe location that won't be affected if your cluster encounters issues. + + +# 1. Take Snapshots of the `etcd` Database + +Take snapshots of your `etcd` database. You can use these snapshots later to recover from a disaster scenario. There are two ways to take snapshots: recurringly, or as a one-off. Each option is better suited to a specific use case. Read the short description below each link to know when to use each option. + +- [Option A: Recurring Snapshots](#option-a-recurring-snapshots) + + After you stand up a high-availability Rancher install, we recommend configuring RKE to automatically take recurring snapshots so that you always have a safe restore point available. + +- [Option B: One-Time Snapshots](#option-b-one-time-snapshots) + + We advise taking one-time snapshots before events like upgrades or restore of another snapshot. + +### Option A: Recurring Snapshots + +For all high-availability Rancher installs, we recommend taking recurring snapshots so that you always have a safe restore point available. + +To take recurring snapshots, enable the `etcd-snapshot` service, which is a service that's included with RKE. This service runs in a service container alongside the `etcd` container. You can enable this service by adding some code to `rancher-cluster.yml`. + +**To Enable Recurring Snapshots:** + +The steps to enable recurring snapshots differ based on the version of RKE. + + + + +1. Open `rancher-cluster.yml` with your favorite text editor. +2. Edit the code for the `etcd` service to enable recurring snapshots. Snapshots can be saved in a S3 compatible backend. + + ``` + services: + etcd: + backup_config: + enabled: true # enables recurring etcd snapshots + interval_hours: 6 # time increment between snapshots + retention: 60 # time in days before snapshot purge + # Optional S3 + s3backupconfig: + access_key: "myaccesskey" + secret_key: "myaccesssecret" + bucket_name: "my-backup-bucket" + folder: "folder-name" # Available as of v2.3.0 + endpoint: "s3.eu-west-1.amazonaws.com" + region: "eu-west-1" + custom_ca: |- + -----BEGIN CERTIFICATE----- + $CERTIFICATE + -----END CERTIFICATE----- + ``` +4. Save and close `rancher-cluster.yml`. +5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. +6. Run the following command: + ``` + rke up --config rancher-cluster.yml + ``` + +**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. 
If configured, the snapshots are also uploaded to your S3 compatible backend. + + + + +1. Open `rancher-cluster.yml` with your favorite text editor. +2. Edit the code for the `etcd` service to enable recurring snapshots. + + ``` + services: + etcd: + snapshot: true # enables recurring etcd snapshots + creation: 6h0s # time increment between snapshots + retention: 24h # time increment before snapshot purge + ``` +4. Save and close `rancher-cluster.yml`. +5. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. +6. Run the following command: + ``` + rke up --config rancher-cluster.yml + ``` + +**Result:** RKE is configured to take recurring snapshots of `etcd` on all nodes running the `etcd` role. Snapshots are saved locally to the following directory: `/opt/rke/etcd-snapshots/`. + + + + + +### Option B: One-Time Snapshots + +When you're about to upgrade Rancher or restore it to a previous snapshot, you should snapshot your live image so that you have a backup of `etcd` in its last known state. + +**To Take a One-Time Local Snapshot:** + +1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. + +2. Enter the following command. Replace `` with any name that you want to use for the snapshot (e.g. `upgrade.db`). + + ``` + rke etcd snapshot-save \ + --name \ + --config rancher-cluster.yml + ``` + +**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. + +**To Take a One-Time S3 Snapshot:** + +_Available as of RKE v0.2.0_ + +1. Open **Terminal** and change directory to the location of the RKE binary. Your `rancher-cluster.yml` file must reside in the same directory. + +2. Enter the following command. Replace `` with any name that you want to use for the snapshot (e.g. `upgrade.db`). + + ```shell + rke etcd snapshot-save \ + --config rancher-cluster.yml \ + --name snapshot-name \ + --s3 \ + --access-key S3_ACCESS_KEY \ + --secret-key S3_SECRET_KEY \ + --bucket-name s3-bucket-name \ + --s3-endpoint s3.amazonaws.com \ + --folder folder-name # Available as of v2.3.0 + ``` + +**Result:** RKE takes a snapshot of `etcd` running on each `etcd` node. The file is saved to `/opt/rke/etcd-snapshots`. It is also uploaded to the S3 compatible backend. + +# 2. Back up Local Snapshots to a Safe Location + +> **Note:** If you are using RKE v0.2.0, you can enable saving the backups to a S3 compatible backend directly and skip this step. + +After taking the `etcd` snapshots, save them to a safe location so that they're unaffected if your cluster experiences a disaster scenario. This location should be persistent. + +In this documentation, as an example, we're using Amazon S3 as our safe location, and [S3cmd](http://s3tools.org/s3cmd) as our tool to create the backups. The backup location and tool that you use are ultimately your decision. 
+ +**Example:** + +``` +root@node:~# s3cmd mb s3://rke-etcd-snapshots +root@node:~# s3cmd put /opt/rke/etcd-snapshots/snapshot.db s3://rke-etcd-snapshots/ +``` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md new file mode 100644 index 0000000000..ec80eca402 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md @@ -0,0 +1,75 @@ +--- +title: Restoring Backups—Docker Installs +shortTitle: Docker Installs +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/backups/restorations/single-node-restoration + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/docker-restores + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/docker-restores/ +--- + +If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. + +## Before You Start + +During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from -v $PWD:/backup \ +busybox sh -c "rm /var/lib/rancher/* -rf && \ +tar pzxvf /backup/rancher-data-backup--" +``` + +In this command, `` and `-` are environment variables for your Rancher deployment. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version number for your Rancher backup. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
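+
+As a purely illustrative example, substituting the sample values from this table into the restore command shown above yields:
+
+```
+docker run --volumes-from festive_mestorf -v $PWD:/backup \
+busybox sh -c "rm /var/lib/rancher/* -rf && \
+tar pzxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"
+```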
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Restoring Backups + +Using a [backup](backups/backups/single-node-backups/) that you created earlier, restore Rancher to its last known healthy state. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs](backups/backups/single-node-backups/) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Creating Backups—Docker Installs](backups/backups/single-node-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. + + >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. + + ``` + docker run --volumes-from -v $PWD:/backup \ + busybox sh -c "rm /var/lib/rancher/* -rf && \ + tar pzxvf /backup/rancher-data-backup--.tar.gz" + ``` + + **Step Result:** A series of commands should run. + +1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. + + ``` + docker start + ``` + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. 
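+
+If the UI does not respond right away, you can watch the container start up by following its logs. The container name below is the illustrative example from the table above; use your own container's name:
+
+```
+docker logs -f festive_mestorf
+```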
diff --git a/content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/backups/restore/k3s-restore/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md new file mode 100644 index 0000000000..dd3c28b461 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -0,0 +1,141 @@ +--- +title: Restoring Backups—Kubernetes installs +shortTitle: RKE Installs +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/after-installation/ha-backup-and-restoration/ + - /rancher/v2.0-v2.4/en/backups/restorations/ha-restoration + - /rancher/v2.0-v2.4/en/backups/restorations/k8s-restore/rke-restore + - /rancher/v2.0-v2.4/en/backups/legacy/restore/k8s-restore/rke-restore/ + - /rancher/v2.0-v2.4/en/backups/legacy/restore/rke-restore + - /rancher/v2.0-v2.4/en/backups/v2.0.x-v2.4.x/restore/rke-restore + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/ +--- + +This procedure describes how to use RKE to restore a snapshot of the Rancher Kubernetes cluster. +This will restore the Kubernetes configuration and the Rancher database and state. + +> **Note:** This document covers clusters set up with RKE >= v0.2.x, for older RKE versions refer to the [RKE Documentation](https://rancher.com/docs/rke/latest/en/etcd-snapshots/restoring-from-backup). + +## Restore Outline + + + +- [1. Preparation](#1-preparation) +- [2. Place Snapshot](#2-place-snapshot) +- [3. Configure RKE](#3-configure-rke) +- [4. Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster) + + + +### 1. Preparation + +It is advised that you run the restore from your local host or a jump box/bastion where your cluster yaml, rke statefile, and kubeconfig are stored. You will need [RKE](https://rancher.com/docs/rke/latest/en/installation/) and [kubectl](../../../faq/install-and-configure-kubectl.md) CLI utilities installed locally. + +Prepare by creating 3 new nodes to be the target for the restored Rancher instance. We recommend that you start with fresh nodes and a clean state. For clarification on the requirements, review the [Installation Requirements](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation/requirements/). + +Alternatively you can re-use the existing nodes after clearing Kubernetes and Rancher configurations. This will destroy the data on these nodes. See [Node Cleanup](faq/cleaning-cluster-nodes/) for the procedure. + +You must restore each of your etcd nodes to the same snapshot. Copy the snapshot you're using from one of your nodes to the others before running the `etcd snapshot-restore` command. + +> **IMPORTANT:** Before starting the restore make sure all the Kubernetes services on the old cluster nodes are stopped. 
We recommend powering off the nodes to be sure. + +### 2. Place Snapshot + +As of RKE v0.2.0, snapshots could be saved in an S3 compatible backend. To restore your cluster from the snapshot stored in S3 compatible backend, you can skip this step and retrieve the snapshot in [4. Restore the Database and bring up the Cluster](#4-restore-the-database-and-bring-up-the-cluster). Otherwise, you will need to place the snapshot directly on one of the etcd nodes. + +Pick one of the clean nodes that will have the etcd role assigned and place the zip-compressed snapshot file in `/opt/rke/etcd-snapshots` on that node. + +> **Note:** Because of a current limitation in RKE, the restore process does not work correctly if `/opt/rke/etcd-snapshots` is a NFS share that is mounted on all nodes with the etcd role. The easiest options are to either keep `/opt/rke/etcd-snapshots` as a local folder during the restore process and only mount the NFS share there after it has been completed, or to only mount the NFS share to one node with an etcd role in the beginning. + +### 3. Configure RKE + +Use your original `rancher-cluster.yml` and `rancher-cluster.rkestate` files. If they are not stored in a version control system, it is a good idea to back them up before making any changes. + +``` +cp rancher-cluster.yml rancher-cluster.yml.bak +cp rancher-cluster.rkestate rancher-cluster.rkestate.bak +``` + +If the replaced or cleaned nodes have been configured with new IP addresses, modify the `rancher-cluster.yml` file to ensure the address and optional internal_address fields reflect the new addresses. + +> **IMPORTANT:** You should not rename the `rancher-cluster.yml` or `rancher-cluster.rkestate` files. It is important that the filenames match each other. + +### 4. Restore the Database and bring up the Cluster + +You will now use the RKE command-line tool with the `rancher-cluster.yml` and the `rancher-cluster.rkestate` configuration files to restore the etcd database and bring up the cluster on the new nodes. + +> **Note:** Ensure your `rancher-cluster.rkestate` is present in the same directory as the `rancher-cluster.yml` file before starting the restore, as this file contains the certificate data for the cluster. + +#### Restoring from a Local Snapshot + +When restoring etcd from a local snapshot, the snapshot is assumed to be located on the target node in the directory `/opt/rke/etcd-snapshots`. + +``` +rke etcd snapshot-restore --name snapshot-name --config ./rancher-cluster.yml +``` + +> **Note:** The --name parameter expects the filename of the snapshot without the extension. + +#### Restoring from a Snapshot in S3 + +_Available as of RKE v0.2.0_ + +When restoring etcd from a snapshot located in an S3 compatible backend, the command needs the S3 information in order to connect to the S3 backend and retrieve the snapshot. + +``` +$ rke etcd snapshot-restore --config ./rancher-cluster.yml --name snapshot-name \ +--s3 --access-key S3_ACCESS_KEY --secret-key S3_SECRET_KEY \ +--bucket-name s3-bucket-name --s3-endpoint s3.amazonaws.com \ +--folder folder-name # Available as of v2.3.0 +``` + +#### Options for `rke etcd snapshot-restore` + +S3 specific options are only available for RKE v0.2.0+. 
+ +| Option | Description | S3 Specific | +| --- | --- | ---| +| `--name` value | Specify snapshot name | | +| `--config` value | Specify an alternate cluster YAML file (default: "cluster.yml") [$RKE_CONFIG] | | +| `--s3` | Enabled backup to s3 |* | +| `--s3-endpoint` value | Specify s3 endpoint url (default: "s3.amazonaws.com") | * | +| `--access-key` value | Specify s3 accessKey | *| +| `--secret-key` value | Specify s3 secretKey | *| +| `--bucket-name` value | Specify s3 bucket name | *| +| `--folder` value | Specify s3 folder in the bucket name _Available as of v2.3.0_ | *| +| `--region` value | Specify the s3 bucket location (optional) | *| +| `--ssh-agent-auth` | [Use SSH Agent Auth defined by SSH_AUTH_SOCK](https://rancher.com/docs/rke/latest/en/config-options/#ssh-agent) | | +| `--ignore-docker-version` | [Disable Docker version check](https://rancher.com/docs/rke/latest/en/config-options/#supported-docker-versions) | + +#### Testing the Cluster + +Once RKE completes it will have created a credentials file in the local directory. Configure `kubectl` to use the `kube_config_rancher-cluster.yml` credentials file and check on the state of the cluster. See [Installing and Configuring kubectl](../../../faq/install-and-configure-kubectl.md#configuration) for details. + +#### Check Kubernetes Pods + +Wait for the pods running in `kube-system`, `ingress-nginx` and the `rancher` pod in `cattle-system` to return to the `Running` state. + +> **Note:** `cattle-cluster-agent` and `cattle-node-agent` pods will be in an `Error` or `CrashLoopBackOff` state until Rancher server is up and the DNS/Load Balancer have been pointed at the new cluster. + +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +cattle-system cattle-cluster-agent-766585f6b-kj88m 0/1 Error 6 4m +cattle-system cattle-node-agent-wvhqm 0/1 Error 8 8m +cattle-system rancher-78947c8548-jzlsr 0/1 Running 1 4m +ingress-nginx default-http-backend-797c5bc547-f5ztd 1/1 Running 1 4m +ingress-nginx nginx-ingress-controller-ljvkf 1/1 Running 1 8m +kube-system canal-4pf9v 3/3 Running 3 8m +kube-system cert-manager-6b47fc5fc-jnrl5 1/1 Running 1 4m +kube-system kube-dns-7588d5b5f5-kgskt 3/3 Running 3 4m +kube-system kube-dns-autoscaler-5db9bbb766-s698d 1/1 Running 1 4m +kube-system metrics-server-97bc649d5-6w7zc 1/1 Running 1 4m +kube-system tiller-deploy-56c4cf647b-j4whh 1/1 Running 1 4m +``` + +#### Finishing Up + +Rancher should now be running and available to manage your Kubernetes clusters. +> **IMPORTANT:** Remember to save your updated RKE config (`rancher-cluster.yml`) state file (`rancher-cluster.rkestate`) and `kubectl` credentials (`kube_config_rancher-cluster.yml`) files in a safe place for future maintenance for example in a version control system. 
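+
+As a final sanity check (see "Testing the Cluster" above), you can point `kubectl` at the credentials file RKE wrote to the working directory and confirm the nodes report `Ready`. The path below assumes the file is in your current directory:
+
+```
+export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml
+kubectl get nodes
+```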
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md new file mode 100644 index 0000000000..490094574b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1.md @@ -0,0 +1,77 @@ +--- +title: "Rolling back to v2.0.0-v2.1.5" +weight: 1 +aliases: + - /rancher/v2.x/en/backups/v2.0.x-v2.4.x/restore/rke-restore/v2.0-v2.1/ +--- + +> Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved here and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. + +If you are rolling back to versions in either of these scenarios, you must follow some extra instructions in order to get your clusters working. + +- Rolling back from v2.1.6+ to any version between v2.1.0 - v2.1.5 or v2.0.0 - v2.0.10. +- Rolling back from v2.0.11+ to any version between v2.0.0 - v2.0.10. + +Because of the changes necessary to address [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321), special steps are necessary if the user wants to roll back to a previous version of Rancher where this vulnerability exists. The steps are as follows: + +1. Record the `serviceAccountToken` for each cluster. To do this, save the following script on a machine with `kubectl` access to the Rancher management plane and execute it. You will need to run these commands on the machine where the rancher container is running. Ensure JQ is installed before running the command. The commands will vary depending on how you installed Rancher. + + **Rancher Installed with Docker** + ``` + docker exec kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json + ``` + + **Rancher Installed on a Kubernetes Cluster** + ``` + kubectl get clusters -o json | jq '[.items[] | select(any(.status.conditions[]; .type == "ServiceAccountMigrated")) | {name: .metadata.name, token: .status.serviceAccountToken}]' > tokens.json + ``` + +2. After executing the command a `tokens.json` file will be created. Important! Back up this file in a safe place.** You will need it to restore functionality to your clusters after rolling back Rancher. **If you lose this file, you may lose access to your clusters.** + +3. Rollback Rancher following the [normal instructions](upgrades/rollbacks/). + +4. Once Rancher comes back up, every cluster managed by Rancher (except for Imported clusters) will be in an `Unavailable` state. + +5. Apply the backed up tokens based on how you installed Rancher. + + **Rancher Installed with Docker** + + Save the following script as `apply_tokens.sh` to the machine where the Rancher docker container is running. Also copy the `tokens.json` file created previously to the same directory as the script. 
+ ``` + set -e + + tokens=$(jq .[] -c tokens.json) + for token in $tokens; do + name=$(echo $token | jq -r .name) + value=$(echo $token | jq -r .token) + + docker exec $1 kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" + done + ``` + the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: + ``` + ./apply_tokens.sh + ``` + After a few moments the clusters will go from Unavailable back to Available. + + **Rancher Installed on a Kubernetes Cluster** + + Save the following script as `apply_tokens.sh` to a machine with kubectl access to the Rancher management plane. Also copy the `tokens.json` file created previously to the same directory as the script. + ``` + set -e + + tokens=$(jq .[] -c tokens.json) + for token in $tokens; do + name=$(echo $token | jq -r .name) + value=$(echo $token | jq -r .token) + + kubectl patch --type=merge clusters $name -p "{\"status\": {\"serviceAccountToken\": \"$value\"}}" + done + ``` + Set the script to allow execution (`chmod +x apply_tokens.sh`) and execute the script as follows: + ``` + ./apply_tokens.sh + ``` + After a few moments the clusters will go from `Unavailable` back to `Available`. + +6. Continue using Rancher as normal. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md new file mode 100644 index 0000000000..558e681dc5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/deploy-apps-across-clusters.md @@ -0,0 +1,164 @@ +--- +title: Deploying Applications across Clusters +weight: 13 +aliases: + - /rancher/v2.0-v2.4/en/deploy-across-clusters/multi-cluster-apps +--- + +_Available as of v2.2.0_ + +Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. + +Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. + +After creating a multi-cluster application, you can program a [Global DNS entry](helm-charts-in-rancher/globaldns.md) to make it easier to access the application. 
+ +- [Prerequisites](#prerequisites) +- [Launching a multi-cluster app](#launching-a-multi-cluster-app) +- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) + - [Targets](#targets) + - [Upgrades](#upgrades) + - [Roles](#roles) +- [Application configuration options](#application-configuration-options) + - [Using a questions.yml file](#using-a-questions-yml-file) + - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) + - [Members](#members) + - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) +- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) +- [Multi-cluster application management](#multi-cluster-application-management) +- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) + +# Prerequisites + +To create a multi-cluster app in Rancher, you must have at least one of the following permissions: + +- A [project-member role](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the clusters(s) that include the target project(s) + +# Launching a Multi-Cluster App + +1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. + +2. Find the application that you want to launch, and then click **View Details**. + +3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. + +4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. + +5. Select a **Template Version**. + +6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). + +7. Select the **Members** who can [interact with the multi-cluster application](#members). + +8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. + +7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. + +**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: + +# Multi-cluster App Configuration Options + +Rancher has divided the configuration option for the multi-cluster application into several sections. + +### Targets + +In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. 
+ +### Upgrades + +In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. + +* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. + +* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. + +### Roles + +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications](catalog/launching-apps), that specific user's permissions are used for creation of all workloads/resources that is required by the app. + +For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. + +Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. + +- **Project** - This is the equivalent of a [project member](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), a [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or a [project owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles), then the user is considered to have the appropriate level of permissions. + +- **Cluster** - This is the equivalent of a [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator](../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), then the user is considered to have the appropriate level of permissions. 
+ +When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. + +> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. + +# Application Configuration Options + +For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. + +> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). + +### Using a questions.yml file + +If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. + +### Key Value Pairs for Native Helm Charts + +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository](catalog/custom/), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. + +### Members + +By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. + +1. Find the user that you want to add by typing in the member's name in the **Member** search box. + +2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. + + - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. + + - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). 
Only trusted users should be provided with this access type. + + - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications. + + > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to. + +### Overriding Application Configuration Options for Specific Projects + +The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects. + +1. In the **Answer Overrides** section, click **Add Override**. + +2. For each override, you can select the following: + + - **Scope**: Select which target projects you want to override the answer in the configuration option. + + - **Question**: Select which question you want to override. + + - **Answer**: Enter the answer that you want to be used instead. + +# Upgrading Multi-Cluster App Roles and Projects + +- **Changing Roles on an existing Multi-Cluster app** +The creator and any users added with the access-type "owner" to a multi-cluster app, can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project-owners as described in the installation section for the field `Roles`. + +- **Adding/Removing target projects** +1. The creator and any users added with access-type "owner" to a multi-cluster app, can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on multi-cluster app, in the new projects they want to add. The roles checks are again relaxed for global admins, cluster-owners and project-owners. +2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have with respect to the target project, or the project could have been deleted and hence the caller wants to remove it from targets list. + + +# Multi-Cluster Application Management + +One of the benefits of using a multi-cluster application as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back. + +1. From the **Global** view, choose **Apps** in the navigation bar. + +2. Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options: + + * **Clone**: Creates another multi-cluster application with the same configuration. By using this option, you can easily duplicate a multi-cluster application. + * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members). + * **Rollback**: Rollback your application to a specific version. 
If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the targets(s) affected by the upgrade issue. + +# Deleting a Multi-Cluster Application + +1. From the **Global** view, choose **Apps** in the navigation bar. + +2. Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. + + > **Note:** The applications in the target projects, that are created for a multi-cluster application, cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md new file mode 100644 index 0000000000..fe55d09715 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs.md @@ -0,0 +1,109 @@ +--- +title: Creating Custom Catalogs +weight: 200 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/adding-custom-catalogs/ + - /rancher/v2.0-v2.4/en/catalog/custom/adding + - /rancher/v2.0-v2.4/en/catalog/adding-catalogs + - /rancher/v2.0-v2.4/en/catalog/custom/ + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/adding-catalogs +--- + +Custom catalogs can be added into Rancher at a global scope, cluster scope, or project scope. + +- [Adding catalog repositories](#adding-catalog-repositories) + - [Add custom Git repositories](#add-custom-git-repositories) + - [Add custom Helm chart repositories](#add-custom-helm-chart-repositories) + - [Add private Git/Helm chart repositories](#add-private-git-helm-chart-repositories) +- [Adding global catalogs](#adding-global-catalogs) +- [Adding cluster level catalogs](#adding-cluster-level-catalogs) +- [Adding project level catalogs](#adding-project-level-catalogs) +- [Custom catalog configuration reference](#custom-catalog-configuration-reference) + +# Adding Catalog Repositories + +Adding a catalog is as simple as adding a catalog name, a URL and a branch name. + +**Prerequisite:** An [admin](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) of Rancher has the ability to add or remove catalogs globally in Rancher. + +### Add Custom Git Repositories +The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will use the `master` branch by default. Whenever you add a catalog to Rancher, it will be available immediately. + +### Add Custom Helm Chart Repositories + +A Helm chart repository is an HTTP server that houses one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. + +Helm comes with built-in package server for developer testing (helm serve). 
The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled, or a custom chart repository server hosted using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). + +In Rancher, you can add a custom Helm chart repository with only a catalog name and the URL of the chart repository. + +### Add Private Git/Helm Chart Repositories +_Available as of v2.2.0_ + +Private catalog repositories can be added using credentials such as a Username and Password. You may also want to use an OAuth token if your Git or Helm repository server supports it. + +For more information on private Git/Helm catalogs, refer to the [custom catalog configuration reference.](catalog/catalog-config) + + 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. + 2. Click **Add Catalog**. + 3. Complete the form and click **Create**. + + **Result:** Your catalog is added to Rancher. + +# Adding Global Catalogs + +>**Prerequisites:** In order to manage the [built-in catalogs](catalog/built-in/) or global catalogs, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) role assigned. + + 1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. In versions before v2.2.0, you can select **Catalogs** directly in the navigation bar. + 2. Click **Add Catalog**. + 3. Complete the form. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.](helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +4. Click **Create**. + + **Result**: Your custom global catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [multi-cluster apps](catalog/multi-cluster-apps/) or [applications in any project](catalog/launching-apps/) from this catalog.
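The **Catalog URL** in the form above may point at either a Git repository or a Helm chart repository. For the Helm chart repository case described in [Add Custom Helm Chart Repositories](#add-custom-helm-chart-repositories), the server only needs to expose a repository index plus the packaged chart archives. As a rough sketch (the chart name, versions, and URLs below are placeholders, not values Rancher expects), the `index.yaml` served at the repository URL looks like this:

```yaml
# index.yaml served at the root of the chart repository URL
apiVersion: v1
entries:
  wordpress:                                  # one entry per chart name
    - apiVersion: v1
      name: wordpress
      version: 5.0.1                          # selectable later as the app's Template Version
      appVersion: "5.3.2"
      description: Web publishing platform
      urls:
        - https://charts.example.com/wordpress-5.0.1.tgz
generated: "2020-01-01T00:00:00Z"
```

Rancher synchronizes this index (or clones the Git repository) when the catalog is added, which is why a newly added catalog takes a short time to reach the `Active` state.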
+ +# Adding Cluster Level Catalogs + +_Available as of v2.2.0_ + +>**Prerequisites:** In order to manage cluster scoped catalogs, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Cluster Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) +>- [Custom Cluster Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) with the [Manage Cluster Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-role-reference) role assigned. + +1. From the **Global** view, navigate to the cluster where you want to add custom catalogs. +2. Choose **Tools > Catalogs** in the navigation bar. +3. Click **Add Catalog**. +4. Complete the form. By default, the form will provide the ability to select the `Scope` of the catalog. Since you are adding the catalog from the **Cluster** scope, it is defaulted to `Cluster`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.](helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +5. Click **Create**. + +**Result**: Your custom cluster catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in any project in that cluster](catalog/apps/) from this catalog. + +# Adding Project Level Catalogs + +_Available as of v2.2.0_ + +>**Prerequisites:** In order to manage project scoped catalogs, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Cluster Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) +>- [Project Owner Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) +>- [Custom Project Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) with the [Manage Project Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) role assigned. + +1. From the **Global** view, navigate to the project where you want to add custom catalogs. +2. Choose **Tools > Catalogs** in the navigation bar. +3. Click **Add Catalog**. +4. Complete the form. By default, the form will provide the ability to select the `Scope` of the catalog. Since you are adding the catalog from the **Project** scope, it is defaulted to `Project`. Select the Helm version that will be used to launch all of the apps in the catalog. For more information about the Helm version, refer to [this section.](helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) +5. Click **Create**. + +**Result**: Your custom project catalog is added to Rancher. Once it is in `Active` state, it has completed synchronization and you will be able to start deploying [applications in that project](catalog/apps/) from this catalog. + +# Custom Catalog Configuration Reference + +Refer to [this page](catalog/catalog-config) for more information on configuring custom catalogs. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md new file mode 100644 index 0000000000..9688a359ad --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/built-in.md @@ -0,0 +1,27 @@ +--- +title: Enabling and Disabling Built-in Global Catalogs +weight: 100 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/enabling-default-catalogs/ + - /rancher/v2.0-v2.4/en/catalog/built-in + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/built-in +--- + +Within Rancher, there are default global catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. + +>**Prerequisites:** In order to manage the built-in catalogs or global catalogs, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Catalogs](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions-reference) role assigned. + +1. From the **Global** view, choose **Tools > Catalogs** in the navigation bar. + +2. Toggle the default catalogs that you want to be enabled or disabled: + + - **Library:** The Library Catalog includes charts curated by Rancher. Rancher stores charts in a Git repository to expedite the fetch and update of charts. This catalog features Rancher Charts, which include some [notable advantages](helm-charts/legacy-catalogs/creating-apps/#rancher-charts) over native Helm charts. + - **Helm Stable:** This catalog, which is maintained by the Kubernetes community, includes native [Helm charts](https://helm.sh/docs/chart_template_guide/). This catalog features the largest pool of apps. + - **Helm Incubator:** Similar in user experience to Helm Stable, but this catalog is filled with applications in **beta**. + + **Result**: The chosen catalogs are enabled. Wait a few minutes for Rancher to replicate the catalog charts. When replication completes, you'll be able to see them in any of your projects by selecting **Apps** from the main navigation bar. In versions before v2.2.0, within a project, you can select **Catalog Apps** from the main navigation bar.
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md new file mode 100644 index 0000000000..2549634b41 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config.md @@ -0,0 +1,75 @@ +--- +title: Custom Catalog Configuration Reference +weight: 300 +aliases: + - /rancher/v2.0-v2.4/en/catalog/catalog-config + - /rancher/v2.0-v2.4/en/catalog/catalog-config + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/catalog-config +--- + +Any user can create custom catalogs to add into Rancher. Besides the content of the catalog, users must ensure their catalogs are able to be added into Rancher. + +- [Types of Repositories](#types-of-repositories) +- [Custom Git Repository](#custom-git-repository) +- [Custom Helm Chart Repository](#custom-helm-chart-repository) +- [Catalog Fields](#catalog-fields) +- [Private Repositories](#private-repositories) + - [Using Username and Password](#using-username-and-password) + - [Using an OAuth token](#using-an-oauth-token) + +# Types of Repositories + +Rancher supports adding in different types of repositories as a catalog: + +* Custom Git Repository +* Custom Helm Chart Repository + +# Custom Git Repository + +The Git URL needs to be one that `git clone` [can handle](https://git-scm.com/docs/git-clone#_git_urls_a_id_urls_a) and must end in `.git`. The branch name must be a branch that is in your catalog URL. If no branch name is provided, it will default to use the `master` branch. Whenever you add a catalog to Rancher, it will be available almost immediately. + +# Custom Helm Chart Repository + +A Helm chart repository is an HTTP server that contains one or more packaged charts. Any HTTP server that can serve YAML files and tar files and can answer GET requests can be used as a repository server. + +Helm comes with a built-in package server for developer testing (`helm serve`). The Helm team has tested other servers, including Google Cloud Storage with website mode enabled, S3 with website mode enabled or hosting custom chart repository server using open-source projects like [ChartMuseum](https://github.com/helm/chartmuseum). + +In Rancher, you can add the custom Helm chart repository with only a catalog name and the URL address of the chart repository. + +# Catalog Fields + +When [adding your catalog](catalog/custom/adding/) to Rancher, you'll provide the following information: + + +| Variable | Description | +| -------------------- | ------------- | +| Name | Name for your custom catalog to distinguish the repositories in Rancher | +| Catalog URL | URL of your custom chart repository| +| Use Private Catalog | Selected if you are using a private repository that requires authentication | +| Username (Optional) | Username or OAuth Token | +| Password (Optional) | If you are authenticating using a username, enter the associated password. If you are using an OAuth token, use `x-oauth-basic`. | +| Branch | For a Git repository, the branch name. Default: `master`. For a Helm Chart repository, this field is ignored. | +| Helm version | The Helm version that will be used to deploy all of the charts in the catalog. This field cannot be changed later. 
For more information, refer to the [section on Helm versions.](helm-charts/legacy-catalogs/#catalog-helm-deployment-versions) | + +# Private Repositories + +_Available as of v2.2.0_ + +Private Git or Helm chart repositories can be added into Rancher using either credentials, i.e. `Username` and `Password`. Private Git repositories also support authentication using OAuth tokens. + +### Using Username and Password + +1. When [adding the catalog](catalog/custom/adding/), select the **Use private catalog** checkbox. + +2. Provide the `Username` and `Password` for your Git or Helm repository. + +### Using an OAuth token + +Read [using Git over HTTPS and OAuth](https://github.blog/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/) for more details on how OAuth authentication works. + +1. Create an [OAuth token](https://github.com/settings/tokens) +with `repo` permission selected, and click **Generate token**. + +2. When [adding the catalog](catalog/custom/adding/), select the **Use private catalog** checkbox. + +3. For `Username`, provide the Git generated OAuth token. For `Password`, enter `x-oauth-basic`. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md new file mode 100644 index 0000000000..3fc6132aed --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps.md @@ -0,0 +1,131 @@ +--- +title: Creating Catalog Apps +weight: 400 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/customizing-charts/ + - /rancher/v2.0-v2.4/en/catalog/custom/creating + - /rancher/v2.0-v2.4/en/catalog/custom + - /rancher/v2.0-v2.4/en/catalog/creating-apps + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/creating-apps +--- + +Rancher's catalog service requires any custom catalogs to be structured in a specific format for the catalog service to be able to leverage it in Rancher. + +> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. + +- [Chart types](#chart-types) + - [Helm charts](#helm-charts) + - [Rancher charts](#rancher-charts) +- [Chart directory structure](#chart-directory-structure) +- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) + - [questions.yml](#questions-yml) + - [Min/Max Rancher versions](#min-max-rancher-versions) + - [Question variable reference](#question-variable-reference) +- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) + +# Chart Types + +Rancher supports two different types of charts: Helm charts and Rancher charts. + +### Helm Charts + +Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you'll learn the chart's parameters and then configure them using **Answers**, which are sets of key value pairs. + +The Helm Stable and Helm Incubators are populated with native Helm charts. However, you can also use native Helm charts in Custom catalogs (although we recommend Rancher Charts). + +### Rancher Charts + +Rancher charts mirror native helm charts, although they add two files that enhance user experience: `app-readme.md` and `questions.yaml`. 
Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) + +Advantages of Rancher charts include: + +- **Enhanced revision tracking:** While Helm supports versioned deployments, Rancher adds tracking and revision history to display changes between different versions of the chart. +- **Streamlined application launch:** Rancher charts add simplified chart descriptions and configuration forms to make catalog application deployment easy. Rancher users need not read through the entire list of Helm variables to understand how to launch an application. +- **Application resource management:** Rancher tracks all the resources created by a specific application. Users can easily navigate to and troubleshoot on a page listing all the workload objects used to power an application. + +# Chart Directory Structure + +The following table demonstrates the directory structure for a Rancher Chart. The `charts` directory is the top level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. This information is helpful when customizing charts for a custom catalog. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. + +``` +/ + │ + ├── charts/ + │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name + │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI + │ │ │ ├── Chart.yaml # Required Helm chart information file. + │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* + │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. + │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. + │ │ │ ├── values.yml # Default configuration values for the chart. + │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generates Kubernetes YAML. +``` + +# Additional Files for Rancher Charts + +Before you create your own custom catalog, you should have a basic understanding about how a Rancher chart differs from a native Helm chart. Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. + +- `app-readme.md` + + A file that provides descriptive text in the chart's UI header. The following image displays the difference between a Rancher chart (which includes `app-readme.md`) and a native Helm chart (which does not). + +
    Rancher Chart with app-readme.md (left) vs. Helm Chart without (right)
    + + ![app-readme.md](/img/app-readme.png) + +- `questions.yml` + + A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using key value pairs, which is more difficult. The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). + + +
    Rancher Chart with questions.yml (left) vs. Helm Chart without (right)
    + + ![questions.yml](/img/questions.png) + + +### questions.yml + +Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. + +### Min/Max Rancher versions + +_Available as of v2.3.0_ + +For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. + +> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. + +``` +rancher_min_version: 2.3.0 +rancher_max_version: 2.3.99 +``` + +### Question Variable Reference + +This reference contains variables that you can use in `questions.yml` nested under `questions:`. + +| Variable | Type | Required | Description | +| ------------- | ------------- | --- |------------- | +| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | +| label | string | true | Define the UI label. | +| description | string | false | Specify the description of the variable.| +| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| +| required | bool | false | Define if the variable is required or not (true \| false)| +| default | string | false | Specify the default value. | +| group | string | false | Group questions by input value. | +| min_length | int | false | Min character length.| +| max_length | int | false | Max character length.| +| min | int | false | Min integer length. | +| max | int | false | Max integer length. | +| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
    - "ClusterIP"
    - "NodePort"
    - "LoadBalancer"| +| valid_chars | string | false | Regular expression for input chars validation. | +| invalid_chars | string | false | Regular expression for invalid input chars validation.| +| subquestions | []subquestion | false| Add an array of subquestions.| +| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | +| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| + +>**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. + +# Tutorial: Example Custom Chart Creation + +For a tutorial on adding a custom Helm chart to a custom catalog, refer to [this page.](catalog/tutorial) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md new file mode 100644 index 0000000000..787df5a6ad --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md @@ -0,0 +1,161 @@ +--- +title: Global DNS +weight: 5010 +aliases: + - /rancher/v2.0-v2.4/en/catalog/globaldns + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/globaldns +--- + +_Available as of v2.2.0_ + +Rancher's Global DNS feature provides a way to program an external DNS provider to route traffic to your Kubernetes applications. Since the DNS programming supports spanning applications across different Kubernetes clusters, Global DNS is configured at a global level. An application can become highly available as it allows you to have one application run on different Kubernetes clusters. If one of your Kubernetes clusters goes down, the application would still be accessible. + +> **Note:** Global DNS is only available in [Kubernetes installations](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) with the `local` cluster enabled. + +- [Global DNS Providers](#global-dns-providers) +- [Global-DNS-Entries](#global-dns-entries) +- [Permissions for Global DNS Providers and Entries](#permissions-for-global-dns-providers-and-entries) +- [Setting up Global DNS for Applications](#setting-up-global-dns-for-applications) +- [Adding a Global DNS Entry](#adding-a-global-dns-entry) +- [Editing a Global DNS Provider](#editing-a-global-dns-provider) +- [Global DNS Entry Configuration](#global-dns-entry-configuration) +- [DNS Provider Configuration](#dns-provider-configuration) + - [Route53](#route53) + - [CloudFlare](#cloudflare) + - [AliDNS](#alidns) +- [Adding Annotations to Ingresses to program the External DNS](#adding-annotations-to-ingresses-to-program-the-external-dns) + +# Global DNS Providers + +Before adding in Global DNS entries, you will need to configure access to an external provider. + +The following table lists the first version of Rancher each provider debuted. + +| DNS Provider | Available as of | +| --- | --- | +| [AWS Route53](https://aws.amazon.com/route53/) | v2.2.0 | +| [CloudFlare](https://www.cloudflare.com/dns/) | v2.2.0 | +| [AliDNS](https://www.alibabacloud.com/product/dns) | v2.2.0 | + +# Global DNS Entries + +For each application that you want to route traffic to, you will need to create a Global DNS Entry. 
This entry will use a fully qualified domain name (a.k.a FQDN) from a global DNS provider to target applications. The applications can either resolve to a single [multi-cluster application](catalog/multi-cluster-apps/) or to specific projects. You must [add specific annotation labels](#adding-annotations-to-ingresses-to-program-the-external-dns) to the ingresses in order for traffic to be routed correctly to the applications. Without this annotation, the programming for the DNS entry will not work. + +# Permissions for Global DNS Providers and Entries + +By default, only [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) and the creator of the Global DNS provider or Global DNS entry have access to use, edit and delete them. When creating the provider or entry, the creator can add additional users in order for those users to access and manage them. By default, these members will get `Owner` role to manage them. + +# Setting up Global DNS for Applications + +1. From the **Global View**, select **Tools > Global DNS Providers**. +1. To add a provider, choose from the available provider options and configure the Global DNS Provider with necessary credentials and an optional domain. For help, see [DNS Provider Configuration.](#dns-provider-configuration) +1. (Optional) Add additional users so they could use the provider when creating Global DNS entries as well as manage the Global DNS provider. +1. (Optional) Pass any custom values in the Additional Options section. + +# Adding a Global DNS Entry + +1. From the **Global View**, select **Tools > Global DNS Entries**. +1. Click on **Add DNS Entry**. +1. Fill out the form. For help, refer to [Global DNS Entry Configuration.](#global-dns-entry-configuration) +1. Click **Create.** + +# Editing a Global DNS Provider + +The [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), creator of the Global DNS provider and any users added as `members` to a Global DNS provider, have _owner_ access to that provider. Any members can edit the following fields: + +- Root Domain +- Access Key & Secret Key +- Members +- Custom values + +1. From the **Global View**, select **Tools > Global DNS Providers**. + +1. For the Global DNS provider that you want to edit, click the **⋮ > Edit**. + +# Editing a Global DNS Entry + +The [global administrators](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), creator of the Global DNS entry and any users added as `members` to a Global DNS entry, have _owner_ access to that DNS entry. Any members can edit the following fields: + +- FQDN +- Global DNS Provider +- Target Projects or Multi-Cluster App +- DNS TTL +- Members + +Any users who can access the Global DNS entry can **only** add target projects that they have access to. However, users can remove **any** target project as there is no check to confirm if that user has access to the target project. + +Permission checks are relaxed for removing target projects in order to support situations where the user's permissions might have changed before they were able to delete the target project. Another use case could be that the target project was removed from the cluster before being removed from a target project of the Global DNS entry. + +1. 
From the **Global View**, select **Tools > Global DNS Entries**. + +1. For the Global DNS entry that you want to edit, click the **⋮ > Edit**. + + +# Global DNS Entry Configuration + +| Field | Description | +|----------|--------------------| +| FQDN | Enter the **FQDN** you wish to program on the external DNS. | +| Provider | Select a Global DNS **Provider** from the list. | +| Resolves To | Select if this DNS entry will be for a [multi-cluster application](catalog/multi-cluster-apps/) or for workloads in different [projects](k8s-in-rancher/projects-and-namespaces/). | +| Multi-Cluster App Target | The target for the global DNS entry. You will need to ensure that [annotations are added to any ingresses](#adding-annotations-to-ingresses-to-program-the-external-dns) for the applications that you want to target. | +| DNS TTL | Configure the DNS time to live value in seconds. By default, it will be 300 seconds. | +| Member Access | Search for any users that you want to have the ability to manage this Global DNS entry. | + +# DNS Provider Configuration + +### Route53 + +| Field | Explanation | +|---------|---------------------| +| Name | Enter a **Name** for the provider. | +| Root Domain | (Optional) Enter the **Root Domain** of the hosted zone on AWS Route53. If this is not provided, Rancher's Global DNS Provider will work with all hosted zones that the AWS keys can access. | +| Credential Path | The [AWS credential path.](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-where) | +| Role ARN | An [Amazon Resource Name.](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) | +| Region | An [AWS region.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) | +| Zone | An [AWS zone.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.AvailabilityZones) | +| Access Key | Enter the AWS **Access Key**. | +| Secret Key | Enter the AWS **Secret Key**. | +| Member Access | Under **Member Access**, search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | + + +### CloudFlare + +| Field | Explanation | +|---------|---------------------| +| Name | Enter a **Name** for the provider. | +| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. | +| Proxy Setting | When set to yes, the global DNS entry that gets created for the provider has proxy settings on. | +| API Email | Enter the CloudFlare **API Email**. | +| API Key | Enter the CloudFlare **API Key**. | +| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | + +### AliDNS + +>**Notes:** +> +>- Alibaba Cloud SDK uses TZ data. It needs to be present on `/usr/share/zoneinfo` path of the nodes running `local` cluster, and it is mounted to the external DNS pods. 
If it is not available on the nodes, please follow the [instruction](https://www.ietf.org/timezones/tzdb-2018f/tz-link.html) to prepare it. +>- Different versions of AliDNS have different allowable TTL range, where the default TTL for a global DNS entry may not be valid. Please see the [reference](https://www.alibabacloud.com/help/doc-detail/34338.htm) before adding an AliDNS entry. + +| Field | Explanation | +|---------|---------------------| +| Name | Enter a **Name** for the provider. | +| Root Domain | Optional: Enter the **Root Domain**. In case this is not provided, Rancher's Global DNS Provider will work with all domains that the keys can access. | +| Access Key | Enter the **Access Key**. | +| Secret Key | Enter the **Secret Key**. | +| Member Access | Search for any users that you want to have the ability to use this provider. By adding this user, they will also be able to manage the Global DNS Provider entry. | + +# Adding Annotations to Ingresses to program the External DNS + +In order for Global DNS entries to be programmed, you will need to add a specific annotation on an ingress in your application or target project. + +For any application that you want targeted for your Global DNS entry, find an ingress associated with the application. + +This ingress needs to use a specific `hostname` and an annotation that should match the FQDN of the Global DNS entry. + +In order for the DNS to be programmed, the following requirements must be met: + +* The ingress routing rule must be set to use a `hostname` that matches the FQDN of the Global DNS entry. +* The ingress must have an annotation (`rancher.io/globalDNS.hostname`) and the value of this annotation should match the FQDN of the Global DNS entry. + +Once the ingress in your [multi-cluster application](catalog/multi-cluster-apps/) or in your target projects is in an `active` state, the FQDN will be programmed on the external DNS against the Ingress IP addresses. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md new file mode 100644 index 0000000000..aa63c7ab22 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps.md @@ -0,0 +1,110 @@ +--- +title: Launching Catalog Apps +weight: 700 +aliases: + - /rancher/v2.0-v2.4/en/catalog/launching-apps + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/launching-apps +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Within a project, when you want to deploy applications from catalogs, the applications available in your project will be based on the [scope of the catalogs](helm-charts/legacy-catalogs/#catalog-scopes). + +If your application is using ingresses, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry](globaldns.md). + +- [Prerequisites](#prerequisites) +- [Launching a catalog app](#launching-a-catalog-app) +- [Configuration options](#configuration-options) + +# Prerequisites + +When Rancher deploys a catalog app, it launches an ephemeral instance of a Helm service account that has the permissions of the user deploying the catalog app. Therefore, a user cannot gain more access to the cluster through Helm or a catalog application than they otherwise would have. 
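As a reference for the Global DNS requirements described earlier, the sketch below shows an Ingress that the external DNS programming can act on; the hostname, namespace, service name, and port are placeholders, and the `apiVersion` may differ depending on your cluster's Kubernetes version. The only Rancher-specific requirement is that both the routing rule's host and the `rancher.io/globalDNS.hostname` annotation match the FQDN of the Global DNS entry:

```yaml
apiVersion: extensions/v1beta1               # assumption: the older Ingress API used by Kubernetes versions of this era
kind: Ingress
metadata:
  name: myapp
  namespace: myapp-namespace
  annotations:
    rancher.io/globalDNS.hostname: myapp.example.com   # must equal the Global DNS entry's FQDN
spec:
  rules:
    - host: myapp.example.com                # routing rule must use the same hostname
      http:
        paths:
          - backend:
              serviceName: myapp             # placeholder service exposed by the application
              servicePort: 80
```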
+ +To launch an app from a catalog in Rancher, you must have at least one of the following permissions: + +- A [project-member role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target project, which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the cluster that includes the target project + +Before launching an app, you'll need to either [enable a built-in global catalog](catalog/built-in) or [add your own custom catalog.](catalog/adding-catalogs) + +# Launching a Catalog App + +1. From the **Global** view, open the project that you want to deploy an app to. + +2. From the main navigation bar, choose **Apps**. In versions before v2.2.0, choose **Catalog Apps** on the main navigation bar. Click **Launch**. + +3. Find the app that you want to launch, and then click **View Now**. + +4. Under **Configuration Options**, enter a **Name**. By default, this name is also used to create a Kubernetes namespace for the application. + + * If you would like to change the **Namespace**, click **Customize** and enter a new name. + * If you want to use a different namespace that already exists, click **Customize**, and then click **Use an existing namespace**. Choose a namespace from the list. + +5. Select a **Template Version**. + +6. Complete the rest of the **Configuration Options**. + + * For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs), answers are provided as key value pairs in the **Answers** section. + * Keys and values are available within **Detailed Descriptions**. + * When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. For example, when entering an answer that includes two values separated by a comma (i.e., `abc, bcd`), wrap the values with double quotes (i.e., `"abc, bcd"`). + +7. Review the files in **Preview**. When you're satisfied, click **Launch**. + +**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's **Workloads** view or **Apps** view. In versions before v2.2.0, this is the **Catalog Apps** view. + +# Configuration Options + +For each Helm chart, there is a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of --set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. + +> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). + + + + +### Using a questions.yml file + +If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy-to-use UI to collect the answers for the questions.
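For reference, a single question in a chart's `questions.yml` might look like the sketch below (the variable, label, and default shown are illustrative only, built from the fields documented in the question variable reference for Rancher charts). Rancher renders it as a form field and records the user's input as the answer for that variable, just as if it had been supplied as a key value pair:

```yaml
questions:
  - variable: ingress.enabled                # path of the value being set, as in values.yaml
    label: Expose app using Layer 7 Load Balancer
    description: "Create an Ingress for the application"
    type: boolean
    default: "true"
    required: true
    group: "Services and Load Balancing"
```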
+ +### Key Value Pairs for Native Helm Charts + +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a [custom Helm chart repository](helm-charts/legacy-catalogs/catalog-config/#custom-helm-chart-repository)), answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. + + + + +_Available as of v2.1.0_ + +If you do not want to input answers using the UI, you can choose the **Edit as YAML** option. + +With this example YAML: + +```YAML +outer: + inner: value +servers: +- port: 80 + host: example +``` + +### Key Value Pairs + +You can have a YAML file that translates these fields to match how to [format custom values so that it can be used with `--set`](https://github.com/helm/helm/blob/master/docs/using_helm.md#the-format-and-limitations-of---set). + +These values would be translated to: + +``` +outer.inner=value +servers[0].port=80 +servers[0].host=example +``` + +### YAML files + +_Available as of v2.2.0_ + +You can directly paste that YAML formatted structure into the YAML editor. By allowing custom values to be set using a YAML formatted structure, Rancher has the ability to easily customize for more complicated input values (e.g. multi-lines, array and JSON objects). + + + \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/helm-charts/managing-apps/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/managing-apps.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/helm-charts/managing-apps/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/managing-apps.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md new file mode 100644 index 0000000000..500d4586fe --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps.md @@ -0,0 +1,10 @@ +--- +title: Multi-Cluster Apps +weight: 600 +aliases: + - /rancher/v2.0-v2.4/en/catalog/multi-cluster-apps + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/multi-cluster-apps +--- +_Available as of v2.2.0_ + +The documentation about multi-cluster apps has moved [here.](deploy-across-clusters/multi-cluster-apps) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md new file mode 100644 index 0000000000..e653bbac65 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial.md @@ -0,0 +1,75 @@ +--- +title: "Tutorial: Example Custom Chart Creation" +weight: 800 +aliases: + - /rancher/v2.0-v2.4/en/catalog/tutorial + - /rancher/v2.0-v2.4/en/helm-charts/legacy-catalogs/tutorial +--- + +In this tutorial, you'll learn how to create a Helm chart and deploy it to a repository. The repository can then be used as a source for a custom catalog in Rancher. + +You can fill your custom catalogs with either Helm Charts or Rancher Charts, although we recommend Rancher Charts due to their enhanced user experience. 
+ +> For a complete walkthrough of developing charts, see the upstream Helm chart [developer reference](https://helm.sh/docs/chart_template_guide/). + +1. Within the GitHub repo that you're using as your custom catalog, create a directory structure that mirrors the structure listed in the [Chart Directory Structure](helm-charts/legacy-catalogs/creating-apps/#chart-directory-structure). + + Rancher requires this directory structure, although `app-readme.md` and `questions.yml` are optional. + + >**Tip:** + > + >- To begin customizing a chart, copy one from either the [Rancher Library](https://github.com/rancher/charts) or the [Helm Stable](https://github.com/kubernetes/charts/tree/master/stable). + >- For a complete walk through of developing charts, see the upstream Helm chart [developer reference](https://docs.helm.sh/developing_charts/). + +2. **Recommended:** Create an `app-readme.md` file. + + Use this file to create custom text for your chart's header in the Rancher UI. You can use this text to notify users that the chart is customized for your environment or provide special instruction on how to use it. +
    +
    + **Example**: + + ``` + $ cat ./app-readme.md + + # Wordpress ROCKS! + ``` + +3. **Recommended:** Create a `questions.yml` file. + + This file creates a form for users to specify deployment parameters when they deploy the custom chart. Without this file, users **must** specify the parameters manually using key value pairs, which isn't user-friendly. +
    +
    + The example below creates a form that prompts users for persistent volume size and a storage class. +
    +
    + For a list of variables you can use when creating a `questions.yml` file, see [Question Variable Reference](helm-charts/legacy-catalogs/creating-apps/#question-variable-reference). + + ```yaml + categories: + - Blog + - CMS + questions: + - variable: persistence.enabled + default: "false" + description: "Enable persistent volume for WordPress" + type: boolean + required: true + label: WordPress Persistent Volume Enabled + show_subquestion_if: true + group: "WordPress Settings" + subquestions: + - variable: persistence.size + default: "10Gi" + description: "WordPress Persistent Volume Size" + type: string + label: WordPress Volume Size + - variable: persistence.storageClass + default: "" + description: "If undefined or null, uses the default StorageClass. Default to null" + type: storageclass + label: Default StorageClass for WordPress + ``` + +4. Check the customized chart into your GitHub repo. + +**Result:** Your custom chart is added to the repo. Your Rancher Server will replicate the chart within a few minutes. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md new file mode 100644 index 0000000000..0586afbd70 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md @@ -0,0 +1,183 @@ +--- +title: Setting up Amazon ELB Network Load Balancer +weight: 5 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha/create-nodes-lb/nlb + - /rancher/v2.0-v2.4/en/installation/k8s-install/create-nodes-lb/nlb + - /rancher/v2.0-v2.4/en/installation/options/nlb +--- + +This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. + +These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. + +This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. + +Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. + +# Setting up the Load Balancer + +Configuring an Amazon NLB is a multistage process: + +1. [Create Target Groups](#1-create-target-groups) +2. [Register Targets](#2-register-targets) +3. [Create Your NLB](#3-create-your-nlb) +4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) + +# Requirements + +These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. + +# 1. Create Target Groups + +Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +Your first NLB configuration step is to create two target groups. 
Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. + +Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. + +1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. +1. Click **Create target group** to create the first target group, regarding TCP port 443. + +> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. + +| Option | Setting | +|-------------------|-------------------| +| Target Group Name | `rancher-tcp-443` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `443` | +| VPC | Choose your VPC | + +Health check settings: + +| Option | Setting | +|---------------------|-----------------| +| Protocol | TCP | +| Port | `override`,`80` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. + +| Option | Setting | +|-------------------|------------------| +| Target Group Name | `rancher-tcp-80` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `80` | +| VPC | Choose your VPC | + + +Health check settings: + +| Option |Setting | +|---------------------|----------------| +| Protocol | TCP | +| Port | `traffic port` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +# 2. Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +![](/img/ha/nlb/edit-targetgroup-443.png) + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
    + +![](/img/ha/nlb/add-targets-targetgroup-443.png) + +*** +**Screenshot Added targets to target group TCP port 443**
    + +![](/img/ha/nlb/added-targets-targetgroup-443.png) + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +# 3. Create Your NLB + +Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. Then complete each form. + +- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) +- [Step 2: Configure Routing](#step-2-configure-routing) +- [Step 3: Register Targets](#step-3-register-targets) +- [Step 4: Review](#step-4-review) + +### Step 1: Configure Load Balancer + +Set the following fields in the form: + +- **Name:** `rancher` +- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. +- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. +- **Availability Zones:** Select Your **VPC** and **Availability Zones**. + +### Step 2: Configure Routing + +1. From the **Target Group** drop-down, choose **Existing target group**. +1. From the **Name** drop-down, choose `rancher-tcp-443`. +1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +### Step 3: Register Targets + +Since you registered your targets earlier, all you have to do is click **Next: Review**. + +### Step 4: Review + +Look over the load balancer details and click **Create** when you're satisfied. + +After AWS creates the NLB, click **Close**. + +# 4. Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. + +# Health Check Paths for NGINX Ingress and Traefik Ingresses + +K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. + +For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. + +- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. +- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. + +To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md new file mode 100644 index 0000000000..fea1cd4196 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md @@ -0,0 +1,67 @@ +--- +title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' +weight: 1 +--- + +This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. + +The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. + +For more information about each installation option, refer to [this page.](../../../pages-for-subheaders/installation-and-upgrade.md) + +> **Note:** These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. We recommend MySQL. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. + +For a high-availability K3s installation, you will need to set up a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. + +When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the MySQL database, refer to this [tutorial](installation/options/rds/) for setting up MySQL on Amazon's RDS service.
+ +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](installation/options/chart-options/#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. 
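As a quick sanity check, you can confirm that the new record resolves to your load balancer before moving on. The hostname below is a placeholder for your own Rancher hostname:

```
dig +short rancher.example.com
```

For an A record, the output should be the load balancer IP; for a CNAME, it should include the load balancer hostname.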
+ +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md new file mode 100644 index 0000000000..b91dab8a81 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md @@ -0,0 +1,59 @@ +--- +title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' +weight: 2 +--- + +This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. + +> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, Azure, or vSphere. + * **Note:** When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. Please refer [here](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations) for more information on Azure load balancer limitations. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](installation/options/ec2-node/) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. 
This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](installation/options/chart-options/#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](installation/options/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](installation/options/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
+ +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md new file mode 100644 index 0000000000..c56672ef42 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md @@ -0,0 +1,36 @@ +--- +title: Setting up a MySQL Database in Amazon RDS +weight: 4 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/rds +--- +This tutorial describes how to set up a MySQL database in Amazon's RDS. + +This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. + +1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. In the left panel, click **Databases.** +1. Click **Create database.** +1. In the **Engine type** section, click **MySQL.** +1. In the **Version** section, choose **MySQL 5.7.22.** +1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. +1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. +1. Click **Create database.** + +You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. + +To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. + +- **Username:** Use the admin username. +- **Password:** Use the admin password. +- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. +- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. +- **Database name:** Confirm the name by going to the **Configuration** tab. 
The name is listed under **DB name.** + +This information will be used to connect to the database in the following format: + +``` +mysql://username:password@tcp(hostname:3306)/database-name +``` + +For more information on configuring the datastore for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md new file mode 100644 index 0000000000..e385890558 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md @@ -0,0 +1,66 @@ +--- +title: Setting up Nodes in Amazon EC2 +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/ec2-node +--- + +In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. + +If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. + +If the Rancher server is installed in a single Docker container, you only need one instance. + +### 1. Optional Preparation + +- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.](cluster-provisioning/rke-clusters/options/cloud-providers/) +- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.](../../../pages-for-subheaders/installation-requirements.md#port-requirements) + +### 2. Provision Instances + +1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. +1. In the left panel, click **Instances.** +1. Click **Launch Instance.** +1. In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** +1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. +1. Click **Next: Configure Instance Details.** +1. 
In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. +1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. +1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** +1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements](../../../pages-for-subheaders/installation-requirements.md#port-requirements) for Rancher nodes. +1. Click **Review and Launch.** +1. Click **Launch.** +1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. +1. Click **Launch Instances.** + +**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. Next, you will install Docker on each node. + +### 3. Install Docker and Create User + +1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. +1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** +1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: +``` +sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] +``` +1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: +``` +curl https://releases.rancher.com/install-docker/18.09.sh | sh +``` +1. When you are connected to the instance, run the following command on the instance to create a user: +``` +sudo usermod -aG docker ubuntu +``` +1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. + +> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. + +**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. + +### Next Steps for RKE Kubernetes Cluster Nodes + +If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. + +RKE will also need access to the private key to connect to each node. Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. 
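As a rough sketch of how this information is used (the complete configuration file is covered in the RKE installation instructions), each instance becomes an entry in the `nodes` list of `rancher-cluster.yml`. The addresses and key path below are placeholders:

```yaml
nodes:
  - address: 203.0.113.10          # IPv4 Public IP of the instance
    internal_address: 172.31.22.12 # Private IP of the instance
    user: ubuntu
    role: [controlplane, worker, etcd]
    ssh_key_path: ~/.ssh/rancher-nodes.pem
```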
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md new file mode 100644 index 0000000000..f275fe2ab5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md @@ -0,0 +1,25 @@ +--- +title: About High-availability Installations +weight: 1 +--- + +We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. + +In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. + +Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. + +The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. + +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md) + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
    +![High-availability Kubernetes Installation of Rancher](/img/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md new file mode 100644 index 0000000000..ef6e6daa84 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md @@ -0,0 +1,120 @@ +--- +title: Setting up a High-availability K3s Kubernetes Cluster for Rancher +shortTitle: Set up K3s for Rancher +weight: 2 +--- + +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) + +For systems without direct internet access, refer to the air gap installation instructions. + +> **Single-node Installation Tip:** +> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. +> +> To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. +> +> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +# Prerequisites + +These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.](../infrastructure-setup/ha-k3s-kubernetes-cluster.md) + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. +# Installing Kubernetes + +### 1. Install Kubernetes and Set up the K3s Server + +When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. + +1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. +1. On the Linux node, run this command to start the K3s server and connect it to the external datastore: + ``` + curl -sfL https://get.k3s.io | sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: + ```sh + curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + Note: The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. + +1. Repeat the same command on your second K3s server node. + +### 2. 
Confirm that K3s is Running + +To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: +``` +sudo k3s kubectl get nodes +``` + +Then you should see two nodes with the master role: +``` +ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes +NAME STATUS ROLES AGE VERSION +ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 +ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 +``` + +Then test the health of the cluster pods: +``` +sudo k3s kubectl get pods --all-namespaces +``` + +**Result:** You have successfully set up a K3s Kubernetes cluster. + +### 3. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +```yml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### 4. Check the Health of Your Cluster Pods + +Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. + +Check that all the required pods and containers are healthy are ready to continue: + +``` +ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d +kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d +kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d +``` + +**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. 
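If you kept the default K3s configuration, you can also confirm that the bundled Traefik Ingress controller, which receives the traffic forwarded by the load balancer you set up earlier, is running. The resource names below assume K3s defaults and will differ if Traefik was disabled or replaced:

```
kubectl -n kube-system get deploy,svc traefik
```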
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md new file mode 100644 index 0000000000..7a0c53fe5e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md @@ -0,0 +1,173 @@ +--- +title: Setting up a High-availability RKE Kubernetes Cluster +shortTitle: Set up RKE Kubernetes +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/k8s-install/kubernetes-rke +--- + + +This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. + +For Rancher before v2.4, Rancher should be installed on an RKE Kubernetes cluster. RKE is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. + +As of Rancher v2.4, the Rancher management server can be installed on either an RKE cluster or a K3s Kubernetes cluster. K3s is also a fully certified Kubernetes distribution released by Rancher, but is newer than RKE. We recommend installing Rancher on K3s because K3s is easier to use, and more lightweight, with a binary size of less than 100 MB. Note: After Rancher is installed on an RKE cluster, there is no migration path to a K3s setup at this time. + +The Rancher management server can only be run on Kubernetes cluster in an infrastructure provider where Kubernetes is installed using RKE or K3s. Use of Rancher on hosted Kubernetes providers, such as EKS, is not supported. + +For systems without direct internet access, refer to [Air Gap: Kubernetes install.](installation/air-gap-high-availability/) + +> **Single-node Installation Tip:** +> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. +> +> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. +> +> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +# Installing Kubernetes + +### Required CLI Tools + +Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. + +Also install [RKE,](https://rancher.com/docs/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. + +### 1. Create the cluster configuration file + +In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. + +Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. + +If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. 
Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. + +RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. + +```yaml +nodes: + - address: 165.227.114.63 + internal_address: 172.16.22.12 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.116.167 + internal_address: 172.16.32.37 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.127.226 + internal_address: 172.16.42.73 + user: ubuntu + role: [controlplane, worker, etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Required for external TLS termination with +# ingress-nginx v0.22+ +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + +
Common RKE Node Options
    + +| Option | Required | Description | +| ------------------ | -------- | -------------------------------------------------------------------------------------- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | + +> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. +> +> Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. +> +> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide](installation/options/etcd/). + +### 2. Run RKE + +``` +rke up --config ./rancher-cluster.yml +``` + +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + +### 3. Test Your Cluster + +This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. + +Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. + +When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. + +> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. + +Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`: + +``` +export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml +``` + +Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: + +``` +kubectl get nodes + +NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 +``` + +### 4. Check the Health of Your Cluster Pods + +Check that all the required pods and containers are healthy are ready to continue. + +- Pods are in `Running` or `Completed` state. +- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` +- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
+ +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s +kube-system canal-jp4hz 3/3 Running 0 30s +kube-system canal-z2hg8 3/3 Running 0 30s +kube-system canal-z6kpw 3/3 Running 0 30s +kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s +kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s +kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s +kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s +kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s +kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s +kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s +``` + +This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. + +### 5. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](installation/options/troubleshooting/) page. + + +### [Next: Install Rancher](installation/k8s-install/helm-rancher/) + diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md new file mode 100644 index 0000000000..bbb9851ed5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md @@ -0,0 +1,74 @@ +--- +title: Recommended Cluster Architecture +weight: 1 +--- + +There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. + +# Separating Worker Nodes from Nodes with Other Roles + +When designing your cluster(s), you have two options: + +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements](../node-requirements-for-rancher-managed-clusters.md#networking-requirements). +* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. + +In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. + +Therefore, each node should have one of the following role configurations: + + * `etcd` + * `controlplane` + * Both `etcd` and `controlplane` + * `worker` + +# Recommended Number of Nodes with Each Role + +The cluster should have: + +- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +- At least two nodes with the role `controlplane` for master component high availability. +- At least two nodes with the role `worker` for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](roles-for-nodes-in-kubernetes.md) + + +### Number of Controlplane Nodes + +Adding more than one node with the `controlplane` role makes every master component highly available. + +### Number of etcd Nodes + +The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. 
+ +| Nodes with `etcd` role | Majority | Failure Tolerance | +|--------------|------------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | **1** | +| 4 | 3 | 1 | +| 5 | 3 | **2** | +| 6 | 4 | 2 | +| 7 | 4 | **3** | +| 8 | 5 | 3 | +| 9 | 5 | **4** | + +References: + +* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) +* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) + +### Number of Worker Nodes + +Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. + +### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications + +You may have noticed that our [Kubernetes Install](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. + +# References + +* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md new file mode 100644 index 0000000000..df3b3c3cef --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md @@ -0,0 +1,43 @@ +--- +title: Roles for Nodes in Kubernetes +weight: 1 +--- + +This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. + +This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +![Cluster diagram](/img/clusterdiagram.svg)
    +Lines show the traffic flow between components. Colors are used purely for visual aid + +# etcd + +Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. + +>**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +# controlplane + +Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. + +>**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +### kube-apiserver + +The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. + +### kube-controller-manager + +The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +### kube-scheduler + +The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +# worker + +Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. + +# References + +* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md new file mode 100644 index 0000000000..68e73acaa2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md @@ -0,0 +1,193 @@ +--- +title: Importing Existing Clusters +description: Learn how you can create a cluster in Rancher by importing an existing Kubernetes cluster. Then, you can manage it using Rancher +metaTitle: 'Kubernetes Cluster Management' +metaDescription: 'Learn how you can import an existing Kubernetes cluster and then manage it using Rancher' +weight: 5 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/import-cluster/ +--- + +_Available as of v2.0.x-v2.4.x_ + +When managing an imported cluster, Rancher connects to a Kubernetes cluster that has already been set up. 
Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. + +Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. Note that Rancher does not automate the provisioning or scaling of imported clusters. + +For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. + +Rancher v2.4 added the capability to import a K3s cluster into Rancher, as well as the ability to upgrade Kubernetes by editing the cluster in the Rancher UI. + +- [Features](#features) +- [Prerequisites](#prerequisites) +- [Importing a cluster](#importing-a-cluster) +- [Imported K3s clusters](#imported-k3s-clusters) + - [Additional features for imported K3s clusters](#additional-features-for-imported-k3s-clusters) + - [Configuring a K3s Cluster to Enable Importation to Rancher](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) + - [Debug Logging and Troubleshooting for Imported K3s clusters](#debug-logging-and-troubleshooting-for-imported-k3s-clusters) +- [Annotating imported clusters](#annotating-imported-clusters) + +# Features + +After importing a cluster, the cluster owner can: + +- [Manage cluster access](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) through role-based access control +- Enable [monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and [logging](cluster-admin/tools/logging/) +- Enable [Istio](../../../pages-for-subheaders/istio.md) +- Use [pipelines](../../advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- Configure [alerts](cluster-admin/tools/alerts/) and [notifiers](../../../explanations/integrations-in-rancher/notifiers.md) +- Manage [projects](../../../pages-for-subheaders/manage-projects.md) and [workloads](../../../pages-for-subheaders/workloads-and-pods.md) + +After importing a K3s cluster, the cluster owner can also [upgrade Kubernetes from the Rancher UI.](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) + +# Prerequisites + +If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to import the cluster into Rancher. + +In order to apply the privilege, you need to run: + +```plain +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user [USER_ACCOUNT] +``` + +before running the `kubectl` command to import the cluster. + +By default, GKE users are not given this privilege, so you will need to run the command before importing GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). + +> If you are importing a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-importation-to-rancher) + +# Importing a Cluster + +1. From the **Clusters** page, click **Add Cluster**. +2. Choose **Import**. +3. Enter a **Cluster Name**. +4. 
Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user.} +5. Click **Create**. +6. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. +7. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. +8. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. +9. When you finish running the command(s) on your node, click **Done**. + +**Result:** + +- Your cluster is imported and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. +- You can access your cluster after its state is updated to **Active.** +- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). + +> **Note:** +> You can not re-import a cluster that is currently active in a Rancher setup. + +# Imported K3s Clusters + +You can now import a K3s Kubernetes cluster into Rancher. [K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. You can also upgrade Kubernetes by editing the K3s cluster in the Rancher UI. + +### Additional Features for Imported K3s Clusters + +_Available as of v2.4.0_ + +When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: + +- The ability to upgrade the K3s version +- The ability to configure the maximum number of nodes that will be upgraded concurrently +- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. + +### Configuring K3s Cluster Upgrades + +> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. + +The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. + +- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum unavailable server nodes +- **Worker concurrency:** The maximum number worker nodes to upgrade at the same time; also the maximum unavailable worker nodes + +In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes have the capability to have workloads scheduled to them by default. 
+ +Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. + +### Configuring a K3s Cluster to Enable Importation to Rancher + +The K3s server needs to be configured to allow writing to the kubeconfig file. + +This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: + +``` +$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 +``` + +The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: + +``` +$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - +``` + +### Debug Logging and Troubleshooting for Imported K3s Clusters + +Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. + +To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. + +Logs created by the `system-upgrade-controller` can be viewed by running this command: + +``` +kubectl logs -n cattle-system system-upgrade-controller +``` + +The current status of the plans can be viewed with this command: + +``` +kubectl get plans -A -o yaml +``` + +If the cluster becomes stuck in upgrading, restart the `system-upgrade-controller`. + +To prevent issues when upgrading, the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) should be followed. + +# Annotating Imported Clusters + +For all types of imported Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. + +Therefore, when Rancher imports a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the imported cluster. + +However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. + +By annotating an imported cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. + +This example annotation indicates that a pod security policy is enabled: + +``` +"capabilities.cattle.io/pspEnabled": "true" +``` + +The following annotation indicates Ingress capabilities. Note that that the values of non-primitive objects need to be JSON encoded, with quotations escaped. 
+ +``` +"capabilities.cattle.io/ingressCapabilities": "[ + { + "customDefaultBackend":true, + "ingressProvider":"asdf" + } +]" +``` + +These capabilities can be annotated for the cluster: + +- `ingressCapabilities` +- `loadBalancerCapabilities` +- `nodePoolScalingSupported` +- `nodePortRange` +- `pspEnabled` +- `taintSupport` + +All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. + +To annotate an imported cluster, + +1. Go to the cluster view in Rancher and select **⋮ > Edit.** +1. Expand the **Labels & Annotations** section. +1. Click **Add Annotation.** +1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. +1. Click **Save.** + +**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md new file mode 100644 index 0000000000..6baaaaebb7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md @@ -0,0 +1,39 @@ +--- +title: Rancher Agents +weight: 2400 +--- + +There are two different agent resources deployed on Rancher managed clusters: + +- [cattle-cluster-agent](#cattle-cluster-agent) +- [cattle-node-agent](#cattle-node-agent) + +For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture](../../../../pages-for-subheaders/rancher-manager-architecture.md) + +### cattle-cluster-agent + +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. + +### cattle-node-agent + +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters when `cattle-cluster-agent` is unavailable. + +> **Note:** In Rancher v2.2.4 and lower, the `cattle-node-agent` pods did not tolerate all taints, causing Kubernetes upgrades to fail on these nodes. The fix for this has been included in Rancher v2.2.5 and higher. 
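
To confirm that both agents are present on a downstream cluster, you can check them with `kubectl`. This is only a quick sketch; it assumes your kubeconfig points at the downstream cluster and that the agents run in the `cattle-system` namespace, which is the default for Rancher-managed clusters:

```
# The cluster agent runs as a Deployment
kubectl -n cattle-system get deployment cattle-cluster-agent

# The node agent runs as a DaemonSet so that it is present on every node
kubectl -n cattle-system get daemonset cattle-node-agent

# List the agent pods themselves
kubectl -n cattle-system get pods
```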
+ +### Scheduling rules + +_Applies to v2.3.0 up to v2.5.3_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | +| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | + +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. + +The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: + +| Weight | Expression | +| ------ | ------------------------------------------------ | +| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | +| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md new file mode 100644 index 0000000000..3666137155 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md @@ -0,0 +1,150 @@ +--- +title: Setting up the Amazon Cloud Provider +weight: 1 +--- + +When using the `Amazon` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. +- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. + +See [cloud-provider-aws README](https://github.com/kubernetes/cloud-provider-aws/blob/master/README.md) for all information regarding the Amazon cloud provider. + +To set up the Amazon cloud provider, + +1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) +2. [Configure the ClusterID](#2-configure-the-clusterid) + +### 1. Create an IAM Role and attach to the instances + +All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: + +* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. +* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. 
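
If you prefer the AWS CLI over the console, the role and instance profile for the `controlplane` nodes can be created roughly as shown below. This is a sketch rather than an official procedure: the role, profile, and policy names are placeholders, and `controlplane-policy.json` is assumed to contain the `controlplane` IAM policy example shown later in this section.

```
# Create a role that EC2 instances are allowed to assume
aws iam create-role --role-name rancher-controlplane \
  --assume-role-policy-document '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}'

# Attach the controlplane policy saved locally as controlplane-policy.json
aws iam put-role-policy --role-name rancher-controlplane \
  --policy-name rancher-controlplane-policy \
  --policy-document file://controlplane-policy.json

# Wrap the role in an instance profile; the profile name is what you enter
# as the IAM Instance Profile Name in the node template
aws iam create-instance-profile --instance-profile-name rancher-controlplane
aws iam add-role-to-instance-profile \
  --instance-profile-name rancher-controlplane \
  --role-name rancher-controlplane
```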
+ +While creating an [Amazon EC2 cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. + +While creating a [Custom cluster](../../../../../../pages-for-subheaders/use-existing-nodes.md), you must manually attach the IAM role to the instance(s). + +IAM Policy for nodes with the `controlplane` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } +] +} +``` + +IAM policy for nodes with the `etcd` or `worker` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } +] +} +``` + +### 2. Configure the ClusterID + +The following resources need to tagged with a `ClusterID`: + +- **Nodes**: All hosts added in Rancher. +- **Subnet**: The subnet used for your cluster. +- **Security Group**: The security group used for your cluster. + +>**Note:** Do not tag multiple security groups. 
Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). + +When you create an [Amazon EC2 Cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. + +Use the following tag: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` + +`CLUSTERID` can be any string you like, as long as it is equal across all tags set. + +Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. + +### Using Amazon Elastic Container Registry (ECR) + +The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/azure/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md new file mode 100644 index 0000000000..2a20c6930b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md @@ -0,0 +1,25 @@ +--- +title: Setting up the vSphere Cloud Provider +weight: 4 +--- + +In this section, you'll learn how to set up the vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. + +Follow these steps while creating the vSphere cluster in Rancher: + +1. 
Set **Cloud Provider** option to `Custom`. + + ![](/img/vsphere-node-driver-cloudprovider.png) + +1. Click on **Edit as YAML** +1. Insert the following structure to the pre-populated cluster YAML. As of Rancher v2.3+, this structure must be placed under `rancher_kubernetes_engine_config`. In versions before v2.3, it has to be defined as a top-level field. Note that the `name` *must* be set to `vsphere`. + + ```yaml + rancher_kubernetes_engine_config: # Required as of Rancher v2.3+ + cloud_provider: + name: vsphere + vsphereCloudProvider: + [Insert provider configuration] + ``` + +Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md new file mode 100644 index 0000000000..bb9d59450c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md @@ -0,0 +1,95 @@ +--- +title: Creating a DigitalOcean Cluster +shortTitle: DigitalOcean +weight: 2215 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. + +First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. + +Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **DigitalOcean**. +1. Enter your Digital Ocean credentials. +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. 
Create a node template with your cloud credentials + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md) + +### 3. Create a cluster with node pools using the node template + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **DigitalOcean**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **DigitalOcean**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Digital Ocean Options** form. 
For help filling out the form, refer to the [Digital Ocean node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md new file mode 100644 index 0000000000..703a8eef18 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md @@ -0,0 +1,267 @@ +--- +title: Creating an Amazon EC2 Cluster +shortTitle: Amazon EC2 +description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher +weight: 2210 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. + +First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. 
+ +Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +### Prerequisites + +- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. +- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: + - [Example IAM Policy](#example-iam-policy) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) +- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. + +> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. + +# Creating an EC2 Cluster + +The steps to create a cluster differ based on your Rancher version. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **Amazon.** +1. In the **Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key.** +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials and information from EC2 + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for EC2. 
For help filling out the form, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) + +### 3. Create a cluster with node pools using the node template + +Add one or more node pools to your cluster. For more information about node pools, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Amazon EC2**. +1. Enter a **Cluster Name**. +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Amazon EC2**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** Refer to [Selecting Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) To create a node template, click **Add Node Template**. For help filling out the node template, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) +1. Click **Create**. +1. **Optional:** Add additional node pools. +1. Review your cluster settings to confirm they are correct. 
Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# IAM Policies + +> **Note:** Rancher v2.4.6 and v2.4.7 had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. 
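
If you set up the prerequisites from the command line instead of the console, one of the example policies below can be created as a managed policy and attached to the user whose access key and secret key you enter in Rancher. This is only a sketch with placeholder names; the policy document file is assumed to contain one of the JSON examples below.

```
# Create a managed policy from one of the example JSON documents below
aws iam create-policy --policy-name RancherEC2NodeDriver \
  --policy-document file://rancher-ec2-policy.json

# Attach the policy to the IAM user used for the Rancher cloud credential
aws iam attach-user-policy --user-name rancher-node-driver \
  --policy-arn arn:aws:iam::AWS_ACCOUNT_ID:policy/RancherEC2NodeDriver
```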
+ +### Example IAM Policy + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` + +### Example IAM Policy with PassRole + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", + "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md 
b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md new file mode 100644 index 0000000000..a9dc25f088 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md @@ -0,0 +1,132 @@ +--- +title: Creating an Azure Cluster +shortTitle: Azure +weight: 2220 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Azure through Rancher. + +First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. + +Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +>**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: + +> - Terminate the SSL/TLS on the internal load balancer +> - Use the L7 load balancer + +> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +- [Preparation in Azure](#preparation-in-azure) +- [Creating an Azure Cluster](#creating-an-azure-cluster) + +# Preparation in Azure + +Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. + +To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. + +The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: + +``` +az ad sp create-for-rbac \ + --name="" \ + --role="Contributor" \ + --scopes="/subscriptions/" +``` + +The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, *The client secret*, and *The tenant ID*. This information will be used when you create a node template for Azure. + +# Creating an Azure Cluster + + + + +1. 
[Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **Azure**. +1. Enter your Azure credentials. +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in Azure. + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Azure**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +Use Rancher to create a Kubernetes cluster in Azure. + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Azure**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. 
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **Azure Options** form. For help filling out the form, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
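
As a quick sanity check for either access method, you can verify connectivity from your workstation once the cluster is **Active.** This sketch assumes you have saved the kubeconfig file downloaded from the cluster view in the Rancher UI:

```
# Point kubectl at the kubeconfig file downloaded from the Rancher UI
export KUBECONFIG=$HOME/.kube/my-azure-cluster.yaml

# The nodes from each node pool should be listed and Ready
kubectl get nodes -o wide

# The namespaces of the Default and System projects described above should exist
kubectl get namespaces
```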
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md new file mode 100644 index 0000000000..074e10bab2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md @@ -0,0 +1,43 @@ +--- +title: Creating Credentials in the vSphere Console +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials +--- + +This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. + +The following table lists the permissions required for the vSphere user account: + +| Privilege Group | Operations | +|:----------------------|:-----------------------------------------------------------------------| +| Datastore | AllocateSpace
Browse <br/> FileManagement (Low level file operations) <br/> UpdateVirtualMachineFiles <br/> UpdateVirtualMachineMetadata | +| Network | Assign | +| Resource | AssignVMToPool | +| Virtual Machine | Config (All) <br/> GuestOperations (All) <br/> Interact (All) <br/> Inventory (All) <br/>
    Provisioning (All) | + +The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: + +1. From the **vSphere** console, go to the **Administration** page. + +2. Go to the **Roles** tab. + +3. Create a new role. Give it a name and select the privileges listed in the permissions table above. + + ![](/img/rancherroles1.png) + +4. Go to the **Users and Groups** tab. + +5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. + + ![](/img/rancheruser.png) + +6. Go to the **Global Permissions** tab. + +7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. + + ![](/img/globalpermissionuser.png) + + ![](/img/globalpermissionrole.png) + +**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md new file mode 100644 index 0000000000..81f72eed67 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md @@ -0,0 +1,151 @@ +--- +title: Provisioning Kubernetes Clusters in vSphere +weight: 1 +--- + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. + +First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. + +Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.](cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference/) + +For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +- [Preparation in vSphere](#preparation-in-vsphere) +- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) + +# Preparation in vSphere + +This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. + +The node templates are documented and tested with the vSphere Web Services API version 6.5. 
+ +### Create Credentials in vSphere + +Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials. + +Refer to this [how-to guide](cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere. + +### Network Permissions + +It must be ensured that the hosts running the Rancher server are able to establish the following network connections: + +- To the vSphere API on the vCenter server (usually port 443/TCP). +- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required with Rancher before v2.3.3 or when using the ISO creation method in later versions*). +- To port 22/TCP and 2376/TCP on the created VMs + +See [Node Networking Requirements](../../../node-requirements-for-rancher-managed-clusters.md#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider. + +### Valid ESXi License for vSphere API Access + +The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license. + +### VM-VM Affinity Rules for Clusters with DRS + +If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes. + +# Creating a vSphere Cluster + +The a vSphere cluster is created in Rancher depends on the Rancher version. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **vSphere**. +1. Enter your vSphere credentials. For help, refer to **Account Access** in the [configuration reference for your Rancher version.](../../../../../../pages-for-subheaders/creating-a-vsphere-cluster.md) +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters. + +1. 
In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for vSphere. For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: + - [v2.3.3](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md) + - [v2.3.0](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md) + - [v2.2.0](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md) + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in vSphere. + +1. Navigate to **Clusters** in the **Global** view. +1. Click **Add Cluster** and select the **vSphere** infrastructure provider. +1. Enter a **Cluster Name.** +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + +Use Rancher to create a Kubernetes cluster in vSphere. + +For Rancher versions before v2.0.4, when you create the cluster, you will also need to follow the steps in [this section](http://localhost:9001/rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vpshere-node-template-config/prior-to-2.0.4/#disk-uuids) to enable disk UUIDs. + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **vSphere**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. 
Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) +1. Add one or more [node pools](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) to your cluster. Each node pool uses a node template to provision new nodes. To create a node template, click **Add Node Template** and complete the **vSphere Options** form. For help filling out the form, refer to the vSphere node template configuration reference. Refer to the newest version of the configuration reference that is less than or equal to your Rancher version: + - [v2.0.4](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md) + - [before v2.0.4](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md) +1. Review your options to confirm they're correct. Then click **Create** to start provisioning the VMs and Kubernetes services. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + + + + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
+- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../../../../../advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../set-up-cloud-providers/other-cloud-providers/vsphere.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md new file mode 100644 index 0000000000..770ee0d9e7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md @@ -0,0 +1,41 @@ +--- +title: Configuration for Storage Classes in Azure +weight: 3 +--- + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. + +In order to have the Azure platform create the required storage resources, follow these steps: + +1. [Configure the Azure cloud provider.](../set-up-cloud-providers/other-cloud-providers/azure.md) +1. Configure `kubectl` to connect to your cluster. +1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: + + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:azure-cloud-provider + rules: + - apiGroups: [''] + resources: ['secrets'] + verbs: ['get','create'] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:azure-cloud-provider + roleRef: + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io + name: system:azure-cloud-provider + subjects: + - kind: ServiceAccount + name: persistent-volume-binder + namespace: kube-system + +1. Create these in your cluster using one of the follow command. 
+ + ``` + # kubectl create -f + ``` diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md new file mode 100644 index 0000000000..56f1e0f654 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2.md @@ -0,0 +1,178 @@ +--- +title: v2.1.x and v2.2.x Windows Documentation (Experimental) +weight: 9100 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/docs-for-2.1-and-2.2/ +--- + +_Available from v2.1.0 to v2.1.9 and v2.2.0 to v2.2.3_ + +This section describes how to provision Windows clusters in Rancher v2.1.x and v2.2.x. If you are using Rancher v2.3.0 or later, please refer to the new documentation for [v2.3.0 or later](../../../../../pages-for-subheaders/use-windows-clusters.md). + +When you create a [custom cluster](../../../../../pages-for-subheaders/use-existing-nodes.md), Rancher uses RKE (the Rancher Kubernetes Engine) to provision the Kubernetes cluster on your existing infrastructure. + +You can provision a custom Windows cluster using Rancher by using a mix of Linux and Windows hosts as your cluster nodes. + +>**Important:** In versions of Rancher before v2.3, support for Windows nodes is experimental. Therefore, it is not recommended to use Windows nodes for production environments if you are using Rancher before v2.3. + +This guide walks you through create of a custom cluster that includes three nodes: + +- A Linux node, which serves as a Kubernetes control plane node +- Another Linux node, which serves as a Kubernetes worker used to support Ingress for the cluster +- A Windows node, which is assigned the Kubernetes worker role and runs your Windows containers + +For a summary of Kubernetes features supported in Windows, see [Using Windows in Kubernetes](https://kubernetes.io/docs/setup/windows/intro-windows-in-kubernetes/). + +## OS and Container Requirements + +- For clusters provisioned with Rancher v2.1.x and v2.2.x, containers must run on Windows Server 1809 or above. +- You must build containers on a Windows Server core version 1809 or above to run these containers on the same server version. + +## Objectives for Creating Cluster with Windows Support + +When setting up a custom cluster with support for Windows nodes and containers, complete the series of tasks below. + + + +- [1. Provision Hosts](#1-provision-hosts) +- [2. Cloud-host VM Networking Configuration](#2-cloud-hosted-vm-networking-configuration) +- [3. 
Create the Custom Cluster](#3-create-the-custom-cluster) +- [4. Add Linux Host for Ingress Support](#4-add-linux-host-for-ingress-support) +- [5. Adding Windows Workers](#5-adding-windows-workers) +- [6. Cloud-host VM Routes Configuration](#6-cloud-hosted-vm-routes-configuration) + + + +## 1. Provision Hosts + +To begin provisioning a custom cluster with Windows support, prepare your host servers. Provision three nodes according to our [requirements](../../../../../pages-for-subheaders/installation-requirements.md)—two Linux, one Windows. Your hosts can be: + +- Cloud-hosted VMs +- VMs from virtualization clusters +- Bare-metal servers + +The table below lists the Kubernetes node roles you'll assign to each host, although you won't enable these roles until further along in the configuration process—we're just informing you of each node's purpose. The first node, a Linux host, is primarily responsible for managing the Kubernetes control plane, although, in this use case, we're installing all three roles on this node. Node 2 is also a Linux worker, which is responsible for Ingress support. Finally, the third node is your Windows worker, which will run your Windows applications. + +Node | Operating System | Future Cluster Role(s) +--------|------------------|------ +Node 1 | Linux (Ubuntu Server 16.04 recommended) | Control plane, etcd, worker +Node 2 | Linux (Ubuntu Server 16.04 recommended) | Worker (This node is used for Ingress support) +Node 3 | Windows (Windows Server core version 1809 or above) | Worker + +### Requirements + +- You can view node requirements for Linux and Windows nodes in the [installation section](../../../../../pages-for-subheaders/installation-requirements.md). +- All nodes in a virtualization cluster or a bare metal cluster must be connected using a layer 2 network. +- To support [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), your cluster must include at least one Linux node dedicated to the worker role. +- Although we recommend the three node architecture listed in the table above, you can add additional Linux and Windows workers to scale up your cluster for redundancy. + + +## 2. Cloud-hosted VM Networking Configuration + +>**Note:** This step only applies to nodes hosted on cloud-hosted virtual machines. If you're using virtualization clusters or bare-metal servers, skip ahead to [Create the Custom Cluster](#3-create-the-custom-cluster). + +If you're hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. + +Service | Directions to disable private IP address checks +--------|------------------------------------------------ +Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) +Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) +Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) + +## 3. 
Create the Custom Cluster + +To create a custom cluster that supports Windows nodes, follow the instructions in [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md), starting from 2. Create the Custom Cluster. While completing the linked instructions, look for steps that requires special actions for Windows nodes, which are flagged with a note. These notes will link back here, to the special Windows instructions listed in the subheadings below. + + +### Enable the Windows Support Option + +While choosing **Cluster Options**, set **Windows Support (Experimental)** to **Enabled**. + +After you select this option, resume [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md) from [step 6](../../../../../pages-for-subheaders/use-existing-nodes.md#step-6). + +### Networking Option + +When choosing a network provider for a cluster that supports Windows, the only option available is Flannel, as [host-gw](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) is needed for IP routing. + +If your nodes are hosted by a cloud provider and you want automation support such as load balancers or persistent storage devices, see [Selecting Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers) for configuration info. + +### Node Configuration + +The first node in your cluster should be a Linux host that fills the Control Plane role. This role must be fulfilled before you can add Windows hosts to your cluster. At minimum, the node must have this role enabled, but we recommend enabling all three. The following table lists our recommended settings (we'll provide the recommended settings for nodes 2 and 3 later). + +Option | Setting +-------|-------- +Node Operating System | Linux +Node Roles | etcd
    Control Plane
    Worker + +When you're done with these configurations, resume [Creating a Cluster with Custom Nodes](../../../../../pages-for-subheaders/use-existing-nodes.md) from [step 8](../../../../../pages-for-subheaders/use-existing-nodes.md#step-8). + + + +## 4. Add Linux Host for Ingress Support + +After the initial provisioning of your custom cluster, your cluster only has a single Linux host. Add another Linux host, which will be used to support Ingress for your cluster. + +1. Using the content menu, open the custom cluster your created in [2. Create the Custom Cluster](#3-create-the-custom-cluster). + +1. From the main menu, select **Nodes**. + +1. Click **Edit Cluster**. + +1. Scroll down to **Node Operating System**. Choose **Linux**. + +1. Select the **Worker** role. + +1. Copy the command displayed on screen to your clipboard. + +1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. + +1. From **Rancher**, click **Save**. + +**Result:** The worker role is installed on your Linux host, and the node registers with Rancher. + +## 5. Adding Windows Workers + +You can add Windows hosts to a custom cluster by editing the cluster and choosing the **Windows** option. + +1. From the main menu, select **Nodes**. + +1. Click **Edit Cluster**. + +1. Scroll down to **Node Operating System**. Choose **Windows**. + +1. Select the **Worker** role. + +1. Copy the command displayed on screen to your clipboard. + +1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. + +1. From Rancher, click **Save**. + +1. **Optional:** Repeat these instruction if you want to add more Windows nodes to your cluster. + +**Result:** The worker role is installed on your Windows host, and the node registers with Rancher. + +## 6. Cloud-hosted VM Routes Configuration + +In Windows clusters, containers communicate with each other using the `host-gw` mode of Flannel. In `host-gw` mode, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. + +- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. + +- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. + +To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: + +```bash +kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR +``` + +Then follow the instructions for each cloud provider to configure routing rules for each node: + +Service | Instructions +--------|------------- +Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). 
+Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). + + +` ` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md new file mode 100644 index 0000000000..d1afa1431b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md @@ -0,0 +1,121 @@ +--- +title: Node Requirements for Rancher Managed Clusters +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. + +> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.](../../../pages-for-subheaders/installation-requirements.md) + +Make sure the nodes for the Rancher server fulfill the following requirements: + +- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) +- [Optional: Security Considerations](#optional-security-considerations) + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) The capability to use Windows worker nodes in downstream clusters was added in Rancher v2.3.0. + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +If you plan to use ARM64, see [Running on ARM64 (Experimental).](installation/options/arm64-platform/) + +For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) + +### Oracle Linux and RHEL Derived Linux Nodes + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. + +### SUSE Linux Nodes + +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. 
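As a quick reference for the Oracle Linux and RHEL derived nodes described above, where we recommend disabling firewalld, the commands typically look like the following. This is a minimal sketch; confirm against your distribution's documentation before changing firewall settings on production nodes:

```
# Stop firewalld now and keep it from starting on boot
sudo systemctl stop firewalld
sudo systemctl disable firewalld
```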
+ +### Flatcar Container Linux Nodes + +When [Launching Kubernetes with Rancher](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File](../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#cluster-config-file) + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: canal + options: + canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: calico + options: + calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +It is also required to enable the Docker service, you can enable the Docker service using the following command: + +``` +systemctl enable docker.service +``` + +The Docker service is enabled automatically when using [Node Drivers](../../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers). + +### Windows Nodes + +_Windows worker nodes can be used as of Rancher v2.3.0_ + +Nodes with Windows Server must run Docker Enterprise Edition. + +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows](../../../pages-for-subheaders/use-windows-clusters.md) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. + +Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. + +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.](https://rancher.com/docs/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes). 
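Before moving on, it can help to confirm that the required ports are actually reachable between nodes. The check below is only a sketch: it assumes `nc` (netcat) is installed and uses the Kubernetes API server port 6443 with a placeholder address; substitute the node addresses and the ports listed in the port requirements linked above:

```
# Verify that TCP port 6443 on a controlplane node is reachable from this node
# (203.0.113.10 is a placeholder address; -w 5 sets a 5 second timeout)
nc -zv -w 5 203.0.113.10 6443
```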
+ +# Optional: Security Considerations + +If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. + +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.](../../../pages-for-subheaders/rancher-security.md#rancher-hardening-guide) diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md new file mode 100644 index 0000000000..5e862ee9d0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md @@ -0,0 +1,57 @@ +--- +title: Creating an Aliyun ACK Cluster +shortTitle: Alibaba Cloud Container Service for Kubernetes +weight: 2120 +--- + +_Available as of v2.2.0_ + +You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. + +## Prerequisites + +>**Note** +>Deploying to ACK will incur charges. + +1. In Aliyun, activate the following services in their respective consoles. + + - [Container Service](https://cs.console.aliyun.com) + - [Resource Orchestration Service](https://ros.console.aliyun.com) + - [RAM](https://ram.console.aliyun.com) + +2. Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Referring to the official Alibaba Cloud documentation about [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. + +3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). + +4. 
In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. + +## Create an ACK Cluster + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Alibaba ACK**. + +1. Enter a **Cluster Name**. + +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. + +1. Click **Next: Configure Cluster**, then choose cluster type, the version of Kubernetes and the availability zone. + +1. If you choose **Kubernetes** as the cluster type, Click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. + +1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. + +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md new file mode 100644 index 0000000000..d0e156615b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md @@ -0,0 +1,87 @@ +--- +title: Creating a Huawei CCE Cluster +shortTitle: Huawei Cloud Kubernetes Service +weight: 2130 +--- + +_Available as of v2.2.0_ + +You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. 
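If you prefer to check the driver's state from the command line rather than the UI, you can query the Rancher API. The sketch below makes several assumptions: that your Rancher version exposes cluster drivers as `kontainerDrivers` in the v3 API, that you have an API bearer token, and that `jq` is installed. Verify the exact resource and field names in your Rancher API browser, and use the UI steps linked above if they differ:

```
# List cluster drivers and their states; the Huawei CCE driver must be
# "active" before CCE clusters can be provisioned.
# <RANCHER_URL> and <API_TOKEN> are placeholders.
curl -s -H "Authorization: Bearer <API_TOKEN>" \
  "https://<RANCHER_URL>/v3/kontainerDrivers" | jq '.data[] | {name, state}'
```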
+ +## Prerequisites in Huawei + +>**Note** +>Deploying to CCE will incur charges. + +1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). + +2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). + +## Limitations + +Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. + +## Create the CCE Cluster + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Huawei CCE**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key **Secret Key**. Then Click **Next: Configure cluster**. Fill in the cluster configuration. For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) +1. Fill the following node configuration of the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) +1. Click **Create** to create the CCE cluster. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# Huawei CCE Configuration + +|Settings|Description| +|---|---| +| Cluster Type | Which type or node you want to include into the cluster, `VirtualMachine` or `BareMetal`. | +| Description | The description of the cluster. | +| Master Version | The Kubernetes version. | +| Management Scale Count | The max node count of the cluster. The options are 50, 200 and 1000. The larger of the scale count, the more the cost. | +| High Availability | Enable master node high availability. The cluster with high availability enabled will have more cost. | +| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` is supported in `VirtualMachine` type and `underlay_ipvlan` is supported in `BareMetal` type | +| Container Network CIDR | Network CIDR for the cluster. | +| VPC Name | The VPC name which the cluster is going to deploy into. Rancher will create one if it is blank. | +| Subnet Name | The Subnet name which the cluster is going to deploy into. Rancher will create one if it is blank. | +| External Server | This option is reserved for the future we can enable CCE cluster public access via API. For now, it is always disabled. | +| Cluster Label | The labels for the cluster. | +| Highway Subnet | This option is only supported in `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | + +**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. 
For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) + +# Node Configuration + +|Settings|Description| +|---|---| +| Zone | The available zone at where the node(s) of the cluster is deployed. | +| Billing Mode | The bill mode for the cluster node(s). In `VirtualMachine` type, only `Pay-per-use` is supported. in `BareMetal`, you can choose `Pay-per-use` or `Yearly/Monthly`. | +| Validity Period | This option only shows in `Yearly/Monthly` bill mode. It means how long you want to pay for the cluster node(s). | +| Auto Renew | This option only shows in `Yearly/Monthly` bill mode. It means that the cluster node(s) will renew the `Yearly/Monthly` payment automatically or not. | +| Data Volume Type | Data volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | +| Data Volume Size | Data volume size for the cluster node(s) | +| Root Volume Type | Root volume type for the cluster node(s). `SATA`, `SSD` or `SAS` for this option. | +| Root Volume Size | Root volume size for the cluster node(s) | +| Node Flavor | The node flavor of the cluster node(s). The flavor list in Rancher UI is fetched from Huawei Cloud. It includes all the supported node flavors. | +| Node Count | The node count of the cluster | +| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | +| SSH Key Name | The ssh key for the cluster node(s) | +| EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) are not going to bind a public IP. `Create EIP` means that the cluster node(s) will bind one or many newly created Eips after provisioned and more options will be shown in the UI to set the to-create EIP parameters. And `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | +| EIP Count | This option will only be shown when `Create EIP` is selected. It means how many EIPs you want to create for the node(s). | +| EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | +| EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | +| EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | +| EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The BandWidth of the EIPs. | +| Authentication Mode | It means enabling `RBAC` or also enabling `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate which is used for authenticating proxy will be also required. | +| Node Label | The labels for the cluster node(s). 
| \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md new file mode 100644 index 0000000000..ff8574f687 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md @@ -0,0 +1,86 @@ +--- +title: Creating a Tencent TKE Cluster +shortTitle: Tencent Kubernetes Engine +weight: 2125 +--- + +_Available as of v2.2.0_ + +You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. + +## Prerequisites in Tencent + +>**Note** +>Deploying to TKE will incur charges. + +1. Make sure that the account you will be using to create the TKE cluster has the appropriate permissions by referring to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. + +2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). + +3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region that you want to deploy your Kubernetes cluster. + +4. Create a [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. + +## Create a TKE Cluster + +1. From the **Clusters** page, click **Add Cluster**. + +2. Choose **Tencent TKE**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). + + | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Region | From the drop-down chooses the geographical region in which to build your cluster. | + | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | + | Secret Key | Enter the Secret key that you obtained from Tencent Cloud Console. | + +6. Click `Next: Configure Cluster` to set your TKE cluster configurations. 
+ + | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Kubernetes Version | The TKE only supports Kubernetes version 1.10.5 now. | + | Node Count | Enter the amount of worker node you want to purchase for your Kubernetes cluster, up to 100. | + | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | + | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster, you may check the available range of the CIDR in the VPC service of the Tencent Cloud Console. Default to 172.16.0.0/16. | + + **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that as of Rancher v2.3.0, cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#config-file-structure-in-rancher-v2-3-0) + +7. Click `Next: Select Instance Type` to choose the instance type that will use for your TKE cluster. + + | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Availability Zone | Choose the availability zone of the VPC region. | + | Subnet | Select the Subnet that you have created within the VPC, and add a new one if you don't have it in the chosen availability zone. | + | Instance Type | From the drop-down chooses the VM instance type that you want to use for the TKE cluster, default to S2.MEDIUM4 (CPU 2 Memory 4 GiB). | + +8. Click `Next: Configure Instance` to configure the VM instance that will use for your TKE cluster. + + Option | Description + -------|------------ + Operating System | The name of the operating system, currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 + Security Group | Security group ID, default does not bind any security groups. + Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). + Root Disk Size | System disk size. Linux system adjustment range is 20 - 50G, step size is 1. + Data Disk Type | Data disk type, default value to the SSD cloud drive + Data Disk Size | Data disk size (GB), the step size is 10 + Band Width Type | Type of bandwidth, PayByTraffic or PayByHour + Band Width | Public network bandwidth (Mbps) + Key Pair | Key id, after associating the key can be used to logging to the VM node + +9. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. 
+ +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md new file mode 100644 index 0000000000..8033ef02b1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md @@ -0,0 +1,44 @@ +--- +title: ConfigMaps +weight: 3061 +aliases: + - /rancher/v2.0-v2.4/en/tasks/projects/add-configmaps + - /rancher/v2.0-v2.4/en/k8s-in-rancher/configmaps +--- + +While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). + +ConfigMaps accept key value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. + +>**Note:** ConfigMaps can only be applied to namespaces and not projects. + +1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. + +1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. + +1. Enter a **Name** for the Config Map. + + >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. + +1. Select the **Namespace** you want to add Config Map to. You can also add a new namespace on the fly by clicking **Add to a new namespace**. + +1. From **Config Map Values**, click **Add Config Map Value** to add a key value pair to your ConfigMap. Add as many values as you need. + +1. Click **Save**. + + >**Note:** Don't use ConfigMaps to store sensitive data [use a secret](secrets.md). + > + >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. + > + > ![](/img/bulk-key-values.gif) + +**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. + +## What's Next? + +Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for you application to consume, such as: + +- Application environment variables. +- Specifying parameters for a Volume mounted to the workload. + +For more information on adding ConfigMaps to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). 
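If you manage resources with `kubectl` as well as the Rancher UI, the equivalent of the steps above is a single command, and the ConfigMap can then be exposed to a workload as environment variables. This is a sketch with placeholder names (`app-config`, `my-namespace`, `my-workload`, and the keys) rather than required values:

```
# Create a ConfigMap with two example key value pairs in a namespace
kubectl create configmap app-config \
  --namespace my-namespace \
  --from-literal=log_level=info \
  --from-literal=greeting=hello

# Expose every key in the ConfigMap as environment variables on a deployment
kubectl set env deployment/my-workload \
  --namespace my-namespace \
  --from=configmap/app-config
```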
diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/k8s-in-rancher/service-discovery/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md new file mode 100644 index 0000000000..7a1118d763 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md @@ -0,0 +1,46 @@ +--- +title: Encrypting HTTP Communication +description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments +weight: 3060 +aliases: + - /rancher/v2.0-v2.4/en/tasks/projects/add-ssl-certificates/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/certificates +--- + +When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. + +Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. + +>**Prerequisites:** You must have a TLS private key and certificate available to upload. + +1. From the **Global** view, select the project where you want to deploy your ingress. + +1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. (For Rancher before v2.3, click **Resources > Certificates.**) + +1. Enter a **Name** for the certificate. + + >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. + +1. Select the **Scope** of the certificate. + + - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. + + - **Available to a single namespace:** The certificate is only available for the deployments in one namespace. If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. + +1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. + + Private key files end with an extension of `.key`. + +1. 
From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. + + Certificate files end with an extension of `.crt`. + +**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. + +- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. +- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. +- Your certificate is added to the **Resources > Secrets > Certificates** view. (For Rancher before v2.3, it is added to **Resources > Certificates.**) + +## What's Next? + +Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress](load-balancer-and-ingress-controller/add-ingresses.md). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md new file mode 100644 index 0000000000..502bfd33eb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md @@ -0,0 +1,42 @@ +--- +title: Background Information on HPAs +weight: 3027 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background +--- + +The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. + +## Why Use Horizontal Pod Autoscaler? + +Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: + +- A minimum and maximum number of pods allowed to run, as defined by the user. +- Observed CPU/memory use, as reported in resource metrics. +- Custom metrics provided by third-party metrics application like Prometheus, Datadog, etc. + +HPA improves your services by: + +- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. +- Increase/decrease performance as needed to accomplish service level agreements. + +## How HPA Works + +![HPA Schema](/img/horizontal-pod-autoscaler.jpg) + +HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: + +Flag | Default | Description | +---------|----------|----------| + `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. + `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operations. + `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. 
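As a concrete illustration of the CPU-based scaling described above, an HPA can be created for an existing deployment with `kubectl autoscale`. The deployment name `my-app` and the thresholds below are placeholders, not values required by Rancher:

```
# Create an HPA that keeps average CPU use around 80%,
# scaling the deployment between 2 and 10 pods
kubectl autoscale deployment my-app --min=2 --max=10 --cpu-percent=80

# Inspect the HPA and its current/target metrics
kubectl get hpa
```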
+ + +For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +## Horizontal Pod Autoscaler API Objects + +HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. + +For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md new file mode 100644 index 0000000000..4a46e27fe9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md @@ -0,0 +1,206 @@ +--- +title: Manual HPA Installation for Clusters Created Before Rancher v2.0.7 +weight: 3050 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-for-rancher-before-2_0_7 + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-for-rancher-before-2_0_7/ +--- + +This section describes how to manually install HPAs for clusters created with Rancher before v2.0.7. This section also describes how to configure your HPA to scale up or down, and how to assign roles to your HPA. + +Before you can use HPA in your Kubernetes cluster, you must fulfill some requirements. + +### Requirements + +Be sure that your Kubernetes cluster services are running with these flags at minimum: + +- kube-api: `requestheader-client-ca-file` +- kubelet: `read-only-port` at 10255 +- kube-controller: Optional, just needed if distinct values than default are required. + + - `horizontal-pod-autoscaler-downscale-delay: "5m0s"` + - `horizontal-pod-autoscaler-upscale-delay: "3m0s"` + - `horizontal-pod-autoscaler-sync-period: "30s"` + +For an RKE Kubernetes cluster definition, add this snippet in the `services` section. To add this snippet using the Rancher v2.0 UI, open the **Clusters** view and select **⋮ > Edit** for the cluster in which you want to use HPA. Then, from **Cluster Options**, click **Edit as YAML**. Add the following snippet to the `services` section: + +``` +services: +... + kube-api: + extra_args: + requestheader-client-ca-file: "/etc/kubernetes/ssl/kube-ca.pem" + kube-controller: + extra_args: + horizontal-pod-autoscaler-downscale-delay: "5m0s" + horizontal-pod-autoscaler-upscale-delay: "1m0s" + horizontal-pod-autoscaler-sync-period: "30s" + kubelet: + extra_args: + read-only-port: 10255 +``` + +Once the Kubernetes cluster is configured and deployed, you can deploy metrics services. + +>**Note:** `kubectl` command samples in the sections that follow were tested in a cluster running Rancher v2.0.6 and Kubernetes v1.10.1. + +### Configuring HPA to Scale Using Resource Metrics + +To create HPA resources based on resource metrics such as CPU and memory use, you need to deploy the `metrics-server` package in the `kube-system` namespace of your Kubernetes cluster. 
This deployment allows HPA to consume the `metrics.k8s.io` API. + +>**Prerequisite:** You must be running `kubectl` 1.8 or later. + +1. Connect to your Kubernetes cluster using `kubectl`. + +1. Clone the GitHub `metrics-server` repo: + ``` + # git clone https://github.com/kubernetes-incubator/metrics-server + ``` + +1. Install the `metrics-server` package. + ``` + # kubectl create -f metrics-server/deploy/1.8+/ + ``` + +1. Check that `metrics-server` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check the service pod for a status of `running`. Enter the following command: + ``` + # kubectl get pods -n kube-system + ``` + Then check for the status of `running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + metrics-server-6fbfb84cdd-t2fk9 1/1 Running 0 8h + ... + ``` + 1. Check the service logs for service availability. Enter the following command: + ``` + # kubectl -n kube-system logs metrics-server-6fbfb84cdd-t2fk9 + ``` + Then review the log to confirm that the `metrics-server` package is running. + +
    + Metrics Server Log Output + + ``` + I0723 08:09:56.193136 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:'' + I0723 08:09:56.193574 1 heapster.go:72] Metrics Server version v0.2.1 + I0723 08:09:56.194480 1 configs.go:61] Using Kubernetes client with master "https://10.43.0.1:443" and version + I0723 08:09:56.194501 1 configs.go:62] Using kubelet port 10255 + I0723 08:09:56.198612 1 heapster.go:128] Starting with Metric Sink + I0723 08:09:56.780114 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) + I0723 08:09:57.391518 1 heapster.go:101] Starting Heapster API server... + [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] listing is available at https:///swaggerapi + [restful] 2018/07/23 08:09:57 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ + I0723 08:09:57.394080 1 serve.go:85] Serving securely on 0.0.0.0:443 + ``` + +
    + + +1. Check that the metrics api is accessible from `kubectl`. + + + - If you are accessing the cluster through Rancher, enter your Server URL in the `kubectl` config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. + ``` + # kubectl get --raw /k8s/clusters//apis/metrics.k8s.io/v1beta1 + ``` + If the API is working correctly, you should receive output similar to the output below. + ``` + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} + ``` + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/metrics.k8s.io/v1beta1 + ``` + If the API is working correctly, you should receive output similar to the output below. + ``` + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]} + ``` + +### Assigning Additional Required Roles to Your HPA + +By default, HPA reads resource and custom metrics with the user `system:anonymous`. Assign `system:anonymous` to `view-resource-metrics` and `view-custom-metrics` in the ClusterRole and ClusterRoleBindings manifests. These roles are used to access metrics. + +To do it, follow these steps: + +1. Configure `kubectl` to connect to your cluster. + +1. Copy the ClusterRole and ClusterRoleBinding manifest for the type of metrics you're using for your HPA. + +
    + Resource Metrics: ApiGroups resource.metrics.k8s.io + + ``` + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: view-resource-metrics + rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: view-resource-metrics + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view-resource-metrics + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:anonymous + ``` + +
    +
    + Custom Metrics: ApiGroups custom.metrics.k8s.io + + ``` + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: view-custom-metrics + rules: + - apiGroups: + - custom.metrics.k8s.io + resources: + - "*" + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: view-custom-metrics + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: view-custom-metrics + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:anonymous + ``` + +
    + +1. Create them in your cluster using one of the follow commands, depending on the metrics you're using. + ``` + # kubectl create -f + # kubectl create -f + ``` diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md new file mode 100644 index 0000000000..f3c912ffeb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md @@ -0,0 +1,211 @@ +--- +title: Managing HPAs with kubectl +weight: 3029 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl +--- + +This section describes HPA management with `kubectl`. This document has instructions for how to: + +- Create an HPA +- Get information on HPAs +- Delete an HPA +- Configure your HPAs to scale with CPU or memory utilization +- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics + +### Note For Rancher v2.3.x + +In Rancher v2.3.x, you can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI](manage-hpas-with-ui.md). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. + +### Note For Rancher Before v2.0.7 + +Clusters created with older versions of Rancher don't automatically have all the requirements to create an HPA. To install an HPA on these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7](hpa-for-rancher-before-2.0.7.md). + +##### Basic kubectl Command for Managing HPAs + +If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`: + +- Creating HPA + + - With manifest: `kubectl create -f ` + + - Without manifest (Just support CPU): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50` + +- Getting HPA info + + - Basic: `kubectl get hpa hello-world` + + - Detailed description: `kubectl describe hpa hello-world` + +- Deleting HPA + + - `kubectl delete hpa hello-world` + +##### HPA Manifest Definition Example + +The HPA manifest is the config file used for managing an HPA with `kubectl`. + +The following snippet demonstrates use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive. + +```yml +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: hello-world +spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi +``` + + +Directive | Description +---------|----------| + `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. | + `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-word` deployment. | + `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. 
|
 `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10. |
 `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU. |
 `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory. |
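The manifest above uses `autoscaling/v2beta1`, which is what the rest of this page assumes. If your cluster also serves the `autoscaling/v2beta2` API, the same HPA could be written with the newer metric target syntax; a minimal sketch for comparison (not required for the examples on this page):

```yml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
  # 50% average CPU utilization across pods
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50
  # 100Mi average memory usage across pods
  - type: Resource
    resource:
      name: memory
      target:
        type: AverageValue
        averageValue: 100Mi
```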
    + +##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) + +Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. Run the following commands to check if metrics are available in your installation: + +``` +$ kubectl top nodes +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +node-controlplane 196m 9% 1623Mi 42% +node-etcd 80m 4% 1090Mi 28% +node-worker 64m 3% 1146Mi 29% +$ kubectl -n kube-system top pods +NAME CPU(cores) MEMORY(bytes) +canal-pgldr 18m 46Mi +canal-vhkgr 20m 45Mi +canal-x5q5v 17m 37Mi +canal-xknnz 20m 37Mi +kube-dns-7588d5b5f5-298j2 0m 22Mi +kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi +metrics-server-97bc649d5-jxrlt 0m 12Mi +$ kubectl -n kube-system logs -l k8s-app=metrics-server +I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true +I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 +I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version +I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 +I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink +I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) +I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ +I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 +``` + +If you have created your cluster in Rancher v2.0.6 or before, please refer to the manual installation. + +##### Configuring HPA to Scale Using Custom Metrics with Prometheus + +You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. + +For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: + +- Prometheus is deployed in the cluster. +- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. +- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` + +Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. + +For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). + +1. 
Initialize Helm in your cluster. + ``` + # kubectl -n kube-system create serviceaccount tiller + kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + helm init --service-account tiller + ``` + +1. Clone the `banzai-charts` repo from GitHub: + ``` + # git clone https://github.com/banzaicloud/banzai-charts + ``` + +1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. + ``` + # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system + ``` + +1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check that the service pod is `Running`. Enter the following command. + ``` + # kubectl get pods -n kube-system + ``` + From the resulting output, look for a status of `Running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h + ... + ``` + 1. Check the service logs to make sure the service is running correctly by entering the command that follows. + ``` + # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system + ``` + Then review the log output to confirm the service is running. + +
    + Prometheus Adaptor Logs + + ... + I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds + I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: + I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT + I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json + I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 + I0724 10:18:45.696766 1 request.go:836] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} + I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.sslip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK + 
I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} + I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] + I0724 10:18:51.727845 1 request.go:836] Request Body: {"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} + ... +
    + + + +1. Check that the metrics API is accessible from kubectl. + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
    + + - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. + ``` + # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
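Once the `custom.metrics.k8s.io` API answers as shown above, an HPA can reference one of the exposed pod metrics directly. A minimal sketch using the `cpu_system` metric from this example (which metrics are actually available depends on your Prometheus and adapter configuration):

```yml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
  # Scale up when the average cpu_system metric across pods exceeds 20m
  - type: Pods
    pods:
      metricName: cpu_system
      targetAverageValue: 20m
```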
    diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md new file mode 100644 index 0000000000..4f0b1e2ee1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md @@ -0,0 +1,57 @@ +--- +title: Managing HPAs with the Rancher UI +weight: 3028 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui +--- + +_Available as of v2.3.0_ + +The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. + +If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +## Creating an HPA + +1. From the **Global** view, open the project that you want to deploy a HPA to. + +1. Click **Resources > HPA.** + +1. Click **Add HPA.** + +1. Enter a **Name** for the HPA. + +1. Select a **Namespace** for the HPA. + +1. Select a **Deployment** as scale target for the HPA. + +1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. + +1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +1. Click **Create** to create the HPA. + +> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. + +## Get HPA Metrics and Status + +1. From the **Global** view, open the project with the HPAs you want to look at. + +1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. + +1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. + + +## Deleting an HPA + +1. From the **Global** view, open the project that you want to delete an HPA from. + +1. Click **Resources > HPA.** + +1. Find the HPA which you would like to delete. + +1. Click **⋮ > Delete**. + +1. Click **Delete** to confirm. + +> **Result:** The HPA is deleted from the current cluster. 
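HPAs created or deleted in the Rancher UI are ordinary Kubernetes objects, so you can cross-check the result from the command line if you have `kubectl` access to the cluster. For example (namespace and HPA name below are placeholders):

```
# List HPAs in the namespace with current/target metrics and replica counts
kubectl -n <namespace> get hpa

# Show detailed status and scaling events for one HPA
kubectl -n <namespace> describe hpa <hpa-name>
```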
diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md
new file mode 100644
index 0000000000..1715c2a5b2
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md
@@ -0,0 +1,534 @@
---
title: Testing HPAs with kubectl
weight: 3031

aliases:
  - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa
---

This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (Rancher v2.3.x or later), refer to [Managing HPAs with the Rancher UI](manage-hpas-with-ui.md).

For HPA to work correctly, service deployments should define resource requests for their containers. Follow this hello-world example to test whether HPA is working correctly.

1. Configure `kubectl` to connect to your Kubernetes cluster.

1. Copy the `hello-world` deployment manifest below.
    + Hello World Manifest + + ``` + apiVersion: apps/v1beta2 + kind: Deployment + metadata: + labels: + app: hello-world + name: hello-world + namespace: default + spec: + replicas: 1 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + resources: + requests: + cpu: 500m + memory: 64Mi + ports: + - containerPort: 80 + protocol: TCP + restartPolicy: Always + --- + apiVersion: v1 + kind: Service + metadata: + name: hello-world + namespace: default + spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: hello-world + ``` + +
    + +1. Deploy it to your cluster. + + ``` + # kubectl create -f + ``` + +1. Copy one of the HPAs below based on the metric type you're using: + +
    + Hello World HPA: Resource Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 1000Mi + ``` + +
    +
    + Hello World HPA: Custom Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi + - type: Pods + pods: + metricName: cpu_system + targetAverageValue: 20m + ``` + +
    + +1. View the HPA info and description. Confirm that metric data is shown. + +
    + Resource Metrics + + 1. Enter the following commands. + ``` + # kubectl get hpa + NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE + hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m + # kubectl describe hpa + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 1253376 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    +
    + Custom Metrics + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive the output that follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 3514368 / 100Mi + "cpu_system" on pods: 0 / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    + +1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). + +1. Test that pod autoscaling works as intended.

    + **To Test Autoscaling Using Resource Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to two pods based on CPU Usage. + + 1. View your HPA. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10928128 / 100Mi + resource cpu on pods (as a percentage of request): 56% (280m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm you've scaled to two pods. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Upscale to 3 pods: CPU Usage Up to Target + + Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 9424896 / 100Mi + resource cpu on pods (as a percentage of request): 66% (333m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + ``` + 2. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-f46kh 0/1 Running 0 1m + hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10070016 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + +
    + + **To Test Autoscaling Using Custom Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale two pods based on CPU usage. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8159232 / 100Mi + "cpu_system" on pods: 7m / 20m + resource cpu on pods (as a percentage of request): 64% (321m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm two pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 3 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows: + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + ``` + 1. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + # kubectl get pods + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 4 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm four pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m + hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive similar output to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8101888 / 100Mi + "cpu_system" on pods: 8m / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + 1. Enter the following command to confirm a single pods is running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
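For reference, the scale-up events shown in these walkthroughs were driven by HTTP load against the `hello-world` service. A rough sketch with [Hey](https://github.com/rakyll/hey), assuming the service is reachable at a placeholder address that you substitute with your ingress hostname or node IP:

```
# 50 concurrent workers sending requests for 5 minutes
hey -z 5m -c 50 http://<ingress-hostname-or-node-ip>/
```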
    diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/k8s-in-rancher/registries/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md new file mode 100644 index 0000000000..49a163e130 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md @@ -0,0 +1,72 @@ +--- +title: Adding Ingresses to Your Project +description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project +weight: 3042 +aliases: + - /rancher/v2.0-v2.4/en/tasks/workloads/add-ingress/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/ingress +--- + +Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a [Global DNS entry](../../helm-charts-in-rancher/globaldns.md). + +1. From the **Global** view, open the project that you want to add ingress to. +1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. (In versions before v2.3.0, just click the **Load Balancing** tab.) Then click **Add Ingress**. +1. Enter a **Name** for the ingress. +1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. +1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. +1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. + +**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. + + +# Ingress Rule Configuration + +- [Automatically generate a sslip.io hostname](#automatically-generate-a-sslip-io-hostname) +- [Specify a hostname to use](#specify-a-hostname-to-use) +- [Use as the default backend](#use-as-the-default-backend) +- [Certificates](#certificates) +- [Labels and Annotations](#labels-and-annotations) + +### Automatically generate a sslip.io hostname + +If you choose this option, ingress routes requests to hostname to a DNS name that's automatically generated. Rancher uses [sslip.io](http://sslip.io/) to automatically generates the DNS name. This option is best used for testing, _not_ production environments. + +>**Note:** To use this option, you must be able to resolve to `sslip.io` addresses. + +1. Add a **Target Backend**. 
By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. +1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. +1. Select a workload or service from the **Target** drop-down list for each target you've added. +1. Enter the **Port** number that each target operates on. + +### Specify a hostname to use + +If you use this option, ingress routes requests for a hostname to the service or workload that you specify. + +1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. +1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. +1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. +1. Select a workload or service from the **Target** drop-down list for each target you've added. +1. Enter the **Port** number that each target operates on. + +### Use as the default backend + +Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. + +>**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. + +1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. +1. Select a service or workload from the **Target** drop-down list. + +### Certificates +>**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates](../encrypt-http-communication.md). + +1. Click **Add Certificate**. +1. Select a **Certificate** from the drop-down list. +1. Enter the **Host** using encrypted communication. +1. To add additional hosts that use the certificate, click **Add Hosts**. + +### Labels and Annotations + +Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. + +For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). 
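For reference, the ingress options described above correspond to a standard Kubernetes Ingress object. A rough sketch of a host-based rule with a path and a certificate (all names and hosts below are placeholders, and the API version depends on your Kubernetes release; clusters from this era typically serve `extensions/v1beta1` or `networking.k8s.io/v1beta1`):

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: mysite-ingress
  namespace: default
spec:
  tls:
  - hosts:
    - www.mysite.com
    secretName: mysite-cert           # certificate added in Rancher, stored as a TLS secret
  rules:
  - host: www.mysite.com
    http:
      paths:
      - path: /contact-us
        backend:
          serviceName: contact-us-svc  # hypothetical service name
          servicePort: 80
```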
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md new file mode 100644 index 0000000000..21c83f4089 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -0,0 +1,67 @@ +--- +title: "Layer 4 and Layer 7 Load Balancing" +description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" +weight: 3041 +aliases: + - /rancher/v2.0-v2.4/en/concepts/load-balancing/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers +--- +Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. + +## Layer-4 Load Balancer + +Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. + +Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. + +> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. + +### Support for Layer-4 Load Balancing + +Support for layer-4 load balancer varies based on the underlying cloud provider. + +Cluster Deployment | Layer-4 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GCE cloud provider +Azure AKS | Supported by Azure cloud provider +RKE on EC2 | Supported by AWS cloud provider +RKE on DigitalOcean | Limited NGINX or third-party Ingress* +RKE on vSphere | Limited NGINX or third party-Ingress* +RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) + +## Layer-7 Load Balancer + +The Layer-7 load balancer (or the ingress controller) supports host- and path-based load balancing and SSL termination. It only forwards HTTP and HTTPS traffic and therefore listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller. + +### Support for Layer-7 Load Balancing + +Support for layer-7 load balancing varies based on the underlying cloud provider. + +Cluster Deployment | Layer-7 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GKE cloud provider +Azure AKS | Not Supported +RKE on EC2 | Nginx Ingress Controller +RKE on DigitalOcean | Nginx Ingress Controller +RKE on vSphere | Nginx Ingress Controller +RKE on Custom Hosts
(e.g. bare-metal servers) | Nginx Ingress Controller + +### Host Names in Layer-7 Load Balancer + +Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. + +Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: + +1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposed by the Layer-7 load balancer. +2. Ask Rancher to generate an sslip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name `..a.b.c.d.sslip.io`. + +The benefit of using sslip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. + +## Related Links + +- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md new file mode 100644 index 0000000000..4ea67ee24b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md @@ -0,0 +1,47 @@ +--- +title: Secrets +weight: 3062 +aliases: + - /rancher/v2.0-v2.4/en/tasks/projects/add-a-secret + - /rancher/v2.0-v2.4/en/k8s-in-rancher/secrets +--- + +[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key-value pairs. + +> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.](kubernetes-and-docker-registries.md) + +When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + +# Creating Secrets + +When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. + +1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. + +2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. + +3. Enter a **Name** for the secret. + + >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names.
Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. + +4. Select a **Scope** for the secret. You can either make the secret available for the entire project or for a single namespace. + +5. From **Secret Values**, click **Add Secret Value** to add a key-value pair. Add as many values as you need. + + >**Tip:** You can add multiple key-value pairs to the secret by copying and pasting. + > + > ![](/img/bulk-key-values.gif) + +6. Click **Save**. + +**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + +# What's Next? + +Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. + +For more information on adding a secret to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md new file mode 100644 index 0000000000..1f4b8d05a3 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md @@ -0,0 +1,38 @@ +--- +title: Adding a Sidecar +weight: 3029 +aliases: + - /rancher/v2.0-v2.4/en/tasks/workloads/add-a-sidecar/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/add-a-sidecar +--- +A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. + +1. From the **Global** view, open the project running the workload you want to add a sidecar to. + +1. Click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. + +1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. + +1. Enter a **Name** for the sidecar. + +1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. + + - **Standard Container:** + + The sidecar container is deployed after the main container. + + - **Init Container:** + + The sidecar container is deployed before the main container. + +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. + +1. Set the remaining options. You can read about them in [Deploying Workloads](deploy-workloads.md). + +1. Click **Launch**. + +**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment.
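Under the hood, adding a sidecar simply adds another container entry to the workload's pod spec, while an init container goes into `initContainers`. The sketch below is a hand-written illustration of the resulting shape, not output copied from Rancher; all names, images, and commands are placeholders.

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web                      # placeholder workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      initContainers:            # "Init Container" sidecar type: runs before the main container starts
        - name: init-config
          image: busybox
          command: ["sh", "-c", "echo preparing configuration"]
      containers:
        - name: web              # main container
          image: nginx
        - name: log-agent        # "Standard Container" sidecar type: runs alongside the main container
          image: busybox
          command: ["sh", "-c", "tail -f /dev/null"]
```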
+ +## Related Links + +- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md new file mode 100644 index 0000000000..e4afb406df --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md @@ -0,0 +1,60 @@ +--- +title: Deploying Workloads +description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. +weight: 3026 +aliases: + - /rancher/v2.0-v2.4/en/tasks/workloads/deploy-workloads/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/deploy-workloads +--- + +Deploy a workload to run an application in one or more containers. + +1. From the **Global** view, open the project that you want to deploy a workload to. + +1. 1. Click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) From the **Workloads** view, click **Deploy**. + +1. Enter a **Name** for the workload. + +1. Select a [workload type](../../../../pages-for-subheaders/workloads-and-pods.md). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** + +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. + +1. Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. + +1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster . For more information, see [Services](../../../../pages-for-subheaders/workloads-and-pods.md#services). + +1. Configure the remaining options: + + - **Environment Variables** + + Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap](../configmaps.md). + + - **Node Scheduling** + - **Health Check** + - **Volumes** + + Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap](../configmaps.md). + + When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set. This option is available in the UI as of Rancher v2.2.0. 
+ + - **Scaling/Upgrade Policy** + + >**Amazon Note for Volumes:** + > + > To mount an Amazon EBS volume: + > + >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/unattach volumes. + > + >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option see [Creating an Amazon EC2 Cluster](../../kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) or [Creating a Custom Cluster](../../../../pages-for-subheaders/use-existing-nodes.md). + + +1. Click **Show Advanced Options** and configure: + + - **Command** + - **Networking** + - **Labels & Annotations** + - **Security and Host Config** + +1. Click **Launch**. + +**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/rollback-workloads/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md diff --git a/content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md rename to versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md new file mode 100644 index 0000000000..6b3b7feeed --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md @@ -0,0 +1,94 @@ +--- +title: "6. Service Discovery" +weight: 600 +aliases: + - /rancher/v2.x/en/v1.6-migration/discover-services/ +--- + +Service discovery is one of the core functionalities of any container-based environment. Once you have packaged and launched your application, the next step is making it discoverable to other containers in your environment or the external world. This document will describe how to use the service discovery support provided by Rancher v2.x so that you can find them by name. + +This document will also show you how to link the workloads and services that you migrated into Rancher v2.x. When you parsed your services from v1.6 using migration-tools CLI, it output two files for each service: one deployment manifest and one service manifest. You'll have to link these two files together before the deployment works correctly in v2.x. + +
    Resolve the output.txt Link Directive
    + +![Resolve Link Directive](/img/resolve-links.png) + +## In This Document + + + + +- [Service Discovery: Rancher v1.6 vs. v2.x](#service-discovery-rancher-v1-6-vs-v2-x) +- [Service Discovery Within and Across Namespaces](#service-discovery-within-and-across-namespaces) +- [Container Discovery](#container-discovery) +- [Service Name Alias Creation](#service-name-alias-creation) + + + +## Service Discovery: Rancher v1.6 vs. v2.x + +For Rancher v2.x, we've replaced the Rancher DNS microservice used in v1.6 with native [Kubernetes DNS support](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/), which provides equivalent service discovery for Kubernetes workloads and pods. Former Cattle users can replicate all the service discovery features from Rancher v1.6 in v2.x. There's no loss of functionality. + +Kubernetes schedules a DNS pod and service in the cluster, which is similar to the [Rancher v1.6 DNS microservice]({{}}/rancher/v1.6/en/cattle/internal-dns-service/#internal-dns-service-in-cattle-environments). Kubernetes then configures its kubelets to route all DNS lookups to this DNS service, which is skyDNS, a flavor of the default Kube-DNS implementation. + +The following table displays each service discovery feature available in the two Rancher releases. + +Service Discovery Feature | Rancher v1.6 | Rancher v2.x | Description +--------------------------|--------------|--------------|------------- +[service discovery within and across stack][1] (i.e., clusters) | ✓ | ✓ | All services in the stack are resolvable by `` and by `.` across stacks. +[container discovery][2] | ✓ | ✓ | All containers are resolvable globally by their name. +[service alias name creation][3] | ✓ | ✓ | Adding an alias name to services and linking to other services using aliases. +[discovery of external services][4] | ✓ | ✓ | Pointing to services deployed outside of Rancher using the external IP(s) or a domain name. + +[1]: #service-discovery-within-and-across-stacks +[2]: #container-discovery +[3]: #service-name-alias-creation +[4]: #service-name-alias-creation + +
    + +### Service Discovery Within and Across Namespaces + + +When you create a _new_ workload in v2.x (not migrated, more on that [below](#linking-migrated-workloads-and-services)), Rancher automatically creates a service with an identical name, and then links the service and workload together. If you don't explicitly expose a port, the default port of `42` is used. This practice makes the workload discoverable within and across namespaces by its name. + +### Container Discovery + +Individual pods running in the Kubernetes cluster also get a DNS record assigned, which uses dot notation as well: `..pod.cluster.local`. For example, a pod with an IP of `10.42.2.7` in the namespace `default` with a DNS name of `cluster.local` would have an entry of `10-42-2-7.default.pod.cluster.local`. + +Pods can also be resolved using the `hostname` and `subdomain` fields if set in the pod spec. Details about this resolution is covered in the [Kubernetes docs](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). + +### Linking Migrated Workloads and Services + +When you migrate v1.6 services to v2.x, Rancher does not automatically create a Kubernetes service record for each migrated deployment. Instead, you'll have to link the deployment and service together manually, using any of the methods listed below. + +In the image below, the `web-deployment.yml` and `web-service.yml` files [created after parsing](migrate-services.md#migration-example-file-output) our [migration example services](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) are linked together. + +
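The screenshot below shows this link being made in the Rancher UI. In the YAML itself, the link is nothing more than the Service's `selector` matching the labels on the Deployment's pod template; a simplified sketch (label keys, names, images, and ports are placeholders, not the exact values migration-tools generates):

```
# web-deployment.yml (excerpt, simplified)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web        # the Service below selects pods by this label
    spec:
      containers:
        - name: web
          image: nginx  # placeholder image
---
# web-service.yml (excerpt, simplified)
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web            # must match the pod labels above
  ports:
    - port: 8080
      targetPort: 8080
```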
    Linked Workload and Kubernetes Service
    + +![Linked Workload and Kubernetes Service](/img/linked-service-workload.png) + + +### Service Name Alias Creation + +Just as you can create an alias for Rancher v1.6 services, you can do the same for Rancher v2.x workloads. Similarly, you can also create DNS records pointing to services running externally, using either their hostname or IP address. These DNS records are Kubernetes service objects. + +Using the v2.x UI, use the context menu to navigate to the `Project` view. Then click **Resources > Workloads > Service Discovery.** (In versions before v2.3.0, click the **Workloads > Service Discovery** tab.) All existing DNS records created for your workloads are listed under each namespace. + +Click **Add Record** to create new DNS records. Then view the various options supported to link to external services or to create aliases for another workload, DNS record, or set of pods. + +
    Add Service Discovery Record
    +![Add Service Discovery Record](/img/add-record.png) + +The following table indicates which alias options are implemented natively by Kubernetes and which options are implemented by Rancher leveraging Kubernetes. + +Option | Kubernetes-implemented? | Rancher-implemented? +-------|-------------------------|--------------------- +Pointing to an external hostname | ✓ | | +Pointing to a set of pods that match a selector | ✓ | | +Pointing to an external IP address | | ✓ +Pointing to another workload | | ✓ +Create alias for another DNS record | | ✓ + + +### [Next: Load Balancing](load-balancing.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md new file mode 100644 index 0000000000..2901cbeb91 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md @@ -0,0 +1,106 @@ +--- +title: "3. Expose Your Services" +weight: 400 +aliases: + - /rancher/v2.x/en/v1.6-migration/expose-services/ +--- + +In testing environments, you usually need to route external traffic to your cluster containers by using an unadvertised IP and port number, providing users access to their apps. You can accomplish this goal using port mapping, which exposes a workload (i.e., service) publicly over a specific port, provided you know your node IP address(es). You can either map a port using HostPorts (which exposes a service on a specified port on a single node) or NodePorts (which exposes a service on _all_ nodes on a single port). + +Use this document to correct workloads that list `ports` in `output.txt`. You can correct it by either setting a HostPort or a NodePort. + +
    Resolve ports for the web Workload
    + +![Resolve Ports](/img/resolve-ports.png) + + +## In This Document + + + +- [What's Different About Exposing Services in Rancher v2.x?](#what-s-different-about-exposing-services-in-rancher-v2-x) +- [HostPorts](#hostport) +- [Setting HostPort](#setting-hostport) +- [NodePorts](#nodeport) +- [Setting NodePort](#setting-nodeport) + + + +## What's Different About Exposing Services in Rancher v2.x? + +In Rancher v1.6, we used the term _Port Mapping_ for exposing an IP address and port where your you and your users can access a service. + +In Rancher v2.x, the mechanisms and terms for service exposure have changed and expanded. You now have two port mapping options: _HostPorts_ (which is most synonymous with v1.6 port mapping, allows you to expose your app at a single IP and port) and _NodePorts_ (which allows you to map ports on _all_ of your cluster nodes, not just one). + +Unfortunately, port mapping cannot be parsed by the migration-tools CLI. If the services you're migrating from v1.6 to v2.x have port mappings set, you'll have to either set a [HostPort](#hostport) or [NodePort](#nodeport) as a replacement. + +## HostPort + +A _HostPort_ is a port exposed to the public on a _specific node_ running one or more pod. Traffic to the node and the exposed port (`:`) are routed to the requested container's private port. Using a HostPort for a Kubernetes pod in Rancher v2.x is synonymous with creating a public port mapping for a container in Rancher v1.6. + +In the following diagram, a user is trying to access an instance of Nginx, which is running within a pod on port 80. However, the Nginx deployment is assigned a HostPort of 9890. The user can connect to this pod by browsing to its host IP address, followed by the HostPort in use (9890 in case). + +![HostPort Diagram](/img/hostPort.svg) + + +#### HostPort Pros + +- Any port available on the host can be exposed. +- Configuration is simple, and the HostPort is set directly in the Kubernetes pod specifications. Unlike NodePort, no other objects need to be created to expose your app. + +#### HostPort Cons + +- Limits the scheduling options for your pod, as only hosts with vacancies for your chosen port can be used. +- If the scale of your workload is larger than the number of nodes in your Kubernetes cluster, the deployment fails. +- Any two workloads that specify the same HostPort cannot be deployed to the same node. +- If the host where your pods are running becomes unavailable, Kubernetes reschedules the pods to different nodes. Thus, if the IP address for your workload changes, external clients of your application will lose access to the pod. The same thing happens when you restart your pods—Kubernetes reschedules them to a different node. + +## Setting HostPort + +You can set a HostPort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a HostPort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to the HostPort exposed on your target node. + +For example, for the web-deployment.yml file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, set the publish the port that the container uses, and then declare a HostPort listening on the port of your choice (`9890`) as shown below. You can then access your workload by clicking the link created in the Rancher UI. + +
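In the workload's manifest, the result of that UI change is roughly the excerpt below — a sketch assuming the container port 80 and HostPort `9890` from the example (the image name is a placeholder):

```
spec:
  containers:
    - name: web
      image: nginx            # placeholder image
      ports:
        - containerPort: 80   # port the container listens on
          hostPort: 9890      # port exposed on the single node running this pod
```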
    Port Mapping: Setting HostPort
    + +![](/img/set-hostport.gif) + +## NodePort + +A _NodePort_ is a port that's open to the public _on each_ of your cluster nodes. When the NodePort receives a request for any of the cluster hosts' IP address for the set NodePort value, NodePort (which is a Kubernetes service) routes traffic to a specific pod, regardless of what node it's running on. NodePort provides a static endpoint where external requests can reliably reach your pods. + +NodePorts help you circumvent an IP address shortcoming. Although pods can be reached by their IP addresses, they are disposable by nature. Pods are routinely destroyed and recreated, getting a new IP address with each replication. Therefore, IP addresses are not a reliable way to access your pods. NodePorts help you around this issue by providing a static service where they can always be reached. Even if your pods change their IP addresses, external clients dependent on them can continue accessing them without disruption, all without any knowledge of the pod re-creation occurring on the back end. + +In the following diagram, a user is trying to connect to an instance of Nginx running in a Kubernetes cluster managed by Rancher. Although he knows what NodePort Nginx is operating on (30216 in this case), he does not know the IP address of the specific node that the pod is running on. However, with NodePort enabled, he can connect to the pod using the IP address for _any_ node in the cluster. Kubeproxy will forward the request to the correct node and pod. + +![NodePort Diagram](/img/nodePort.svg) + +NodePorts are available within your Kubernetes cluster on an internal IP. If you want to expose pods external to the cluster, use NodePorts in conjunction with an external load balancer. Traffic requests from outside your cluster for `:` are directed to the workload. The `` can be the IP address of any node in your Kubernetes cluster. + +#### NodePort Pros + +- Creating a NodePort service provides a static public endpoint to your workload pods. There, even if the pods are destroyed, Kubernetes can deploy the workload anywhere in the cluster without altering the public endpoint. +- The scale of the pods is not limited by the number of nodes in the cluster. NodePort allows decoupling of public access from the number and location of pods. + +#### NodePort Cons + +- When a NodePort is used, that `:` is reserved in your Kubernetes cluster on all nodes, even if the workload is never deployed to the other nodes. +- You can only specify a port from a configurable range (by default, it is `30000-32767`). +- An extra Kubernetes object (a Kubernetes service of type NodePort) is needed to expose your workload. Thus, finding out how your application is exposed is not straightforward. + +## Setting NodePort + +You can set a NodePort for migrated workloads (i.e., services) using the Rancher v2.x UI. To add a NodePort, browse to the project containing your workloads, and edit each workload that you want to expose, as shown below. Map the port that your service container exposes to a NodePort, which you'll be able to access from each cluster node. + +For example, for the `web-deployment.yml` file parsed from v1.6 that we've been using as a sample, we would edit its Kubernetes manifest, set the publish the port that the container uses, and then declare a NodePort. You can then access your workload by clicking the link created in the Rancher UI. 
+ +>**Note:** +> +>- If you set a NodePort without giving it a value, Rancher chooses a port at random from the following range: `30000-32767`. +>- If you manually set a NodePort, you must assign it a value within the `30000-32767` range. + +
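Expressed as a manifest rather than through the UI, the NodePort mapping is a Service of type `NodePort`; the sketch below uses the example values from this section, and the name, labels, and ports are placeholders:

```
apiVersion: v1
kind: Service
metadata:
  name: web-nodeport        # placeholder name
spec:
  type: NodePort
  selector:
    app: web                # placeholder: must match the workload's pod labels
  ports:
    - port: 80              # port exposed inside the cluster
      targetPort: 80        # container port
      nodePort: 30216       # port opened on every node; must fall within 30000-32767
```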
    Port Mapping: Setting NodePort
    + +![](/img/set-nodeport.gif) + +### [Next: Configure Health Checks](monitor-apps.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md new file mode 100644 index 0000000000..745752f249 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md @@ -0,0 +1,102 @@ +--- +title: "1. Get Started" +weight: 25 +aliases: + - /rancher/v2.x/en/v1.6-migration/get-started/ +--- +Get started with your migration to Rancher v2.x by installing Rancher and configuring your new Rancher environment. + +## Outline + + + +- [A. Install Rancher v2.x](#a-install-rancher-v2-x) +- [B. Configure Authentication](#b-configure-authentication) +- [C. Provision a Cluster and Project](#c-provision-a-cluster-and-project) +- [D. Create Stacks](#d-create-stacks) + + + + +## A. Install Rancher v2.x + +The first step in migrating from v1.6 to v2.x is to install the Rancher v2.x Server side-by-side with your v1.6 Server, as you'll need your old install during the migration process. Due to the architecture changes between v1.6 and v2.x, there is no direct path for upgrade. You'll have to install v2.x independently and then migrate your v1.6 services to v2.x. + +New for v2.x, all communication to Rancher Server is encrypted. The procedures below instruct you not only on installation of Rancher, but also creation and installation of these certificates. + +Before installing v2.x, provision one host or more to function as your Rancher Server(s). You can find the requirements for these hosts in [Server Requirements](../../../pages-for-subheaders/installation-requirements.md). + +After provisioning your node(s), install Rancher: + +- [Docker Install](installation/single-node) + + For development environments, Rancher can be installed on a single node using Docker. This installation procedure deploys a single Rancher container to your host. + +- [Kubernetes Install](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + + For production environments where your user base requires constant access to your cluster, we recommend installing Rancher in a high availability Kubernetes installation. This installation procedure provisions a three-node cluster and installs Rancher on each node using a Helm chart. + + >**Important Difference:** Although you could install Rancher v1.6 in a high-availability Kubernetes configuration using an external database and a Docker command on each node, Rancher v2.x in a Kubernetes install requires an existing Kubernetes cluster. Review [Kubernetes Install](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) for full requirements. + +## B. Configure Authentication + +After your Rancher v2.x Server is installed, we recommend configuring external authentication (like Active Directory or GitHub) so that users can log into Rancher using their single sign-on. For a full list of supported authentication providers and instructions on how to configure them, see [Authentication](../../../pages-for-subheaders/about-authentication.md). + +
    Rancher v2.x Authentication
    + +![Rancher v2.x Authentication](/img/auth-providers.svg) + +### Local Users + +Although we recommend using an external authentication provider, Rancher v1.6 and v2.x both offer support for users local to Rancher. However, these users cannot be migrated from Rancher v1.6 to v2.x. If you used local users in Rancher v1.6 and want to continue this practice in v2.x, you'll need to [manually recreate these user accounts](../../../pages-for-subheaders/about-authentication.md) and assign them access rights. + +As a best practice, you should use a hybrid of external _and_ local authentication. This practice provides access to Rancher should your external authentication experience an interruption, as you can still log in using a local user account. Set up a few local accounts as administrative users of Rancher. + + +### SAML Authentication Providers + +In Rancher v1.6, we encouraged our SAML users to use Shibboleth, as it was the only SAML authentication option we offered. However, to better support their minor differences, we've added more fully tested SAML providers for v2.x: Ping Identity, Microsoft ADFS, and FreeIPA. + +## C. Provision a Cluster and Project + +Begin work in Rancher v2.x by using it to provision a new Kubernetes cluster, which is similar to an environment in v1.6. This cluster will host your application deployments. + +A cluster and project in combined together in Rancher v2.x is equivalent to a v1.6 environment. A _cluster_ is the compute boundary (i.e., your hosts) and a _project_ is an administrative boundary (i.e., a grouping of namespaces used to assign access rights to users). + +There's more basic info on provisioning clusters in the headings below, but for full information, see [Provisioning Kubernetes Clusters](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +### Clusters + +In Rancher v1.6, compute nodes were added to an _environment_. Rancher v2.x eschews the term _environment_ for _cluster_, as Kubernetes uses this term for a team of computers instead of _environment_. + +Rancher v2.x lets you launch a Kubernetes cluster anywhere. Host your cluster using: + +- A [hosted Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). +- A [pool of nodes from an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). Rancher launches Kubernetes on the nodes. +- Any [custom node(s)](../../../pages-for-subheaders/use-existing-nodes.md). Rancher can launch Kubernetes on the nodes, be they bare metal servers, virtual machines, or cloud hosts on a less popular infrastructure provider. + +### Projects + +Additionally, Rancher v2.x introduces [projects](k8s-in-rancher/projects-and-namespaces/), which are objects that divide clusters into different application groups that are useful for applying user permissions. This model of clusters and projects allow for multi-tenancy because hosts are owned by the cluster, and the cluster can be further divided into multiple projects where users can manage their apps, but not those of others. + +When you create a cluster, two projects are automatically created: + +- The `System` project, which includes system namespaces where important Kubernetes resources are running (like ingress controllers and cluster dns services) +- The `Default` project. 
+ +However, for production environments, we recommend [creating your own project](../../advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects) and giving it a descriptive name. + +After provisioning a new cluster and project, you can authorize your users to access and use project resources. Similarly to Rancher v1.6 environments, Rancher v2.x allows you to [assign users to projects](k8s-in-rancher/projects-and-namespaces/editing-projects/). By assigning users to projects, you can limit what applications and resources a user can access. + +## D. Create Stacks + +In Rancher v1.6, _stacks_ were used to group together the services that belong to your application. In v2.x, you need to [create namespaces](k8s-in-rancher/projects-and-namespaces/), which are the v2.x equivalent of stacks, for the same purpose. + +In Rancher v2.x, namespaces are child objects to projects. When you create a project, a `default` namespace is added to the project, but you can create your own to parallel your stacks from v1.6. + +During migration, if you don't explicitly define which namespace a service should be deployed to, it's deployed to the `default` namespace. + +Just like v1.6, Rancher v2.x supports service discovery within and across namespaces (we'll get to [service discovery](discover-services.md) soon). + + +### [Next: Migrate Your Services](migrate-services.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md new file mode 100644 index 0000000000..a7d4d7e47a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md @@ -0,0 +1,41 @@ +--- +title: Kubernetes Introduction +weight: 1 +aliases: + - /rancher/v2.x/en/v1.6-migration/kub-intro/ +--- + +Rancher v2.x is built on the [Kubernetes](https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational) container orchestrator. This shift in underlying technology for v2.x is a large departure from v1.6, which supported several popular container orchestrators. Since Rancher is now based entirely on Kubernetes, it's helpful to learn the Kubernetes basics. + +The following table introduces and defines some key Kubernetes concepts. + +| **Concept** | **Definition** | +| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Cluster | A collection of machines that run containerized applications managed by Kubernetes. | +| Namespace | A virtual cluster, multiple of which can be supported by a single physical cluster. | +| Node | One of the physical or virtual machines that make up a cluster. | +| Pod | The smallest and simplest Kubernetes object. A pod represents a set of running [containers](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/#why-containers) on your cluster. | +| Deployment | An API object that manages a replicated application. | +| Workload | Workloads are objects that set deployment rules for pods. | + + +## Migration Cheatsheet + +Because Rancher v1.6 defaulted to our Cattle container orchestrator, it primarily used terminology related to Cattle. 
However, because Rancher v2.x uses Kubernetes, it aligns with the Kubernetes naming standard. This shift could be confusing for people unfamiliar with Kubernetes, so we've created a table that maps terms commonly used in Rancher v1.6 to their equivalents in Rancher v2.x. + +| **Rancher v1.6** | **Rancher v2.x** | +| --- | --- | +| Container | Pod | +| Services | Workload | +| Load Balancer | Ingress | +| Stack | Namespace | +| Environment | Project (Administration)/Cluster (Compute) +| Host | Node | +| Catalog | Helm | +| Port Mapping | HostPort (Single Node)/NodePort (All Nodes) | + +
    +More detailed information on Kubernetes concepts can be found in the +[Kubernetes Concepts Documentation](https://kubernetes.io/docs/concepts/). + +### [Next: Get Started](install-and-configure-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md new file mode 100644 index 0000000000..4af7090515 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md @@ -0,0 +1,164 @@ +--- +title: "7. Load Balancing" +weight: 700 +aliases: + - /rancher/v2.x/en/v1.6-migration/load-balancing/ +--- + +If your applications are public-facing and consume significant traffic, you should place a load balancer in front of your cluster so that users can always access their apps without service interruption. Typically, you can fulfill a high volume of service requests by [horizontally scaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) your deployment, which spins up additional application containers as traffic ramps up. However, this technique requires routing that distributes traffic across your nodes efficiently. In cases where you need to accommodate public traffic that scales up and down, you'll need a load balancer. + +As outlined in [its documentation]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/), Rancher v1.6 provided rich support for load balancing using its own microservice powered by HAProxy, which supports HTTP, HTTPS, TCP hostname, and path-based routing. Most of these same features are available in v2.x. However, load balancers that you used with v1.6 cannot be migrated to v2.x. You'll have to manually recreate your v1.6 load balancer in v2.x. + +If you encounter the `output.txt` text below after parsing your v1.6 Compose files to Kubernetes manifests, you'll have to resolve it by manually creating a load balancer in v2.x. + +
    output.txt Load Balancer Directive
    + +![Resolve Load Balancer Directive](/img/resolve-load-balancer.png) + +## In This Document + + + +- [Load Balancing Protocol Options](#load-balancing-protocol-options) +- [Load Balancer Deployment](#load-balancer-deployment) +- [Load Balancing Architecture](#load-balancing-architecture) +- [Ingress Caveats](#ingress-caveats) +- [Deploying Ingress](#deploying-ingress) +- [Rancher v2.x Load Balancing Limitations](#rancher-v2-x-load-balancing-limitations) + + + +## Load Balancing Protocol Options + +By default, Rancher v2.x replaces the v1.6 load balancer microservice with the native [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/), which is backed by NGINX Ingress Controller for layer 7 load balancing. By default, Kubernetes Ingress only supports the HTTP and HTTPS protocols, not TCP. Load balancing is limited to these two protocols when using Ingress. + +> **TCP Required?** See [TCP Load Balancing Options](#tcp-load-balancing-options) + + +## Load Balancer Deployment + +In Rancher v1.6, you could add port/service rules for configuring your HA proxy to load balance for target services. You could also configure the hostname/path-based routing rules. + +Rancher v2.x offers similar functionality, but load balancing is instead handled by Ingress. An Ingress is a specification of rules that a controller component applies to your load balancer. The actual load balancer can run outside of your cluster or within it. + +By default, Rancher v2.x deploys NGINX Ingress Controller on clusters provisioned using RKE (Rancher's own Kubernetes installer) to process the Kubernetes Ingress rules. The NGINX Ingress Controller is installed by default only in clusters provisioned by RKE. Clusters provisioned by cloud providers like GKE have their own Ingress Controllers that configure the load balancer. For this document, our scope is limited to the RKE-installed NGINX Ingress Controller only. + +RKE deploys NGINX Ingress Controller as a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), meaning that an NGINX instance is deployed on every node in the cluster. NGINX acts like an Ingress Controller listening to Ingress creation within your entire cluster, and it also configures itself as the load balancer to satisfy the Ingress rules. The DaemonSet is configured with hostNetwork to expose two ports: 80 and 443. + +For more information NGINX Ingress Controller, their deployment as DaemonSets, deployment configuration options, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/). + +## Load Balancing Architecture + +Deployment of Ingress Controller in v2.x as a DaemonSet brings some architectural changes that v1.6 users should know about. + +In Rancher v1.6 you could deploy a scalable load balancer service within your stack. If you had four hosts in your Cattle environment, you could deploy one load balancer service with a scale of two and point to your application by appending port 80 to your two host IP Addresses. You could also launch another load balancer on the remaining two hosts to balance a different service again using port 80 because your load balancer is using different host IP Addresses). + + + +
    Rancher v1.6 Load Balancing Architecture
    + +![Rancher v1.6 Load Balancing](/img/cattle-load-balancer.svg) + +Because the Rancher v2.x Ingress Controller is a DaemonSet, it is globally deployed on all schedulable nodes to serve your entire Kubernetes cluster. Therefore, when you program the Ingress rules, you must use a unique hostname and path to point to your workloads, as the load balancer node IP addresses and ports 80 and 443 are common access points for all workloads. + +
    Rancher v2.x Load Balancing Architecture
    + +![Rancher v2.x Load Balancing](/img/kubernetes-load-balancer.svg) + +## Ingress Caveats + +Although Rancher v2.x supports HTTP and HTTPS hostname and path-based load balancing, you must use unique host names and paths when configuring your workloads. This limitation derives from: + +- Ingress confinement to ports 80 and 443 (i.e., the ports HTTP[S] uses for routing). +- The load balancer and the Ingress Controller are launched globally for the cluster as a DaemonSet. + +> **TCP Required?** Rancher v2.x still supports TCP. See [TCP Load Balancing Options](#tcp-load-balancing-options) for workarounds. + +## Deploying Ingress + +You can launch a new load balancer to replace your load balancer from v1.6. Using the Rancher v2.x UI, browse to the applicable project and choose **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Deploy**. During deployment, you can choose a target project or namespace. + +>**Prerequisite:** Before deploying Ingress, you must have a workload deployed that's running a scale of two or more pods. +> + +![Workload Scale](/img/workload-scale.png) + +For balancing between these two pods, you must create a Kubernetes Ingress rule. To create this rule, navigate to your cluster and project, and click **Resources > Workloads > Load Balancing.** (In versions before v2.3.0, click **Workloads > Load Balancing.**) Then click **Add Ingress**. The GIF below depicts how to add an Ingress to one of your projects. + +
    Browsing to Load Balancer Tab and Adding Ingress
    + +![Adding Ingress](/img/add-ingress.gif) + +Similar to the service/port rules in Rancher v1.6, here you can specify rules targeting your workload's container port. + +### Configuring Host- and Path-Based Routing + +Using Rancher v2.x, you can add Ingress rules that are based on host names or a URL path. Based on the rules you create, your NGINX Ingress Controller routes traffic to multiple target workloads or Kubernetes services. + +For example, let's say you have two workloads deployed to a single namespace. You can add an Ingress to route traffic to these two workloads using the same hostname but different paths, as depicted in the image below. URL requests to `foo.com/name.html` will direct users to the `web` workload, and URL requests to `foo.com/login` will direct users to the `chat` workload. + +
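Expressed directly as a Kubernetes Ingress, the example above looks roughly like the sketch below (the `apiVersion`, service names, and ports are assumptions); the form screenshot that follows shows the same rules in the Rancher UI.

```
apiVersion: networking.k8s.io/v1beta1   # older clusters may use extensions/v1beta1
kind: Ingress
metadata:
  name: foo-routing          # placeholder name
spec:
  rules:
    - host: foo.com
      http:
        paths:
          - path: /name.html # routed to the web workload's service
            backend:
              serviceName: web
              servicePort: 80
          - path: /login     # routed to the chat workload's service
            backend:
              serviceName: chat
              servicePort: 80
```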
    Ingress: Path-Based Routing Configuration
    + +![Ingress: Path-Based Routing Configuration](/img/add-ingress-form.png) + +Rancher v2.x also places a convenient link to the workloads on the Ingress record. If you configure an external DNS to program the DNS records, this hostname can be mapped to the Kubernetes Ingress address. + +
    Workload Links
    + +![Load Balancer Links to Workloads](/img/load-balancer-links.png) + +The Ingress address is the IP address in your cluster that the Ingress Controller allocates for your workload. You can reach your workload by browsing to this IP address. Use the `kubectl` command below to see the Ingress address assigned by the controller: + +``` +kubectl get ingress +``` + +### HTTPS/Certificates Option + +Rancher v2.x Ingress functionality supports the HTTPS protocol, but if you want to use it, you need a valid SSL/TLS certificate. While configuring Ingress rules, use the **SSL/TLS Certificates** section to configure a certificate. + +- We recommend [uploading a certificate](../kubernetes-resources-setup/encrypt-http-communication.md) from a known certificate authority (you'll have to do this before configuring Ingress). Then, while configuring your load balancer, use the **Choose a certificate** option and select the uploaded certificate that you want to use. +- If you have configured an [NGINX default certificate](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/#configuring-an-nginx-default-certificate), you can select **Use default ingress controller certificate**. + +
    Load Balancer Configuration: SSL/TLS Certificate Section
    + +![SSL/TLS Certificates Section](/img/load-balancer-ssl-certs.png) + +### TCP Load Balancing Options + +#### Layer-4 Load Balancer + +For the TCP protocol, Rancher v2.x supports configuring a Layer 4 load balancer using the cloud provider in which your Kubernetes cluster is deployed. Once this load balancer appliance is configured for your cluster, when you choose the option of a `Layer-4 Load Balancer` for port-mapping during workload deployment, Rancher automatically creates a corresponding load balancer service. This service will call the corresponding cloud provider and configure the load balancer appliance to route requests to the appropriate pods. See [Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) for information on how to configure LoadBalancer services for your cloud provider. + +For example, if we create a deployment named `myapp` and specify a Layer 4 load balancer in the **Port Mapping** section, Rancher will automatically add an entry to the **Load Balancer** tab named `myapp-loadbalancer`. + +
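In Kubernetes terms, the `myapp-loadbalancer` entry corresponds to a Service of type `LoadBalancer` that the cloud provider fulfills with a load balancer appliance. A rough sketch, not the exact object Rancher creates (the selector and ports are placeholders):

```
apiVersion: v1
kind: Service
metadata:
  name: myapp-loadbalancer
  namespace: default
spec:
  type: LoadBalancer        # asks the configured cloud provider for an external load balancer
  selector:
    app: myapp              # placeholder: Rancher applies its own workload labels
  ports:
    - port: 80              # external port exposed by the load balancer
      targetPort: 80        # container port on the myapp pods
```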
    Workload Deployment: Layer 4 Load Balancer Creation
    + +![Deploy Layer-4 Load Balancer](/img/deploy-workload-load-balancer.png) + +Once configuration of the load balancer succeeds, the Rancher UI provides a link to your workload's public endpoint. + +#### NGINX Ingress Controller TCP Support by ConfigMaps + +Although NGINX supports TCP, Kubernetes Ingress itself does not support the TCP protocol. Therefore, out-of-the-box configuration of NGINX Ingress Controller for TCP balancing isn't possible. + +However, there is a workaround to use NGINX's TCP balancing by creating a Kubernetes ConfigMap, as described in the [Ingress GitHub readme](https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/exposing-tcp-udp-services.md). You can create a ConfigMap object that stores pod configuration parameters as key-value pairs, separate from the pod image, as described in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/). + +To configure NGINX to expose your services via TCP, you can add the ConfigMap `tcp-services` that should exist in the `ingress-nginx` namespace. This namespace also contains the NGINX Ingress Controller pods. + +![Layer-4 Load Balancer: ConfigMap Workaround](/img/layer-4-lb-config-map.png) + +The key in the ConfigMap entry should be the TCP port that you want to expose for public access: `:`. As shown above, two workloads are listed in the `Default` namespace. For example, the first entry in the ConfigMap above instructs NGINX to expose the `myapp` workload (the one in the `default` namespace that's listening on private port 80) over external port `6790`. Adding these entries to the ConfigMap automatically updates the NGINX pods to configure these workloads for TCP balancing. The workloads exposed should be available at `:`. If they are not accessible, you might have to expose the TCP port explicitly using a NodePort service. + +## Rancher v2.x Load Balancing Limitations + +Cattle provided feature-rich load balancer support that is [well documented]({{}}/rancher/v1.6/en/cattle/adding-load-balancers/#load-balancers). Some of these features do not have equivalents in Rancher v2.x. This is the list of such features: + +- No support for SNI in current NGINX Ingress Controller. +- TCP load balancing requires a load balancer appliance enabled by cloud provider within the cluster. There is no Ingress support for TCP on Kubernetes. +- Only ports 80 and 443 can be configured for HTTP/HTTPS routing via Ingress. Also Ingress Controller is deployed globally as a DaemonSet and not launched as a scalable service. Also, users cannot assign random external ports to be used for balancing. Therefore, users need to ensure that they configure unique hostname/path combinations to avoid routing conflicts using the same two ports. +- There is no way to specify port rule priority and ordering. +- Rancher v1.6 added support for draining backend connections and specifying a drain timeout. This is not supported in Rancher v2.x. +- There is no support for specifying a custom stickiness policy and a custom load balancer config to be appended to the default config as of now in Rancher v2.x. There is some support, however, available in native Kubernetes for customizing the NGINX configuration as noted in the [NGINX Ingress Controller Custom Configuration Documentation](https://kubernetes.github.io/ingress-nginx/examples/customization/custom-configuration/). 
+ +### Finished! diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md new file mode 100644 index 0000000000..5003c4eecd --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md @@ -0,0 +1,313 @@ +--- +title: 2. Migrate Your Services +weight: 100 +aliases: + - /rancher/v2.x/en/v1.6-migration/run-migration-tool/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Although your services from v1.6 won't work in Rancher v2.x by default, that doesn't mean you have to start again from square one, manually rebuilding your applications in v2.x. To help with migration from v1.6 to v2.x, Rancher has developed a migration tool. The migration-tools CLI is a utility that helps you recreate your applications in Rancher v2.x. This tool exports your Rancher v1.6 services as Compose files and converts them to a Kubernetes manifest that Rancher v2.x can consume. + +Additionally, for each Rancher v1.6-specific Compose directive that cannot be consumed by Kubernetes, migration-tools CLI provides instructions on how to manually recreate them in Rancher v2.x. + +This command line interface tool will: + +- Export Compose files (i.e., `docker-compose.yml` and `rancher-compose.yml`) for each stack in your v1.6 Cattle environment. For every stack, files are exported to a unique folder: `//`. + +- Parse Compose files that you’ve exported from your Rancher v1.6 stacks and converts them to Kubernetes manifests that Rancher v2.x can consume. The tool also outputs a list of directives present in the Compose files that cannot be converted automatically to Rancher v2.x. These are directives that you’ll have to manually configure using the Rancher v2.x UI. + +## Outline + + + +- [A. Download the migration-tools CLI](#a-download-the-migration-tools-cli) +- [B. Configure the migration-tools CLI](#b-configure-the-migration-tools-cli) +- [C. Run the migration-tools CLI](#c-run-the-migration-tools-cli) +- [D. Deploy Services Using Rancher CLI](#d-re-deploy-services-as-kubernetes-manifests) +- [What Now?](#what-now) + + + + + +## A. Download the migration-tools CLI + +The migration-tools CLI for your platform can be downloaded from our [GitHub releases page](https://github.com/rancher/migration-tools/releases). The tools are available for Linux, Mac, and Windows platforms. + + +## B. Configure the migration-tools CLI + +After you download migration-tools CLI, rename it and make it executable. + +1. Open a terminal window and change to the directory that contains the migration-tool file. + +1. Rename the file to `migration-tools` so that it no longer includes the platform name. + +1. Enter the following command to make `migration-tools` executable: + + ``` + chmod +x migration-tools + ``` + +## C. Run the migration-tools CLI + +Next, use the migration-tools CLI to export all stacks in all of the Cattle environments into Compose files. Then, for stacks that you want to migrate to Rancher v2.x, convert the Compose files into Kubernetes manifest. + +>**Prerequisite:** Create an [Account API Key]({{}}/rancher/v1.6/en/api/v2-beta/api-keys/#account-api-keys) to authenticate with Rancher v1.6 when using the migration-tools CLI. + +1. Export the Docker Compose files for your Cattle environments and stacks from Rancher v1.6. 
+ + In the terminal window, execute the following command, replacing each placeholder with your values. + + ``` + migration-tools export --url http:// --access-key --secret-key --export-dir --all + ``` + + **Step Result:** migration-tools exports Compose files (`docker-compose.yml` and `rancher-compose.yml`) for each stack in the `--export-dir` directory. If you omitted this option, Compose files are output to your current directory. + + A unique directory is created for each environment and stack. For example, if we export each [environment/stack](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) from Rancher v1.6, the following directory structure is created: + + ``` + export/ # migration-tools --export-dir + |--/ # Rancher v1.6 ENVIRONMENT + |--/ # Rancher v1.6 STACK + |--docker-compose.yml # STANDARD DOCKER DIRECTIVES FOR ALL STACK SERVICES + |--rancher-compose.yml # RANCHER-SPECIFIC DIRECTIVES FOR ALL STACK SERVICES + |--README.md # README OF CHANGES FROM v1.6 to v2.x + ``` + + + +1. Convert the exported Compose files to Kubernetes manifest. + + Execute the following command, replacing each placeholder with the absolute path to your Stack's Compose files. If you want to migrate multiple stacks, you'll have to re-run the command for each pair of Compose files that you exported. + + ``` + migration-tools parse --docker-file --rancher-file + ``` + + >**Note:** If you omit the `--docker-file` and `--rancher-file` options from your command, migration-tools uses the current working directory to find Compose files. + +>**Want full usage and options for the migration-tools CLI?** See the [Migration Tools CLI Reference](../../../reference-guides/v1.6-migration/migration-tools-cli-reference.md). + +### migration-tools CLI Output + +After you run the migration-tools parse command, the following files are output to your target directory. + +| Output | Description | +| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `output.txt` | This file lists how to recreate your Rancher v1.6-specific functionality in Kubernetes. Each listing links to the relevant blog articles on how to implement it in Rancher v2.x. | +| Kubernetes manifest specs | Migration-tools internally invokes [Kompose](https://github.com/kubernetes/kompose) to generate a Kubernetes manifest for each service you're migrating to v2.x. Each YAML spec file is named for the service you're migrating. + +#### Why are There Separate Deployment and Service Manifests? + +To make an application publicly accessible by URL, a Kubernetes service is required in support of the deployment. A Kubernetes service is a REST object that abstracts access to the pods in the workload. In other words, a service provides a static endpoint to the pods by mapping a URL to pod(s) Therefore, even if the pods change IP address, the public endpoint remains unchanged. A service object points to its corresponding deployment (workload) by using selector labels. + +When a you export a service from Rancher v1.6 that exposes public ports, migration-tools CLI parses those ports to a Kubernetes service spec that links to a deployment YAML spec. 
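+
+As a minimal, hand-written sketch of that relationship (this is not actual migration-tools output; the `web` name, label, image, and ports are illustrative assumptions), a deployment and its companion service are linked through a matching label:
+
+```
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: web
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: web               # pods managed by this deployment carry this label
+  template:
+    metadata:
+      labels:
+        app: web
+    spec:
+      containers:
+        - name: web
+          image: nginx       # illustrative image
+          ports:
+            - containerPort: 8080
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: web
+spec:
+  selector:
+    app: web                 # selector label pointing the service at the pods above
+  ports:
+    - port: 80               # stable endpoint port
+      targetPort: 8080       # port the containers listen on
+```
+
+Because the service targets the `app: web` label rather than individual pod IP addresses, its endpoint stays the same even as pods are replaced.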
+ +#### Migration Example File Output + +If we parse the two example files from [Migration Example Files](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files), `docker-compose.yml` and `rancher-compose.yml`, the following files are output: + +File | Description +-----|------------ +`web-deployment.yaml` | A file containing Kubernetes container specs for a Let's Chat deployment. +`web-service.yaml` | A file containing specs for the Let's Chat service. +`database-deployment.yaml` | A file containing container specs for the MongoDB deployment in support of Let's Chat. +`webLB-deployment.yaml` | A file containing container specs for an HAProxy deployment that's serving as a load balancer.1 +`webLB-service.yaml` | A file containing specs for the HAProxy service.1 + +>1 Because Rancher v2.x uses Ingress for load balancing, we won't be migrating our Rancher v1.6 load balancer to v2.x. + + + +## D. Re-Deploy Services as Kubernetes Manifests + +>**Note:** Although these instructions deploy your v1.6 services in Rancher v2.x, they will not work correctly until you adjust their Kubernetes manifests. + + + + +You can deploy the Kubernetes manifests created by migration-tools by importing them into Rancher v2.x. + +>**Receiving an `ImportYaml Error`?** +> +>Delete the YAML directive listed in the error message. These are YAML directives from your v1.6 services that Kubernetes can't read. + +
    Deploy Services: Import Kubernetes Manifest
    + +![Deploy Services](/img/deploy-service.gif) + +
    + + +>**Prerequisite:** [Install Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md) for Rancher v2.x. + +Use the following Rancher CLI commands to deploy your application using Rancher v2.x. For each Kubernetes manifest output by migration-tools CLI, enter one of the commands below to import it into Rancher v2.x. + +``` +./rancher kubectl create -f # DEPLOY THE DEPLOYMENT YAML + +./rancher kubectl create -f # DEPLOY THE SERVICE YAML +``` + + +
+ +After importing your services, you can view them in the Rancher v2.x UI as Kubernetes manifests. Use the context menu to select the cluster and project that contain your services. The imported manifests are displayed on **Resources > Workloads** and on the **Resources > Workloads > Service Discovery** tab. (In Rancher v2.x before v2.3.0, these are on the **Workloads** and **Service Discovery** tabs in the top navigation bar.) + +
    Imported Services
+ +![Imported Services](/img/imported-workloads.png) + +## What Now? + +Although the migration-tools CLI parses your Rancher v1.6 Compose files to Kubernetes manifests, there are discrepancies between v1.6 and v2.x that you must address by manually editing your parsed [Kubernetes manifests](#migration-tools-cli-output). In other words, you need to edit each workload and service imported into Rancher v2.x, as displayed below. + +
    Edit Migrated Services
+ +![Edit Migrated Workload](/img/edit-migration-workload.gif) + +As mentioned in [Migration Tools CLI Output](#migration-tools-cli-output), the `output.txt` file generated during parsing lists the manual steps you must take for each deployment. Review the upcoming topics for more information on manually editing your Kubernetes specs. + +Open your `output.txt` file and take a look at its contents. When you parsed your Compose files into Kubernetes manifests, the migration-tools CLI output a manifest for each workload that it created for Kubernetes. For example, when our [Migration Example Files](../../../pages-for-subheaders/migrate-from-v1.6-v2.x.md#migration-example-files) are parsed into Kubernetes manifests, `output.txt` lists each resultant parsed [Kubernetes manifest file](#migration-example-file-output) (i.e., workload). Each workload features a list of action items to restore operations for the workload in v2.x. + +
    Output.txt Example
    + +![output.txt](/img/output-dot-text.png) + +The following table lists possible directives that may appear in `output.txt`, what they mean, and links on how to resolve them. + +Directive | Instructions +----------|-------------- +[ports][4] | Rancher v1.6 _Port Mappings_ cannot be migrated to v2.x. Instead, you must manually declare either a HostPort or NodePort, which are similar to Port Mappings. +[health_check][1] | The Rancher v1.6 health check microservice has been replaced with native Kubernetes health checks, called _probes_. Recreate your v1.6 health checks in v2.0 using probes. +[labels][2] | Rancher v1.6 uses labels to implement a variety of features in v1.6. In v2.x, Kubernetes uses different mechanisms to implement these features. Click through on the links here for instructions on how to address each label.

    [io.rancher.container.pull_image][7]: In v1.6, this label instructed deployed containers to pull a new version of the image upon restart. In v2.x, this functionality is replaced by the `imagePullPolicy` directive.

    [io.rancher.scheduler.global][8]: In v1.6, this label scheduled a container replica on every cluster host. In v2.x, this functionality is replaced by [Daemon Sets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/).

    [io.rancher.scheduler.affinity][9]: In v2.x, affinity is applied in a different way. +[links][3] | During migration, you must create links between your Kubernetes workloads and services for them to function properly in v2.x. +[scale][5] | In v1.6, scale refers to the number of container replicas running on a single node. In v2.x, this feature is replaced by replica sets. +start_on_create | No Kubernetes equivalent. No action is required from you. + +[1]:v1.6-migration/monitor-apps/#configuring-probes-in-rancher-v2-x +[2]:v1.6-migration/schedule-workloads/#scheduling-using-labels +[3]:v1.6-migration/discover-services +[4]:v1.6-migration/expose-services +[5]:v1.6-migration/schedule-workloads/#scheduling-pods-to-a-specific-node + + + +[7]:v1.6-migration/schedule-workloads/#scheduling-using-labels +[8]:v1.6-migration/schedule-workloads/#scheduling-global-services +[9]:v1.6-migration/schedule-workloads/#label-affinity-antiaffinity + +### [Next: Expose Your Services](expose-services.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md new file mode 100644 index 0000000000..e1f163f512 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md @@ -0,0 +1,177 @@ +--- +title: "4. Configure Health Checks" +weight: 400 +aliases: + - /rancher/v2.x/en/v1.6-migration/monitor-apps/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher v1.6 provided TCP and HTTP health checks on your nodes and services using its own health check microservice. These health checks monitored your containers to confirm they're operating as intended. If a container failed a health check, Rancher would destroy the unhealthy container and then replicates a healthy one to replace it. + +For Rancher v2.x, we've replaced the health check microservice, leveraging instead Kubernetes' native health check support. + +Use this document to correct Rancher v2.x workloads and services that list `health_check` in `output.txt`. You can correct them by configuring a liveness probe (i.e., a health check). + +For example, for the image below, we would configure liveness probes for the `web` and `weblb` workloads (i.e., the Kubernetes manifests output by migration-tools CLI). + +
    Resolve health_check for the web and webLB Workloads
    + +![Resolve health_check](/img/resolve-health-checks.png) + +## In This Document + + + +- [Rancher v1.6 Health Checks](#rancher-v1-6-health-checks) +- [Rancher v2.x Health Checks](#rancher-v2-x-health-checks) +- [Configuring Probes in Rancher v2.x](#configuring-probes-in-rancher-v2-x) + + + +## Rancher v1.6 Health Checks + +In Rancher v1.6, you could add health checks to monitor a particular service's operations. These checks were performed by the Rancher health check microservice, which is launched in a container on a node separate from the node hosting the monitored service (however, Rancher v1.6.20 and later also runs a local health check container as a redundancy for the primary health check container on another node). Health check settings were stored in the `rancher-compose.yml` file for your stack. + +The health check microservice features two types of health checks, which have a variety of options for timeout, check interval, etc.: + +- **TCP health checks**: + + These health checks check if a TCP connection opens at the specified port for the monitored service. For full details, see the [Rancher v1.6 documentation]({{}}/rancher/v1.6/en/cattle/health-checks/). + +- **HTTP health checks**: + + These health checks monitor HTTP requests to a specified path and check whether the response is expected response (which is configured along with the health check). + +The following diagram displays the health check microservice evaluating a container running Nginx. Notice that the microservice is making its check across nodes. + +![Rancher v1.6 Health Checks](/img/healthcheck.svg) + +## Rancher v2.x Health Checks + +In Rancher v2.x, the health check microservice is replaced with Kubernetes's native health check mechanisms, called _probes_. These probes, similar to the Rancher v1.6 health check microservice, monitor the health of pods over TCP and HTTP. + +However, probes in Rancher v2.x have some important differences, which are described below. For full details about probes, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes). + + +### Local Health Checks + +Unlike the Rancher v1.6 health checks performed across hosts, probes in Rancher v2.x occur on _same_ host, performed by the kubelet. + + +### Multiple Probe Types + +Kubernetes includes two different _types_ of probes: liveness checks and readiness checks. + +- **Liveness Check**: + + Checks if the monitored container is running. If the probe reports failure, Kubernetes kills the pod, and then restarts it according to the deployment [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). + +- **Readiness Check**: + + Checks if the container is ready to accept and serve requests. If the probe reports failure, the pod is sequestered from the public until it self heals. + +The following diagram displays kubelets running probes on containers they are monitoring ([kubelets](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) are the primary "agent" running on each node). The node on the left is running a liveness probe, while the one of the right is running a readiness check. Notice that the kubelet is scanning containers on its host node rather than across nodes, as in Rancher v1.6. 
+ +![Rancher v2.x Probes](/img/probes.svg) + +## Configuring Probes in Rancher v2.x + +The [migration-tools CLI](migrate-services.md) cannot parse health checks from Compose files to Kubernetes manifests. Therefore, if you want to add health checks to your Rancher v2.x workloads, you'll have to add them manually. + +Using the Rancher v2.x UI, you can add TCP or HTTP health checks to Kubernetes workloads. By default, Rancher asks you to configure a readiness check for your workloads and applies a liveness check using the same configuration. Optionally, you can define a separate liveness check. + +If the probe fails, the container is restarted per the `restartPolicy` defined in the workload specs. This setting is equivalent to the `strategy` parameter for health checks in Rancher v1.6. + +Configure probes by using the **Health Check** section while editing deployments called out in `output.txt`. + +
    Edit Deployment: Health Check Section
    + +![Health Check Section](/img/health-check-section.png) + +### Configuring Checks + +While you create a workload using Rancher v2.x, we recommend configuring a check that monitors the health of the deployment's pods. + + + + +TCP checks monitor your deployment's health by attempting to open a connection to the pod over a specified port. If the probe can open the port, it's considered healthy. Failure to open it is considered unhealthy, which notifies Kubernetes that it should kill the pod and then replace it according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). + +You can configure the probe along with values for specifying its behavior by selecting the **TCP connection opens successfully** option in the **Health Check** section. For more information, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). + +![TCP Check](/img/readiness-check-tcp.png) + +When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. + + + + + + + +HTTP checks monitor your deployment's health by sending an HTTP GET request to a specific URL path that you define. If the pod responds with a message range of `200`-`400`, the health check is considered successful. If the pod replies with any other value, the check is considered unsuccessful, so Kubernetes kills and replaces the pod according to its [restart policy](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy). (this applies to Liveness probes, for Readiness probes, it will mark the pod as Unready). + +You can configure the probe along with values for specifying its behavior by selecting the **HTTP returns successful status** or **HTTPS returns successful status**. For more information, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#healthcheck-parameter-mappings). + +![HTTP Check](/img/readiness-check-http.png) + +When you configure a readiness check using Rancher v2.x, the `readinessProbe` directive and the values you've set are added to the deployment's Kubernetes manifest. Configuring a readiness check also automatically adds a liveness check (`livenessProbe`) to the deployment. + + + + +### Configuring Separate Liveness Checks + +While configuring a readiness check for either the TCP or HTTP protocol, you can configure a separate liveness check by clicking the **Define a separate liveness check**. For help setting probe timeout and threshold values, see [Health Check Parameter Mappings](#health-check-parameter-mappings). + +![Separate Liveness Check](/img/separate-check.png) + +### Additional Probing Options + +Rancher v2.x, like v1.6, lets you perform health checks using the TCP and HTTP protocols. However, Rancher v2.x also lets you check the health of a pod by running a command inside of it. 
If the container exits with a code of `0` after running the command, the pod is considered healthy. + +You can configure a liveness or readiness check that executes a command that you specify by selecting the `Command run inside the container exits with status 0` option from **Health Checks** while [deploying a workload](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). + +![Healthcheck Execute Command](/img/healthcheck-cmd-exec.png) + +#### Health Check Parameter Mappings + +While configuring readiness checks and liveness checks, Rancher prompts you to fill in various timeout and threshold values that determine whether the probe is a success or failure. The reference table below shows you the equivalent health check values from Rancher v1.6. + +Rancher v1.6 Compose Parameter | Rancher v2.x Kubernetes Parameter +-------------------------------|----------------------------------- +`port` | `tcpSocket.port` +`response_timeout` | `timeoutSeconds` +`healthy_threshold` | `successThreshold` +`unhealthy_threshold` | `failureThreshold` +`interval` | `periodSeconds` +`initializing_timeout` | `initialDelaySeconds` +`strategy` | `restartPolicy` + +### [Next: Schedule Your Services](schedule-services.md) diff --git a/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md new file mode 100644 index 0000000000..965a85165e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md @@ -0,0 +1,249 @@ +--- +title: "5. Schedule Your Services" +weight: 500 +aliases: + - /rancher/v2.x/en/v1.6-migration/schedule-workloads/ +--- + +In v1.6, objects called _services_ were used to schedule containers to your cluster hosts. Services included the Docker image for an application, along with configuration settings for a desired state. + +In Rancher v2.x, the equivalent object is known as a _workload_. Rancher v2.x retains all scheduling functionality from v1.6, but because of the change from Cattle to Kubernetes as the default container orchestrator, the terminology and mechanisms for scheduling workloads have changed. + +Workload deployment is one of the more important and complex aspects of container orchestration. Deploying pods to available shared cluster resources helps maximize performance under optimum compute resource use. + +You can schedule your migrated v1.6 services while editing a deployment. Schedule services by using the **Workload Type** and **Node Scheduling** sections, which are shown below. + +
    Editing Workloads: Workload Type and Node Scheduling Sections
+ +![Workload Type and Node Scheduling Sections](/img/migrate-schedule-workloads.png) + +## In This Document + + + + + +- [What's Different for Scheduling Services?](#whats-different-for-scheduling-services) +- [Node Scheduling Options](#node-scheduling-options) +- [Scheduling Pods to a Specific Node](#scheduling-pods-to-a-specific-node) +- [Scheduling Using Labels](#scheduling-using-labels) +- [Scheduling Pods Using Resource Constraints](#scheduling-pods-using-resource-constraints) +- [Preventing Scheduling Specific Services to Specific Nodes](#preventing-scheduling-specific-services-to-specific-nodes) +- [Scheduling Global Services](#scheduling-global-services) + + + + +## What's Different for Scheduling Services? + + +Rancher v2.x retains _all_ methods available in v1.6 for scheduling your services. However, because the default container orchestration system has changed from Cattle to Kubernetes, the terminology and implementation for each scheduling option has changed. + +In v1.6, you would schedule a service to a host while adding a service to a Stack. In Rancher v2.x, the equivalent action is to schedule a workload for deployment. The following composite image shows a comparison of the UI used for scheduling in Rancher v2.x versus v1.6. + +![Node Scheduling: Rancher v2.x vs v1.6](/img/node-scheduling.png) + +## Node Scheduling Options + +Rancher offers a variety of options when scheduling nodes to host workload pods (i.e., scheduling hosts for containers in Rancher v1.6). + +You can choose a scheduling option as you deploy a workload. (The term _workload_ is synonymous with adding a service to a Stack in Rancher v1.6.) You can deploy a workload by using the context menu to browse to a cluster project and then opening the **Workloads** tab. + +The sections that follow provide information on using each scheduling option, as well as any notable changes from Rancher v1.6. For full instructions on deploying a workload in Rancher v2.x beyond just scheduling options, see [Deploying Workloads](../kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). + +Option | v1.6 Feature | v2.x Feature +-------|------|------ +[Schedule a certain number of pods?](#schedule-a-certain-number-of-pods) | ✓ | ✓ +[Schedule pods to specific node?](#scheduling-pods-to-a-specific-node) | ✓ | ✓ +[Schedule to nodes using labels?](#applying-labels-to-nodes-and-pods) | ✓ | ✓ +[Schedule to nodes using label affinity/anti-affinity rules?](#label-affinity-antiaffinity) | ✓ | ✓ +[Schedule based on resource constraints?](#scheduling-pods-using-resource-constraints) | ✓ | ✓ +[Preventing scheduling specific services to specific hosts?](#preventing-scheduling-specific-services-to-specific-nodes) | ✓ | ✓ +[Schedule services globally?](#scheduling-global-services) | ✓ | ✓ + + +### Schedule a certain number of pods + +In v1.6, you could control the number of container replicas deployed for a service. You can schedule pods the same way in v2.x, but you'll have to set the scale manually while editing a workload. + +![Resolve Scale](/img/resolve-scale.png) + +During migration, you can resolve `scale` entries in `output.txt` by setting a value for the **Workload Type** option **Scalable deployment** depicted below. + +
    Scalable Deployment Option
+ +![Workload Scale](/img/workload-type-option.png) + +### Scheduling Pods to a Specific Node + +Just as you could schedule containers to a single host in Rancher v1.6, you can schedule pods to a single node in Rancher v2.x. + +As you deploy a workload, use the **Node Scheduling** section to choose a node to run your pods on. The workload below is being scheduled to deploy an Nginx image with a scale of two pods on a specific node. + + +
    Rancher v2.x: Workload Deployment
+ +![Workload Tab and Group by Node Icon](/img/schedule-specific-node.png) + +Rancher schedules pods to the node you select if 1) there are compute resources available on the node and 2) if you've configured port mapping to use the HostPort option, there are no port conflicts. + +If you expose the workload using a NodePort that conflicts with another workload, the deployment gets created successfully, but no NodePort service is created. Therefore, the workload isn't exposed outside of the cluster. + +After the workload is created, you can confirm that the pods are scheduled to your chosen node. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Click the **Group by Node** icon to sort your workloads by node. Note that both Nginx pods are scheduled to the same node. + +![Pods Scheduled to Same Node](/img/scheduled-nodes.png) + + + +### Scheduling Using Labels + +In Rancher v2.x, you can constrain pods for scheduling to specific nodes (referred to as hosts in v1.6). Using [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/), which are key/value pairs that you can attach to different Kubernetes objects, you can configure your workload so that pods you've labeled are assigned to specific nodes (or nodes with specific labels are automatically assigned workload pods). + +
    Label Scheduling Options
    + +Label Object | Rancher v1.6 | Rancher v2.x +-------------|--------------|--------------- +Schedule by Node? | ✓ | ✓ +Schedule by Pod? | ✓ | ✓ + +#### Applying Labels to Nodes and Pods + +Before you can schedule pods based on labels, you must first apply labels to your pods or nodes. + +>**Hooray!** +>All the labels that you manually applied in Rancher v1.6 (but _not_ the ones automatically created by Rancher) are parsed by migration-tools CLI, meaning you don't have to manually reapply labels. + +To apply labels to pods, make additions to the **Labels and Annotations** section as you configure your workload. After you complete workload configuration, you can view the label by viewing each pod that you've scheduled. To apply labels to nodes, edit your node and make additions to the **Labels** section. + + +#### Label Affinity/AntiAffinity + +Some of the most-used scheduling features in v1.6 were affinity and anti-affinity rules. + +
    output.txt Affinity Label
    + +![Affinity Label](/img/resolve-affinity.png) + +- **Affinity** + + Any pods that share the same label are scheduled to the same node. Affinity can be configured in one of two ways: + + Affinity | Description + ---------|------------ + **Hard** | A hard affinity rule means that the host chosen must satisfy all the scheduling rules. If no such host can be found, the workload will fail to deploy. In the Kubernetes manifest, this rule translates to the `nodeAffinity` directive.

To use hard affinity, configure a rule using the **Require ALL of** section (see figure below). + **Soft** | Rancher v1.6 users are likely familiar with soft affinity rules, which try to schedule the deployment per the rule, but can deploy even if the rule is not satisfied by any host.

    To use soft affinity, configure a rule using the **Prefer Any of** section (see figure below). + +
    + +
    Affinity Rules: Hard and Soft
+ + ![Affinity Rules](/img/node-scheduling-affinity.png) + +- **AntiAffinity** + + Any pods that share the same label are scheduled to different nodes. In other words, while affinity _attracts_ pods with a shared label to the same node, anti-affinity _repels_ them from one another, so that pods are scheduled to different nodes. + + You can create anti-affinity rules using either hard or soft affinity. However, when creating your rule, you must use either the `is not set` or `not in list` operator. + + For anti-affinity rules, we recommend using operators like `NotIn` and `DoesNotExist`, as these terms are more intuitive when users are applying anti-affinity rules. + +
    AntiAffinity Operators
+ + ![AntiAffinity ](/img/node-schedule-antiaffinity.png) + +Detailed documentation for affinity/anti-affinity is available in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +Affinity rules that you create in the UI update your workload, adding pod affinity/anti-affinity directives to the workload Kubernetes manifest specs. + + +### Preventing Scheduling Specific Services to Specific Nodes + +In Rancher v1.6 setups, you could prevent services from being scheduled to specific nodes with the use of labels. In Rancher v2.x, you can reproduce this behavior using native Kubernetes scheduling options. + +In Rancher v2.x, you can prevent pods from being scheduled to specific nodes by applying _taints_ to a node. Pods will not be scheduled to a tainted node unless they have special permission, called a _toleration_. A toleration is a setting on the pod that allows it to be deployed to a tainted node. While editing a workload, you can apply tolerations using the **Node Scheduling** section. Click **Show advanced options**. + +
    Applying Tolerations
    + +![Tolerations](/img/node-schedule-advanced-options.png) + +For more information, see the Kubernetes documentation on [taints and tolerations](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/). + +### Scheduling Global Services + +Rancher v1.6 included the ability to deploy [global services]({{}}/rancher/v1.6/en/cattle/scheduling/#global-service), which are services that deploy duplicate containers to each host in the environment (i.e., nodes in your cluster using Rancher v2.x terms). If a service has the `io.rancher.scheduler.global: 'true'` label declared, then Rancher v1.6 schedules a service container on each host in the environment. + +
    output.txt Global Service Label
+ +![Global Service Label](/img/resolve-global.png) + +In Rancher v2.x, you can schedule a pod to each node using a [Kubernetes DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), which is a specific type of workload. A _DaemonSet_ functions exactly like a Rancher v1.6 global service. The Kubernetes scheduler deploys a pod on each node of the cluster, and as new nodes are added, the scheduler will start new pods on them provided they match the scheduling requirements of the workload. Additionally, in v2.x, you can also limit a DaemonSet to be deployed to nodes that have a specific label. + +To create a DaemonSet while configuring a workload, choose **Run one pod on each node** from the **Workload Type** options. + +
    Workload Configuration: Choose run one pod on each node to configure daemonset
    + +![choose Run one pod on each node](/img/workload-type.png) + +### Scheduling Pods Using Resource Constraints + +While creating a service in the Rancher v1.6 UI, you could schedule its containers to hosts based on hardware requirements that you choose. The containers are then scheduled to hosts based on which ones have bandwidth, memory, and CPU capacity. + +In Rancher v2.x, you can still specify the resources required by your pods. However, these options are unavailable in the UI. Instead, you must edit your workload's manifest file to declare these resource constraints. + +To declare resource constraints, edit your migrated workloads, editing the **Security & Host** sections. + +- To reserve a minimum hardware reservation available for your pod(s), edit the following sections: + + - Memory Reservation + - CPU Reservation + - NVIDIA GPU Reservation + +- To set a maximum hardware limit for your pods, edit: + + - Memory Limit + - CPU Limit + +
    Scheduling: Resource Constraint Settings
    + +![Resource Constraint Settings](/img/resource-constraint-settings.png) + +You can find more detail about these specs and how to use them in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container). + +### [Next: Service Discovery](discover-services.md) diff --git a/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md b/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md new file mode 100644 index 0000000000..0cb9f6d5ed --- /dev/null +++ b/versioned_docs/version-2.0-2.4/installation/resources/chart-options/chart-options.md @@ -0,0 +1,6 @@ +--- +title: Rancher Helm Chart Options +weight: 50 +--- + +The Rancher Helm chart options reference moved to [this page.](../../../reference-guides/installation-references/helm-chart-options.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md new file mode 100644 index 0000000000..1a6d9f50f4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-authentication.md @@ -0,0 +1,97 @@ +--- +title: Authentication +weight: 1115 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/authentication/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/ +--- + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. + +This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. + +## External vs. Local Authentication + +The Rancher authentication proxy integrates with the following external authentication services. The following table lists the first version of Rancher each service debuted. 
+ +| Auth Service | Available as of | +| ------------------------------------------------------------------------------------------------ | ---------------- | +| [Microsoft Active Directory](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md) | v2.0.0 | +| [GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md) | v2.0.0 | +| [Microsoft Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md) | v2.0.3 | +| [FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md) | v2.0.5 | +| [OpenLDAP](configure-openldap.md) | v2.0.5 | +| [Microsoft AD FS](configure-microsoft-ad-federation-service-saml.md) | v2.0.7 | +| [PingIdentity](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md) | v2.0.7 | +| [Keycloak](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md) | v2.1.0 | +| [Okta](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md) | v2.2.0 | +| [Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md) | v2.3.0 | +| [Shibboleth](configure-shibboleth-saml.md) | v2.4.0 | + +
    +However, Rancher also provides [local authentication](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md). + +In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. + +## Users and Groups + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control](manage-role-based-access-control-rbac.md). + +> **Note:** Local authentication does not support creating or managing groups. + +For more information, see [Users and Groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md) + +## Scope of Rancher Authorization + +After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: + +| Access Level | Description | +|----------------------------------------------|-------------| +| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | +| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | +| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | + +To set the Rancher access level for users in the authorization service, follow these steps: + +1. From the **Global** view, click **Security > Authentication.** + +1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. + +1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. + +1. Click **Save.** + +**Result:** The Rancher access configuration settings are applied. + +{{< saml_caveats >}} + +## External Authentication Configuration and Principal Users + +Configuration of external authentication requires: + +- A local user assigned the administrator role, called hereafter the _local principal_. +- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. 
+ +Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. + +1. Sign into Rancher as the local principal and complete configuration of external authentication. + + ![Sign In](/img/sign-in.png) + +2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. + + ![Principal ID Sharing](/img/principal-ID.png) + +3. After you complete configuration, Rancher automatically signs out the local principal. + + ![Sign Out Local Principal](/img/sign-out-local.png) + +4. Then, Rancher automatically signs you back in as the external principal. + + ![Sign In External Principal](/img/sign-in-external.png) + +5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. + + ![Sign In External Principal](/img/users-page.png) + +6. The external principal and the local principal share the same access rights. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md new file mode 100644 index 0000000000..d9f71e5a3b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-provisioning-drivers.md @@ -0,0 +1,46 @@ +--- +title: Provisioning Drivers +weight: 1140 +--- + +Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +### Rancher Drivers + +With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. + +There are two types of drivers within Rancher: + +* [Cluster Drivers](#cluster-drivers) +* [Node Drivers](#node-drivers) + +### Cluster Drivers + +_Available as of v2.2.0_ + +Cluster drivers are used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. 
+ +By default, Rancher has activated several hosted Kubernetes cloud providers including: + +* [Amazon EKS](../reference-guides/installation-references/amazon-eks-permissions.md) +* [Google GKE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +* [Azure AKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) + +There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: + +* [Alibaba ACK](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +* [Huawei CCE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) +* [Tencent](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) + +### Node Drivers + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. + +Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: + +* [Amazon EC2](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) +* [Azure](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md) +* [Digital Ocean](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md) +* [vSphere](vsphere.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md new file mode 100644 index 0000000000..00998ad99a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-rke1-templates.md @@ -0,0 +1,127 @@ +--- +title: RKE Templates +weight: 7010 +--- + +_Available as of Rancher v2.3.0_ + +RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. + +RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. + +With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. 
Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. + +RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. + +Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. + +If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. + +The core features of RKE templates allow DevOps and security teams to: + +- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices +- Prevent less technical users from making uninformed choices when provisioning clusters +- Share different templates with different sets of users and groups +- Delegate ownership of templates to users who are trusted to make changes to them +- Control which users can create templates +- Require users to create clusters from a template + +# Configurable Settings + +RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: + +- Cloud provider options +- Pod security options +- Network providers +- Ingress controllers +- Network security configuration +- Network plugins +- Private registry URL and credentials +- Add-ons +- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services + +The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. + +# Scope of RKE Templates + +RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. + +RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
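+
+Concretely, the cluster configuration that a template pre-defines is ordinary RKE YAML. A heavily abbreviated sketch might look like the following; the directives sit under the `rancher_kubernetes_engine_config` key described in the YAML customization section later on this page, and every value here is an illustrative assumption rather than a recommendation.
+
+```
+# Abbreviated sketch of the cluster configuration portion of an RKE template
+rancher_kubernetes_engine_config:
+  network:
+    plugin: canal                # network provider
+  ingress:
+    provider: nginx              # ingress controller
+  services:
+    kubelet:
+      extra_args:
+        max-pods: "250"          # example Kubernetes component tuning
+  addons: |-
+    ---
+    apiVersion: v1
+    kind: Namespace
+    metadata:
+      name: example-namespace    # hypothetical inline add-on (see the Add-ons section below)
+```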
+ +As of v2.3.3, the settings of an existing cluster can be [saved as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#updating-a-template), and the cluster is upgraded to [use a newer version of the template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. + + +# Example Scenarios +When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. + +These [example scenarios](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md) describe how an organization could use templates to standardize cluster creation. + +Some of the example scenarios include the following: + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) of the template. 
+- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#allowing-other-users-to-control-and-share-a-template) + +# Template Management + +When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. + +Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. + +RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. + +In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. + +For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. + +The documents in this section explain the details of RKE template management: + +- [Getting permission to create templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md) +- [Creating and revising templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md) +- [Enforcing template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) +- [Overriding template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md) +- [Sharing templates with cluster creators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) +- [Sharing ownership of a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-ownership-of-templates) + +An [example YAML configuration file for a template](../reference-guides/rke1-template-example-yaml.md) is provided for reference. 
+ +# Applying Templates + +You can [create a cluster from a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md) + +If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#updating-a-cluster-created-with-an-rke-template) + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. + +As of Rancher v2.3.3, you can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +# Standardizing Hardware + +RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +# YAML Customization + +If you define an RKE template as a YAML file, you can modify this [example RKE template YAML](../reference-guides/rke1-template-example-yaml.md). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. + +The RKE documentation also has [annotated](https://rancher.com/docs/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. + +For guidance on available options, refer to the RKE documentation on [cluster configuration.](https://rancher.com/docs/rke/latest/en/config-options/) + +### Add-ons + +The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file](https://rancher.com/docs/rke/latest/en/config-options/add-ons/). + +The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. + +Some things you could do with add-ons include: + +- Install applications on the Kubernetes cluster after it starts +- Install plugins on nodes that are deployed with a Kubernetes daemonset +- Automatically set up namespaces, service accounts, or role binding + +The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. 
To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md new file mode 100644 index 0000000000..99a8a889c8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/about-the-api.md @@ -0,0 +1,52 @@ +--- +title: API +weight: 24 +--- + +## How to use the API + +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys](../reference-guides/user-settings/api-keys.md). + +## Authentication + +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys](../reference-guides/user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](../reference-guides/about-the-api/api-tokens.md). + +## Making requests + +The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). + +- Every type has a Schema which describes: + - The URL to get to the collection of this type of resources + - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. + - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). + - Every field that filtering is allowed on + - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. + + +- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. + +- In practice, you will probably just want to construct URL strings. 
We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. + +- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. + +- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. + +- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. + +- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. + +- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). + +## Filtering + +Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. + +## Sorting + +Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. + +## Pagination + +API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md new file mode 100644 index 0000000000..105c41ce92 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/access-clusters.md @@ -0,0 +1,32 @@ +--- +title: Cluster Access +weight: 1 +--- + +This section is about what tools can be used to access clusters managed by Rancher. 
+ +For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +For more information on role-based access control, see [this section.](manage-role-based-access-control-rbac.md) + +For information on how to set up an authentication system, see [this section.](about-authentication.md) + + +### Rancher UI + +Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API, so any action possible in the UI is also possible in the Rancher CLI or Rancher API. + +### kubectl + +You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: + +- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration on your part. For more information, see [Accessing Clusters with kubectl Shell](k8s-in-ranchecluster-admin/cluster-access/kubectl). +- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). + +### Rancher CLI + +You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. + +### Rancher API + +Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking **View in API** for any Rancher UI object.
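For a rough idea of what an API call looks like, the sketch below uses `curl` with an API key for HTTP basic authentication. The server URL, token value, and the `state` filter field are placeholders; substitute your own values.

```bash
# Placeholders: use your Rancher server URL and an API key created under "API & Keys".
# The key has the form "token-xxxxx:<secret>" and is sent via HTTP basic authentication.
RANCHER_URL="https://rancher.example.com"
API_TOKEN="token-abcde:examplesecret"

# List the clusters this API key can see
curl -s -u "$API_TOKEN" "$RANCHER_URL/v3/clusters"

# Filter a collection and limit the page size ("state" is an example of a filterable field)
curl -s -u "$API_TOKEN" "$RANCHER_URL/v3/clusters?state=active&limit=10"
```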
\ No newline at end of file diff --git a/content/rancher/v2.6/en/installation/resources/advanced/_index.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/advanced/_index.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-options.md diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/advanced-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md new file mode 100644 index 0000000000..48a529576f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gap-helm2.md @@ -0,0 +1,45 @@ +--- +title: Installing Rancher in an Air Gapped Environment with Helm 2 +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-installation/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/ + - /rancher/v2.0-v2.4/en/installation/options/air-gap-helm2 + - /rancher/v2.x/en/installation/resources/advanced/air-gap-helm2/ +--- + +> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. +> +> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. +> +> This section provides a copy of the older instructions for installing Rancher on a Kubernetes cluster using Helm 2 in an air gap environment, and it is intended to be used if upgrading to Helm 3 is not feasible. + +This section is about installing the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +Throughout the installation instructions, there will be _tabs_ for either a high availability Kubernetes installation or a single-node Docker installation. + +### Air Gapped Kubernetes Installations + +This section covers how to install Rancher on a Kubernetes cluster in an air gapped environment. + +A Kubernetes installation consists of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +### Air Gapped Docker Installations + +These instructions also cover how to install Rancher on a single node in an air gapped environment. + +The Docker installation is for Rancher users who want to test out Rancher. Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation.
+ +Instead of running the Docker installation, you have the option to follow the Kubernetes Install guide, but only use one node to install Rancher. Afterwards, you can scale up the etcd nodes in your Kubernetes cluster to make it a Kubernetes Installation. + +# Installation Outline + +- [1. Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +- [2. Collect and Publish Images to your Private Registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +- [3. Launch a Kubernetes Cluster with RKE](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +- [4. Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md new file mode 100644 index 0000000000..cd5be4553d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -0,0 +1,31 @@ +--- +title: Air Gapped Helm CLI Install +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/air-gap-installation/ + - /rancher/v2.0-v2.4/en/installation/air-gap-high-availability/ + - /rancher/v2.0-v2.4/en/installation/air-gap-single-node/ +--- + +This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. + +For more information on each installation option, refer to [this page.](installation-and-upgrade.md) + +Throughout the installation instructions, there will be _tabs_ for each installation option. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. + +# Installation Outline + +1. [Set up infrastructure and private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +2. [Collect and publish images to your private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +3. [Set up a Kubernetes cluster (Skip this step for Docker installations)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +4. 
[Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +# Upgrades + +To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.](upgrades.md) + +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-config.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md new file mode 100644 index 0000000000..eb2d580219 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -0,0 +1,60 @@ +--- +title: Authentication, Permissions and Global Configuration +weight: 6 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/ + - /rancher/v2.0-v2.4/en/concepts/global-configuration/server-url/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/server-url/ + - /rancher/v2.0-v2.4/en/admin-settings/log-in/ +--- + +After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. + +## First Log In + +After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**.You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. + +>**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. + +## Authentication + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. + +For more information how authentication works and how to configure each provider, see [Authentication](about-authentication.md). + +## Authorization + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. 
Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. + +For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)](manage-role-based-access-control-rbac.md). + +## Pod Security Policies + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. + +For more information how to create and use PSPs, see [Pod Security Policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +## Provisioning Drivers + +Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +For more information, see [Provisioning Drivers](about-provisioning-drivers.md). + +## Adding Kubernetes Versions into Rancher + +_Available as of v2.3.0_ + +With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but not intended to upgrade Kubernetes minor versions (i.e. `v1.X.0`) as Kubernetes tends to deprecate or add APIs between minor versions. + +The information that Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md) + +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md). + +For more information on how metadata works and how to configure metadata config, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md). + +## Enabling Experimental Features + +_Available as of v2.3.0_ + +Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.](installation/options/feature-flags/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md new file mode 100644 index 0000000000..c4eda7c857 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -0,0 +1,12 @@ +--- +title: Backups and Disaster Recovery +weight: 5 +--- + +This section is devoted to protecting your data in a disaster scenario. + +To protect yourself from a disaster scenario, you should create backups on a regular basis. 
+ +- [Backup](backups/backup) +- [Restore](backups/restore) + diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md new file mode 100644 index 0000000000..4bb14b9178 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/best-practices.md @@ -0,0 +1,22 @@ +--- +title: Best Practices Guide +weight: 4 +aliases: + - /rancher/v2.x/en/best-practices/v2.0-v2.4/ +--- + +The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. + +If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. + +Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. + +For more guidance on best practices, you can consult these resources: + +- [Security](rancher-security.md) +- [Rancher Blog](https://rancher.com/blog/) + - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) + - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) +- [Rancher Forum](https://forums.rancher.com/) +- [Rancher Users Slack](https://slack.rancher.io/) +- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md new file mode 100644 index 0000000000..c9e6c2cb5f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -0,0 +1,50 @@ +--- +title: Checklist for Production-Ready Clusters +weight: 2 +--- + +In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. + +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) + +This is a shortlist of best practices that we strongly recommend for all production clusters. + +For a full list of all the best practices that we recommend, refer to the [best practices section.](best-practices.md) + +### Node Requirements + +* Make sure your nodes fulfill all of the [node requirements,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) including the port requirements. + +### Back up etcd + +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. 
Make sure you configure [etcd Recurring Snapshots](backups/v2.0.x-v2.4.x/backup/rke-backups/#option-a-recurring-snapshots) for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. + +### Cluster Architecture + +* Nodes should have one of the following role configurations: + * `etcd` + * `controlplane` + * `etcd` and `controlplane` + * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) +* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +* Assign two or more nodes the `controlplane` role for master component high availability. +* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md) + +For more information about the +number of nodes for each Kubernetes role, refer to the section on [recommended architecture.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### Logging and Monitoring + +* Configure alerts/notifiers for Kubernetes components (System Service). +* Configure logging for cluster analysis and post-mortems. + +### Reliability + +* Perform load tests on your cluster to verify that its hardware can support your workloads. + +### Networking + +* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers/) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). 
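To make the etcd backup and latency recommendations above concrete, here is a minimal sketch of the `services.etcd` block in an RKE `cluster.yml`. It assumes an RKE-provisioned cluster; the snapshot interval and retention values are examples, and the tuning flags are shown with the default values quoted in the networking section.

```yaml
# Sketch of the etcd service section of an RKE cluster.yml (values are examples).
services:
  etcd:
    backup_config:
      enabled: true        # take recurring snapshots
      interval_hours: 12   # how often to snapshot
      retention: 6         # how many snapshots to keep
      # Storing snapshots off the node (for example in S3) can also be configured here.
    extra_args:
      heartbeat-interval: 500
      election-timeout: 5000
```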
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scan-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md new file mode 100644 index 0000000000..87f4a0bde1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cis-scans.md @@ -0,0 +1,156 @@ +--- +title: CIS Scans +weight: 18 +aliases: + - /rancher/v2.0-v2.4/en/cis-scans/legacy + - /rancher/v2.0-v2.4/en/cis-scans + - /rancher/v2.x/en/cis-scans/v2.4/ +--- + +_Available as of v2.4.0_ + +- [Prerequisites](#prerequisites) +- [Running a scan](#running-a-scan) +- [Scheduling recurring scans](#scheduling-recurring-scans) +- [Skipping tests](#skipping-tests) +- [Setting alerts](#setting-alerts) +- [Deleting a report](#deleting-a-report) +- [Downloading a report](#downloading-a-report) +- [List of skipped and not applicable tests](#list-of-skipped-and-not-applicable-tests) + +# Prerequisites + +To run security scans on a cluster and access the generated reports, you must be an [Administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [Cluster Owner.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + +Rancher can only run security scans on clusters that were created with RKE, which includes custom clusters and clusters that Rancher created in an infrastructure provider such as Amazon EC2 or GCE. Imported clusters and clusters in hosted Kubernetes providers can't be scanned by Rancher. + +The security scan cannot run in a cluster that has Windows nodes. + +You will only be able to see the CIS scan reports for clusters that you have access to. + +# Running a Scan + +1. From the cluster view in Rancher, click **Tools > CIS Scans.** +1. Click **Run Scan.** +1. Choose a CIS scan profile. + +**Result:** A report is generated and displayed in the **CIS Scans** page. To see details of the report, click the report's name. + +# Scheduling Recurring Scans + +Recurring scans can be scheduled to run on any RKE Kubernetes cluster. + +To enable recurring scans, edit the advanced options in the cluster configuration during cluster creation or after the cluster has been created. + +To schedule scans for an existing cluster: + +1. Go to the cluster view in Rancher. +1. Click **Tools > CIS Scans.** +1. Click **Add Schedule.** This takes you to the section of the cluster editing page that is applicable to configuring a schedule for CIS scans. (This section can also be reached by going to the cluster view, clicking **⋮ > Edit,** and going to the **Advanced Options.**) +1. In the **CIS Scan Enabled** field, click **Yes.** +1. In the **CIS Scan Profile** field, choose a **Permissive** or **Hardened** profile. The corresponding CIS Benchmark version is included in the profile name. Note: Any skipped tests [defined in a separate ConfigMap](#skipping-tests) will be skipped regardless of whether a **Permissive** or **Hardened** profile is selected. 
When selecting the the permissive profile, you should see which tests were skipped by Rancher (tests that are skipped by default for RKE clusters) and which tests were skipped by a Rancher user. In the hardened test profile, the only skipped tests will be skipped by users. +1. In the **CIS Scan Interval (cron)** job, enter a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to define how often the cluster will be scanned. +1. In the **CIS Scan Report Retention** field, enter the number of past reports that should be kept. + +**Result:** The security scan will run and generate reports at the scheduled intervals. + +The test schedule can be configured in the `cluster.yml`: + +```yaml +scheduled_cluster_scan: +    enabled: true +    scan_config: +        cis_scan_config: +            override_benchmark_version: rke-cis-1.4 +            profile: permissive +    schedule_config: +        cron_schedule: 0 0 * * * +        retention: 24 +``` + + +# Skipping Tests + +You can define a set of tests that will be skipped by the CIS scan when the next report is generated. + +These tests will be skipped for subsequent CIS scans, including both manually triggered and scheduled scans, and the tests will be skipped with any profile. + +The skipped tests will be listed alongside the test profile name in the cluster configuration options when a test profile is selected for a recurring cluster scan. The skipped tests will also be shown every time a scan is triggered manually from the Rancher UI by clicking **Run Scan.** The display of skipped tests allows you to know ahead of time which tests will be run in each scan. + +To skip tests, you will need to define them in a Kubernetes ConfigMap resource. Each skipped CIS scan test is listed in the ConfigMap alongside the version of the CIS benchmark that the test belongs to. + +To skip tests by editing a ConfigMap resource, + +1. Create a `security-scan` namespace. +1. Create a ConfigMap named `security-scan-cfg`. +1. Enter the skip information under the key `config.json` in the following format: + + ```json + { + "skip": { + "rke-cis-1.4": [ + "1.1.1", + "1.2.2" + ] + } + } + ``` + + In the example above, the CIS benchmark version is specified alongside the tests to be skipped for that version. + +**Result:** These tests will be skipped on subsequent scans that use the defined CIS Benchmark version. + +# Setting Alerts + +Rancher provides a set of alerts for cluster scans. which are not configured to have notifiers by default: + +- A manual cluster scan was completed +- A manual cluster scan has failures +- A scheduled cluster scan was completed +- A scheduled cluster scan has failures + +> **Prerequisite:** You need to configure a [notifier](../explanations/integrations-in-rancher/notifiers.md) before configuring, sending, or receiving alerts. + +To activate an existing alert for a CIS scan result, + +1. From the cluster view in Rancher, click **Tools > Alerts.** +1. Go to the section called **A set of alerts for cluster scans.** +1. Go to the alert you want to activate and click **⋮ > Activate.** +1. Go to the alert rule group **A set of alerts for cluster scans** and click **⋮ > Edit.** +1. Scroll down to the **Alert** section. In the **To** field, select the notifier that you would like to use for sending alert notifications. +1. Optional: To limit the frequency of the notifications, click on **Show advanced options** and configure the time interval of the alerts. +1. 
Click **Save.** + +**Result:** The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. + +To create a new alert, + +1. Go to the cluster view and click **Tools > CIS Scans.** +1. Click **Add Alert.** +1. Fill out the form. +1. Enter a name for the alert. +1. In the **Is** field, set the alert to be triggered when a scan is completed or when a scan has a failure. +1. In the **Send a** field, set the alert as a **Critical,** **Warning,** or **Info** alert level. +1. Choose a [notifier](../explanations/integrations-in-rancher/notifiers.md) for the alert. + +**Result:** The alert is created and activated. The notifications will be triggered when the a scan is run on a cluster and the active alerts have satisfied conditions. + +For more information about alerts, refer to [this page.](cluster-admin/tools/alerts/) + +# Deleting a Report + +1. From the cluster view in Rancher, click **Tools > CIS Scans.** +1. Go to the report that should be deleted. +1. Click the **⋮ > Delete.** +1. Click **Delete.** + +# Downloading a Report + +1. From the cluster view in Rancher, click **Tools > CIS Scans.** +1. Go to the report that you want to download. Click **⋮ > Download.** + +**Result:** The report is downloaded in CSV format. + +# List of Skipped and Not Applicable Tests + +For a list of skipped and not applicable tests, refer to this page. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md new file mode 100644 index 0000000000..bcb654ba14 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cli-with-rancher.md @@ -0,0 +1,83 @@ +--- +title: Using the Rancher Command Line Interface +description: The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI +metaTitle: "Using the Rancher Command Line Interface " +metaDescription: "The Rancher CLI is a unified tool that you can use to interact with Rancher. With it, you can operate Rancher using a command line interface rather than the GUI" +weight: 21 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/cluster-access/cli + - /rancher/v2.x/en/cli/ +--- + +The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. + +### Download Rancher CLI + +The binary can be downloaded directly from the UI. The link can be found in the right hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. + +### Requirements + +After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: + +- Your Rancher Server URL, which is used to connect to Rancher Server. +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../reference-guides/user-settings/api-keys.md). + +### CLI Authentication + +Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. 
Log in using the following command (replace `` and `` with your information): + +```bash +$ ./rancher login https:// --token +``` + +If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. + +### Project Selection + +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](k8s-in-rancher/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project. + +**Example: `./rancher context switch` Output** +``` +User:rancher-cli-directory user$ ./rancher context switch +NUMBER CLUSTER NAME PROJECT ID PROJECT NAME +1 cluster-2 c-7q96s:p-h4tmb project-2 +2 cluster-2 c-7q96s:project-j6z6d Default +3 cluster-1 c-lchzv:p-xbpdt project-1 +4 cluster-1 c-lchzv:project-s2mch Default +Select a Project: +``` + +After you enter a number, the console displays a message that you've changed projects. + +``` +INFO[0005] Setting new context to project project-1 +INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json +``` + +### Commands + +The following commands are available for use in Rancher CLI. + +| Command | Result | +|---|---| +| `apps, [app]` | Performs operations on catalog applications (i.e. individual [Helm charts](https://docs.helm.sh/developing_charts/) or Rancher charts. | +| `catalog` | Performs operations on [catalogs](catalog/). | +| `clusters, [cluster]` | Performs operations on your [clusters](kubernetes-clusters-in-rancher-setup.md). | +| `context` | Switches between Rancher [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects](k8s-in-rancher/projects-and-namespaces/) and [workloads](workloads-and-pods.md)). Specify resources by name or ID. | +| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | +| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | +| `namespaces, [namespace]` |Performs operations on namespaces. | +| `nodes, [node]` |Performs operations on nodes. | +| `projects, [project]` | Performs operations on [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). | +| `ps` | Displays [workloads](workloads-and-pods.md) in a project. | +| `settings, [setting]` | Shows the current settings for your Rancher Server. | +| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | +| `help, [h]` | Shows a list of commands or help for one command. | + + +### Rancher CLI Help + +Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. + +All commands accept the `--help` flag, which documents each command's usage. 
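As a short illustrative session, the commands from the table above can be combined once you have logged in and selected a project; the output and resource names will depend on your environment.

```bash
# Assumes you have already run `./rancher login` and `./rancher context switch` as shown above.
./rancher clusters            # list the clusters your API key can see
./rancher kubectl get nodes   # pass a kubectl command through to the current cluster
./rancher ps                  # show the workloads in the selected project
./rancher inspect --help      # every command documents its own usage
```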
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md new file mode 100644 index 0000000000..a63ac431c7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-alerts.md @@ -0,0 +1,347 @@ +--- +title: Cluster Alerts +shortTitle: Alerts +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/cluster-alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/ +--- + +To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. + +This section covers the following topics: + +- [About Alerts](#about-alerts) + - [Alert Event Examples](#alert-event-examples) + - [Alerts Triggered by Prometheus Queries](#alerts-triggered-by-prometheus-queries) + - [Urgency Levels](#urgency-levels) + - [Scope of Alerts](#scope-of-alerts) + - [Managing Cluster Alerts](#managing-cluster-alerts) +- [Adding Cluster Alerts](#adding-cluster-alerts) +- [Cluster Alert Configuration](#cluster-alert-configuration) + - [System Service Alerts](#system-service-alerts) + - [Resource Event Alerts](#resource-event-alerts) + - [Node Alerts](#node-alerts) + - [Node Selector Alerts](#node-selector-alerts) + - [CIS Scan Alerts](#cis-scan-alerts) + - [Metric Expression Alerts](#metric-expression-alerts) + +# About Alerts + +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Leveraging these tools, Rancher can notify [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) and [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) of events they need to address. + +Before you can receive alerts, you must configure one or more notifier in Rancher. + +When you create a cluster, some alert rules are predefined. You can receive these alerts if you configure a [notifier](../explanations/integrations-in-rancher/notifiers.md) for them. + +For details about what triggers the predefined alerts, refer to the [documentation on default alerts.](cluster-admin/tools/alerts/default-alerts) + +### Alert Event Examples + +Some examples of alert events are: + +- A Kubernetes master component entering an unhealthy state. +- A node or workload error occurring. +- A scheduled deployment taking place as planned. +- A node's hardware resources becoming overstressed. + +### Alerts Triggered by Prometheus Queries + +When you edit an alert rule, you will have the opportunity to configure the alert to be triggered based on a Prometheus expression. 
For examples of expressions, refer to [this page.](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/expression/) + +Monitoring must be [enabled](monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/) before you can trigger alerts with custom Prometheus queries or expressions. + +### Urgency Levels + +You can set an urgency level for each alert. This urgency appears in the notification you receive, helping you to prioritize your response actions. For example, if you have an alert configured to inform you of a routine deployment, no action is required. These alerts can be assigned a low priority level. However, if a deployment fails, it can critically impact your organization, and you need to react quickly. Assign these alerts a high priority level. + +### Scope of Alerts + +The scope for alerts can be set at either the cluster level or [project level](project-admin/tools/alerts/). + +At the cluster level, Rancher monitors components in your Kubernetes cluster, and sends you alerts related to: + +- The state of your nodes. +- The system services that manage your Kubernetes cluster. +- The resource events from specific system services. +- The Prometheus expression cross the thresholds + +### Managing Cluster Alerts + +After you set up cluster alerts, you can manage each alert object. To manage alerts, browse to the cluster containing the alerts, and then select **Tools > Alerts** that you want to manage. You can: + +- Deactivate/Reactive alerts +- Edit alert settings +- Delete unnecessary alerts +- Mute firing alerts +- Unmute muted alerts + +# Adding Cluster Alerts + +As a [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to send you alerts for cluster events. + +>**Prerequisite:** Before you can receive cluster alerts, you must [add a notifier](monitoring-alerting/legacy/notifiers/). + +1. From the **Global** view, navigate to the cluster that you want to configure cluster alerts for. Select **Tools > Alerts**. Then click **Add Alert Group**. +1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. +1. Based on the type of alert you want to create, refer to the [cluster alert configuration section.](#cluster-alert-configuration) +1. Continue adding more **Alert Rule** to the group. +1. Finally, choose the [notifiers](../explanations/integrations-in-rancher/notifiers.md) to send the alerts to. + + - You can set up multiple notifiers. + - You can change notifier recipients on the fly. +1. Click **Create.** + +**Result:** Your alert is configured. A notification is sent when the alert is triggered. + + +# Cluster Alert Configuration + + - [System Service Alerts](#system-service-alerts) + - [Resource Event Alerts](#resource-event-alerts) + - [Node Alerts](#node-alerts) + - [Node Selector Alerts](#node-selector-alerts) + - [CIS Scan Alerts](#cis-scan-alerts) + - [Metric Expression Alerts](#metric-expression-alerts) + +# System Service Alerts + +This alert type monitor for events that affect one of the Kubernetes master components, regardless of the node it occurs on. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. 
+ +### When a + +Select the **System Services** option, and then select an option from the dropdown: + +- [controller-manager](https://kubernetes.io/docs/concepts/overview/components/#kube-controller-manager) +- [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd) +- [scheduler](https://kubernetes.io/docs/concepts/overview/components/#kube-scheduler) + +### Is + +The alert will be triggered when the selected Kubernetes master component is unhealthy. + +### Send a + +Select the urgency level of the alert. The options are: + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + + Select the urgency level based on the importance of the service and how many nodes fill the role within your cluster. For example, if you're making an alert for the `etcd` service, select **Critical**. If you're making an alert for redundant schedulers, **Warning** is more appropriate. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. + +# Resource Event Alerts + +This alert type monitors for specific events that are thrown from a resource type. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Choose the type of resource event that triggers an alert. The options are: + +- **Normal**: triggers an alert when any standard resource event occurs. +- **Warning**: triggers an alert when unexpected resource events occur. + +Select a resource type from the **Choose a Resource** drop-down that you want to trigger an alert. + +- [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) +- [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +- [Node](https://kubernetes.io/docs/concepts/architecture/nodes/) +- [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) +- [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert by considering factors such as how often the event occurs or its importance. For example: + +- If you set a normal alert for pods, you're likely to receive alerts often, and individual pods usually self-heal, so select an urgency of **Info**. +- If you set a warning alert for StatefulSets, it's very likely to impact operations, so select an urgency of **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. 
+ +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. + +# Node Alerts + +This alert type monitors for events that occur on a specific node. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Select the **Node** option, and then make a selection from the **Choose a Node** drop-down. + +### Is + +Choose an event to trigger the alert. + +- **Not Ready**: Sends you an alert when the node is unresponsive. +- **CPU usage over**: Sends you an alert when the node raises above an entered percentage of its processing allocation. +- **Mem usage over**: Sends you an alert when the node raises above an entered percentage of its memory allocation. + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. + +# Node Selector Alerts + +This alert type monitors for events that occur on any node on marked with a label. For more information, see the Kubernetes documentation for [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Select the **Node Selector** option, and then click **Add Selector** to enter a key value pair for a label. This label should be applied to one or more of your nodes. Add as many selectors as you'd like. + +### Is + +Choose an event to trigger the alert. + +- **Not Ready**: Sends you an alert when selected nodes are unresponsive. +- **CPU usage over**: Sends you an alert when selected nodes raise above an entered percentage of processing allocation. +- **Mem usage over**: Sends you an alert when selected nodes raise above an entered percentage of memory allocation. + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. 
You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. + +# CIS Scan Alerts +_Available as of v2.4.0_ + +This alert type is triggered based on the results of a CIS scan. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Select **CIS Scan.** + +### Is + +Choose an event to trigger the alert: + +- Completed Scan +- Has Failure + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's CPU raises above 60% deems an urgency of **Info**, but a node that is **Not Ready** deems an urgency of **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. + +# Metric Expression Alerts + +This alert type monitors for the overload from Prometheus expression querying, it would be available after you enable monitoring. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Input or select an **Expression**, the dropdown shows the original metrics from Prometheus, including: + +- [**Node**](https://github.com/prometheus/node_exporter) +- [**Container**](https://github.com/google/cadvisor) +- [**ETCD**](https://etcd.io/docs/v3.4.0/op-guide/monitoring/) +- [**Kubernetes Components**](https://github.com/kubernetes/metrics) +- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics) +- [**Fluentd**](https://docs.fluentd.org/v1.0/articles/monitoring-prometheus) (supported by [Logging]({{}}/rancher/v2.0-v2.4//en/cluster-admin/tools/logging)) +- [**Cluster Level Grafana**](http://docs.grafana.org/administration/metrics/) +- **Cluster Level Prometheus** + +### Is + +Choose a comparison: + +- **Equal**: Trigger alert when expression value equal to the threshold. +- **Not Equal**: Trigger alert when expression value not equal to the threshold. +- **Greater Than**: Trigger alert when expression value greater than to threshold. +- **Less Than**: Trigger alert when expression value equal or less than the threshold. +- **Greater or Equal**: Trigger alert when expression value greater to equal to the threshold. 
+- **Less or Equal**: Trigger alert when expression value less or equal to the threshold. + +If applicable, choose a comparison value or a threshold for the alert to be triggered. + +### For + +Select a duration for a trigger alert when the expression value crosses the threshold longer than the configured duration. + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on its impact on operations. For example, an alert triggered when a node's load expression ```sum(node_load5) / count(node_cpu_seconds_total{mode="system"})``` raises above 0.6 deems an urgency of **Info**, but 1 deems an urgency of **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before re-sending a given alert that has already been sent, default to 1 hour. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md new file mode 100644 index 0000000000..7fabf4200e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-configuration.md @@ -0,0 +1,70 @@ +--- +title: Cluster Configuration +weight: 2025 +--- + +After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. + +For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) +- [Editing Clusters in the Rancher UI](#editing-clusters-in-the-rancher-ui) +- [Editing Clusters with YAML](#editing-clusters-with-yaml) +- [Updating ingress-nginx](#updating-ingress-nginx) + +### Cluster Management Capabilities by Cluster Type + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +### Editing Clusters in the Rancher UI + +To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. + +In [clusters launched by RKE](launch-kubernetes-with-rancher.md), you can edit any of the remaining options that follow. + +Note that these options are not available for imported clusters or hosted Kubernetes clusters. + +Option | Description | +---------|----------| + Kubernetes Version | The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes](../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). 
| + Network Provider | The container networking interface (CNI) that powers networking for your cluster.

    **Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. | + Project Network Isolation | As of Rancher v2.0.7, if you're using the Canal network provider, you can choose whether to enable or disable inter-project communication. | + Nginx Ingress | If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. | + Metrics Server Monitoring | Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. | + Pod Security Policy Support | Enables [pod security policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. | + Docker version on nodes | Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a [supported Docker version](installation/options/rke-add-on/layer-7-lb/), Rancher will stop pods from running on nodes that don't have a supported Docker version installed. | + Docker Root Directory | The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. | + Default Pod Security Policy | If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. | + Cloud Provider | If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option](cluster-provisioning/rke-clusters/options/cloud-providers/) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. | + +### Editing Clusters with YAML + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. +- To read from an existing RKE file, click **Read from File**. + +![image](/img/cluster-options-yaml.png) + +For an example of RKE config file syntax, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). + +For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +In Rancher v2.0.0-v2.2.x, the config file is identical to the [cluster config file for the Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the [cluster configuration reference.](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +>**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. + + + + +### Updating ingress-nginx + +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. + +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md new file mode 100644 index 0000000000..c558971b99 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-logging.md @@ -0,0 +1,128 @@ +--- +title: Cluster Logging +shortTitle: Logging +description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. +metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/tasks/logging/ + - /rancher/v2.0-v2.4/en/cluster-admin/tools/logging + - /rancher/v2.0-v2.4/en/logging/legacy/cluster-logging + - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/cluster-logging/ + - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/ + - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/cluster-logging/ +--- + +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debug and troubleshoot problems + +Rancher supports integration with the following services: + +- Elasticsearch +- Splunk +- Kafka +- Syslog +- Fluentd + +This section covers the following topics: + +- [How logging integrations work](#how-logging-integrations-work) +- [Requirements](#requirements) +- [Logging scope](#logging-scope) +- [Enabling cluster logging](#enabling-cluster-logging) + +# How Logging Integrations Work + +Rancher can integrate with popular external services used for event streams, telemetry, or search. These services can log errors and warnings in your Kubernetes infrastructure to a stream. + +These services collect container log events, which are saved to the `/var/log/containers` directory on each of your nodes. The service collects both standard and error events. You can then log into your services to review the events collected, leveraging each service's unique features. + +When configuring Rancher to integrate with these services, you'll have to point Rancher toward the service's endpoint and provide authentication information. + +Additionally, you'll have the opportunity to enter key-value pairs to filter the log events collected. The service will only collect events for containers marked with your configured key-value pairs. + +>**Note:** You can only configure one logging service per cluster or per project. 
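+
+To see what these integrations actually collect, you can inspect the per-container log files on any node. The commands below are only an illustrative check; the file names will differ in your cluster, and reading them typically requires root:
+
+```
+# List the per-container log files that the log collector reads on this node
+sudo ls /var/log/containers/
+
+# With the json-file log driver, each file contains JSON-formatted log lines
+sudo tail -n 3 /var/log/containers/<pod-name>.log
+```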
+ +# Requirements + +The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command: + +``` +$ docker info | grep 'Logging Driver' +Logging Driver: json-file +``` + +# Logging Scope + +You can configure logging at either cluster level or project level. + +- Cluster logging writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters](launch-kubernetes-with-rancher.md), it also writes logs for all the Kubernetes system components. +- [Project logging](project-admin/tools/logging/) writes logs for every pod in that particular project. + +Logs that are sent to your logging service are from the following locations: + + - Pod logs stored at `/var/log/containers`. + - Kubernetes system components logs stored at `/var/lib/rancher/rke/log/`. + +# Enabling Cluster Logging + +As an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to send Kubernetes logs to a logging service. + +1. From the **Global** view, navigate to the cluster that you want to configure cluster logging. + +1. Select **Tools > Logging** in the navigation bar. + +1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports integration with the following services: + + - [Elasticsearch](cluster-admin/tools/logging/elasticsearch/) + - [Splunk](cluster-admin/tools/logging/splunk/) + - [Kafka](cluster-admin/tools/logging/kafka/) + - [Syslog](cluster-admin/tools/logging/syslog/) + - [Fluentd](cluster-admin/tools/logging/fluentd/) + +1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. + + - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. + + - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) + - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) + - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) + - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) + - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) + + - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. + 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. + + - You can use either a self-signed certificate or one provided by a certificate authority. + + - You can generate a self-signed certificate using an openssl command. 
For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. + +1. (Optional) Complete the **Additional Logging Configuration** form. + + 1. **Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. + + 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. + + 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. + +1. Click **Test**. Rancher sends a test log to the service. + + > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. + +1. Click **Save**. + +**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. + +## Related Links + +[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md new file mode 100644 index 0000000000..0e1a74ea9c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-monitoring.md @@ -0,0 +1,124 @@ +--- +title: Integrating Rancher and Prometheus for Cluster Monitoring +shortTitle: Monitoring +description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/monitoring + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/monitoring/cluster-monitoring + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/monitoring/cluster-monitoring + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/ + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/ +--- + +_Available as of v2.2.0_ + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. 
+ +This section covers the following topics: + +- [About Prometheus](#about-prometheus) +- [Monitoring scope](#monitoring-scope) +- [Enabling cluster monitoring](#enabling-cluster-monitoring) +- [Resource consumption](#resource-consumption) + - [Resource consumption of Prometheus pods](#resource-consumption-of-prometheus-pods) + - [Resource consumption of other pods](#resource-consumption-of-other-pods) + +# About Prometheus + +Prometheus provides a _time series_ of your data, which is, according to [Prometheus documentation](https://prometheus.io/docs/concepts/data_model/): + +You can configure these services to collect logs at either the cluster level or the project level. This page describes how to enable monitoring for a cluster. For details on enabling monitoring for a project, refer to the [project administration section](project-admin/tools/monitoring/). + +>A stream of timestamped values belonging to the same metric and the same set of labeled dimensions, along with comprehensive statistics and metrics of the monitored cluster. + +In other words, Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or [Grafana](https://grafana.com/), which is an analytics viewing platform deployed along with Prometheus. + +By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, restore crashed servers, etc. + +Multi-tenancy support in terms of cluster-only and project-only Prometheus instances are also supported. + +# Monitoring Scope + +Using Prometheus, you can monitor Rancher at both the cluster level and [project level](project-admin/tools/monitoring/). For each cluster and project that is enabled for monitoring, Rancher deploys a Prometheus server. + +- Cluster monitoring allows you to view the health of your Kubernetes cluster. Prometheus collects metrics from the cluster components below, which you can view in graphs and charts. + + - Kubernetes control plane + - etcd database + - All nodes (including workers) + +- [Project monitoring](project-admin/tools/monitoring/) allows you to view the state of pods running in a given project. Prometheus collects metrics from the project's deployed HTTP and TCP/UDP workloads. + +# Enabling Cluster Monitoring + +As an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. 
+ +> **Prerequisites:** The following TCP ports need to be opened for metrics scraping: +> +> | Port | Node type | Component | +> | --- | --- | --- | +> | 9796 | Worker | Node exporter | +> | 10254 | Worker | Nginx Ingress Controller | +> | 10250 | Worker/Controlplane | Kubelet | +> | 10251 | Controlplane | Kube scheduler | +> | 10252 | Controlplane | Kube controller manager | +> | 2379 | Etcd | Etcd server | + +> Monitoring V1 requires a Kubernetes verison less than or equal to v1.20.x. To install monitoring on Kubernetes v1.21+, you will need to [migrate to Monitoring V2.]({{}}/rancher/v2.5/en/monitoring-alerting/migrating/) + +1. From the **Global** view, navigate to the cluster that you want to configure cluster monitoring. + +1. Select **Tools > Monitoring** in the navigation bar. + +1. Select **Enable** to show the [Prometheus configuration options](monitoring-alerting/legacy/monitoring/cluster-monitoring/prometheus/). Review the [resource consumption recommendations](#resource-consumption) to ensure you have enough resources for Prometheus and on your worker nodes to enable monitoring. Enter in your desired configuration options. + +1. Click **Save**. + +**Result:** The Prometheus server will be deployed as well as two monitoring applications. The two monitoring applications, `cluster-monitoring` and `monitoring-operator`, are added as an [application](catalog/apps/) to the cluster's `system` project. After the applications are `active`, you can start viewing [cluster metrics](monitoring-alerting/legacy/monitoring/cluster-monitoring/cluster-metrics/) through the Rancher dashboard or directly from Grafana. + +> The default username and password for the Grafana instance will be `admin/admin`. However, Grafana dashboards are served via the Rancher authentication proxy, so only users who are currently authenticated into the Rancher server have access to the Grafana dashboard. + +# Resource Consumption + +When enabling cluster monitoring, you need to ensure your worker nodes and Prometheus pod have enough resources. The tables below provides a guide of how much resource consumption will be used. In larger deployments, it is strongly advised that the monitoring infrastructure be placed on dedicated nodes in the cluster. + +### Resource Consumption of Prometheus Pods + +This table is the resource consumption of the Prometheus pod, which is based on the number of all the nodes in the cluster. The count of nodes includes the worker, control plane and etcd nodes. Total disk space allocation should be approximated by the `rate * retention` period set at the cluster level. When enabling cluster level monitoring, you should adjust the CPU and Memory limits and reservation. + +Number of Cluster Nodes | CPU (milli CPU) | Memory | Disk +------------------------|-----|--------|------ +5 | 500 | 650 MB | ~1 GB/Day +50| 2000 | 2 GB | ~5 GB/Day +256| 4000 | 6 GB | ~18 GB/Day + +Additional pod resource requirements for cluster level monitoring. 
+ +| Workload | Container | CPU - Request | Mem - Request | CPU - Limit | Mem - Limit | Configurable | +|---------------------|---------------------------------|---------------|---------------|-------------|-------------|--------------| +| Prometheus | prometheus | 750m | 750Mi | 1000m | 1000Mi | Y | +| | prometheus-proxy | 50m | 50Mi | 100m | 100Mi | Y | +| | prometheus-auth | 100m | 100Mi | 500m | 200Mi | Y | +| | prometheus-config-reloader | - | - | 50m | 50Mi | N | +| | rules-configmap-reloader | - | - | 100m | 25Mi | N | +| Grafana | grafana-init-plugin-json-copy | 50m | 50Mi | 50m | 50Mi | Y | +| | grafana-init-plugin-json-modify | 50m | 50Mi | 50m | 50Mi | Y | +| | grafana | 100m | 100Mi | 200m | 200Mi | Y | +| | grafana-proxy | 50m | 50Mi | 100m | 100Mi | Y | +| Kube-State Exporter | kube-state | 100m | 130Mi | 100m | 200Mi | Y | +| Node Exporter | exporter-node | 200m | 200Mi | 200m | 200Mi | Y | +| Operator | prometheus-operator | 100m | 50Mi | 200m | 100Mi | Y | + + +### Resource Consumption of Other Pods + +Besides the Prometheus pod, there are components that are deployed that require additional resources on the worker nodes. + +Pod | CPU (milli CPU) | Memory (MB) +----|-----------------|------------ +Node Exporter (Per Node) | 100 | 30 +Kube State Cluster Monitor | 100 | 130 +Grafana | 100 | 150 +Prometheus Cluster Monitoring Nginx | 50 | 50 diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/_index.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-yml.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/advanced/cluster-yml-templates/_index.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/cluster-yml.md diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md new file mode 100644 index 0000000000..5b4b85382f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -0,0 +1,31 @@ +--- +title: Configuring Microsoft Active Directory Federation Service (SAML) +weight: 1205 +--- +_Available as of v2.0.7_ + +If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. + +## Prerequisites + +You must have Rancher installed. + +- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. +- You must have a global administrator account on your Rancher installation. + +You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. + +- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. +- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. + +## Setup Outline + +Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. 
+ +- [1. Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) +- [2. Configuring Rancher for Microsoft AD FS](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md) + +{{< saml_caveats >}} + + +### [Next: Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md new file mode 100644 index 0000000000..f51831b2f6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-openldap.md @@ -0,0 +1,52 @@ +--- +title: Configuring OpenLDAP +weight: 1113 +aliases: + - /rancher/v2.0-v2.4/en/tasks/global-configuration/authentication/openldap/ +--- + +_Available as of v2.0.5_ + +If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. + +## Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +## Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. 
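+
+Before testing from the Rancher UI, it can be useful to confirm the service account credentials and search base independently of Rancher. A minimal `ldapsearch` query, using a hypothetical server address, bind DN, and search base, might look like this:
+
+```
+# Bind as the read-only service account and list user entries under the search base
+# (adjust the object class filter to match your directory schema)
+ldapsearch -x -H ldap://openldap.example.com:389 \
+  -D "cn=rancher-bind,dc=example,dc=com" -W \
+  -b "dc=example,dc=com" "(objectClass=inetOrgPerson)" dn
+```
+
+If this query returns the expected user entries, the same connection details should work when you fill in the **Configure an OpenLDAP server** form.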
+ +> **Note:** +> +> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. + +1. Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. +2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. + +**Result:** + +- OpenLDAP authentication is configured. +- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. + +> **Note:** +> +> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md new file mode 100644 index 0000000000..9a1cd6ac23 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/configure-shibboleth-saml.md @@ -0,0 +1,109 @@ +--- +title: Configuring Shibboleth (SAML) +weight: 1210 +--- + +_Available as of v2.4.0_ + +If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. + +In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. + +> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md) + +This section covers the following topics: + +- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) + - [Shibboleth Prerequisites](#shibboleth-prerequisites) + - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) + - [SAML Provider Caveats](#saml-provider-caveats) +- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) + - [OpenLDAP Prerequisites](#openldap-prerequisites) + - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) + - [Troubleshooting](#troubleshooting) + +# Setting up Shibboleth in Rancher + +### Shibboleth Prerequisites +> +>- You must have a Shibboleth IdP Server configured. 
+>- Following are the Rancher Service Provider URLs needed for configuration: +Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` +Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` +>- Export a `metadata.xml` file from your IdP Server. For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) + +### Configure Shibboleth in Rancher +If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +1. From the **Global** view, select **Security > Authentication** from the main menu. + +1. Select **Shibboleth**. + +1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. + + 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). + + 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). + + 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). + + 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). + + 1. **Rancher API Host**: Enter the URL for your Rancher Server. + + 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. + + You can generate one using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. + + +1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. + + >**Note:** You may have to disable your popup blocker to see the IdP login page. + +**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. + +### SAML Provider Caveats + +If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. + +- There is no validation on users or groups when assigning permissions to them in Rancher. +- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. +- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. +- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. + +To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. 
+ +# Setting up OpenLDAP in Rancher + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. + +### OpenLDAP Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +### Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) Note that nested group membership is not available for Shibboleth. + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. + +# Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md new file mode 100644 index 0000000000..c6bc3b538e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -0,0 +1,70 @@ +--- +title: "Kubernetes Persistent Storage: Volumes and Storage Classes" +description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" +weight: 2031 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/ + - /rancher/v2.0-v2.4/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ +--- +When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. 
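+
+In practice, a workload requests storage through a persistent volume claim (PVC), which is then mounted as a volume in the workload. As a minimal sketch, assuming the cluster already has a default storage class or a matching persistent volume, such a claim might look like this (hypothetical name, namespace, and size):
+
+```
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: example-data
+  namespace: default
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+EOF
+```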
+ +The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +### Prerequisites + +To set up persistent storage, the `Manage Volumes` [role](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. + +If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](cluster-provisioning/rke-clusters/options/cloud-providers/) + +For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. + +### Setting up Existing Storage + +The overall workflow for setting up existing storage is as follows: + +1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. +2. Add a persistent volume (PV) that refers to the persistent storage. +3. Add a persistent volume claim (PVC) that refers to the PV. +4. Mount the PVC as a volume in your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md) + +### Dynamically Provisioning New Storage in Rancher + +The overall workflow for provisioning new storage is as follows: + +1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. +2. Add a persistent volume claim (PVC) that refers to the storage class. +3. Mount the PVC as a volume for your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) + +### Longhorn Storage + +[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. + +Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. + +If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. 
For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/1.0.2/what-is-longhorn/) + +### Provisioning Storage Examples + +We provide examples of how to provision storage with [NFS,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) [vSphere,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) and [Amazon's EBS.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +### GlusterFS Volumes + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md) + +### iSCSI Volumes + +In [Rancher Launched Kubernetes clusters](launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md) + +### hostPath Volumes +Before you create a hostPath volume, you need to set up an [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. + +### Related Links + +- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md new file mode 100644 index 0000000000..30fb906c5f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/creating-a-vsphere-cluster.md @@ -0,0 +1,16 @@ +--- +title: VSphere Node Template Configuration +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference + - /rancher/v2.0-v2.4/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids +--- + +The vSphere node templates in Rancher were updated in the following Rancher versions. 
Refer to the newest configuration reference that is less than or equal to your Rancher version: + +- [v2.3.3](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md) +- [v2.3.0](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md) +- [v2.2.0](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md) +- [v2.0.4](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md) + +For Rancher versions before v2.0.4, refer to [this version.](../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md new file mode 100644 index 0000000000..cc35e1a37f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-manager.md @@ -0,0 +1,16 @@ +--- +title: Deploying Rancher Server +weight: 100 +--- + +Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. + +- [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) +- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) +- [Azure](../getting-started/quick-start-guides/deploy-rancher-manager/azure.md) (uses Terraform) +- [GCP](../getting-started/quick-start-guides/deploy-rancher-manager/gcp.md) (uses Terraform) +- [Vagrant](../getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md) + +If you prefer, the following guide will take you through the same process in individual steps. Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. + +- [Manual Install](../getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md new file mode 100644 index 0000000000..cf89f3341a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/deploy-rancher-workloads.md @@ -0,0 +1,9 @@ +--- +title: Deploying Workloads +weight: 200 +--- + +These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. 
+ +- [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) +- [Workload with NodePort](../getting-started/quick-start-guides/deploy-workloads/nodeports.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/downstream-cluster-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md new file mode 100644 index 0000000000..b843f8027c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/enable-experimental-features.md @@ -0,0 +1,161 @@ +--- +title: Enabling Experimental Features +weight: 17 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/feature-flags/ + - /rancher/v2.0-v2.4/en/admin-settings/feature-flags/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](installation/options/feature-flags/enable-not-default-storage-drivers) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. + +The features can be enabled in three ways: + +- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. +- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) in Rancher v2.3.3+ by going to the **Settings** page. +- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. + +Each feature has two values: + +- A default value, which can be configured with a flag or environment variable from the command line +- A set value, which can be configured with the Rancher API or UI + +If no value has been set, Rancher uses the default value. + +Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. + +For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. + +> **Note:** As of v2.4.0, there are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. + +The following is a list of the feature flags available in Rancher: + +- `dashboard`: This feature enables the new experimental UI that has a new look and feel. The dashboard also leverages a new API in Rancher which allows the UI to access the default Kubernetes resources without any intervention from Rancher. 
+- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules](installation/options/feature-flags/istio-virtual-service-ui), which are traffic management features of Istio. +- `proxy`: This feature enables Rancher to use a new simplified code base for the proxy, which can help enhance performance and security. The proxy feature is known to have issues with Helm deployments, which prevents any catalog applications to be deployed which includes Rancher's tools like monitoring, logging, Istio, etc. +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.](installation/options/feature-flags/enable-not-default-storage-drivers) In other words, it enables types for storage providers and provisioners that are not enabled by default. + +The below table shows the availability and default value for feature flags in Rancher: + +| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | +| ----------------------------- | ------------- | ------------ | --------------- |---| +| `dashboard` | `true` | Experimental | v2.4.0 | x | +| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | +| `istio-virtual-service-ui` | `true` | GA | v2.3.2 | | +| `proxy` | `false` | Experimental | v2.4.0 | | +| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | + +# Enabling Features when Starting Rancher + +When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. + +> **Note:** Values set from the Rancher API will override the value passed in through the command line. + + + + +When installing Rancher with a Helm chart, use the `--features` option. In the below example, two features are enabled by passing the feature flag names names in a comma separated list: + +``` +helm install rancher-latest/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + +Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +### Rendering the Helm Chart for Air Gap Installations + +For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. + +The Helm 3 command is as follows: + +``` +helm template rancher ./rancher-.tgz --output-dir . 
\ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + +The Helm 2 command is as follows: + +``` +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' # Available as of v2.3.0 + --set 'extraEnv[0].value==true,=true' # Available as of v2.3.0 +``` + + + + + +When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +docker run -d -p 80:80 -p 443:443 \ + --restart=unless-stopped \ + rancher/rancher:rancher-latest \ + --features==true,=true # Available as of v2.3.0 +``` + + + + +# Enabling Features with the Rancher UI + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher UI + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** + +**Result:** The feature is disabled. + +# Enabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **True.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **False.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is disabled. 
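+
+The same change can also be scripted against the API. The example below is only a sketch using `curl`, with a placeholder server URL and API bearer token; you can confirm the exact request that Rancher expects with the **Show Request** button described above.
+
+```
+# Hypothetical example: enable the unsupported-storage-drivers feature via the v3 API.
+# Use "false" to disable the feature, or null to clear the set value so the
+# default value takes effect again.
+curl -s -X PUT \
+  -H "Authorization: Bearer token-xxxxx:yyyyyyyy" \
+  -H "Content-Type: application/json" \
+  -d '{"value": true}' \
+  https://rancher.example.com/v3/features/unsupported-storage-drivers
+```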
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md new file mode 100644 index 0000000000..0788be4908 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-charts-in-rancher.md @@ -0,0 +1,105 @@ +--- +title: Helm Charts in Rancher +weight: 12 +description: Rancher enables the use of catalogs to repeatedly deploy applications easily. Catalogs are GitHub or Helm Chart repositories filled with deployment-ready apps. +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/catalog/ + - /rancher/v2.0-v2.4/en/concepts/catalogs/ + - /rancher/v2.0-v2.4/en/tasks/global-configuration/catalog/ + - /rancher/v2.0-v2.4/en/catalog + - /rancher/v2.0-v2.4/en/catalog/apps +--- + +Rancher provides the ability to use a catalog of Helm charts that make it easy to repeatedly deploy applications. + +- **Catalogs** are GitHub repositories or Helm Chart repositories filled with applications that are ready-made for deployment. Applications are bundled in objects called _Helm charts_. +- **Helm charts** are a collection of files that describe a related set of Kubernetes resources. A single chart might be used to deploy something simple, like a memcached pod, or something complex, like a full web app stack with HTTP servers, databases, caches, and so on. + +Rancher improves on Helm catalogs and charts. All native Helm charts can work within Rancher, but Rancher adds several enhancements to improve their user experience. + +This section covers the following topics: + +- [Catalog scopes](#catalog-scopes) +- [Catalog Helm Deployment Versions](#catalog-helm-deployment-versions) +- [When to use Helm 3](#when-to-use-helm-3) +- [Helm 3 Backwards Compatibility](#helm-3-backwards-compatibility) +- [Built-in global catalogs](#built-in-global-catalogs) +- [Custom catalogs](#custom-catalogs) +- [Creating and launching applications](#creating-and-launching-applications) +- [Chart compatibility with Rancher](#chart-compatibility-with-rancher) +- [Global DNS](#global-dns) + +# Catalog Scopes + +Within Rancher, you can manage catalogs at three different scopes. Global catalogs are shared across all clusters and project. There are some use cases where you might not want to share catalogs between different clusters or even projects in the same cluster. By leveraging cluster and project scoped catalogs, you will be able to provide applications for specific teams without needing to share them with all clusters and/or projects. + +Scope | Description | Available As of | +--- | --- | --- | +Global | All clusters and all projects can access the Helm charts in this catalog | v2.0.0 | +Cluster | All projects in the specific cluster can access the Helm charts in this catalog | v2.2.0 | +Project | This specific cluster can access the Helm charts in this catalog | v2.2.0 | + +# Catalog Helm Deployment Versions + +_Applicable as of v2.4.0_ + +In November 2019, Helm 3 was released, and some features were deprecated or refactored. It is not fully [backwards compatible](helm-charts/legacy-catalogs/#helm-3-backwards-compatibility) with Helm 2. Therefore, catalogs in Rancher need to be separated, with each catalog only using one Helm version. 
This will help reduce app deployment issues as your Rancher users will not need to know which version of your chart is compatible with which Helm version - they can just select a catalog, select an app and deploy a version that has already been vetted for compatibility. + +When you create a custom catalog, you will have to configure the catalog to use either Helm 2 or Helm 3. This version cannot be changed later. If the catalog is added with the wrong Helm version, it will need to be deleted and re-added. + +When you launch a new app from a catalog, the app will be managed by the catalog's Helm version. A Helm 2 catalog will use Helm 2 to manage all of the apps, and a Helm 3 catalog will use Helm 3 to manage all apps. + +By default, catalogs are assumed to be deployed using Helm 2. If you run an app in Rancher before v2.4.0, then upgrade to Rancher v2.4.0+, the app will still be managed by Helm 2. If the app was already using a Helm 3 Chart (API version 2) it will no longer work in v2.4.0+. You must either downgrade the chart's API version or recreate the catalog to use Helm 3. + +Charts that are specific to Helm 2 should only be added to a Helm 2 catalog, and Helm 3 specific charts should only be added to a Helm 3 catalog. + +# When to use Helm 3 + +_Applicable as of v2.4.0_ + +- If you want to ensure that the security permissions are being pulled from the kubeconfig file +- If you want to utilize apiVersion `v2` features such as creating a library chart to reduce code duplication, or moving your requirements from the `requirements.yaml` into the `Chart.yaml` + +Overall Helm 3 is a movement towards a more standardized Kubernetes feel. As the Kubernetes community has evolved, standards and best practices have as well. Helm 3 is an attempt to adopt those practices and streamline how charts are maintained. + +# Helm 3 Backwards Compatibility + +_Applicable as of v2.4.0_ + +With the use of the OpenAPI schema to validate your rendered templates in Helm 3, you will find charts that worked in Helm 2 may not work in Helm 3. This will require you to update your chart templates to meet the new validation requirements. This is one of the main reasons support for Helm 2 and Helm 3 was provided starting in Rancher 2.4.x, as not all charts can be deployed immediately in Helm 3. + +Helm 3 does not create a namespace for you, so you will have to provide an existing one. This can cause issues if you have integrated code with Helm 2, as you will need to make code changes to ensure a namespace is being created and passed in for Helm 3. Rancher will continue to manage namespaces for Helm to ensure this does not impact your app deployment. + +apiVersion `v2` is now reserved for Helm 3 charts. This apiVersion enforcement could cause issues as older versions of Helm 2 did not validate the apiVersion in the `Chart.yaml` file. In general, your Helm 2 chart’s apiVersion should be set to `v1` and your Helm 3 chart’s apiVersion should be set to `v2`. You can install charts with apiVersion `v1` with Helm 3, but you cannot install `v2` charts into Helm 2. + +# Built-in Global Catalogs + +Within Rancher, there are default catalogs packaged as part of Rancher. These can be enabled or disabled by an administrator. 
For details, refer to the section on managing [built-in global catalogs.](catalog/built-in) + +# Custom Catalogs + +There are two types of catalogs in Rancher: [Built-in global catalogs](catalog/built-in/) and [custom catalogs.](catalog/adding-catalogs/) + +Any user can create custom catalogs to add into Rancher. Custom catalogs can be added into Rancher at the global level, cluster level, or project level. For details, refer to the [section on adding custom catalogs](catalog/adding-catalogs) and the [catalog configuration reference.](catalog/catalog-config) + +# Creating and Launching Applications + +In Rancher, applications are deployed from the templates in a catalog. This section covers the following topics: + +* [Multi-cluster applications](catalog/multi-cluster-apps/) +* [Creating catalog apps](catalog/creating-apps) +* [Launching catalog apps within a project](catalog/launching-apps) +* [Managing catalog apps](catalog/managing-apps) +* [Tutorial: Example custom chart creation](catalog/tutorial) + +# Chart Compatibility with Rancher + +Charts now support the fields `rancher_min_version` and `rancher_max_version` in the [`questions.yml` file](https://github.com/rancher/integration-test-charts/blob/master/charts/chartmuseum/v1.6.0/questions.yml) to specify the versions of Rancher that the chart is compatible with. When using the UI, only app versions that are valid for the version of Rancher running will be shown. API validation is done to ensure apps that don't meet the Rancher requirements cannot be launched. An app that is already running will not be affected on a Rancher upgrade if the newer Rancher version does not meet the app's requirements. + +# Global DNS + +_Available as v2.2.0_ + +When creating applications that span multiple Kubernetes clusters, a Global DNS entry can be created to route traffic to the endpoints in all of the different clusters. An external DNS server will need be programmed to assign a fully qualified domain name (a.k.a FQDN) to your application. Rancher will use the FQDN you provide and the IP addresses where your application is running to program the DNS. Rancher will gather endpoints from all the Kubernetes clusters running your application and program the DNS. + +For more information on how to use this feature, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md new file mode 100644 index 0000000000..04bf72fd0e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm-rancher.md @@ -0,0 +1,227 @@ +--- +title: "4. Install Rancher" +weight: 200 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher + - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-rancher/ +--- + +Rancher installation is managed using the Helm package manager for Kubernetes. Use `helm` to install the prerequisite and charts to install Rancher. + +For systems without direct internet access, see [Air Gap: Kubernetes install](installation/air-gap-installation/install-rancher/). + +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. 
In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) + +### Add the Helm Chart Repository + +Use the `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +There are three recommended options for the source of the certificate. + +> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +|-----|-----|-----|-----| +| [Rancher Generated Certificates](#rancher-generated-certificates) | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
    This is the **default** | [yes](#optional-install-cert-manager) | +| [Let’s Encrypt](#let-s-encrypt) | `ingress.tls.source=letsEncrypt` | Use [Let's Encrypt](https://letsencrypt.org/) to issue a certificate | [yes](#optional-install-cert-manager) | +| [Certificates from Files](#certificates-from-files) | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s) | no | + +### Optional: Install cert-manager + +**Note:** cert-manager is only required for certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) and Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). You should skip this step if you are using your own certificate files (option `ingress.tls.source=secret`) or if you use [TLS termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination). + +> **Important:** +> Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. + +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). + +Rancher relies on [cert-manager](https://github.com/jetstack/cert-manager) to issue certificates from Rancher's own generated CA or to request Let's Encrypt certificates. + +These instructions are adapted from the [official cert-manager documentation](https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html#installing-with-helm). + + +1. Install the CustomResourceDefinition resources separately + ```plain + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.9/deploy/manifests/00-crds.yaml + ``` + +1. Create the namespace for cert-manager + ```plain + kubectl create namespace cert-manager + ``` + +1. Label the cert-manager namespace to disable resource validation + ```plain + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + ``` + +1. Add the Jetstack Helm repository + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + ```plain + helm repo update + ``` + +1. Install the cert-manager Helm chart + ```plain + helm install \ + --name cert-manager \ + --namespace cert-manager \ + --version v0.14.2 \ + jetstack/cert-manager + ``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m +cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m +cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m +``` + +If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check the [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. + +
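+
+If you want a slightly broader sanity check before moving on, the commands below are a minimal sketch (they assume the cert-manager release installed above; older releases register CustomResourceDefinitions under the `certmanager.k8s.io` API group, newer ones under `cert-manager.io`):
+
+```
+# The CustomResourceDefinitions applied in step 1 should be registered
+kubectl get crd | grep -E 'certmanager\.k8s\.io|cert-manager\.io'
+
+# The cert-manager, webhook and cainjector deployments should all report as available
+kubectl -n cert-manager get deploy
+```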
    + +#### Rancher Generated Certificates + +> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. + +The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set the `hostname` to the DNS name you pointed at your load balancer. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +#### Let's Encrypt + +> **Note:** You need to have [cert-manager](#optional-install-cert-manager) installed before proceeding. + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. This configuration uses HTTP validation (`HTTP-01`) so the load balancer must have a public DNS record and be accessible from the internet. + +In the following command, + +- Set `hostname` to the public DNS record that resolves to your load balancer. +- Set `ingress.tls.source` to `letsEncrypt`. +- Set `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices). +- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org \ + --set letsEncrypt.ingress.class=nginx +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +#### Certificates from Files + +Create Kubernetes secrets from your own certificates for Rancher to use. + + +> **Note:** The `Common Name` or a `Subject Alternative Names` entry in the server certificate must match the `hostname` option, or the ingress controller will fail to configure correctly. Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers/applications. If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +- Set `hostname` and set `ingress.tls.source` to `secret`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. 
+ +``` +helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret +``` + +If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: + +``` +helm install rancher-/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret + --set privateCA=true +``` + +Now that Rancher is deployed, see [Adding TLS Secrets](installation/options/helm2/helm-rancher/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. + +### Advanced Configurations + +The Rancher chart configuration has many options for customizing the install to suit your specific environment. Here are some common advanced scenarios. + +* [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md) +* [Private Docker Image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +* [TLS Termination on an External Load Balancer](installation/options/helm2/helm-rancher/chart-options/#external-tls-termination) + +See the [Chart Options](installation/options/helm2/helm-rancher/chart-options/) for the full list of options. + +### Save your options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it you should have a functional Rancher server. Point a browser at the hostname you picked and you should be greeted by the colorful login page. + +Doesn't work? Take a look at the [Troubleshooting](installation/options/helm2/helm-rancher/troubleshooting/) Page diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md new file mode 100644 index 0000000000..02b9078dd0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-create-nodes-lb.md @@ -0,0 +1,33 @@ +--- +title: "1. Create Nodes and Load Balancer" +weight: 185 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/create-nodes-lb + - /rancher/v2.x/en/installation/resources/advanced/helm2/create-nodes-lb/ +--- + +Use your provider of choice to provision 3 nodes and a Load Balancer endpoint for your RKE install. + +> **Note:** These nodes must be in the same region/datacenter. You may place these servers in separate availability zones. + +### Node Requirements + +View the supported operating systems and hardware/software/networking requirements for nodes running Rancher at [Node Requirements](installation-requirements.md). 
+ +View the OS requirements for RKE at [RKE Requirements](https://rancher.com/docs/rke/latest/en/os/) + +### Load Balancer + +RKE will configure an Ingress controller pod, on each of your nodes. The Ingress controller pods are bound to ports TCP/80 and TCP/443 on the host network and are the entry point for HTTPS traffic to the Rancher server. + +Configure a load balancer as a basic Layer 4 TCP forwarder. The exact configuration will vary depending on your environment. + +>**Important:** +>Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +#### Examples + +* [Nginx](installation/options/helm2/create-nodes-lb/nginx/) +* [Amazon NLB](installation/options/helm2/create-nodes-lb/nlb/) + +### [Next: Install Kubernetes with RKE](installation/options/helm2/kubernetes-rke/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md new file mode 100644 index 0000000000..83c576d5b4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-helm-init.md @@ -0,0 +1,69 @@ +--- +title: "Initialize Helm: Install the Tiller Service" +description: "With Helm, you can create configurable deployments instead of using static files. In order to use Helm, the Tiller service needs to be installed on your cluster." +weight: 195 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-init + - /rancher/v2.x/en/installation/resources/advanced/helm2/helm-init/ +--- + +Helm is the package management tool of choice for Kubernetes. Helm "charts" provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh/). To be able to use Helm, the server-side component `tiller` needs to be installed on your cluster. + +For systems without direct internet access, see [Helm - Air Gap](air-gapped-helm-cli-install.md) for install details. + +Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +> **Note:** The installation instructions assume you are using Helm 2. The instructions will be updated for Helm 3 soon. In the meantime, if you want to use Helm 3, refer to [these instructions.](https://github.com/ibrokethecloud/rancher-helm3) + +### Install Tiller on the Cluster + +> **Important:** Due to an issue with Helm v2.12.0 and cert-manager, please use Helm v2.12.1 or higher. + +Helm installs the `tiller` service on your cluster to manage charts. Since RKE enables RBAC by default we will need to use `kubectl` to create a `serviceaccount` and `clusterrolebinding` so `tiller` has permission to deploy to the cluster. + +* Create the `ServiceAccount` in the `kube-system` namespace. +* Create the `ClusterRoleBinding` to give the `tiller` account access to the cluster. 
+* Finally use `helm` to install the `tiller` service + +```plain +kubectl -n kube-system create serviceaccount tiller + +kubectl create clusterrolebinding tiller \ + --clusterrole=cluster-admin \ + --serviceaccount=kube-system:tiller + +helm init --service-account tiller + +# Users in China: You will need to specify a specific tiller-image in order to initialize tiller. +# The list of tiller image tags are available here: https://dev.aliyun.com/detail.html?spm=5176.1972343.2.18.ErFNgC&repoId=62085. +# When initializing tiller, you'll need to pass in --tiller-image + +helm init --service-account tiller \ +--tiller-image registry.cn-hangzhou.aliyuncs.com/google_containers/tiller: +``` + +> **Note:** This`tiller`install has full cluster access, which should be acceptable if the cluster is dedicated to Rancher server. Check out the [helm docs](https://docs.helm.sh/using_helm/#role-based-access-control) for restricting `tiller` access to suit your security requirements. + +### Test your Tiller installation + +Run the following command to verify the installation of `tiller` on your cluster: + +``` +kubectl -n kube-system rollout status deploy/tiller-deploy +Waiting for deployment "tiller-deploy" rollout to finish: 0 of 1 updated replicas are available... +deployment "tiller-deploy" successfully rolled out +``` + +And run the following command to validate Helm can talk to the `tiller` service: + +``` +helm version +Client: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} +Server: &version.Version{SemVer:"v2.12.1", GitCommit:"02a47c7249b1fc6d8fd3b94e6b4babf9d818144e", GitTreeState:"clean"} +``` + +### Issues or errors? + +See the [Troubleshooting](installation/options/helm2/helm-init/troubleshooting/) page. + +### [Next: Install Rancher](installation/options/helm2/helm-rancher/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md new file mode 100644 index 0000000000..fc3f72ddab --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-kubernetes-rke.md @@ -0,0 +1,135 @@ +--- +title: "2. Install Kubernetes with RKE" +weight: 190 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/kubernetes-rke + - /rancher/v2.x/en/installation/resources/advanced/helm2/kubernetes-rke/ +--- + +Use RKE to install Kubernetes with a high availability etcd configuration. + +>**Note:** For systems without direct internet access see [Air Gap: Kubernetes install](installation/air-gap-high-availability/) for install details. + +### Create the `rancher-cluster.yml` File + +Using the sample below create the `rancher-cluster.yml` file. Replace the IP Addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. + +> **Note:** If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. 
+ + +```yaml +nodes: + - address: 165.227.114.63 + internal_address: 172.16.22.12 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 165.227.116.167 + internal_address: 172.16.32.37 + user: ubuntu + role: [controlplane,worker,etcd] + - address: 165.227.127.226 + internal_address: 172.16.42.73 + user: ubuntu + role: [controlplane,worker,etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h +``` + +#### Common RKE Nodes Options + +| Option | Required | Description | +| --- | --- | --- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | + +#### Advanced Configurations + +RKE has many configuration options for customizing the install to suit your specific environment. + +Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. + +For tuning your etcd cluster for larger Rancher installations see the [etcd settings guide](installation/options/etcd/). + +### Run RKE + +``` +rke up --config ./rancher-cluster.yml +``` + +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + +### Testing Your Cluster + +RKE should have created a file `kube_config_rancher-cluster.yml`. This file has the credentials for `kubectl` and `helm`. + +> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. + +You can copy this file to `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_rancher-cluster.yml`. + +``` +export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml +``` + +Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state. + +``` +kubectl get nodes + +NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 +``` + +### Check the Health of Your Cluster Pods + +Check that all the required pods and containers are healthy are ready to continue. + +* Pods are in `Running` or `Completed` state. +* `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` +* Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
+ +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s +kube-system canal-jp4hz 3/3 Running 0 30s +kube-system canal-z2hg8 3/3 Running 0 30s +kube-system canal-z6kpw 3/3 Running 0 30s +kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s +kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s +kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s +kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s +kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s +kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s +kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s +``` + +### Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](installation/options/helm2/kubernetes-rke/troubleshooting/) page. + +### [Next: Initialize Helm (Install tiller)](installation/options/helm2/helm-init/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md new file mode 100644 index 0000000000..d345c76b86 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-4-lb.md @@ -0,0 +1,403 @@ +--- +title: Kubernetes Install with External Load Balancer (TCP/Layer 4) +weight: 275 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-4-lb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-4-lb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). +> +>If you are currently using the RKE add-on install method, see [Migrating from a High-availability Kubernetes install with an RKE add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 4 load balancer (TCP) +- [NGINX ingress controller with SSL termination (HTTPS)](https://kubernetes.github.io/ingress-nginx/) + +In a Kubernetes setup that uses a layer 4 load balancer, the load balancer accepts Rancher client connections over the TCP/UDP protocols (i.e., the transport level). The load balancer then forwards these connections to individual cluster nodes without reading the request itself. Because the load balancer cannot read the packets it's forwarding, the routing decisions it can make are limited. + +Kubernetes Rancher install with layer 4 load balancer, depicting SSL termination at ingress controllers +![High-availability Kubernetes installation of Rancher](/img/ha/rancher2ha.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +
    + +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements](installation-requirements.md). + +## 2. Configure Load Balancer + +We will be using NGINX as our Layer 4 Load Balancer (TCP). NGINX will forward all connections to one of your Rancher nodes. If you want to use Amazon NLB, you can skip this step and use [Amazon NLB configuration](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb.md) + +>**Note:** +> In this configuration, the load balancer is positioned in front of your Linux hosts. The load balancer can be any host that you have available that's capable of running NGINX. +> +>One caveat: do not use one of your Rancher nodes as the load balancer. + +### A. Install NGINX + +Start by installing NGINX on your load balancer host. NGINX has packages available for all known operating systems. For help installing NGINX, refer to their [install documentation](https://www.nginx.com/resources/wiki/start/topics/tutorials/install/). + +The `stream` module is required, which is present when using the official NGINX packages. Please refer to your OS documentation how to install and enable the NGINX `stream` module on your operating system. + +### B. Create NGINX Configuration + +After installing NGINX, you need to update the NGINX config file, `nginx.conf`, with the IP addresses for your nodes. + +1. Copy and paste the code sample below into your favorite text editor. Save it as `nginx.conf`. + +2. From `nginx.conf`, replace `IP_NODE_1`, `IP_NODE_2`, and `IP_NODE_3` with the IPs of your [Linux hosts](#1-provision-linux-hosts). + + >**Note:** This Nginx configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + + **Example NGINX config:** + ``` + worker_processes 4; + worker_rlimit_nofile 40000; + + events { + worker_connections 8192; + } + + http { + server { + listen 80; + return 301 https://$host$request_uri; + } + } + + stream { + upstream rancher_servers { + least_conn; + server IP_NODE_1:443 max_fails=3 fail_timeout=5s; + server IP_NODE_2:443 max_fails=3 fail_timeout=5s; + server IP_NODE_3:443 max_fails=3 fail_timeout=5s; + } + server { + listen 443; + proxy_pass rancher_servers; + } + } + ``` + +3. Save `nginx.conf` to your load balancer at the following path: `/etc/nginx/nginx.conf`. + +4. Load the updates to your NGINX configuration by running the following command: + + ``` + # nginx -s reload + ``` + +### Option - Run NGINX as Docker container + +Instead of installing NGINX as a package on the operating system, you can rather run it as a Docker container. Save the edited **Example NGINX config** as `/etc/nginx.conf` and run the following command to launch the NGINX container: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/nginx.conf:/etc/nginx/nginx.conf \ + nginx:1.14 +``` + +## 3. Configure DNS + +Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

+ +1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer). + +2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN: + + `nslookup HOSTNAME.DOMAIN.COM` + + **Step Result:** Terminal displays output similar to the following: + + ``` + $ nslookup rancher.yourdomain.com + Server: YOUR_HOSTNAME_IP_ADDRESS + Address: YOUR_HOSTNAME_IP_ADDRESS#53 + + Non-authoritative answer: + Name: rancher.yourdomain.com + Address: HOSTNAME.DOMAIN.COM + ``` + +
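+
+If `nslookup` is not available on your workstation, `dig` gives an equivalent check (a quick alternative, assuming the `dig` utility is installed and `rancher.yourdomain.com` is the FQDN you chose above):
+
+```
+dig +short rancher.yourdomain.com
+# The output should be the IP address of your load balancer
+```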
+ +## 4. Install RKE + +RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher. + +1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions. + +2. Confirm that RKE is now executable by running the following command: + + ``` + rke --version + ``` + +## 5. Download RKE Config File Template + +RKE uses a `.yml` config file to install and configure your Kubernetes cluster. There are 2 templates to choose from, depending on the SSL certificate you want to use. + +1. Download one of the following templates, depending on the SSL certificate you're using. + + - [Template for self-signed certificate
    `3-node-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate.yml) + - [Template for certificate signed by recognized CA
    `3-node-certificate-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-certificate-recognizedca.yml) + + >**Advanced Config Options:** + > + >- Want records of all transactions with the Rancher API? Enable the [API Auditing](installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file](installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) >for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +
    + Option A—Bring Your Own Certificate: Self-Signed + +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in PEM format. +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. + +1. In `kind: Secret` with `name: cattle-keys-ingress`: + + * Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) + * Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + + >**Note:** + > The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-ingress + namespace: cattle-system + type: Opaque + data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + ``` + +2. In `kind: Secret` with `name: cattle-keys-server`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`). + + >**Note:** + > The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. 
+ + + **Step Result:** The file should look like the example below (the base64 encoded string should be different): + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + ``` + +
    + +
    + Option B—Bring Your Own Certificate: Signed by Recognized CA + +If you are using a Certificate Signed By A Recognized Certificate Authority, you will need to generate a base64 encoded string for the Certificate file and the Certificate Key file. Make sure that your certificate file includes all the intermediate certificates in the chain, the order of certificates in this case is first your own certificate, followed by the intermediates. Please refer to the documentation of your CSP (Certificate Service Provider) to see what intermediate certificate(s) need to be included. + +In the `kind: Secret` with `name: cattle-keys-ingress`: + +* Replace `` with the base64 encoded string of the Certificate file (usually called `cert.pem` or `domain.crt`) +* Replace `` with the base64 encoded string of the Certificate Key file (usually called `key.pem` or `domain.key`) + +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + +>**Note:** +> The base64 encoded string should be on the same line as `tls.crt` or `tls.key`, without any newline at the beginning, in between or at the end. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cattle-keys-ingress + namespace: cattle-system +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1RENDQWN5Z0F3SUJBZ0lKQUlHc25NeG1LeGxLTUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NVGd3TlRBMk1qRXdOREE1V2hjTk1UZ3dOekExTWpFd05EQTVXakFXTVJRdwpFZ1lEVlFRRERBdG9ZUzV5Ym1Ob2NpNXViRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTFJlMXdzekZSb2Rib2pZV05DSHA3UkdJaUVIMENDZ1F2MmdMRXNkUUNKZlcrUFEvVjM0NnQ3bSs3TFEKZXJaV3ZZMWpuY2VuWU5JSGRBU0VnU0ducWExYnhUSU9FaE0zQXpib3B0WDhjSW1OSGZoQlZETGdiTEYzUk0xaQpPM1JLTGdIS2tYSTMxZndjbU9zWGUwaElYQnpUbmxnM20vUzlXL3NTc0l1dDVwNENDUWV3TWlpWFhuUElKb21lCmpkS3VjSHFnMTlzd0YvcGVUalZrcVpuMkJHazZRaWFpMU41bldRV0pjcThTenZxTTViZElDaWlwYU9hWWQ3RFEKYWRTejV5dlF0YkxQNW4wTXpnOU43S3pGcEpvUys5QWdkWDI5cmZqV2JSekp3RzM5R3dRemN6VWtLcnZEb05JaQo0UFJHc01yclFNVXFSYjRSajNQOEJodEMxWXNDQXdFQUFhTTVNRGN3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFCkJBTUNCZUF3SFFZRFZSMGxCQll3RkFZSUt3WUJCUVVIQXdJR0NDc0dBUVVGQndNQk1BMEdDU3FHU0liM0RRRUIKQ3dVQUE0SUJBUUNKZm5PWlFLWkowTFliOGNWUW5Vdi9NZkRZVEJIQ0pZcGM4MmgzUGlXWElMQk1jWDhQRC93MgpoOUExNkE4NGNxODJuQXEvaFZYYy9JNG9yaFY5WW9jSEg5UlcvbGthTUQ2VEJVR0Q1U1k4S292MHpHQ1ROaDZ6Ci9wZTNqTC9uU0pYSjRtQm51czJheHFtWnIvM3hhaWpYZG9kMmd3eGVhTklvRjNLbHB2aGU3ZjRBNmpsQTM0MmkKVVlCZ09iN1F5KytRZWd4U1diSmdoSzg1MmUvUUhnU2FVSkN6NW1sNGc1WndnNnBTUXhySUhCNkcvREc4dElSYwprZDMxSk1qY25Fb1Rhc1Jyc1NwVmNGdXZyQXlXN2liakZyYzhienBNcE1obDVwYUZRcEZzMnIwaXpZekhwakFsCk5ZR2I2OHJHcjBwQkp3YU5DS2ErbCtLRTk4M3A3NDYwCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdEY3WEN6TVZHaDF1aU5oWTBJZW50RVlpSVFmUUlLQkMvYUFzU3gxQUlsOWI0OUQ5ClhmanEzdWI3c3RCNnRsYTlqV09keDZkZzBnZDBCSVNCSWFlcHJWdkZNZzRTRXpjRE51aW0xZnh3aVkwZCtFRlUKTXVCc3NYZEV6V0k3ZEVvdUFjcVJjamZWL0J5WTZ4ZDdTRWhjSE5PZVdEZWI5TDFiK3hLd2k2M21uZ0lKQjdBeQpLSmRlYzhnbWlaNk4wcTV3ZXFEWDJ6QVgrbDVPTldTcG1mWUVhVHBDSnFMVTNtZFpCWWx5cnhMTytvemx0MGdLCktLbG81cGgzc05CcDFMUG5LOUMxc3MvbWZRek9EMDNzck1Xa21oTDcwQ0IxZmIydCtOWnRITW5BYmYwYkJETnoKTlNRcXU4T2cwaUxnOUVhd3l1dEF4U3BGdmhHUGMvd0dHMExWaXdJREFRQUJBb0lCQUJKYUErOHp4MVhjNEw0egpwUFd5bDdHVDRTMFRLbTNuWUdtRnZudjJBZXg5WDFBU2wzVFVPckZyTnZpK2xYMnYzYUZoSFZDUEN4N1RlMDVxClhPa2JzZnZkZG5iZFQ2RjgyMnJleVByRXNINk9TUnBWSzBmeDVaMDQwVnRFUDJCWm04eTYyNG1QZk1vbDdya2MKcm9Kd09rOEVpUHZZekpsZUd0bTAwUm1sRysyL2c0aWJsOTVmQXpyc1MvcGUyS3ZoN2NBVEtIcVh6MjlpUmZpbApiTGhBamQwcEVSMjNYU0hHR1ZqRmF3amNJK1c2L2RtbDZURDhrSzFGaUtldmJKTlREeVNXQnpPbXRTYUp1K01JCm9iUnVWWG4yZVNoamVGM1BYcHZRMWRhNXdBa0dJQWxOWjRHTG5QU2ZwVmJyU0plU3RrTGNzdEJheVlJS3BWZVgKSVVTTHM0RUNnWUVBMmNnZUE2WHh0TXdFNU5QWlNWdGhzbXRiYi9YYmtsSTdrWHlsdk5zZjFPdXRYVzkybVJneQpHcEhUQ0VubDB0Z1p3T081T1FLNjdFT3JUdDBRWStxMDJzZndwcmgwNFZEVGZhcW5QNTBxa3BmZEJLQWpmanEyCjFoZDZMd2hLeDRxSm9aelp2VkowV0lvR1ZLcjhJSjJOWGRTUVlUanZUZHhGczRTamdqNFFiaEVDZ1lFQTFBWUUKSEo3eVlza2EvS2V2OVVYbmVrSTRvMm5aYjJ1UVZXazRXSHlaY2NRN3VMQVhGY3lJcW5SZnoxczVzN3RMTzJCagozTFZNUVBzazFNY25oTTl4WE4vQ3ZDTys5b2t0RnNaMGJqWFh6NEJ5V2lFNHJPS1lhVEFwcDVsWlpUT3ZVMWNyCm05R3NwMWJoVDVZb2RaZ3IwUHQyYzR4U2krUVlEWnNFb2lFdzNkc0NnWUVBcVJLYWNweWZKSXlMZEJjZ0JycGkKQTRFalVLMWZsSjR3enNjbGFKUDVoM1NjZUFCejQzRU1YT0kvSXAwMFJsY3N6em83N3cyMmpud09mOEJSM0RBMwp6ZTRSWDIydWw4b0hGdldvdUZOTTNOZjNaNExuYXpVc0F0UGhNS2hRWGMrcEFBWGthUDJkZzZ0TU5PazFxaUNHCndvU212a1BVVE84b1ViRTB1NFZ4ZmZFQ2dZQUpPdDNROVNadUlIMFpSSitIV095enlOQTRaUEkvUkhwN0RXS1QKajVFS2Y5VnR1OVMxY1RyOTJLVVhITXlOUTNrSjg2OUZPMnMvWk85OGg5THptQ2hDTjhkOWN6enI5SnJPNUFMTApqWEtBcVFIUlpLTFgrK0ZRcXZVVlE3cTlpaHQyMEZPb3E5OE5SZDMzSGYxUzZUWDNHZ3RWQ21YSml6dDAxQ3ZHCmR4VnVnd0tCZ0M2Mlp0b0RLb3JyT2hvdTBPelprK2YwQS9rNDJBOENiL29VMGpwSzZtdmxEWmNYdUF1QVZTVXIKNXJCZjRVYmdVYndqa1ZWSFR6LzdDb1BWSjUvVUxJWk1Db1RUNFprNTZXWDk4ZE93Q3VTVFpZYnlBbDZNS1BBZApTZEpuVVIraEpnSVFDVGJ4K1dzYnh2d0FkbWErWUhtaVlPRzZhSklXMXdSd1VGOURLUEhHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +``` + +
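+
+If you still need to produce the base64 encoded strings used in either option, the commands below are a minimal sketch (they assume your files are named `cert.pem`, `key.pem`, and `ca.pem`; substitute your own file names):
+
+```
+# Encode each file as a single base64 line with no trailing newline
+cat cert.pem | base64 | tr -d '\n' > cert.base64
+cat key.pem  | base64 | tr -d '\n' > key.base64
+cat ca.pem   | base64 | tr -d '\n' > ca.base64
+```
+
+Paste the contents of each output file onto the same line as `tls.crt`, `tls.key`, or `cacerts.pem` respectively, without adding any line breaks before, within, or after the string.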
    + + + +## 8. Configure FQDN + +There are two references to `` in the config file (one in this step and one in the next). Both need to be replaced with the FQDN chosen in [Configure DNS](#3-configure-dns). + +In the `kind: Ingress` with `name: cattle-ingress-http`: + +* Replace `` with the FQDN chosen in [Configure DNS](#3-configure-dns). + +After replacing `` with the FQDN chosen in [Configure DNS](#3-configure-dns), the file should look like the example below (`rancher.yourdomain.com` is the FQDN used in this example): + +```yaml + --- + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + tls: + - secretName: cattle-keys-ingress + hosts: + - rancher.yourdomain.com +``` + +Save the `.yml` file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. Back Up Your RKE Config File + +After you close your `.yml` file, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter one of the `rke up` commands listen below. + +``` +rke up --config rancher-cluster.yml +``` + +**Step Result:** The output should be similar to the snippet below: + +``` +INFO[0000] Building Kubernetes cluster +INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] +INFO[0000] [network] Deploying port listener containers +INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] +... +INFO[0101] Finished building Kubernetes cluster successfully +``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the RKE binary. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? + +You have a couple of options: + +- Create a backup of your Rancher Server in case of a disaster scenario: [High Availability Back Up and Restore](installation/backups-and-restoration/ha-backup-and-restoration). +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md). 
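+
+Before moving on, you can optionally confirm that the new cluster responds using the `kube_config_rancher-cluster.yml` file generated during the install (a quick sanity check, assuming `kubectl` is installed on your workstation):
+
+```
+export KUBECONFIG=$(pwd)/kube_config_rancher-cluster.yml
+kubectl get nodes
+kubectl -n cattle-system get pods
+```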
+ +
    + +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md new file mode 100644 index 0000000000..a410512521 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-layer-7-lb.md @@ -0,0 +1,294 @@ +--- +title: Kubernetes Install with External Load Balancer (HTTPS/Layer 7) +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/ha-server-install-external-lb/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/layer-7-lb + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/layer-7-lb/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +This procedure walks you through setting up a 3-node cluster using the Rancher Kubernetes Engine (RKE). The cluster's sole purpose is running pods for Rancher. The setup is based on: + +- Layer 7 Loadbalancer with SSL termination (HTTPS) +- [NGINX Ingress controller (HTTP)](https://kubernetes.github.io/ingress-nginx/) + +In an Kubernetes setup that uses a layer 7 load balancer, the load balancer accepts Rancher client connections over the HTTP protocol (i.e., the application level). This application-level access allows the load balancer to read client requests and then redirect to them to cluster nodes using logic that optimally distributes load. + +Kubernetes Rancher install with layer 7 load balancer, depicting SSL termination at load balancer +![Rancher HA](/img/ha/rancher2ha-l7.svg) + +## Installation Outline + +Installation of Rancher in a high-availability configuration involves multiple procedures. Review this outline to learn about each procedure you need to complete. + + + +- [1. Provision Linux Hosts](#1-provision-linux-hosts) +- [2. Configure Load Balancer](#2-configure-load-balancer) +- [3. Configure DNS](#3-configure-dns) +- [4. Install RKE](#4-install-rke) +- [5. Download RKE Config File Template](#5-download-rke-config-file-template) +- [6. Configure Nodes](#6-configure-nodes) +- [7. Configure Certificates](#7-configure-certificates) +- [8. Configure FQDN](#8-configure-fqdn) +- [9. Configure Rancher version](#9-configure-rancher-version) +- [10. Back Up Your RKE Config File](#10-back-up-your-rke-config-file) +- [11. Run RKE](#11-run-rke) +- [12. Back Up Auto-Generated Config File](#12-back-up-auto-generated-config-file) + + + +## 1. Provision Linux Hosts + +Provision three Linux hosts according to our [Requirements](installation-requirements.md). + +## 2. Configure Load Balancer + +When using a load balancer in front of Rancher, there's no need for the container to redirect port communication from port 80 or port 443. By passing the header `X-Forwarded-Proto: https`, this redirect is disabled. This is the expected configuration when terminating SSL externally. 
+
+The load balancer has to be configured to support the following:
+
+* **WebSocket** connections
+* **SPDY** / **HTTP/2** protocols
+* Passing / setting the following headers:
+
+| Header | Value | Description |
+|---------------------|------------------------------|:------------|
+| `Host` | FQDN used to reach Rancher. | To identify the server requested by the client. |
+| `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer. **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. |
+| `X-Forwarded-Port` | Port used to reach Rancher. | To identify the port that the client used to connect to the load balancer. |
+| `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. |
+
+Health checks can be executed on the `/healthz` endpoint of the node; this endpoint returns HTTP 200.
+
+We have example configurations for the following load balancers:
+
+* [Amazon ALB configuration](alb/)
+* [NGINX configuration](nginx/)
+
+## 3. Configure DNS
+
+Choose a fully qualified domain name (FQDN) that you want to use to access Rancher (e.g., `rancher.yourdomain.com`).

+
+1. Log into your DNS server and create a `DNS A` record that points to the IP address of your [load balancer](#2-configure-load-balancer).
+
+2. Validate that the `DNS A` record is working correctly. Run the following command from any terminal, replacing `HOSTNAME.DOMAIN.COM` with your chosen FQDN:
+
+    `nslookup HOSTNAME.DOMAIN.COM`
+
+    **Step Result:** Terminal displays output similar to the following:
+
+    ```
+    $ nslookup rancher.yourdomain.com
+    Server:         YOUR_HOSTNAME_IP_ADDRESS
+    Address:        YOUR_HOSTNAME_IP_ADDRESS#53
+
+    Non-authoritative answer:
+    Name:   rancher.yourdomain.com
+    Address: IP_ADDRESS_OF_LOAD_BALANCER
+    ```
+
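+If `nslookup` is not available on your workstation, `dig` provides the same check (again substituting your own FQDN):
+
+```
+dig +short rancher.yourdomain.com
+```
+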
+
+## 4. Install RKE
+
+RKE (Rancher Kubernetes Engine) is a fast, versatile Kubernetes installer that you can use to install Kubernetes on your Linux hosts. We will use RKE to set up our cluster and run Rancher.
+
+1. Follow the [RKE Install](https://rancher.com/docs/rke/latest/en/installation) instructions.
+
+2. Confirm that RKE is now executable by running the following command:
+
+    ```
+    rke --version
+    ```
+
+## 5. Download RKE Config File Template
+
+RKE uses a YAML config file to install and configure your Kubernetes cluster. There are two templates to choose from, depending on the SSL certificate you want to use.
+
+1. Download one of the following templates, depending on the SSL certificate you're using.
+
+    - [Template for self-signed certificate
    `3-node-externalssl-certificate.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-certificate.yml) + - [Template for certificate signed by recognized CA
    `3-node-externalssl-recognizedca.yml`](https://raw.githubusercontent.com/rancher/rancher/master/rke-templates/3-node-externalssl-recognizedca.yml) + + >**Advanced Config Options:** + > + >- Want records of all transactions with the Rancher API? Enable the [API Auditing](installation/api-auditing) feature by editing your RKE config file. For more information, see how to enable it in [your RKE config file](installation/options/helm2/rke-add-on/api-auditing/). + >- Want to know the other config options available for your RKE template? See the [RKE Documentation: Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + + +2. Rename the file to `rancher-cluster.yml`. + +## 6. Configure Nodes + +Once you have the `rancher-cluster.yml` config file template, edit the nodes section to point toward your Linux hosts. + +1. Open `rancher-cluster.yml` in your favorite text editor. + +1. Update the `nodes` section with the information of your [Linux hosts](#1-provision-linux-hosts). + + For each node in your cluster, update the following placeholders: `IP_ADDRESS_X` and `USER`. The specified user should be able to access the Docker socket, you can test this by logging in with the specified user and run `docker ps`. + + >**Note:** + > + >When using RHEL/CentOS, the SSH user can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565. See [Operating System Requirements](https://rancher.com/docs/rke/latest/en/installation/os#redhat-enterprise-linux-rhel-centos) for RHEL/CentOS specific requirements. + + nodes: + # The IP address or hostname of the node + - address: IP_ADDRESS_1 + # User that can login to the node and has access to the Docker socket (i.e. can execute `docker ps` on the node) + # When using RHEL/CentOS, this can't be root due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 + user: USER + role: [controlplane,etcd,worker] + # Path the SSH key that can be used to access to node with the specified user + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_2 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + - address: IP_ADDRESS_3 + user: USER + role: [controlplane,etcd,worker] + ssh_key_path: ~/.ssh/id_rsa + +1. **Optional:** By default, `rancher-cluster.yml` is configured to take backup snapshots of your data. To disable these snapshots, change the `backup` directive setting to `false`, as depicted below. + + services: + etcd: + backup: false + +## 7. Configure Certificates + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +Choose from the following options: + +
    + Option A—Bring Your Own Certificate: Self-Signed + +>**Prerequisites:** +>Create a self-signed certificate. +> +>- The certificate files must be in PEM format. +>- The certificate files must be encoded in [base64](#base64). +>- In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +In `kind: Secret` with `name: cattle-keys-ingress`, replace `` with the base64 encoded string of the CA Certificate file (usually called `ca.pem` or `ca.crt`) + +>**Note:** The base64 encoded string should be on the same line as `cacerts.pem`, without any newline at the beginning, in between or at the end. + +After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + --- + apiVersion: v1 + kind: Secret + metadata: + name: cattle-keys-server + namespace: cattle-system + type: Opaque + data: + cacerts.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNvRENDQVlnQ0NRRHVVWjZuMEZWeU16QU5CZ2txaGtpRzl3MEJBUXNGQURBU01SQXdEZ1lEVlFRRERBZDAKWlhOMExXTmhNQjRYRFRFNE1EVXdOakl4TURRd09Wb1hEVEU0TURjd05USXhNRFF3T1Zvd0VqRVFNQTRHQTFVRQpBd3dIZEdWemRDMWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNQmpBS3dQCndhRUhwQTdaRW1iWWczaTNYNlppVmtGZFJGckJlTmFYTHFPL2R0RUdmWktqYUF0Wm45R1VsckQxZUlUS3UzVHgKOWlGVlV4Mmo1Z0tyWmpwWitCUnFiZ1BNbk5hS1hocmRTdDRtUUN0VFFZdGRYMVFZS0pUbWF5NU45N3FoNTZtWQprMllKRkpOWVhHWlJabkdMUXJQNk04VHZramF0ZnZOdmJ0WmtkY2orYlY3aWhXanp2d2theHRUVjZlUGxuM2p5CnJUeXBBTDliYnlVcHlad3E2MWQvb0Q4VUtwZ2lZM1dOWmN1YnNvSjhxWlRsTnN6UjVadEFJV0tjSE5ZbE93d2oKaG41RE1tSFpwZ0ZGNW14TU52akxPRUc0S0ZRU3laYlV2QzlZRUhLZTUxbGVxa1lmQmtBZWpPY002TnlWQUh1dApuay9DMHpXcGdENkIwbkVDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFHTCtaNkRzK2R4WTZsU2VBClZHSkMvdzE1bHJ2ZXdia1YxN3hvcmlyNEMxVURJSXB6YXdCdFJRSGdSWXVtblVqOGo4T0hFWUFDUEthR3BTVUsKRDVuVWdzV0pMUUV0TDA2eTh6M3A0MDBrSlZFZW9xZlVnYjQrK1JLRVJrWmowWXR3NEN0WHhwOVMzVkd4NmNOQQozZVlqRnRQd2hoYWVEQmdma1hXQWtISXFDcEsrN3RYem9pRGpXbi8walI2VDcrSGlaNEZjZ1AzYnd3K3NjUDIyCjlDQVZ1ZFg4TWpEQ1hTcll0Y0ZINllBanlCSTJjbDhoSkJqa2E3aERpVC9DaFlEZlFFVFZDM3crQjBDYjF1NWcKdE03Z2NGcUw4OVdhMnp5UzdNdXk5bEthUDBvTXl1Ty82Tm1wNjNsVnRHeEZKSFh4WTN6M0lycGxlbTNZQThpTwpmbmlYZXc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + +
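+A minimal way to produce such a single-line base64 string on Linux or macOS (assuming your CA certificate is in a file named `ca.pem`) is:
+
+```
+# Encode the CA certificate without any line wrapping or trailing newline
+cat ca.pem | base64 | tr -d '\n'
+```
+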
    +
+ Option B—Bring Your Own Certificate: Signed by Recognized CA
+
+If you are using a certificate signed by a recognized certificate authority, you don't need to perform any steps in this part.
+
    + +## 8. Configure FQDN + +There is one reference to `` in the RKE config file. Replace this reference with the FQDN you chose in [3. Configure DNS](#3-configure-dns). + +1. Open `rancher-cluster.yml`. + +2. In the `kind: Ingress` with `name: cattle-ingress-http:` + + Replace `` with the FQDN chosen in [3. Configure DNS](#3-configure-dns). + + **Step Result:** After replacing the values, the file should look like the example below (the base64 encoded strings should be different): + + ``` + apiVersion: extensions/v1beta1 + kind: Ingress + metadata: + namespace: cattle-system + name: cattle-ingress-http + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" # Max time in seconds for ws to remain shell window open + nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" # Max time in seconds for ws to remain shell window open + spec: + rules: + - host: rancher.yourdomain.com + http: + paths: + - backend: + serviceName: cattle-service + servicePort: 80 + ``` + + +3. Save the file and close it. + +## 9. Configure Rancher version + +The last reference that needs to be replaced is ``. This needs to be replaced with a Rancher version which is marked as stable. The latest stable release of Rancher can be found in the [GitHub README](https://github.com/rancher/rancher/blob/master/README.md). Make sure the version is an actual version number, and not a named tag like `stable` or `latest`. The example below shows the version configured to `v2.0.6`. + +``` + spec: + serviceAccountName: cattle-admin + containers: + - image: rancher/rancher:v2.0.6 + imagePullPolicy: Always +``` + +## 10. Back Up Your RKE Config File + +After you close your RKE config file, `rancher-cluster.yml`, back it up to a secure location. You can use this file again when it's time to upgrade Rancher. + +## 11. Run RKE + +With all configuration in place, use RKE to launch Rancher. You can complete this action by running the `rke up` command and using the `--config` parameter to point toward your config file. + +1. From your workstation, make sure `rancher-cluster.yml` and the downloaded `rke` binary are in the same directory. + +2. Open a Terminal instance. Change to the directory that contains your config file and `rke`. + +3. Enter one of the `rke up` commands listen below. + + ``` + rke up --config rancher-cluster.yml + ``` + + **Step Result:** The output should be similar to the snippet below: + + ``` + INFO[0000] Building Kubernetes cluster + INFO[0000] [dialer] Setup tunnel for host [1.1.1.1] + INFO[0000] [network] Deploying port listener containers + INFO[0000] [network] Pulling image [alpine:latest] on host [1.1.1.1] + ... + INFO[0101] Finished building Kubernetes cluster successfully + ``` + +## 12. Back Up Auto-Generated Config File + +During installation, RKE automatically generates a config file named `kube_config_rancher-cluster.yml` in the same directory as the `rancher-cluster.yml` file. Copy this file and back it up to a safe location. You'll use this file later when upgrading Rancher Server. + +## What's Next? + +- **Recommended:** Review [Creating Backups—High Availability Back Up and Restore](backups/backups/ha-backups/) to learn how to backup your Rancher Server in case of a disaster scenario. +- Create a Kubernetes cluster: [Creating a Cluster](tasks/clusters/creating-a-cluster/). + +
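+Before moving on, note that the Rancher pods can take a few minutes to become ready after `rke up` completes. One way to watch them, using the kubeconfig that RKE generated (a quick check, not a required step):
+
+```
+kubectl --kubeconfig kube_config_rancher-cluster.yml -n cattle-system get pods
+```
+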
    + +## FAQ and Troubleshooting + +{{< ssl_faq_ha >}} diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md new file mode 100644 index 0000000000..d6eb2e41a4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on-troubleshooting.md @@ -0,0 +1,35 @@ +--- +title: Troubleshooting HA RKE Add-On Install +weight: 370 +aliases: + - /rancher/v2.0-v2.4/en/installation/troubleshooting-ha/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on/troubleshooting + - /rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/404-default-backend/ + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/troubleshooting/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher Helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. + +This section contains common errors seen when setting up a Kubernetes installation. + +Choose from the following options: + +- [Generic troubleshooting](generic-troubleshooting/) + + In this section, you can find generic ways to debug your Kubernetes cluster. + +- [Failed to set up SSH tunneling for host](https://rancher.com/docs/rke/latest/en/troubleshooting/ssh-connectivity-errors/) + + In this section, you can find errors related to SSH tunneling when you run the `rke` command to setup your nodes. + +- [Failed to get job complete status](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status.md) + + In this section, you can find errors related to deploying addons. + +- [404 - default backend](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/404-default-backend.md) + + In this section, you can find errors related to the `404 - default backend` page that is shown when trying to access Rancher. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md new file mode 100644 index 0000000000..430999ac08 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2-rke-add-on.md @@ -0,0 +1,19 @@ +--- +title: RKE Add-On Install +weight: 276 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2/rke-add-on + - /rancher/v2.x/en/installation/resources/advanced/helm2/rke-add-on/ +--- + +> #### **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +>Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). +> +>If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. 
+ + +* [Kubernetes installation with External Load Balancer (TCP/Layer 4)](installation/options/helm2/rke-add-on/layer-4-lb) +* [Kubernetes installation with External Load Balancer (HTTPS/Layer 7)](installation/options/helm2/rke-add-on/layer-7-lb) +* [HTTP Proxy Configuration for a Kubernetes installation](installation/options/helm2/rke-add-on/proxy/) +* [Troubleshooting RKE Add-on Installs](installation/options/helm2/rke-add-on/troubleshooting/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md new file mode 100644 index 0000000000..ef3559cdda --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/helm2.md @@ -0,0 +1,61 @@ +--- +title: Kubernetes Installation Using Helm 2 +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/helm2 + - /rancher/v2.x/en/installation/resources/advanced/helm2/ +--- + +> After Helm 3 was released, the Rancher installation instructions were updated to use Helm 3. +> +> If you are using Helm 2, we recommend [migrating to Helm 3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) because it is simpler to use and more secure than Helm 2. +> +> This section provides a copy of the older high-availability Kubernetes Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +For production environments, we recommend installing Rancher in a high-availability configuration so that your user base can always access Rancher Server. When installed in a Kubernetes cluster, Rancher will integrate with the cluster's etcd database and take advantage of Kubernetes scheduling for high-availability. + +This procedure walks you through setting up a 3-node cluster with Rancher Kubernetes Engine (RKE) and installing the Rancher chart with the Helm package manager. + +> **Important:** The Rancher management server can only be run on an RKE-managed Kubernetes cluster. Use of Rancher on hosted Kubernetes or other providers is not supported. + +> **Important:** For the best performance, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +## Recommended Architecture + +- DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
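+One possible shape for such a Layer 4 (TCP) load balancer, sketched here with NGINX's `stream` module and placeholder node IPs, is shown below. This is only an illustration; any TCP load balancer that forwards ports 80 and 443 to all three nodes will work.
+
+```
+worker_processes 4;
+
+events {
+    worker_connections 8192;
+}
+
+stream {
+    upstream rancher_servers_http {
+        least_conn;
+        server IP_NODE_1:80 max_fails=3 fail_timeout=5s;
+        server IP_NODE_2:80 max_fails=3 fail_timeout=5s;
+        server IP_NODE_3:80 max_fails=3 fail_timeout=5s;
+    }
+    server {
+        listen 80;
+        proxy_pass rancher_servers_http;
+    }
+
+    upstream rancher_servers_https {
+        least_conn;
+        server IP_NODE_1:443 max_fails=3 fail_timeout=5s;
+        server IP_NODE_2:443 max_fails=3 fail_timeout=5s;
+        server IP_NODE_3:443 max_fails=3 fail_timeout=5s;
+    }
+    server {
+        listen 443;
+        proxy_pass rancher_servers_https;
+    }
+}
+```
+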
    +![High-availability Kubernetes Install](/img/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers + +## Required Tools + +The following CLI tools are required for this install. Please make sure these tools are installed and available in your `$PATH` + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [rke](https://rancher.com/docs/rke/latest/en/installation/) - Rancher Kubernetes Engine, cli for building Kubernetes clusters. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +## Installation Outline + +- [Create Nodes and Load Balancer](installation/options/helm2/create-nodes-lb/) +- [Install Kubernetes with RKE](installation/options/helm2/kubernetes-rke/) +- [Initialize Helm (tiller)](installation/options/helm2/helm-init/) +- [Install Rancher](installation/options/helm2/helm-rancher/) + +## Additional Install Options + +- [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) + +## Previous Methods + +[RKE add-on install](installation/options/helm2/rke-add-on/) + +> **Important: RKE add-on install is only supported up to Rancher v2.0.8** +> +> Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install ](installation/options/helm2/). +> +> If you are currently using the RKE add-on install method, see [Migrating from a Kubernetes Install with an RKE Add-on](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the Helm chart. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md new file mode 100644 index 0000000000..64195f8f1f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/horizontal-pod-autoscaler.md @@ -0,0 +1,37 @@ +--- +title: The Horizontal Pod Autoscaler +description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment +weight: 3026 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/horizontal-pod-autoscaler +--- + +The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. + +Rancher provides some additional features to help manage HPAs, depending on the version of Rancher. + +You can create, manage, and delete HPAs using the Rancher UI in Rancher v2.3.0-alpha4 and higher versions. It only supports HPA in the `autoscaling/v2beta2` API. + +## Managing HPAs + +The way that you manage HPAs is different based on your version of the Kubernetes API: + +- **For Kubernetes API version autoscaling/V2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application. 
+- **For Kubernetes API Version autoscaling/V2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics. + +HPAs are also managed differently based on your version of Rancher: + +- **For Rancher v2.3.0+**: You can create, manage, and delete HPAs using the Rancher UI. From the Rancher UI you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). +- **For Rancher Before v2.3.0:** To manage and configure HPAs, you need to use `kubectl`. For instructions on how to create, manage, and scale HPAs, refer to [Managing HPAs with kubectl](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md). + +You might have additional HPA installation steps if you are using an older version of Rancher: + +- **For Rancher v2.0.7+:** Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA. +- **For Rancher Before v2.0.7:** Clusters created in Rancher before v2.0.7 don't automatically have the requirements needed to use HPA. For instructions on installing HPA for these clusters, refer to [Manual HPA Installation for Clusters Created Before Rancher v2.0.7](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7.md). + +## Testing HPAs with a Service Deployment + +In Rancher v2.3.x+, you can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md). + +You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl] +(k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md new file mode 100644 index 0000000000..081207021e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/infrastructure-setup.md @@ -0,0 +1,10 @@ +--- +title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. 
+shortTitle: Infrastructure Tutorials +weight: 5 +--- + +To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) + + +To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md new file mode 100644 index 0000000000..40dafa4b01 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-cluster-autoscaler.md @@ -0,0 +1,25 @@ +--- +title: Cluster Autoscaler +weight: 1 +--- + +In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. + +To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. + +Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. + +It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. + +# Cloud Providers + +Cluster Autoscaler provides support to distinct cloud providers. For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) + +### Setting up Cluster Autoscaler on Amazon Cloud Provider + +For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md new file mode 100644 index 0000000000..f6e7859ec2 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -0,0 +1,294 @@ +--- +title: Install Rancher on a Kubernetes Cluster +description: Learn how to install Rancher in development and production environments. 
Read about single node and high availability installation +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/k8s-install/ + - /rancher/v2.0-v2.4/en/installation/k8s-install/helm-rancher + - /rancher/v2.0-v2.4/en/installation/k8s-install/kubernetes-rke + - /rancher/v2.0-v2.4/en/installation/ha-server-install + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/install +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Prerequisite + +Set up the Rancher server's local Kubernetes cluster. + +The cluster requirements depend on the Rancher version: + +- **In Rancher v2.4.x,** Rancher needs to be installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. +- **In Rancher before v2.4,** Rancher needs to be installed on an RKE Kubernetes cluster. + +For the tutorial to install an RKE Kubernetes cluster, refer to [this page.](installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) + +For the tutorial to install a K3s Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) + +# Install the Rancher Helm Chart + +Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. + +With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. + +For systems without direct internet access, see [Air Gap: Kubernetes install](installation/air-gap-installation/install-rancher/). + +To choose a Rancher version to install, refer to [Choosing a Rancher Version.](installation/options/server-tags) + +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements](installation/options/helm-version) + +> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section](installation/options/helm2) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +To set up Rancher, + +1. [Install the required CLI tools](#1-install-the-required-cli-tools) +2. [Add the Helm chart repository](#2-add-the-helm-chart-repository) +3. [Create a namespace for Rancher](#3-create-a-namespace-for-rancher) +4. [Choose your SSL configuration](#4-choose-your-ssl-configuration) +5. [Install cert-manager](#5-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) +6. [Install Rancher with Helm and your chosen certificate option](#6-install-rancher-with-helm-and-your-chosen-certificate-option) +7. [Verify that the Rancher server is successfully deployed](#7-verify-that-the-rancher-server-is-successfully-deployed) +8. [Save your options](#8-save-your-options) + +### 1. 
Install the Required CLI Tools + +The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. + +Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](installation/options/helm-version) to choose a version of Helm to install Rancher. + +### 2. Add the Helm Chart Repository + +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### 3. Create a Namespace for Rancher + +We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: + +``` +kubectl create namespace cattle-system +``` + +### 4. Choose your SSL Configuration + +The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: + +- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. +- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. +- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. 
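+For the "bring your own certificate" option, the certificate files are later published as Kubernetes secrets in the `cattle-system` namespace (see the Adding TLS Secrets step referenced further down). As a rough sketch, assuming PEM files named `tls.crt`, `tls.key` and, for a private CA, `cacerts.pem`, the commands look roughly like this:
+
+```
+# Certificate and key used by the Rancher ingress
+kubectl -n cattle-system create secret tls tls-rancher-ingress \
+  --cert=tls.crt \
+  --key=tls.key
+
+# Only needed when the certificate is signed by a private CA
+kubectl -n cattle-system create secret generic tls-ca \
+  --from-file=cacerts.pem=./cacerts.pem
+```
+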
+ + +| Configuration | Helm Chart Option | Requires cert-manager | +| ------------------------------ | ----------------------- | ------------------------------------- | +| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#5-install-cert-manager) | +| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#5-install-cert-manager) | +| Certificates from Files | `ingress.tls.source=secret` | no | + +### 5. Install cert-manager + +> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). + +
    + Click to Expand + +> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation](installation/options/upgrading-cert-manager/). + +These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). + +``` +# Install the CustomResourceDefinition resources separately +kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.crds.yaml + +# **Important:** +# If you are running Kubernetes v1.15 or below, you +# will need to add the `--validate=false` flag to your +# kubectl apply command, or else you will receive a +# validation error relating to the +# x-kubernetes-preserve-unknown-fields field in +# cert-manager’s CustomResourceDefinition resources. +# This is a benign error and occurs due to the way kubectl +# performs resource validation. + +# Create the namespace for cert-manager +kubectl create namespace cert-manager + +# Add the Jetstack Helm repository +helm repo add jetstack https://charts.jetstack.io + +# Update your local Helm chart repository cache +helm repo update + +# Install the cert-manager Helm chart +helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v1.0.4 +``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +
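+If the rollout appears stuck, waiting on the webhook deployment explicitly can help confirm that cert-manager is actually ready before you install Rancher (an optional check):
+
+```
+kubectl -n cert-manager rollout status deploy/cert-manager-webhook
+```
+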
    + +### 6. Install Rancher with Helm and Your Chosen Certificate Option + +The exact command to install Rancher differs depending on the certificate configuration. + + + + +The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. + +Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set the `hostname` to the DNS name you pointed at your load balancer. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. + +In the following command, + +- `hostname` is set to the public DNS record, +- `ingress.tls.source` is set to `letsEncrypt` +- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org \ +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +In this option, Kubernetes secrets are created from your own certificates for Rancher to use. + +When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. + +Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. + +> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +- Set the `hostname`. +- Set `ingress.tls.source` to `secret`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. 
+ +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret +``` + +If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret \ + --set privateCA=true +``` + +Now that Rancher is deployed, see [Adding TLS Secrets](installation/resources/encryption/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. + + + + +The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. + +- [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md#http-proxy) +- [Private Docker Image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +See the [Chart Options](installation/resources/chart-options/) for the full list of options. + + +### 7. Verify that the Rancher Server is Successfully Deployed + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. + +### 8. Save Your Options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it. You should have a functional Rancher server. + +In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. + +Doesn't work? Take a look at the [Troubleshooting](installation/options/troubleshooting/) Page + + +### Optional Next Steps + +Enable the Enterprise Cluster Manager. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md new file mode 100644 index 0000000000..a37fd0b144 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-and-upgrade.md @@ -0,0 +1,94 @@ +--- +title: Installing/Upgrading Rancher +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/how-ha-works/ +--- + +This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. + +# Terminology + +In this section, + +- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. 
+- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. +- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. As of Rancher v2.4, Rancher can be installed on a K3s cluster. + +# Overview of Installation Options + +Rancher can be installed on these main architectures: + +### High-availability Kubernetes Install with the Helm CLI + +We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. + +### Single-node Kubernetes Install + +Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. + +However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. + +### Docker Install + +For test and demonstration purposes, Rancher can be installed with Docker on a single node. + +For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. + +### Other Options + +There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: + +| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | +| ---------------------------------- | ------------------------------ | ---------- | +| With direct access to the Internet | [Docs](install-upgrade-on-a-kubernetes-cluster.md) | [Docs](rancher-on-a-single-node-with-docker.md) | +| Behind an HTTP proxy | These [docs,](install-upgrade-on-a-kubernetes-cluster.md) plus this [configuration](../reference-guides/installation-references/helm-chart-options.md#http-proxy) | These [docs,](rancher-on-a-single-node-with-docker.md) plus this [configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) | +| In an air gap environment | [Docs](air-gapped-helm-cli-install.md) | [Docs](air-gapped-helm-cli-install.md) | + +We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. + +For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. + +> The type of cluster that Rancher needs to be installed on depends on the Rancher version. +> +> For Rancher v2.4.x, either an RKE Kubernetes cluster or K3s Kubernetes cluster can be used. +> For Rancher before v2.4, an RKE cluster must be used. + +For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. 
The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. + +Our [instructions for installing Rancher on Kubernetes](install-upgrade-on-a-kubernetes-cluster.md) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. + +When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,](installation-requirements.md) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. + +For a longer discussion of Rancher architecture, refer to the [architecture overview,](rancher-manager-architecture.md) [recommendations for production-grade architecture,](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) or our [best practices guide.](../reference-guides/best-practices/deployment-types.md) + +# Prerequisites +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.](installation-requirements.md) + +# Architecture Tip + +For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +For more architecture recommendations, refer to [this page.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### More Options for Installations on a Kubernetes Cluster + +Refer to the [Helm chart options](installation/resources/chart-options/) for details on installing Rancher on a Kubernetes cluster with other configurations, including: + +- With [API auditing to record all transactions](../reference-guides/installation-references/helm-chart-options.md#api-audit-log) +- With [TLS termination on a load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) +- With a [custom Ingress](../reference-guides/installation-references/helm-chart-options.md#customizing-your-ingress) + +In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. 
For the full list of their capabilities, refer to their documentation: + +- [RKE configuration options](https://rancher.com/docs/rke/latest/en/config-options/) +- [K3s configuration options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +### More Options for Installations with Docker + +Refer to the [docs about options for Docker installs](rancher-on-a-single-node-with-docker.md) for details about other configurations including: + +- With [API auditing to record all transactions](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) +- With an [external load balancer](installation/options/single-node-install-external-lb/) +- With a [persistent data store](../reference-guides/single-node-rancher-in-docker/advanced-options.md#persistent-data) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-references.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md new file mode 100644 index 0000000000..c1b1c248fe --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/installation-requirements.md @@ -0,0 +1,147 @@ +--- +title: Installation Requirements +description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. + +> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) which will run your apps and services. + +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) +- [Hardware Requirements](#hardware-requirements) + - [CPU and Memory](#cpu-and-memory) + - [CPU and Memory for Rancher before v2.4.0](#cpu-and-memory-for-rancher-before-v2-4-0) + - [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.](../reference-guides/best-practices/deployment-types.md) + +The Rancher UI works best in Firefox or Chrome. + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution. 
+ +For details on which OS, Docker, and Kubernetes versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. + +Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. + +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).](installation/options/arm64-platform/) + +### RKE Specific Requirements + +For the container runtime, RKE should work with any modern Docker version. + +### K3s Specific Requirements + +For the container runtime, K3s should work with any modern version of Docker or containerd. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. + +If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. + + +### Installing Docker + +Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts](../getting-started/installation-and-upgrade/installation-requirements/install-docker.md) to install Docker with one command. +# Hardware Requirements + +This section describes the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. + +### CPU and Memory + +Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. + + + + +These requirements apply to each host in an [RKE Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) + +Performance increased in Rancher v2.4.0. For the requirements of Rancher before v2.4.0, refer to [this section.](#cpu-and-memory-for-rancher-before-v2-4-0) + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | ---------- | ------------ | -------| ------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | + +Every use case and environment is different. 
Please [contact Rancher](https://rancher.com/contact/) to review yours. + + + + + +These requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | +| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + + + + + +These requirements apply to a host with a [single-node](rancher-on-a-single-node-with-docker.md) installation of Rancher. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + + + + + +### CPU and Memory for Rancher before v2.4.0 + +
    + Click to expand + +These requirements apply to installing Rancher on an RKE Kubernetes cluster before Rancher v2.4.0: + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | --------- | ---------- | ----------------------------------------------- | ----------------------------------------------- | +| Small | Up to 5 | Up to 50 | 2 | 8 GB | +| Medium | Up to 15 | Up to 200 | 4 | 16 GB | +| Large | Up to 50 | Up to 500 | 8 | 32 GB | +| X-Large | Up to 100 | Up to 1000 | 32 | 128 GB | +| XX-Large | 100+ | 1000+ | [Contact Rancher](https://rancher.com/contact/) | [Contact Rancher](https://rancher.com/contact/) | +
+ +### Disks + +Rancher performance depends on the performance of etcd in the cluster. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +### Port Requirements + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements](../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/integrations-in-rancher.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/introduction.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md new file mode 100644 index 0000000000..850213fe2b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio-setup-guide.md @@ -0,0 +1,26 @@ +--- +title: Setup Guide +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/setup + - /rancher/v2.0-v2.4/en/istio/legacy/setup + - /rancher/v2.0-v2.4/en/istio/v2.3.x-v2.4.x/setup + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/setup/ +--- + +This section describes how to enable Istio and start using it in your projects. + +This section assumes that you have Rancher installed, and you have a Rancher-provisioned Kubernetes cluster where you would like to set up Istio. + +If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. + +> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) and [setting up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) + +1. [Enable Istio in the cluster.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md) +1.
[Enable Istio in all the namespaces where you want to use it.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) +1. [Select the nodes where the main Istio components will be deployed.](../how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors.md) +1. [Add deployments and services that have the Istio sidecar injected.](../how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md) +1. [Set up the Istio gateway. ](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) +1. [Set up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) +1. [Generate traffic and see Istio in action.](istio-setup-guide.md#view-traffic) + diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md new file mode 100644 index 0000000000..ca8bea220f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/istio.md @@ -0,0 +1,93 @@ +--- +title: Istio +weight: 15 +aliases: + - /rancher/v2.0-v2.4/en/dashboard/istio + - /rancher/v2.0-v2.4/en/project-admin/istio/configuring-resource-allocations/ + - /rancher/v2.0-v2.4/en/cluster-admin/tools/istio/ + - /rancher/v2.0-v2.4/en/project-admin/istio + - /rancher/v2.0-v2.4/en/istio/legacy/cluster-istio + - /rancher/v2.x/en/istio/v2.3.x-v2.4.x/ +--- +_Available as of v2.3.0_ + +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. + +As a network of microservices changes and grows, the interactions between them can become more difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. + +Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +This service mesh provides features that include but are not limited to the following: + +- Traffic management features +- Enhanced monitoring and tracing +- Service discovery and routing +- Secure connections and service-to-service authentication with mutual TLS +- Load balancing +- Automatic retries, backoff, and circuit breaking + +After Istio is enabled in a cluster, you can leverage Istio's control plane functionality with `kubectl`. + +Rancher's Istio integration comes with comprehensive visualization aids: + +- **Trace the root cause of errors with Jaeger.** [Jaeger](https://www.jaegertracing.io/) is an open-source tool that provides a UI for a distributed tracing system, which is useful for root cause analysis and for determining what causes poor performance. Distributed tracing allows you to view an entire chain of calls, which might originate with a user request and traverse dozens of microservices. 
+- **Get the full picture of your microservice architecture with Kiali.** [Kiali](https://www.kiali.io/) provides a diagram that shows the services within a service mesh and how they are connected, including the traffic rates and latencies between them. You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. +- **Gain insights from time series analytics with Grafana dashboards.** [Grafana](https://grafana.com/) is an analytics platform that allows you to query, visualize, alert on and understand the data gathered by Prometheus. +- **Write custom queries for time series data with the Prometheus UI.** [Prometheus](https://prometheus.io/) is a systems monitoring and alerting toolkit. Prometheus scrapes data from your cluster, which is then used by Grafana. A Prometheus UI is also integrated into Rancher, and lets you write custom queries for time series data and see the results in the UI. + + +Istio needs to be set up by a Rancher administrator or cluster administrator before it can be used in a project. + +# Prerequisites + +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) to run all of the components of Istio. + +# Setup Guide + +Refer to the [setup guide](istio-setup-guide.md) for instructions on how to set up Istio and use it in a project. + +# Disabling Istio + +To remove Istio components from a cluster, namespace, or workload, refer to the section on [disabling Istio.](../explanations/integrations-in-rancher/istio/disable-istio.md) + +# Accessing Visualizations + +> By default, only cluster owners have access to Jaeger and Kiali. For instructions on how to allow project members to access them, see [this section.](../explanations/integrations-in-rancher/istio/rbac-for-istio.md) + +After Istio is set up in a cluster, Grafana, Prometheus, Jaeger, and Kiali are available in the Rancher UI. + +Your access to the visualizations depends on your role. Grafana and Prometheus are only available for cluster owners. The Kiali and Jaeger UIs are available only to cluster owners by default, but cluster owners can allow project members to access them by editing the Istio settings. When you go to your project and click **Resources > Istio,** you can go to each UI for Kiali, Jaeger, Grafana, and Prometheus by clicking their icons in the top right corner of the page. + +To see the visualizations, go to the cluster where Istio is set up and click **Tools > Istio.** You should see links to each UI at the top of the page. + +You can also get to the visualization tools from the project view. + +# Viewing the Kiali Traffic Graph + +1. From the project view in Rancher, click **Resources > Istio.** +1. If you are a cluster owner, you can go to the **Traffic Graph** tab. This tab has the Kiali network visualization integrated into the UI. + +# Viewing Traffic Metrics + +Istio’s monitoring features provide visibility into the performance of all your services. + +1. From the project view in Rancher, click **Resources > Istio.** +1. Go to the **Traffic Metrics** tab.
After traffic is generated in your cluster, you should be able to see metrics for **Success Rate, Request Volume, 4xx Response Count, Project 5xx Response Count** and **Request Duration.** Cluster owners can see all of the metrics, while project members can see a subset of the metrics. + +# Architecture + +Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload. These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. + +Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. + +Enabling Istio in Rancher enables monitoring in the cluster, and enables Istio in all new namespaces that are created in a cluster. You need to manually enable Istio in preexisting namespaces. + +When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. + +For more information on the Istio sidecar, refer to the [Istio docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). + +### Two Ingresses + +By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. To allow Istio to receive external traffic, you need to enable the Istio ingress gateway for the cluster. The result is that your cluster will have two ingresses. + +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg) \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/_index.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/installation/resources/k8s-tutorials/_index.md rename to versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-cluster-setup.md diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md new file mode 100644 index 0000000000..93b6c6a30c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -0,0 +1,100 @@ +--- +title: Setting up Kubernetes Clusters in Rancher +description: Provisioning Kubernetes Clusters +weight: 7 +aliases: + - /rancher/v2.0-v2.4/en/concepts/clusters/ + - /rancher/v2.0-v2.4/en/concepts/clusters/cluster-providers/ + - /rancher/v2.0-v2.4/en/tasks/clusters/ +--- + +Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. + +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture](rancher-manager-architecture.md) page. 
+ +This section covers the following topics: + + + +- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) +- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) + - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) + - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) +- [Importing Existing Clusters](#importing-existing-clusters) + + + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +# Setting up Clusters in a Hosted Kubernetes Provider + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +For more information, refer to the section on [hosted Kubernetes clusters.](set-up-clusters-from-hosted-kubernetes-providers.md) + +# Launching Kubernetes with Rancher + +Rancher uses the [Rancher Kubernetes Engine (RKE)](https://rancher.com/docs/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. + +In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. + +These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. + +If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. + +For more information, refer to the section on [RKE clusters.](launch-kubernetes-with-rancher.md) + +### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider + +Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This template defines the parameters used to launch nodes in your cloud providers. + +One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. + +The cloud providers available for creating a node template are decided based on the [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers) active in the Rancher UI. + +For more information, refer to the section on [nodes hosted by an infrastructure provider](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,](use-existing-nodes.md) which creates a custom cluster. + +You can bring any nodes you want to Rancher and use them to create a cluster. 
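+
+To register such a node, you run the Rancher agent container on it. The exact command, including the registration token and CA checksum, is generated for you in the Rancher UI when you create a custom cluster; the sketch below only shows its general shape, and every angle-bracketed value is a placeholder:
+
+```
+# Illustrative only - copy the real command from the Rancher UI for your cluster
+sudo docker run -d --privileged --restart=unless-stopped --net=host \
+  -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
+  rancher/rancher-agent:<RANCHER_AGENT_VERSION> \
+  --server https://<RANCHER_SERVER_URL> \
+  --token <REGISTRATION_TOKEN> \
+  --ca-checksum <CA_CHECKSUM> \
+  --worker    # add --etcd and/or --controlplane depending on the roles you assign
+```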
+ +These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. + +# Importing Existing Clusters + +_Available from Rancher v2.0.x-v2.4.x_ + +In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. + +Note that Rancher does not automate the provisioning, scaling, or upgrade of imported clusters. Other Rancher features, including management of cluster, role-based access control, policy, and workloads, are available for imported clusters. + +For all imported Kubernetes clusters except for K3s clusters, the configuration of an imported cluster still has to be edited outside of Rancher. Some examples of editing the cluster include adding and removing nodes, upgrading the Kubernetes version, and changing Kubernetes component parameters. + +In Rancher v2.4, it became possible to import a K3s cluster and upgrade Kubernetes by editing the cluster in the Rancher UI. + +For more information, refer to the section on [importing existing clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) + +### Importing and Editing K3s Clusters + +_Available as of Rancher v2.4.0_ + +[K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. K3s Kubernetes clusters can now be imported into Rancher. + +When a K3s cluster is imported, Rancher will recognize it as K3s, and the Rancher UI will expose the following features in addition to the functionality for other imported clusters: + +- The ability to upgrade the K3s version +- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster. + +For more information, refer to the section on [imported K3s clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md new file mode 100644 index 0000000000..4fc8354908 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-components.md @@ -0,0 +1,18 @@ +--- +title: Kubernetes Components +weight: 100 +--- + +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. + +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes](../troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md) +- [Troubleshooting Controlplane Nodes](../troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md) +- [Troubleshooting nginx-proxy Nodes](../troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md) +- [Troubleshooting Worker Nodes and Generic Components](../troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md) + +# Kubernetes Component Diagram + +![Cluster diagram](/img/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely for visual aid \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md new file mode 100644 index 0000000000..1649faf19f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/kubernetes-resources-setup.md @@ -0,0 +1,76 @@ +--- +title: Kubernetes Resources +weight: 19 +aliases: + - /rancher/v2.0-v2.4/en/concepts/ + - /rancher/v2.0-v2.4/en/tasks/ + - /rancher/v2.0-v2.4/en/concepts/resources/ +--- + +## Workloads + +Deploy applications to your cluster nodes using [workloads](workloads-and-pods.md), which are objects that contain pods that run your apps, along with metadata that set rules for the deployment's behavior. Workloads can be deployed within the scope of the entire cluster or within a namespace. + +When deploying a workload, you can deploy from any image. There are a variety of [workload types](workloads-and-pods.md#workload-types) to choose from which determine how your application should run. + +Following a workload deployment, you can continue working with it. You can: + +- [Upgrade](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) the workload to a newer version of the application it's running. +- [Roll back](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) a workload to a previous version, if an issue occurs during upgrade. +- [Add a sidecar](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md), which is a workload that supports a primary workload. + +## Load Balancing and Ingress + +### Load Balancers + +After you launch an application, it's only available within the cluster. It can't be reached externally. + +If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. + +Rancher supports two types of load balancers: + +- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) + +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). + +#### Ingress + +Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiple load balancers can be expensive. You can get around this issue by using an ingress. + +Ingress is a set of rules that act as a load balancer. Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured.
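+
+For illustration only, the kind of host- and path-based routing rules described above look roughly like the following when written as a Kubernetes Ingress manifest. The hostname, service names, and ports are placeholders, and the exact API version you use depends on the Kubernetes version of your cluster:
+
+```
+# Send two URL paths on one hostname to two different services
+kubectl apply -f - <<'EOF'
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: example-ingress
+  namespace: default
+spec:
+  rules:
+  - host: shop.example.com
+    http:
+      paths:
+      - path: /cart
+        pathType: Prefix
+        backend:
+          service:
+            name: cart-service
+            port:
+              number: 80
+      - path: /catalog
+        pathType: Prefix
+        backend:
+          service:
+            name: catalog-service
+            port:
+              number: 80
+EOF
+```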
+ +For more information, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). + +When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. + +For more information, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md). + +## Service Discovery + +After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolvable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. + +For more information, see [Service Discovery](../how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md). + +## Pipelines + +After your project has been [configured to a version control provider](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. + +For more information, see [Pipelines](k8s-in-rancher/pipelines/). + +## Applications + +Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. + +For more information, see [Applications in a Project](catalog/apps/). + +## Kubernetes Resources + +Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. + +Resources include: + +- [Certificates](../how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md): Files used to encrypt/decrypt data entering or leaving the cluster. +- [ConfigMaps](../how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md): Files that store general configuration information, such as a group of config files. +- [Secrets](../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md): Files that store sensitive data like passwords, tokens, or keys. +- [Registries](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md): Files that carry credentials used to authenticate with private registries. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md new file mode 100644 index 0000000000..42f4d9b31d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -0,0 +1,34 @@ +--- +title: Launching Kubernetes with Rancher +weight: 4 +--- + +You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer.
It can launch Kubernetes on any computers, including: + +- Bare-metal servers +- On-premise virtual machines +- Virtual machines hosted by an infrastructure provider + +Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. + +RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. + +### Requirements + +If you use RKE to set up a cluster, your nodes must meet the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) for nodes in downstream user clusters. + +### Launching Kubernetes on New Nodes in an Infrastructure Provider + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +For more information, refer to the section on [launching Kubernetes on new nodes.](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. + +If you want to reuse a node from a previous custom cluster, [clean the node](admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +For more information, refer to the section on [custom nodes.](use-existing-nodes.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md new file mode 100644 index 0000000000..09c4a898ac --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -0,0 +1,63 @@ +--- +title: Set Up Load Balancer and Ingress Controller within Rancher +description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers +weight: 3040 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/load-balancers-and-ingress +--- + +Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. + +## Load Balancers + +After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. + +If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. 
+ +Rancher supports two types of load balancers: + +- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) + +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). + +### Load Balancer Limitations + +Load Balancers have a couple of limitations you should be aware of: + +- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiple load balancers can be expensive. + +- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: + + + - [Support for Layer-4 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-4-load-balancing) + + - [Support for Layer-7 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-7-load-balancing) + +## Ingress + +As mentioned in the limitations above, the disadvantages of using a load balancer are: + +- Load Balancers can only handle one IP address per service. +- If you run multiple services in your cluster, you must have a load balancer for each service. +- It can be expensive to have a load balancer for every service. + +In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. + +Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. + +Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. + +Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. + +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). + +Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. + +>**Using Rancher in a High Availability Configuration?** +> +>Refrain from adding an Ingress to the `local` cluster.
The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. + +- For more information on how to set up ingress in Rancher, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). +- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry, see [Global DNS](../how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md new file mode 100644 index 0000000000..8168179c4c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-clusters.md @@ -0,0 +1,41 @@ +--- +title: Cluster Administration +weight: 8 +--- + +After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. + +This page covers the following topics: + +- [Switching between clusters](#switching-between-clusters) +- [Managing clusters in Rancher](#managing-clusters-in-rancher) +- [Configuring tools](#configuring-tools) + +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +## Switching between Clusters + +To switch between clusters, use the drop-down available in the navigation bar. + +Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. + +## Managing Clusters in Rancher + +After clusters have been [provisioned into Rancher](kubernetes-clusters-in-rancher-setup.md), [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +## Configuring Tools + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. 
Tools are divided into the following categories: + +- Alerts +- Notifiers +- Logging +- Monitoring +- Istio Service Mesh +- OPA Gatekeeper + +For more information, see [Tools](../reference-guides/rancher-cluster-tools.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-persistent-storage.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md new file mode 100644 index 0000000000..d3a6dd7b62 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-project-resource-quotas.md @@ -0,0 +1,48 @@ +--- +title: Project Resource Quotas +weight: 2515 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/resource-quotas +--- + +_Available as of v2.1.0_ + +In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. + +This page is a how-to guide for creating resource quotas in existing projects. + +Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects) + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md) + +### Applying Resource Quotas to Existing Projects + +_Available as of v2.0.1_ + +Edit [resource quotas](k8s-in-rancher/projects-and-namespaces/resource-quotas) when: + +- You want to limit the resources that a project and its namespaces can use. +- You want to scale the resources available to a project up or down when a resource quota is already in effect. + +1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. + +1. From the main menu, select **Projects/Namespaces**. + +1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. + +1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. + +1. Select a Resource Type. For more information on types, see the [quota type reference.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md) + +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. + + | Field | Description | + | ----------------------- | -------------------------------------------------------------------------------------------------------- | + | Project Limit | The overall resource limit for the project.
| + | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project. The combined limit of all project namespaces shouldn't exceed the project limit. | + +1. **Optional:** Add more quotas. + +1. Click **Create**. + +**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, you may still create namespaces, but they will be given a resource quota of 0. Subsequently, Rancher will not allow you to create any resources restricted by this quota. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md new file mode 100644 index 0000000000..63bb5b0ac9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-projects.md @@ -0,0 +1,44 @@ +--- +title: Project Administration +weight: 9 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/editing-projects/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/projects-and-namespaces/editing-projects/ +--- + +_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! + +Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. + +You can use projects to perform actions like: + +- [Assign users access to a group of namespaces](../how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md) +- Assign users [specific roles in a project](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). 
A role can be owner, member, read-only, or [custom](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md) +- [Set resource quotas](manage-project-resource-quotas.md) +- [Manage namespaces](../how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md) +- [Configure tools](project-tools.md) +- [Set up pipelines for continuous integration and deployment](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- [Configure pod security policies](../how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md) + +### Authorization + +Non-administrative users are only authorized for project access after an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owner or member](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) adds them to the project's **Members** tab. + +Whoever creates the project automatically becomes a [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). + +## Switching between Projects + +To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. + +1. From the **Global** view, navigate to the project that you want to configure. + +1. Select **Projects/Namespaces** from the navigation bar. + +1. Select the link for the project that you want to open. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md new file mode 100644 index 0000000000..c8afca26eb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -0,0 +1,28 @@ +--- +title: Role-Based Access Control (RBAC) +weight: 1120 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/users-permissions-roles/ +--- + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. + +After you configure external authentication, the users that display on the **Users** page changes. + +- If you are logged in as a local user, only local users display. + +- If you are logged in as an external user, both external and local users display. + +## Users and Roles + +Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. + +- [Global Permissions](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md): + + Define user authorization outside the scope of any particular cluster. 
+ +- [Cluster and Project Roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md): + + Define user authorization inside the specific cluster or project where they are assigned the role. + +Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md new file mode 100644 index 0000000000..186cc30e79 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/migrate-from-v1.6-v2.x.md @@ -0,0 +1,53 @@ +--- +title: Migrating from v1.6 to v2.x +weight: 28 +aliases: + - /rancher/v2.x/en/v1.6-migration/ +--- + +Rancher v2.x has been rearchitected and rewritten with the goal of providing a complete management solution for Kubernetes and Docker. Due to these extensive changes, there is no direct upgrade path from v1.6 to v2.x, but rather a migration of your v1.6 services into v2.x as Kubernetes workloads. In v1.6, the most common orchestration used was Rancher's own engine called Cattle. The following guide explains and educates our Cattle users on running workloads in a Kubernetes environment. + +## Video + +This video demonstrates a complete walkthrough of the migration from Rancher v1.6 to v2.x. + +{{< youtube OIifcqj5Srw >}} + +## Migration Plan + +>**Want to know more about Kubernetes before getting started?** Read our [Kubernetes Introduction](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction.md). + + +- [1. Get Started](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md) + + >**Already a Kubernetes user in v1.6?** + > + > _Get Started_ is the only section you need to review for migration to v2.x. You can skip everything else. +- [2. Migrate Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services.md) +- [3. Expose Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services.md) +- [4. Configure Health Checks](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps.md) +- [5. Schedule Your Services](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services.md) +- [6. Service Discovery](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services.md) +- [7. Load Balancing](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing.md) + + +## Migration Example Files + +Throughout this migration guide, we will reference several example services from Rancher v1.6 that we're migrating to v2.x. These services are: + +- A service named `web`, which runs [Let's Chat](http://sdelements.github.io/lets-chat/), a self-hosted chat for small teams. +- A service named `database`, which runs [Mongo DB](https://www.mongodb.com/), an open source document database. +- A service named `webLB`, which runs [HAProxy](http://www.haproxy.org/), an open source load balancer used in Rancher v1.6. + +During migration, we'll export these services from Rancher v1.6.
The export generates a unique directory for each Rancher v1.6 environment and stack, and two files are output into each stack's directory: + +- `docker-compose.yml` + + A file that contains standard Docker directives for each service in your stack. We'll be converting these files to Kubernetes manifests that can be read by Rancher v2.x. + +- `rancher-compose.yml` + + A file for Rancher-specific functionality such as health checks and load balancers. These files cannot be read by Rancher v2.x, so don't worry about their contents—we're discarding them and recreating them using the v2.x UI. + + +### [Next: Get Started](../how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/new-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/node-template-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-cloud-providers.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md new file mode 100644 index 0000000000..979c1de293 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-installation-methods.md @@ -0,0 +1,20 @@ +--- +title: Other Installation Methods +weight: 3 +--- + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users who want to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command (a basic form of this command is sketched at the end of this section). + +The Docker installation is for development and testing environments only. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +There is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start.
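+
+For reference, the basic shape of that `docker run` command is sketched below; the image tag is a placeholder, and the single-node installation page linked above covers the additional options (persistent data, custom certificates, and so on) that may apply to your setup:
+
+```
+# Minimal single-node Rancher server for development and testing only
+sudo docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  rancher/rancher:<RANCHER_VERSION_TAG>
+```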
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/other-troubleshooting-tips.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md new file mode 100644 index 0000000000..357108d130 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/pipelines.md @@ -0,0 +1,284 @@ +--- +title: Pipelines +weight: 11 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher's pipeline provides a simple CI/CD experience. Use it to automatically check out code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. + +Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to set up a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +>**Notes:** +> +>- Pipelines improved in Rancher v2.1. Therefore, if you configured pipelines while using v2.0.x, you'll have to reconfigure them after upgrading to v2.1. +>- Still using v2.0.x? See the pipeline documentation for [previous versions](k8s-in-rancher/pipelines/docs-for-v2.0.x). +>- Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of, and is not a replacement for, enterprise-grade Jenkins or other CI tools your team uses. + +This section covers the following topics: + +- [Concepts](#concepts) +- [How Pipelines Work](#how-pipelines-work) +- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) +- [Setting up Pipelines](#setting-up-pipelines) + - [Configure version control providers](#1-configure-version-control-providers) + - [Configure repositories](#2-configure-repositories) + - [Configure the pipeline](#3-configure-the-pipeline) +- [Pipeline Configuration Reference](#pipeline-configuration-reference) +- [Running your Pipelines](#running-your-pipelines) +- [Triggering a Pipeline](#triggering-a-pipeline) + - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) + +# Concepts + +For an explanation of concepts and terminology used in this section, refer to [this page.](k8s-in-rancher/pipelines/concepts) + +# How Pipelines Work + +After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. + +A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. + +Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket.
If you haven't configured a version control provider, you can always use [Rancher's example repositories](k8s-in-rancher/pipelines/example-repos/) to view some common pipeline deployments. + +When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: + + - **Jenkins:** + + The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. + + >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. + + - **Docker Registry:** + + Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. + + - **Minio:** + + Minio storage is used to store the logs for pipeline executions. + + >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components](k8s-in-rancher/pipelines/storage). + +# Roles-based Access Control for Pipelines + +If you can access a project, you can enable repositories to start building pipelines. + +Only [administrators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure version control providers and manage global pipeline execution settings. + +Project members can only configure repositories and pipelines. + +# Setting up Pipelines + +To set up pipelines, you will need to do the following: + +1. [Configure version control providers](#1-configure-version-control-providers) +2. [Configure repositories](#2-configure-repositories) +3. [Configure the pipeline](#3-configure-the-pipeline) + +### 1. Configure Version Control Providers + +Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider. + +| Provider | Available as of | +| --- | --- | +| GitHub | v2.0.0 | +| GitLab | v2.1.0 | +| Bitbucket | v2.2.0 | + +Select your provider's tab below and follow the directions. + + + + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. + +1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. + +1. From GitHub, copy the **Client ID** and **Client Secret**. 
Paste them into Rancher. + +1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. + +1. Click **Authenticate**. + + + + + +_Available as of v2.1.0_ + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. + +1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. + +1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. + +1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. + +1. Click **Authenticate**. + +>**Note:** +> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. +> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. + + + + +_Available as of v2.2.0_ + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. Choose the **Use public Bitbucket Cloud** option. + +1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. + +1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. + +1. Click **Authenticate**. + + + + + +_Available as of v2.2.0_ + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. Choose the **Use private Bitbucket Server setup** option. + +1. Follow the directions displayed to **Setup a Bitbucket Server application**. + +1. Enter the host address of your Bitbucket server installation. + +1. Click **Authenticate**. + +>**Note:** +> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: +> +> 1. Setup Rancher server with a certificate from a trusted CA. +> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). +> + + + + +**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. + +### 2. Configure Repositories + +After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Click on **Configure Repositories**. + +1. 
A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. + +1. For each repository that you want to set up a pipeline, click on **Enable**. + +1. When you're done enabling all your repositories, click on **Done**. + +**Results:** You have a list of repositories that you can start configuring pipelines for. + +### 3. Configure the Pipeline + +Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Find the repository that you want to set up a pipeline for. + +1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.](k8s-in-rancher/pipelines/config) + + * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. + * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. + +1. Select which `branch` to use from the list of branches. + +1. _Available as of v2.2.0_ Optional: Set up notifications. + +1. Set up the trigger rules for the pipeline. + +1. Enter a **Timeout** for the pipeline. + +1. When all the stages and steps are configured, click **Done**. + +**Results:** Your pipeline is now configured and ready to be run. + + +# Pipeline Configuration Reference + +Refer to [this page](k8s-in-rancher/pipelines/config) for details on how to configure a pipeline to: + +- Run a script +- Build and publish images +- Publish catalog templates +- Deploy YAML +- Deploy a catalog app + +The configuration reference also covers how to configure: + +- Notifications +- Timeouts +- The rules that trigger a pipeline +- Environment variables +- Secrets + + +# Running your Pipelines + +Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** (In versions before v2.3.0, go to the **Pipelines** tab.) Find your pipeline and select the vertical **⋮ > Run**. + +During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: + +- `docker-registry` +- `jenkins` +- `minio` + +This process takes several minutes. 
When it completes, you can view each pipeline component from the project **Workloads** tab. + +# Triggering a Pipeline + +When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. + +Available Events: + +* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. +* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. +* **Tag**: When a tag is created in the repository, the pipeline is triggered. + +> **Note:** This option doesn't exist for Rancher's [example repositories](k8s-in-rancher/pipelines/example-repos/). + +### Modifying the Event Triggers for the Repository + +1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. + +1. 1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Find the repository that you want to modify the event triggers. Select the vertical **⋮ > Setting**. + +1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. + +1. Click **Save**. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md new file mode 100644 index 0000000000..10d9011b84 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/project-tools.md @@ -0,0 +1,46 @@ +--- +title: Tools for Logging, Monitoring, and More +weight: 2525 +--- + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + + +- [Notifiers](#notifiers) +- [Alerts](#alerts) +- [Logging](#logging) +- [Monitoring](#monitoring) + + + +# Notifiers + +[Notifiers](../explanations/integrations-in-rancher/notifiers.md) are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. + +# Alerts + +[Alerts](cluster-admin/tools/alerts) are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. + +For details on project-level alerts, see [this page.](../reference-guides/rancher-project-tools/project-alerts.md) + +# Logging + +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debug and troubleshoot problems + +Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. 
+ +For details on setting up logging at the cluster level, refer to the [logging section.](cluster-admin/tools/logging) + +For details on project-level logging, see [this section.](../reference-guides/rancher-project-tools/project-logging.md) + +# Monitoring + +_Available as of v2.2.0_ + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.](cluster-monitoring.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md new file mode 100644 index 0000000000..1aa159e456 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/provisioning-storage-examples.md @@ -0,0 +1,15 @@ +--- +title: Provisioning Storage Examples +weight: 3053 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/adding-storage/provisioning-storage/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/volumes-and-storage/examples/ +--- + +Rancher supports persistent storage with a variety of volume plugins. However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. + +For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: + +- [NFS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) +- [vSphere](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) +- [EBS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md new file mode 100644 index 0000000000..ab4d7ab47a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/quick-start-guides.md @@ -0,0 +1,17 @@ +--- +title: Rancher Deployment Quick Start Guides +metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +weight: 2 +--- +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). + +Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. + +We have Quick Start Guides for: + +- [Deploying Rancher Server](deploy-rancher-manager.md): Get started running Rancher using the method most convenient for you. 
+ +- [Deploying Workloads](deploy-rancher-workloads.md): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. + +- [Using the CLI](../getting-started/quick-start-guides/cli.md): Use `kubectl` or Rancher command line interface (CLI) to interact with your Rancher instance. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md new file mode 100644 index 0000000000..a2dc3b1eda --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -0,0 +1,14 @@ +--- +title: Installing Rancher behind an HTTP Proxy +weight: 4 +--- + +In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. + +Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). + +# Installation Outline + +1. [Set up infrastructure](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md) +2. [Set up a Kubernetes cluster](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md) +3. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md new file mode 100644 index 0000000000..46216f2e10 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-manager-architecture.md @@ -0,0 +1,181 @@ +--- +title: Architecture +weight: 1 +--- + +This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. + +For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) + +For a list of main features of the Rancher API server, refer to the [overview section.](../getting-started/introduction/overview.md#features-of-the-rancher-api-server) + +For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. 
+ +This section covers the following topics: + +- [Rancher server architecture](#rancher-server-architecture) +- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) + - [The authentication proxy](#1-the-authentication-proxy) + - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) + - [Node agents](#3-node-agents) + - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) +- [Important files](#important-files) +- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) +- [Rancher server components and source code](#rancher-server-components-and-source-code) + +# Rancher Server Architecture + +The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. + +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). + +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +The diagram below shows how users can manipulate both [Rancher-launched Kubernetes](launch-kubernetes-with-rancher.md) clusters and [hosted Kubernetes](set-up-clusters-from-hosted-kubernetes-providers.md) clusters through Rancher's authentication proxy: + +
    Managing Kubernetes Clusters through Rancher's Authentication Proxy
    + +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +You can install Rancher on a single node, or on a high-availability Kubernetes cluster. + +A high-availability Kubernetes installation is recommended for production. + +A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: + +For Rancher v2.0-v2.4, there was no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. + +The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. + +# Communicating with Downstream User Clusters + +This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. + +The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. + +
    Communicating with Downstream Clusters
    + +![Rancher Components](/img/rancher-architecture-cluster-controller.svg) + +The following descriptions correspond to the numbers in the diagram above: + +1. [The Authentication Proxy](#1-the-authentication-proxy) +2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) +3. [Node Agents](#3-node-agents) +4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) + +### 1. The Authentication Proxy + +In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see +the pods. Bob is authenticated through Rancher's authentication proxy. + +The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. + +Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. + +By default, Rancher generates a [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_rancher-cluster.yml`) contains full access to the cluster. + +### 2. Cluster Controllers and Cluster Agents + +Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. + +There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: + +- Watches for resource changes in the downstream cluster +- Brings the current state of the downstream cluster to the desired state +- Configures access control policies to clusters and projects +- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE + +By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. + +The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: + +- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters +- Manages workloads, pod creation and deployment within each cluster +- Applies the roles and bindings defined in each cluster's global policies +- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health + +### 3. Node Agents + +If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. + +The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. 
It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. + +### 4. Authorized Cluster Endpoint + +An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. + +> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for imported clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. + +There are two main reasons why a user might need the authorized cluster endpoint: + +- To access a downstream user cluster while Rancher is down +- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. + +> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. + +With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. + +You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) + +# Important Files + +The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_rancher-cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. +- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
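As a quick illustration, the exported kubeconfig file can be used like this to reach the cluster directly through the authorized cluster endpoint if the Rancher server is unavailable (a sketch only; the context name shown is a placeholder, so list your contexts to find the ones Rancher generated for your cluster):

```bash
# Point kubectl at the kubeconfig file that Rancher generated for the cluster.
export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml

# List the available contexts. With the authorized cluster endpoint enabled,
# one context proxies through the Rancher server and the others connect to
# the downstream cluster's API server directly.
kubectl config get-contexts

# Switch to a direct context (placeholder name) and verify access.
kubectl config use-context my-cluster-node1
kubectl get nodes
```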
+ +For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) documentation. + +# Tools for Provisioning Kubernetes Clusters + +The tools that Rancher uses to provision downstream user clusters depends on the type of cluster that is being provisioned. + +### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider + +Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) + +### Rancher Launched Kubernetes for Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. + +Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) + +### Hosted Kubernetes Providers + +When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) + +### Imported Kubernetes Clusters + +In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. + +# Rancher Server Components and Source Code + +This diagram shows each component that the Rancher server is composed of: + +![Rancher Components](/img/rancher-architecture-rancher-components.svg) + +The GitHub repositories for Rancher can be found at the following links: + +- [Main Rancher server repository](https://github.com/rancher/rancher) +- [Rancher UI](https://github.com/rancher/ui) +- [Rancher API UI](https://github.com/rancher/api-ui) +- [Norman,](https://github.com/rancher/norman) Rancher's API framework +- [Types](https://github.com/rancher/types) +- [Rancher CLI](https://github.com/rancher/cli) +- [Catalog applications](https://github.com/rancher/helm) + +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md new file mode 100644 index 0000000000..365317df2c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -0,0 +1,163 @@ +--- +title: Installing Rancher on a Single Node Using Docker +description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/installation/single-node-install/ + - /rancher/v2.0-v2.4/en/installation/single-node + - /rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node +--- + +Rancher can be installed by running a single Docker container. + +In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. + +> **Want to use an external load balancer?** +> See [Docker Install with an External Load Balancer](installation/options/single-node-install-external-lb) instead. + +A Docker installation of Rancher is recommended only for development and testing purposes. + +For Rancher v2.0-v2.4, there is no migration path from a Docker installation to a high-availability installation. Therefore, you may want to use a Kubernetes installation from the start. + +# Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.](installation-requirements.md) + +# 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements](installation-requirements.md) to launch your Rancher server. + +# 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> **Do you want to...** +> +> - Use a proxy? See [HTTP Proxy Configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate/) +> - Complete an Air Gap Installation? See [Air Gap: Docker Install](installation/air-gap-single-node/) +> - Record all transactions with the Rancher API? See [API Auditing](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/advanced/#api-audit-log) + +Choose from the following options: + +- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate) +- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed) +- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca) +- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate) + +### Option A: Default Rancher-generated Self-signed Certificate + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. 
This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the minimum installation command below. + + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest +``` + +### Option B: Bring Your Own Certificate, Self-signed +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/troubleshooting) + +After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| ------------------- | --------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest +``` + +### Option C: Bring Your Own Certificate, Signed by a Recognized CA + +In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisites:** +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/troubleshooting) + +After obtaining your certificate, run the Docker command below. + +- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. +- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +| Placeholder | Description | +| ------------------- | ----------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | + + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + rancher/rancher:latest \ + --no-cacerts +``` + +### Option D: Let's Encrypt Certificate + +> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. 
Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. + +> **Prerequisites:** +> +> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. +> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. + +| Placeholder | Description | +| ----------------- | ------------------- | +| `` | Your domain address | + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest \ + --acme-domain +``` + +## Advanced Options + +When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: + +- Custom CA Certificate +- API Audit Log +- TLS Settings +- Air Gap +- Persistent Data +- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +Refer to [this page](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for details. + +## Troubleshooting + +Refer to [this page](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) for frequently asked questions and troubleshooting tips. + +## What's Next? + +- **Recommended:** Review [Single Node Backup and Restore](installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md new file mode 100644 index 0000000000..175898e6e6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-security.md @@ -0,0 +1,98 @@ +--- +title: Security +weight: 20 +--- + + + + + + + +
**Security policy**

Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame.

**Reporting process**

Please submit possible security issues by emailing security@rancher.com.

**Announcements**

Subscribe to the Rancher announcements forum for release updates.
    + +Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,](manage-role-based-access-control-rbac.md) Rancher makes your Kubernetes clusters even more secure. + +On this page, we provide security-related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: + +- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) +- [Guide to hardening Rancher installations](#rancher-hardening-guide) +- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) +- [Third-party penetration test reports](#third-party-penetration-test-reports) +- [Rancher CVEs and resolutions](#rancher-cves-and-resolutions) + +### Running a CIS Security Scan on a Kubernetes Cluster + +_Available as of v2.4.0_ + +Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS (Center for Internet Security) Kubernetes Benchmark. + +The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. + +The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace." + +CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. + +The Benchmark provides recommendations of two types: Scored and Not Scored. We run tests related to only Scored recommendations. + +When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. + +For details, refer to the section on [security scans.](cis-scans) + +### Rancher Hardening Guide + +The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. + +The hardening guide provides prescriptive guidance for hardening a production installation of Rancher v2.1.x, v2.2.x and v.2.3.x. See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-sssessment) for the full list of security controls. + +> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. 
+ +Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +[Hardening Guide v2.4](security/hardening-2.4/) | Rancher v2.4 | Benchmark v1.5 | Kubernetes v1.15 +[Hardening Guide v2.3.5](security/hardening-2.3.5/) | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes v1.15 +[Hardening Guide v2.3.3](security/hardening-2.3.3/) | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes v1.14, v1.15, and v1.16 +[Hardening Guide v2.3](security/hardening-2.3/) | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes v1.15 +[Hardening Guide v2.2](security/hardening-2.2/) | Rancher v2.2.x | Benchmark v1.4.1 and 1.4.0 | Kubernetes v1.13 +[Hardening Guide v2.1](security/hardening-2.1/) | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes v1.11 + +### The CIS Benchmark and Self-Assessment + +The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). + +Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +[Self Assessment Guide v2.4](security/benchmark-2.4/#cis-kubernetes-benchmark-1-5-0-rancher-2-4-with-kubernetes-1-15) | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 +[Self Assessment Guide v2.3.5](security/benchmark-2.3.5/#cis-kubernetes-benchmark-1-5-0-rancher-2-3-5-with-kubernetes-1-15) | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 +[Self Assessment Guide v2.3.3](security/benchmark-2.3.3/#cis-kubernetes-benchmark-1-4-1-rancher-2-3-3-with-kubernetes-1-16) | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 +[Self Assessment Guide v2.3](../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md) | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes v1.15 | Benchmark v1.4.1 +[Self Assessment Guide v2.2](security/benchmark-2.2/) | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes v1.13 | Benchmark v1.4.0 and v1.4.1 +[Self Assessment Guide v2.1](security/benchmark-2.1/) | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes v1.11 | Benchmark 1.3.0 + +### Third-party Penetration Test Reports + +Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. 
Results are posted when the third party has also verified fixes classified MEDIUM or above. + +Results: + +- [Cure53 Pen Test - 7/2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) +- [Untamed Theory Pen Test- 3/2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) + +### Rancher CVEs and Resolutions + +Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](../reference-guides/rancher-security/security-advisories-and-cves.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-server-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md new file mode 100644 index 0000000000..232d2a371a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.1-hardening-guides.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.1 +weight: 5 +aliases: + - /rancher/v2.x/en/security/rancher-2.1/ +--- + +### Self Assessment Guide + +This [guide](../reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.1 | Rancher v2.1.x | Hardening Guide v2.1 | Kubernetes 1.11 | Benchmark 1.3.0 + +### Hardening Guide + +This hardening [guide](../reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md new file mode 100644 index 0000000000..1b955de4eb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.2-hardening-guides.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.2 +weight: 4 +aliases: + - /rancher/v2.x/en/security/rancher-2.2/ +--- + +### Self Assessment Guide + +This [guide](../reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version 
+---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.2 | Rancher v2.2.x | Hardening Guide v2.2 | Kubernetes 1.13 | Benchmark v1.4.0 and v1.4.1 + +### Hardening Guide + +This hardening [guide](../reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md new file mode 100644 index 0000000000..4d97e16bf6 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.3-hardening-guides.md @@ -0,0 +1,12 @@ +--- +title: Rancher v2.3.x +weight: 3 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/ +--- + +The relevant Hardening Guide and Self Assessment guide depends on your Rancher version: + +- [Rancher v2.3.5](security/rancher-2.3.x/rancher-v2.3.5) +- [Rancher v2.3.3](security/rancher-2.3.x/rancher-v2.3.3) +- [Rancher v2.3.0](security/rancher-2.3.x/rancher-v2.3.0) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md new file mode 100644 index 0000000000..d15db3271d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rancher-v2.4-hardening-guides.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.4 +weight: 2 +aliases: + - /rancher/v2.x/en/security/rancher-2.4/ +--- + +### Self Assessment Guide + +This [guide](../reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 + +### Hardening Guide + +This hardening [guide](../reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md new file mode 100644 index 0000000000..24432dfc54 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/resources.md @@ -0,0 +1,30 @@ +--- +title: Resources +weight: 5 +aliases: +- /rancher/v2.0-v2.4/en/installation/options +--- + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out 
Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Advanced Options + +When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: + +| Advanced Option | Available as of | +| ----------------------------------------------------------------------------------------------------------------------- | --------------- | +| [Custom CA Certificate](installation/options/custom-ca-root-certificate/) | v2.0.0 | +| [API Audit Log](installation/options/api-audit-log/) | v2.0.0 | +| [TLS Settings](installation/options/tls-settings/) | v2.1.7 | +| [etcd configuration](installation/options/etcd/) | v2.2.0 | +| [Local System Charts for Air Gap Installations](installation/options/local-system-charts) | v2.3.0 | diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/rke-add-on.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md new file mode 100644 index 0000000000..15fa601180 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-cloud-providers.md @@ -0,0 +1,46 @@ +--- +title: Setting up Cloud Providers +weight: 2300 +aliases: + - /rancher/v2.0-v2.4/en/concepts/clusters/cloud-providers/ + - /rancher/v2.0-v2.4/en/cluster-provisioning/rke-clusters/options/cloud-providers +--- +A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. For more information, refer to the [official Kubernetes documentation on cloud providers.](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) + +When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. + +Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. + +By default, the **Cloud Provider** option is set to `None`. 
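+
+As an illustration of the kind of prerequisite worth checking before enabling a cloud provider, the Amazon cloud provider (described below) generally expects each node's hostname to match the private DNS name that EC2 reports for the instance. The following is only a sketch of such a check, using the standard EC2 metadata endpoint; treat it as an assumption to verify against the Amazon page linked below rather than an exact requirement list:
+
+```
+# Private DNS name as reported by the EC2 instance metadata service
+curl -s http://169.254.169.254/latest/meta-data/local-hostname
+
+# Hostname the node will register with; the two values should match
+hostname -f
+```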
+ +The following cloud providers can be enabled: + +* Amazon +* Azure +* GCE (Google Compute Engine) +* vSphere + +### Setting up the Amazon Cloud Provider + +For details on enabling the Amazon cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) + +### Setting up the Azure Cloud Provider + +For details on enabling the Azure cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md) + +### Setting up the GCE Cloud Provider + +For details on enabling the Google Compute Engine cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) + +### Setting up the vSphere Cloud Provider + +For details on enabling the vSphere cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) + +### Setting up a Custom Cloud Provider + +The `Custom` cloud provider is available if you want to configure any [Kubernetes cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). + +For the custom cloud provider option, you can refer to the [RKE docs](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. There are specific cloud providers that have more detailed configuration : + +* [vSphere](https://rancher.com/docs/rke/latest/en/config-options/cloud-providercluster-provisioning/rke-clusters/cloud-providers/vsphere/) +* [OpenStack](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md new file mode 100644 index 0000000000..033fc28dd0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -0,0 +1,32 @@ +--- +title: Setting up Clusters from Hosted Kubernetes Providers +weight: 3 +--- + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider. 
+ +Rancher supports the following Kubernetes providers: + +Kubernetes Providers | Available as of | + --- | --- | +[Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) | v2.0.0 | +[Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) | v2.0.0 | +[Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) | v2.0.0 | +[Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) | v2.2.0 | +[Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) | v2.2.0 | +[Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) | v2.2.0 | + +## Hosted Kubernetes Provider Authentication + +When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: + +- [Creating a GKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +- [Creating an EKS Cluster](../reference-guides/installation-references/amazon-eks-permissions.md) +- [Creating an AKS Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) +- [Creating an ACK Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +- [Creating a TKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) +- [Creating a CCE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/single-node-rancher-in-docker.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md new file mode 100644 index 0000000000..4afaf46cfb --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/upgrades.md @@ -0,0 +1,288 @@ +--- +title: Upgrades +weight: 2 +aliases: + - /rancher/v2.0-v2.4/en/upgrades/upgrades + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades + - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade-helm-airgap + - /rancher/v2.0-v2.4/en/upgrades/air-gap-upgrade/ + - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/upgrades/ha + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ + - /rancher/v2.0-v2.4/en/upgrades/upgrades/ha-server-upgrade-helm/ + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha + - 
/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades + - /rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha + - /rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/ + - /rancher/v2.0-v2.4/en/upgrades/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. + +For the instructions to upgrade Rancher installed with Docker, refer to [this page.](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md) + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +If you installed Rancher using the RKE Add-on yaml, follow the directions to [migrate or upgrade](upgrades/upgrades/migrating-from-rke-add-on). + +- [Prerequisites](#prerequisites) +- [Upgrade Outline](#upgrade-outline) +- [Known Upgrade Issues](#known-upgrade-issues) +- [RKE Add-on Installs](#rke-add-on-installs) + +# Prerequisites + +### Access to kubeconfig + +Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. + +If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. + +The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) + +### Review Known Issues + +Review the [known upgrade issues](#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. + +A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren't supported. + +### Helm Version + +The upgrade instructions assume you are using Helm 3. + +For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here](installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +### For air gap installs: Populate private registry + +-For [air gap installs only,](air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version. 
Follow the guide to [populate your private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +### For upgrades from v2.0-v2.2 with external TLS termination + +If you are upgrading Rancher from v2.x to v2.3+, and you are using external TLS termination, you will need to edit the cluster.yml to [enable using forwarded host headers.](../reference-guides/installation-references/helm-chart-options.md#configuring-ingress-for-external-tls-when-using-nginx-v0-25) + +### For upgrades with cert-manager older than 0.8.0 + +[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](installation/options/upgrading-cert-manager) + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) +- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) +- [3. Upgrade Rancher](#3-upgrade-rancher) +- [4. Verify the Upgrade](#4-verify-the-upgrade) + +# 1. Back up Your Kubernetes Cluster that is Running Rancher Server + + +[Take a one-time snapshot](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#option-b-one-time-snapshots) +of your Kubernetes cluster running Rancher server. + +You'll use the backup as a restoration point if something goes wrong during upgrade. + +# 2. Update the Helm chart repository + +1. Update your local helm repo cache. + + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: + + ```plain + helm fetch rancher-/rancher --version=v2.4.11 + ``` + +# 3. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. + + + + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. 
+ +``` +helm get values rancher -n cattle-system + +hostname: rancher.my.org +``` + +> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. + +If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow [Option B: Reinstalling Rancher and cert-manager.](#option-b-reinstalling-rancher-and-cert-manager) + +Otherwise, follow [Option A: Upgrading Rancher.](#option-a-upgrading-rancher) + +### Option A: Upgrading Rancher + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`: + +``` +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +> **Note:** The above is an example, there may be more values from the previous step that need to be appended. + +Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: + +``` +helm get values rancher -n cattle-system -o yaml > values.yaml + +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + -f values.yaml \ + --version=2.4.5 +``` + +### Option B: Reinstalling Rancher and cert-manager + +If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11. + +1. Uninstall Rancher + + ``` + helm delete rancher -n cattle-system + ``` + +2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager](installation/options/upgrading-cert-manager) page. + +3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. + + ``` + helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org + ``` + + + + + +Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +Based on the choice you made during installation, complete one of the procedures below. + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + + +### Option A: Default Self-signed Certificate + + ```plain +helm template ./rancher-.tgz --output-dir . \ + --name rancher \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +### Option B: Certificates from Files using Kubernetes Secrets + +```plain +helm template ./rancher-.tgz --output-dir . 
\ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain +helm template ./rancher-.tgz --output-dir . \ +--name rancher \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set privateCA=true \ +--set systemDefaultRegistry= \ # Available as of v2.2.0, set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Available as of v2.3.0, use the packaged Rancher system charts +``` + +### Apply the Rendered Templates + +Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. + +Use `kubectl` to apply the rendered manifests. + +```plain +kubectl -n cattle-system apply -R -f ./rancher +``` + + + + +# 4. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md#restoring-cluster-networking). + +# Known Upgrade Issues + +The following table lists some of the most noteworthy issues to be considered when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +Upgrade Scenario | Issue +---|--- +Upgrading to v2.4.6 or v2.4.7 | These Rancher versions had an issue where the `kms:ListKeys` permission was required to create, edit, or clone Amazon EC2 node templates. This requirement was removed in v2.4.8. +Upgrading to v2.3.0+ | Any user provisioned cluster will be automatically updated upon any edit as tolerations were added to the images used for Kubernetes provisioning. +Upgrading to v2.2.0-v2.2.x | Rancher introduced the [system charts](https://github.com/rancher/system-charts) repository which contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. To be able to use these features in an air gap install, you will need to mirror the `system-charts` repository locally and configure Rancher to use that repository. Please follow the instructions to [configure Rancher system charts](../getting-started/installation-and-upgrade/resources/local-system-charts.md). +Upgrading from v2.0.13 or earlier | If your cluster's certificates have expired, you will need to perform [additional steps](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md#rotating-expired-certificates-after-upgrading-older-rancher-versions) to rotate the certificates. +Upgrading from v2.0.7 or earlier | Rancher introduced the `system` project, which is a project that's automatically created to store important namespaces that Kubernetes needs to operate. During upgrade to v2.0.7+, Rancher expects these namespaces to be unassigned from all projects. 
Before beginning upgrade, check your system namespaces to make sure that they're unassigned to [prevent cluster networking issues.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md) + +# RKE Add-on Installs + +**Important: RKE add-on install is only supported up to Rancher v2.0.8** + +Please use the Rancher helm chart to install Rancher on a Kubernetes cluster. For details, see the [Kubernetes Install](install-upgrade-on-a-kubernetes-cluster.md). + +If you are currently using the RKE add-on install method, see [Migrating from a RKE add-on install](upgrades/upgrades/migrating-from-rke-add-on/) for details on how to move to using the helm chart. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md new file mode 100644 index 0000000000..6df90aa605 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-existing-nodes.md @@ -0,0 +1,126 @@ +--- +title: Launching Kubernetes on Existing Custom Nodes +description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements +metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" +weight: 2225 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ + - /rancher/v2.0-v2.4/en/cluster-provisioning/custom-clusters/ +--- + +When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster on on-prem bare-metal servers, on-prem virtual machines, or on any node hosted by an infrastructure provider. + +To use this option, you'll need access to the servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which include hardware specifications and Docker. After you install Docker on each server, you will also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. + +This section describes how to set up a custom cluster. + +# Creating a Cluster with Custom Nodes + +>**Want to use Windows hosts as Kubernetes workers?** +> +>See [Configuring Custom Clusters for Windows](use-windows-clusters.md) before you start. + + + +- [1. Provision a Linux Host](#1-provision-a-linux-host) +- [2. Create the Custom Cluster](#2-create-the-custom-cluster) +- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) + + + +### 1. Provision a Linux Host + +Begin creation of a custom cluster by provisioning a Linux host. Your host can be: + +- A cloud-hosted virtual machine (VM) +- An on-prem VM +- A bare-metal server + +If you want to reuse a node from a previous custom cluster, [clean the node](admin-settings/removing-rancher/rancher-cluster-nodes/) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +Provision the host according to the [installation requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) and the [checklist for production-ready clusters.](checklist-for-production-ready-clusters.md) + +### 2. Create the Custom Cluster + +1. 
From the **Clusters** page, click **Add Cluster**. + +2. Choose **Custom**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Use **Cluster Options** to choose the version of Kubernetes, which network provider will be used, and whether you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** + + >**Using Windows nodes as Kubernetes workers?** + > + >- See [Enable the Windows Support Option](use-windows-clusters.md). + >- The only Network Provider available for clusters with Windows support is Flannel. +6. Click **Next**. + +7. From **Node Role**, choose the roles that you want filled by a cluster node. + + >**Notes:** + > + >- Using Windows nodes as Kubernetes workers? See [this section](use-windows-clusters.md). + >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). + +8. **Optional**: Click **[Show advanced options](admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. + +9. Copy the command displayed on screen to your clipboard. + +10. Log in to your Linux host using your preferred shell, such as PuTTY or a remote Terminal connection. Run the command copied to your clipboard. + + >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. + +11. When you finish running the command(s) on your Linux host(s), click **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +### 3. Amazon Only: Tag Resources + +If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. + +[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) + +>**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) + + +The following resources need to be tagged with a `ClusterID`: + +- **Nodes**: All hosts added in Rancher. +- **Subnet**: The subnet used for your cluster. +- **Security Group**: The security group used for your cluster. + + >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer. 
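+
+The tagging itself can be done in the EC2 console or from the command line. As a rough sketch using the AWS CLI (the resource IDs and the `c-abc123` cluster ID below are placeholders, not values taken from this guide), the same tag can be applied to the instance, the subnet, and the single security group in one call:
+
+```
+aws ec2 create-tags \
+  --resources i-0123456789abcdef0 subnet-0123456789abcdef0 sg-0123456789abcdef0 \
+  --tags Key=kubernetes.io/cluster/c-abc123,Value=owned
+```
+
+The exact key format and the meaning of the `owned` value are described below.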
+ +The tag that should be used is: + +``` +Key=kubernetes.io/cluster/, Value=owned +``` + +`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster. + +If you share resources between clusters, you can change the tag to: + +``` +Key=kubernetes.io/cluster/CLUSTERID, Value=shared +``` + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md new file mode 100644 index 0000000000..14550b9601 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -0,0 +1,145 @@ +--- +title: Launching Kubernetes on New Nodes in an Infrastructure Provider +weight: 2205 +aliases: + - /rancher/v2.0-v2.4/en/concepts/global-configuration/node-templates/ +--- + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +The available cloud providers to create a node template are decided based on active [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers). 
+ +This section covers the following topics: + +- [Node templates](#node-templates) + - [Node labels](#node-labels) + - [Node taints](#node-taints) + - [Administrator control of node templates](#administrator-control-of-node-templates) +- [Node pools](#node-pools) + - [Node pool taints](#node-pool-taints) + - [About node auto-replace](#about-node-auto-replace) + - [Enabling node auto-replace](#enabling-node-auto-replace) + - [Disabling node auto-replace](#disabling-node-auto-replace) +- [Cloud credentials](#cloud-credentials) +- [Node drivers](#node-drivers) + +# Node Templates + +A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. + +After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove it from your user profile. + +### Node Labels + +You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. + +### Node Taints + +_Available as of Rancher v2.3.0_ + +You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. + +Since taints can be added at a node template and node pool, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. + +### Administrator Control of Node Templates + +_Available as of v2.3.3_ + +Administrators can control all node templates. Admins can now maintain all the node templates within Rancher. When a node template owner is no longer using Rancher, the node templates created by them can be managed by administrators so the cluster can continue to be updated and maintained. + +To access all node templates, an administrator will need to do the following: + +1. In the Rancher UI, click the user profile icon in the upper right corner. +1. Click **Node Templates.** + +**Result:** All node templates are listed and grouped by owner. The templates can be edited or cloned by clicking the **⋮.** + +# Node Pools + +Using Rancher, you can create pools of nodes based on a [node template](#node-templates). + +A node template defines the configuration of a node, such as which operating system to use, the number of CPUs, and the amount of memory. + +The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the count of the node pool is as expected. + +Each node pool must have one or more node roles assigned. + +Each node role (i.e. etcd, control plane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters. 
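+
+Once a cluster built from these node pools is up, a quick way to confirm how the roles were actually distributed (a sketch that assumes you have kubectl access to the downstream cluster, for example via the kubeconfig available in the Rancher UI) is to list the nodes with their role information:
+
+```
+# The ROLES column reflects the role(s) assigned through each node pool
+kubectl get nodes -o wide
+
+# RKE also exposes the roles as labels such as node-role.kubernetes.io/etcd
+kubectl get nodes --show-labels | grep node-role
+```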
+ +The recommended setup is to have: + +- a node pool with the etcd node role and a count of three +- a node pool with the control plane node role and a count of at least two +- a node pool with the worker node role and a count of at least two + +### Node Pool Taints + +_Available as of Rancher v2.3.0_ + +If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than at the node template level is that you can swap out node templates without worrying about whether the taint is on the node template. + +Each taint will automatically be added to any node created in the node pool. Therefore, if you add taints to a node pool that has existing nodes, the taints won't apply to existing nodes in the node pool, but any new node added into the node pool will get the taint. + +When there are taints on the node pool and node template, if there is no conflict with the same key and effect of the taints, all taints will be added to the nodes. If there are taints with the same key and different effect, the taints from the node pool will override the taints from the node template. + +### About Node Auto-replace + +_Available as of Rancher v2.3.0_ + +If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. + +> **Important:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated as ephemeral. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. + +### Enabling Node Auto-replace + +When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. + +1. In the form for creating a cluster, go to the **Node Pools** section. +1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Fill out the rest of the form for creating a cluster. + +**Result:** Node auto-replace is enabled for the node pool. + +You can also enable node auto-replace after the cluster is created with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** +1. 
In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Click **Save.** + +**Result:** Node auto-replace is enabled for the node pool. + +### Disabling Node Auto-replace + +You can disable node auto-replace from the Rancher UI with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** +1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter 0. +1. Click **Save.** + +**Result:** Node auto-replace is disabled for the node pool. + +# Cloud Credentials + +_Available as of v2.2.0_ + +Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: + +- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. + +- After the cloud credential is created, it can be re-used to create additional node templates. + +- Multiple node templates can share the same cloud credential to create node pools. If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. + +> **Note:** As of v2.2.0, the default `active` [node drivers](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver, that has fields marked as `password`, are required to use cloud credentials. If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. + +After cloud credentials are created, the user can start [managing the cloud credentials that they created](../reference-guides/user-settings/manage-cloud-credentials.md). + +# Node Drivers + +If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#activating-deactivating-node-drivers), or you can [add your own custom node driver](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#adding-custom-node-drivers). diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md new file mode 100644 index 0000000000..4f28114367 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/use-windows-clusters.md @@ -0,0 +1,242 @@ +--- +title: Launching Kubernetes on Windows Clusters +weight: 2240 +--- + +_Available as of v2.3.0_ + +When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. 
+ +In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. + +Some other requirements for Windows clusters include: + +- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. +- Kubernetes 1.15+ is required. +- The Flannel network provider must be used. +- Windows nodes must have 50 GB of disk space. + +For the full list of requirements, see [this section.](#requirements-for-windows-clusters) + +For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). + +This guide covers the following topics: + + + +- [Requirements](#requirements-for-windows-clusters) +- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) +- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) + + +# Requirements for Windows Clusters + +The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation](installation-requirements.md). + +### OS and Docker Requirements + +In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): + +- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. +- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. + +> **Notes:** +> +> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). +> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. + +### Kubernetes Version + +Kubernetes v1.15+ is required. + +### Node Requirements + +The hosts in the cluster need to have at least: + +- 2 core CPUs +- 5 GB memory +- 50 GB disk space + +Rancher will not provision the node if the node does not meet these requirements. + +### Networking Requirements + +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation](installation-and-upgrade.md) before proceeding with this guide. + +Rancher only supports Windows using Flannel as the network provider. + +There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). 
The default option is **VXLAN (Overlay)** mode. + +For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. + +If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html) + +> Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name. + +### Architecture Requirements + +The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes. + +The `worker` nodes, which is where your workloads will be deployed on, will typically be Windows nodes, but there must be at least one `worker` node that is run on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers. + +We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy: + + + +| Node | Operating System | Kubernetes Cluster Role(s) | Purpose | +| ------ | --------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | Control plane, etcd, worker | Manage the Kubernetes cluster | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | Worker | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster | +| Node 3 | Windows (Windows Server core version 1809 or above) | Worker | Run your Windows containers | + +### Container Requirements + +Windows requires that containers must be built on the same Windows Server version that they are being deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above. 
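+
+If you are unsure which release a Windows host is running, one quick check from the Command Prompt is sketched below; the build-to-release mapping (17763 for version 1809, 18362 for version 1903) is standard Windows versioning, and the `docker info` format fields are assumed to be available on the Docker EE engine installed on the node:
+
+```
+REM Print the OS build; 10.0.17763.* corresponds to Windows Server core version 1809
+ver
+
+REM Confirm the local Docker engine is running Windows containers
+docker info --format "{{.OperatingSystem}} ({{.OSType}})"
+```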
+ +### Cloud Provider Specific Requirements + +If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page](cluster-provisioning/rke-clusters/options/cloud-providers/) for details on how to configure a cloud provider for a cluster of nodes that meet the prerequisites. + +If you are using the GCE (Google Compute Engine) cloud provider, you must do the following: + +- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) +- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI. + +# Tutorial: How to Create a Cluster with Windows Support + +This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#guide-architecture) + +When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster. + +To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below. + + + +1. [Provision Hosts](#1-provision-hosts) +1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes) +1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster) +1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files) + + +# 1. Provision Hosts + +To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts. + +Your hosts can be: + +- Cloud-hosted VMs +- VMs from virtualization clusters +- Bare-metal servers + +You will provision three nodes: + +- One Linux node, which manages the Kubernetes control plane and stores your `etcd` +- A second Linux node, which will be another worker node +- The Windows node, which will run your Windows containers as a worker node + +| Node | Operating System | +| ------ | ------------------------------------------------------------ | +| Node 1 | Linux (Ubuntu Server 18.04 recommended) | +| Node 2 | Linux (Ubuntu Server 18.04 recommended) | +| Node 3 | Windows (Windows Server core version 1809 or above required) | + +If your nodes are hosted by a **Cloud Provider** and you want automation support such as load balancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.](cluster-provisioning/rke-clusters/options/cloud-providers) + +# 2. Create the Cluster on Existing Nodes + +The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster](use-existing-nodes.md) with some Windows-specific requirements. + +1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**. +1. Click **From existing nodes (Custom)**. +1. Enter a name for your cluster in the **Cluster Name** text box. +1. 
In the **Kubernetes Version** dropdown menu, select v1.15 or above. +1. In the **Network Provider** field, select **Flannel.** +1. In the **Windows Support** section, click **Enable.** +1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. +1. Click **Next**. + +> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +# 3. Add Nodes to the Cluster + +This section describes how to register your Linux and Windows nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node. + +### Add Linux Master Node + +In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster. + +The first node in your cluster should be a Linux host that has both the **Control Plane** and **etcd** roles. At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts. + +1. In the **Node Operating System** section, click **Linux**. +1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three. +1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent](admin-settings/agent-options/) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +1. Copy the command displayed on the screen to your clipboard. +1. SSH into your Linux host and run the command that you copied to your clipboard. +1. When you are finished provisioning your Linux node(s), select **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +It may take a few minutes for the node to be registered in your cluster. + +### Add Linux Worker Node + +In this section, we run a command to register the Linux worker node to the cluster. 
+ +After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster. + +1. From the **Global** view, click **Clusters.** +1. Go to the cluster that you created and click **⋮ > Edit.** +1. Scroll down to **Node Operating System**. Choose **Linux**. +1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role. +1. Copy the command displayed on screen to your clipboard. +1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard. +1. From **Rancher**, click **Save**. + +**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. + +> **Note:** Taints on Linux Worker Nodes +> +> For each Linux worker node added into the cluster, the following taints will be added to Linux worker node. By adding this taint to the Linux worker node, any workloads added to the Windows cluster will be automatically scheduled to the Windows worker node. If you want to schedule workloads specifically onto the Linux worker node, you will need to add tolerations to those workloads. + +> | Taint Key | Taint Value | Taint Effect | +> | -------------- | ----------- | ------------ | +> | `cattle.io/os` | `linux` | `NoSchedule` | + +### Add a Windows Worker Node + +In this section, we run a command to register the Windows worker node to the cluster. + +You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option. + +1. From the **Global** view, click **Clusters.** +1. Go to the cluster that you created and click **⋮ > Edit.** +1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role. +1. Copy the command displayed on screen to your clipboard. +1. Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. +1. From Rancher, click **Save**. +1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. + +**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. 
+- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# Configuration for Storage Classes in Azure + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. For details, refer to [this section.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md new file mode 100644 index 0000000000..bf76a7e591 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/user-settings.md @@ -0,0 +1,18 @@ +--- +title: User Settings +weight: 23 +aliases: + - /rancher/v2.0-v2.4/en/tasks/user-settings/ +--- + +Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. + +![User Settings Menu](/img/user-settings.png) + +The available user settings are: + +- [API & Keys](../reference-guides/user-settings/api-keys.md): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. +- [Cloud Credentials](../reference-guides/user-settings/manage-cloud-credentials.md): Manage cloud credentials [used by node templates](use-new-nodes-in-an-infra-provider.md#node-templates) to [provision nodes for clusters](launch-kubernetes-with-rancher.md). Note: Available as of v2.2.0. +- [Node Templates](../reference-guides/user-settings/manage-node-templates.md): Manage templates [used by Rancher to provision nodes for clusters](launch-kubernetes-with-rancher.md). +- [Preferences](../reference-guides/user-settings/user-preferences.md): Sets superficial preferences for the Rancher UI. +- Log Out: Ends your user session. diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md new file mode 100644 index 0000000000..92a5c79f91 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/vsphere.md @@ -0,0 +1,66 @@ +--- +title: Creating a vSphere Cluster +shortTitle: vSphere +description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +weight: 2225 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ +--- + +By using Rancher with vSphere, you can bring cloud operations on-premises. 
+ +Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. + +A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. + +- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3) +- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) +- [Provisioning Storage](#provisioning-storage) +- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider) + +# vSphere Enhancements in Rancher v2.3 + +The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements: + +### Self-healing Node Pools + +_Available as of v2.3.0_ + +One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,](use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes. + +> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +### Dynamically Populated Options for Instances and Scheduling + +_Available as of v2.3.3_ + +Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console. + +For the fields to be populated, your setup needs to fulfill the [prerequisites.](cluster-provisioning/rke-clusters/node-pools/vsphercluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/#prerequisites) + +### More Supported Operating Systems + +In Rancher v2.3.3+, you can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) + +In Rancher before v2.3.3, the vSphere node driver included in Rancher only supported the provisioning of VMs with [RancherOS]({{}}/os/v1.x/en/) as the guest operating system. + +### Video Walkthrough of v2.3.3 Node Template Features + +In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters. + +{{< youtube id="dPIwg6x1AlU">}} + +# Creating a vSphere Cluster + +In [this section,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. 
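As noted in the node template enhancements above, Rancher v2.3.3+ can provision vSphere VMs from any guest operating system that supports `cloud-init`, and only YAML is accepted for the cloud config. The snippet below is a minimal, illustrative cloud-config; the hostname, user name, and SSH key are placeholders, and the directives your image actually honors depend on the guest OS.

```yaml
#cloud-config
# Illustrative only: adjust to match your guest OS image.
hostname: rke-node-01
users:
  - name: rancher                          # placeholder user
    sudo: ALL=(ALL) NOPASSWD:ALL
    ssh_authorized_keys:
      - ssh-rsa AAAA...your-public-key...  # placeholder key
package_update: true
packages:
  - curl
```

A file like this is supplied to the vSphere node template through its cloud config option; the exact field name varies by Rancher version.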
+ +# Provisioning Storage + +For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) + +# Enabling the vSphere Cloud Provider + +When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices. + +For details, refer to the section on [enabling the vSphere cloud provider.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md new file mode 100644 index 0000000000..b1916615b1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/pages-for-subheaders/workloads-and-pods.md @@ -0,0 +1,83 @@ +--- +title: "Kubernetes Workloads and Pods" +description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods" +weight: 3025 +aliases: + - /rancher/v2.0-v2.4/en/concepts/workloads/ + - /rancher/v2.0-v2.4/en/tasks/workloads/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/workloads +--- + +You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services. + +### Pods + +[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload. + +### Workloads + +_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application. +Workloads let you define the rules for application scheduling, scaling, and upgrade. + +#### Workload Types + +Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are: + +- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) + + _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server. 
+ +- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) + + _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An example would be something like ZooKeeper, an application that requires a database for storage. + +- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) + + _DaemonSets_ ensure that every node in the cluster runs a copy of the pod. For use cases such as collecting logs or monitoring node performance, this daemon-like workload works best. + +- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) + + _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion, as opposed to managing an ongoing desired application state. + +- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) + + _CronJobs_ are similar to jobs. CronJobs, however, run to completion on a cron-based schedule. + +### Services + +In many use cases, a workload has to be either: + +- Accessed by other workloads in the cluster. +- Exposed to the outside world. + +You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). The Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select. + +#### Service Types + +There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types). + +- **ClusterIP** + + >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`. + +- **NodePort** + + >Exposes the service on each Node's IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You'll be able to contact the `NodePort` service, from outside the cluster, by requesting `<NodeIP>:<NodePort>`. + +- **LoadBalancer** + + >Exposes the service externally using a cloud provider's load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created. + +## Workload Options + +This section of the documentation contains instructions for deploying workloads and using workload options. 
+ +- [Deploy Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md) +- [Upgrade Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) +- [Rollback Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) + +## Related Links + +### External Links + +- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.0-v2.4/en/_index.md b/versioned_docs/version-2.0-2.4/rancher-manager.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/_index.md rename to versioned_docs/version-2.0-2.4/rancher-manager.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides.md b/versioned_docs/version-2.0-2.4/reference-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/api/api-tokens/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/about-the-api/api-tokens.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/api/api-tokens/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/about-the-api/api-tokens.md diff --git a/content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/containers.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/best-practices/containers/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/best-practices/containers.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md new file mode 100644 index 0000000000..950fc01a4f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-strategies.md @@ -0,0 +1,49 @@ +--- +title: Rancher Deployment Strategies +weight: 100 +aliases: + - /rancher/v2.0-v2.4/en/best-practices/deployment-strategies + - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-strategies + - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-strategies/ +--- + +There are two recommended deployment strategies. Each one has its own pros and cons. Read more about which one would fit best for your use case: + +* [Hub and Spoke](#hub-and-spoke-strategy) +* [Regional](#regional-strategy) + +# Hub and Spoke Strategy +--- + +In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. + +![](/img/bpg/hub-and-spoke.png) + +### Pros + +* Environments could have nodes and network connectivity across regions. +* Single control plane interface to view/see all regions and environments. +* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. + +### Cons + +* Subject to network latencies. +* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. + +# Regional Strategy +--- +In the regional deployment model a control plane is deployed in close proximity to the compute nodes. 
+ +![](/img/bpg/regional.png) + +### Pros + +* Rancher functionality in regions stay operational if a control plane in another region goes down. +* Network latency is greatly reduced, improving the performance of functionality in Rancher. +* Upgrades of the Rancher control plane can be done independently per region. + +### Cons + +* Overhead of managing multiple Rancher installations. +* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. +* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md new file mode 100644 index 0000000000..c7a217854c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/deployment-types.md @@ -0,0 +1,42 @@ +--- +title: Tips for Running Rancher +weight: 100 +aliases: + - /rancher/v2.0-v2.4/en/best-practices/deployment-types + - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/deployment-types + - /rancher/v2.x/en/best-practices/v2.0-v2.4/deployment-types/ +--- + +A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. + +When you set up your high-availability Rancher installation, consider the following: + +### Run Rancher on a Separate Cluster +Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. + +### Don't Run Rancher on a Hosted Kubernetes Environment +When the Rancher server is installed on a Kubernetes cluster, it should not be run in a hosted Kubernetes environment such as Google's GKE, Amazon's EKS, or Microsoft's AKS. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. + +It is strongly recommended to use hosted infrastructure such as Amazon's EC2 or Google's GCE instead. When you create a cluster using RKE on an infrastructure provider, you can configure the cluster to create etcd snapshots as a backup. You can then [use RKE](https://rancher.com/docs/rke/latest/en/etcd-snapshots/) or [Rancher](backups/restorations/) to restore your cluster from one of these snapshots. In a hosted Kubernetes environment, this backup and restore functionality is not supported. + +### Make sure nodes are configured correctly for Kubernetes ### +It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. 
More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/performance.md) + +### When using RKE: Backup the Statefile +RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. + +### Run All Nodes in the Cluster in the Same Datacenter +For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. + +### Development and Production Environments Should be Similar +It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. + +### Monitor Your Clusters to Plan Capacity +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements](../../pages-for-subheaders/installation-requirements.md) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. + +However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. + +After you [enable monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) in the cluster, you can set up [a notification channel](../../explanations/integrations-in-rancher/notifiers.md) and [cluster alerts](cluster-admin/tools/alerts/) to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. + diff --git a/versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md new file mode 100644 index 0000000000..1f4d04112c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/best-practices/management.md @@ -0,0 +1,145 @@ +--- +title: Tips for Scaling, Security and Reliability +weight: 101 +aliases: + - /rancher/v2.0-v2.4/en/best-practices/management + - /rancher/v2.0-v2.4/en/best-practices/v2.0-v2.4/management + - /rancher/v2.x/en/best-practices/management/ + - /rancher/v2.x/en/best-practices/v2.0-v2.4/management/ +--- + +Rancher allows you to set up numerous combinations of configurations. Some configurations are more appropriate for development and testing, while there are other best practices for production environments for maximum availability and fault tolerance. 
The following best practices should be followed for production. + +- [Tips for Preventing and Handling Problems](#tips-for-preventing-and-handling-problems) +- [Network Topology](#network-topology) +- [Tips for Scaling and Reliability](#tips-for-scaling-and-reliability) +- [Tips for Security](#tips-for-security) +- [Tips for Multi-Tenant Clusters](#tips-for-multi-tenant-clusters) +- [Class of Service and Kubernetes Clusters](#class-of-service-and-kubernetes-clusters) +- [Network Security](#network-security) + +# Tips for Preventing and Handling Problems + +These tips can help you solve problems before they happen. + +### Run Rancher on a Supported OS and Supported Docker Version +Rancher is container-based and can potentially run on any Linux-based operating system. However, only operating systems listed in the [requirements documentation](../../pages-for-subheaders/installation-requirements.md) should be used for running Rancher, along with a supported version of Docker. These versions have been most thoroughly tested and can be properly supported by the Rancher Support team. + +### Upgrade Your Kubernetes Version +Keep your Kubernetes cluster up to date with a recent and supported version. Typically the Kubernetes community will support the current version and previous three minor releases (for example, 1.14.x, 1.13.x, 1.12.x, and 1.11.x). After a new version is released, the third-oldest supported version reaches EOL (End of Life) status. Running on an EOL release can be a risk if a security issues are found and patches are not available. The community typically makes minor releases every quarter (every three months). + +Rancher’s SLAs are not community dependent, but as Kubernetes is a community-driven software, the quality of experience will degrade as you get farther away from the community's supported target. + +### Kill Pods Randomly During Testing +Run chaoskube or a similar mechanism to randomly kill pods in your test environment. This will test the resiliency of your infrastructure and the ability of Kubernetes to self-heal. It's not recommended to run this in your production environment. + +### Deploy Complicated Clusters with Terraform +Rancher's "Add Cluster" UI is preferable for getting started with Kubernetes cluster orchestration or for simple use cases. However, for more complex or demanding use cases, it is recommended to use a CLI/API driven approach. [Terraform](https://www.terraform.io/) is recommended as the tooling to implement this. When you use Terraform with version control and a CI/CD environment, you can have high assurances of consistency and reliability when deploying Kubernetes clusters. This approach also gives you the most customization options. + +Rancher [maintains a Terraform provider](https://rancher.com/blog/2019/rancher-2-terraform-provider/) for working with Rancher 2.0 Kubernetes. It is called the [Rancher2 Provider.](https://www.terraform.io/docs/providers/rancher2/index.html) + +### Upgrade Rancher in a Staging Environment +All upgrades, both patch and feature upgrades, should be first tested on a staging environment before production is upgraded. The more closely the staging environment mirrors production, the higher chance your production upgrade will be successful. + +### Renew Certificates Before they Expire +Multiple people in your organization should set up calendar reminders for certificate renewal. 
Consider renewing the certificate two weeks to one month in advance. If you have multiple certificates to track, consider using [monitoring and alerting mechanisms](../rancher-cluster-tools.md) to track certificate expiration. + +Rancher-provisioned Kubernetes clusters will use certificates that expire in one year. Clusters provisioned by other means may have a longer or shorter expiration. + +Certificates can be renewed for Rancher-provisioned clusters [through the Rancher user interface](../../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md). + +### Enable Recurring Snapshots for Backing up and Restoring the Cluster +Make sure etcd recurring snapshots are enabled. Extend the snapshot retention to a period of time that meets your business needs. In the event of a catastrophic failure or deletion of data, this may be your only recourse for recovery. For details about configuring snapshots, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/etcd-snapshots/) or the [Rancher documentation on backups](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md). + +### Provision Clusters with Rancher +When possible, use Rancher to provision your Kubernetes cluster rather than importing a cluster. This will ensure the best compatibility and supportability. + +### Use Stable and Supported Rancher Versions for Production +Do not upgrade production environments to alpha, beta, release candidate (rc), or "latest" versions. These early releases are often not stable and may not have a future upgrade path. + +When installing or upgrading a non-production environment to an early release, anticipate problems such as features not working, data loss, outages, and inability to upgrade without a reinstall. + +Make sure the feature version you are upgrading to is considered "stable" as determined by Rancher. Use the beta, release candidate, and "latest" versions in a testing, development, or demo environment to try out new features. Feature version upgrades, for example 2.1.x to 2.2.x, should be considered as and when they are released. Some bug fixes and most features are not back ported into older versions. + +Keep in mind that Rancher does End of Life support for old versions, so you will eventually want to upgrade if you want to continue to receive patches. + +For more detail on what happens during the Rancher product lifecycle, refer to the [Support Maintenance Terms](https://rancher.com/support-maintenance-terms/). + +# Network Topology +These tips can help Rancher work more smoothly with your network. + +### Use Low-latency Networks for Communication Within Clusters +Kubernetes clusters are best served by low-latency networks. This is especially true for the control plane components and etcd, where lots of coordination and leader election traffic occurs. Networking between Rancher server and the Kubernetes clusters it manages are more tolerant of latency. + +### Allow Rancher to Communicate Directly with Clusters +Limit the use of proxies or load balancers between Rancher server and Kubernetes clusters. As Rancher is maintaining a long-lived web sockets connection, these intermediaries can interfere with the connection lifecycle as they often weren't configured with this use case in mind. + + +# Tips for Scaling and Reliability +These tips can help you scale your cluster more easily. 
+ +### Use One Kubernetes Role Per Host +Separate the etcd, control plane, and worker roles onto different hosts. Don't assign multiple roles to the same host, such as a worker and control plane. This will give you maximum scalability. + +### Run the Control Plane and etcd on Virtual Machines +Run your etcd and control plane nodes on virtual machines where you can scale vCPU and memory easily if needed in the future. + +### Use at Least Three etcd Nodes +Provision 3 or 5 etcd nodes. Etcd requires a quorum to determine a leader by the majority of nodes, therefore it is not recommended to have clusters of even numbers. Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. + +### Use at Least Three Control Plane Nodes +Provision three or more control plane nodes. Some control plane components, such as the `kube-apiserver`, run in [active-active](https://www.jscape.com/blog/active-active-vs-active-passive-high-availability-cluster) mode and will give you more scalability. Other components such as kube-scheduler and kube-controller run in active-passive mode (leader elect) and give you more fault tolerance. + +### Monitor Your Cluster +Closely monitor and scale your nodes as needed. You should [enable cluster monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/) and use the Prometheus metrics and Grafana visualization options as a starting point. + + +# Tips for Security +Below are some basic tips for increasing security in Rancher. For more detailed information about securing your cluster, you can refer to these resources: + +- Rancher's [security documentation and Kubernetes cluster hardening guide](../../pages-for-subheaders/rancher-security.md) +- [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) + +### Update Rancher with Security Patches +Keep your Rancher installation up to date with the latest patches. Patch updates have important software fixes and sometimes have security fixes. When patches with security fixes are released, customers with Rancher licenses are notified by e-mail. These updates are also posted on Rancher's [forum](https://forums.rancher.com/). + +### Report Security Issues Directly to Rancher +If you believe you have uncovered a security-related problem in Rancher, please communicate this immediately and discretely to the Rancher team (security@rancher.com). Posting security issues on public forums such as Twitter, Rancher Slack, GitHub, etc. can potentially compromise security for all Rancher customers. Reporting security issues discretely allows Rancher to assess and mitigate the problem. Security patches are typically given high priority and released as quickly as possible. + +### Only Upgrade One Component at a Time +In addition to Rancher software updates, closely monitor security fixes for related software, such as Docker, Linux, and any libraries used by your workloads. For production environments, try to avoid upgrading too many entities during a single maintenance window. Upgrading multiple components can make it difficult to root cause an issue in the event of a failure. As business requirements allow, upgrade one component at a time. + +# Tips for Multi-Tenant Clusters + +### Namespaces +Each tenant should have their own unique namespaces within the cluster. 
This avoids naming conflicts and allows resources to be visible only to their owner through the use of RBAC policies. + +### Project Isolation +Use Rancher's Project Isolation to automatically generate network policies between Projects (sets of namespaces). This further protects workloads from interference. + +### Resource Limits +Enforce the use of sane resource limit definitions for every deployment in your cluster. This protects not only the owners of the deployment, but also the neighboring resources of other tenants. Remember, namespaces do not isolate at the node level, so over-consumption of resources on a node affects deployments in other namespaces. Admission controllers can be written to require resource limit definitions. A minimal example of enforcing defaults with a `LimitRange` is sketched at the end of this page. + +### Resource Requirements +Enforce the use of resource requirement definitions for each deployment in your cluster. This enables the scheduler to schedule workloads appropriately. Otherwise, you will eventually end up with overcommitted nodes. + +# Class of Service and Kubernetes Clusters +A class of service describes the expectations around cluster uptime, durability, and the duration of maintenance windows. Organizations typically group these characteristics into labels such as "dev" or "prod". + +### Consider fault domains +Kubernetes clusters can span multiple classes of service; however, it is important to consider the ability of one workload to affect another. Without proper deployment practices, such as resource limits and requirements, a misbehaving deployment has the potential to impact the health of the cluster. In a "dev" environment, it is common for end users to exercise less caution with deployments, increasing the chance of such behavior. Sharing a cluster between these workloads and your production workloads increases risk. + +### Upgrade risks +Upgrades of Kubernetes are not without risk. The best way to predict the outcome of an upgrade is to try it on a cluster with a load and use case similar to your production cluster. This is where having non-production class-of-service clusters can be advantageous. + +### Resource Efficiency +Clusters can be built with varying degrees of redundancy. In a class of service with low expectations for uptime, resources and cost can be conserved by building clusters without redundant Kubernetes control plane components. This approach may also free up budget and resources to increase redundancy at the production level. + +# Network Security +In general, you can apply network security best practices to your Rancher and Kubernetes clusters. Consider the following: + +### Use a Firewall Between your Hosts and the Internet +Firewalls should be used between your hosts and the Internet (or corporate intranet). These could be enterprise firewall appliances in a datacenter or SDN constructs in the cloud, such as VPCs, security groups, and ingress and egress rules. Try to limit inbound access to only the ports and IP addresses that require it. Outbound access can be shut off entirely (air gap) if the environment handles sensitive information that requires this restriction. If available, use firewalls with intrusion detection and DDoS prevention. + +### Run Periodic Security Scans +Run security and penetration scans on your environment periodically. Even with well-designed infrastructure, a poorly designed microservice could compromise the entire environment. 
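As referenced in the multi-tenancy guidance above, resource limit and request defaults can be enforced per tenant namespace with a `LimitRange`. The following is a minimal sketch; the namespace name and values are placeholders and should be tuned for your workloads.

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: tenant-defaults
  namespace: tenant-a          # placeholder: one LimitRange per tenant namespace
spec:
  limits:
    - type: Container
      defaultRequest:          # applied when a container omits resource requests
        cpu: 100m
        memory: 128Mi
      default:                 # applied when a container omits resource limits
        cpu: 500m
        memory: 512Mi
```

Combined with a `ResourceQuota` on the same namespace, this keeps a single tenant from starving its neighbors, as described in the Resource Limits section above.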
diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/kubectl-utility.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cli-with-rancher/rancher-cli.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md new file mode 100644 index 0000000000..bcc3023047 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md @@ -0,0 +1,103 @@ +--- +title: EC2 Node Template Configuration +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +For more details about EC2, nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). + + + + +### Region + +In the **Region** field, select the same region that you used when creating your cloud credentials. + +### Cloud Credentials + +Your AWS account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) + +See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. + +See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
+ +See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM + +See our three example JSON policies: + +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. + +### Authenticate & Configure Nodes + +Choose an availability zone and network settings for your cluster. + +### Security Group + +Choose the default security group or configure a security group. + +Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. + +### Instance Options + +Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. + +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + +### Engine Options + +In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. + + + + +### Account Access + +**Account Access** is where you configure the region of the nodes, and the credentials (Access Key and Secret Key) used to create the machine. + +See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. + +See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
+ +See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM + +See our three example JSON policies: + +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. + +### Zone and Network + +**Zone and Network** configures the availability zone and network settings for your cluster. + +### Security Groups + +**Security Groups** creates or configures the Security Groups applied to your nodes. Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. + +### Instance + +**Instance** configures the instances that will be created. + +### SSH User + +Make sure you configure the correct **SSH User** for the configured AMI. + +### IAM Instance Profile Name + +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](cluster-provisioning/rke-clusters/options/cloud-providers), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + +### Docker Daemon + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
+- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) + + + diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md new file mode 100644 index 0000000000..e4551ddaf8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md @@ -0,0 +1,43 @@ +--- +title: Azure Node Template Configuration +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) + + + + +Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. + +- **Placement** sets the geographical region where your cluster is hosted and other location metadata. +- **Network** configures the networking used in your cluster. +- **Instance** customizes your VM configuration. + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. +- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) + + + + +- **Account Access** stores your account information for authenticating with Azure. +- **Placement** sets the geographical region where your cluster is hosted and other location metadata. +- **Network** configures the networking used in your cluster. +- **Instance** customizes your VM configuration. + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
+- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) + + + diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md new file mode 100644 index 0000000000..813016e829 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md @@ -0,0 +1,48 @@ +--- +title: DigitalOcean Node Template Configuration +weight: 1 +---- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. + +### Droplet Options + +The **Droplet Options** provision your cluster's geographical region and specifications. + +### Docker Daemon + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. +- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) + + + + +### Access Token + +The **Access Token** stores your DigitalOcean Personal Access Token. Refer to [DigitalOcean Instructions: How To Generate a Personal Access Token](https://www.digitalocean.com/community/tutorials/how-to-use-the-digitalocean-api-v2#how-to-generate-a-personal-access-token). + +### Droplet Options + +The **Droplet Options** provision your cluster's geographical region and specifications. + +### Docker Daemon + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. 
+- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) + + + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md new file mode 100644 index 0000000000..9d0791eb81 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4.md @@ -0,0 +1,90 @@ +--- +title: vSphere Node Template Configuration in Rancher before v2.0.4 +shortTitle: Before v2.0.4 +weight: 5 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/prior-to-2.0.4/ +--- + +- [Account access](#account-access) +- [Scheduling](#scheduling) +- [Instance options](#instance-options) +- [Disk UUIDs](#disk-uuids) +- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) +- [Cloud Init](#cloud-init) + +# Account Access +In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | * | Port to use when connecting to the server. Defaults to `443`. | +| Username | * | vCenter/ESXi user to authenticate with the server. | +| Password | * | User's password. | + + +# Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | +| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | +| Network | * | Name of the VM network to attach VMs to. | +| Data Store | * | Datastore to store the VM disks. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | + +# Instance Options +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +Only VMs booting from RancherOS ISO are supported. + +Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. + + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| CPUs | * | Number of vCPUS to assign to VMs. 
| +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + +# Disk UUIDs + +In order to provision nodes with RKE, all nodes must be configured with disk UUIDs. Follow these instructions to enable UUIDs for the nodes in your vSphere cluster. + +To enable disk UUIDs for all VMs created for a cluster, + +1. Navigate to the **Node Templates** in the Rancher UI while logged in as an administrator. +2. Add or edit an existing vSphere node template. +3. Under **Instance Options** click on **Add Parameter**. +4. Enter `disk.enableUUID` as key with a value of **TRUE**. + + ![](/img/vsphere-nodedriver-enable-uuid.png) + +5. Click **Create** or **Save**. + +**Result:** The disk UUID is enabled in the vSphere node template. + +# Node Tags and Custom Attributes + +These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +Optionally, you can: + +- Provide a set of configuration parameters (instance-options) for the VMs. +- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. +- Customize the configuration of the Docker daemon on the VMs that will be created. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# Cloud Init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. 
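For orientation, a minimal RancherOS cloud-config is sketched below. This is an illustrative example only: the hostname, SSH key, and nameservers are placeholders, and the full set of supported directives is covered in the RancherOS documentation linked above.

```yaml
#cloud-config
# Placeholder values -- substitute your own hostname, SSH key, and DNS servers.
hostname: rancher-node-01
ssh_authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2E... user@example.com
rancher:
  network:
    dns:
      nameservers:
        - 8.8.8.8
        - 8.8.4.4
```

Host the file at a location the provisioned VMs can reach (for example, an internal web server) and reference that URL in the **Cloud Init** field.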
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md new file mode 100644 index 0000000000..83f7c9b58b --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4.md @@ -0,0 +1,69 @@ +--- +title: vSphere Node Template Configuration in Rancher v2.0.4 +shortTitle: v2.0.4 +weight: 4 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.0.4/ +--- +- [Account access](#account-access) +- [Scheduling](#scheduling) +- [Instance options](#instance-options) +- [Node Tags and Custom Attributes](#node-tags-and-custom-attributes) +- [Cloud Init](#cloud-init) + +# Account Access +In the **Account Access** section, enter the vCenter FQDN or IP address and the credentials for the vSphere user account. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| vCenter or ESXi Server | * | IP or FQDN of the vCenter or ESXi server used for managing VMs. Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | * | Port to use when connecting to the server. Defaults to `443`. | +| Username | * | vCenter/ESXi user to authenticate with the server. | +| Password | * | User's password. | + +# Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | +| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | +| Network | * | Name of the VM network to attach VMs to. | +| Data Store | * | Datastore to store the VM disks. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | + +# Instance Options +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +Only VMs booting from RancherOS ISO are supported. + +Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. 
This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + +# Node Tags and Custom Attributes + +These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +Optionally, you can: + +- Provide a set of configuration parameters (instance-options) for the VMs. +- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. +- Customize the configuration of the Docker daemon on the VMs that will be created. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# Cloud Init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md new file mode 100644 index 0000000000..a5352740e8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0.md @@ -0,0 +1,72 @@ +--- +title: vSphere Node Template Configuration in Rancher v2.2.0 +shortTitle: v2.2.0 +weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.2.0/ +--- +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Node tags and custom attributes](#node-tags-and-custom-attributes) +- [Cloud Init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | + +Your cloud credential has these fields: + +| Credential Field | Description | +|-----------|----------| +| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. 
ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | Optional: configure configure the port of the vCenter or ESXi server. | +| Username and password | Enter your vSphere login username and password. | + +# Scheduling +Choose what hypervisor the virtual machine will be scheduled to. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | +| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | +| Network | * | Name of the VM network to attach VMs to. | +| Data Store | * | Datastore to store the VM disks. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +Only VMs booting from RancherOS ISO are supported. + +Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + +# Node Tags and Custom Attributes + +These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +Optionally, you can: + +- Provide a set of configuration parameters (instance-options) for the VMs. +- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. +- Customize the configuration of the Docker daemon on the VMs that will be created. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# Cloud Init +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. 
This may involve things such as creating users, authorizing SSH keys or setting up the network. + +You may specify the URL of a RancherOS cloud-config.yaml file in the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md new file mode 100644 index 0000000000..c664e2f246 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0.md @@ -0,0 +1,80 @@ +--- +title: vSphere Node Template Configuration in Rancher v2.3.0 +shortTitle: v2.3.0 +weight: 2 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.0/ +--- +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Node tags and custom attributes](#node-tags-and-custom-attributes) +- [Cloud Init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | + +Your cloud credential has these fields: + +| Credential Field | Description | +|-----------------|-----------------| +| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | Optional: configure the port of the vCenter or ESXi server. | +| Username and password | Enter your vSphere login username and password. | + +# Scheduling +Choose what hypervisor the virtual machine will be scheduled to. + +In the **Scheduling** section, enter: + +- The name/path of the **Data Center** to create the VMs in +- The name of the **VM Network** to attach to +- The name/path of the **Datastore** to store the disks in + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| Data Center | * | Name/path of the datacenter to create VMs in. | +| Pool | | Name/path of the resource pool to schedule the VMs in. If not specified, the default resource pool is used. | +| Host | | Name/path of the host system to schedule VMs in. If specified, the host system's pool will be used and the *Pool* parameter will be ignored. | +| Network | * | Name of the VM network to attach VMs to. | +| Data Store | * | Datastore to store the VM disks. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The folder name should be prefaced with `vm/` in your vSphere config file. | + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template.
+ +Only VMs booting from RancherOS ISO are supported. + +Ensure that the OS ISO URL contains the URL of the VMware ISO release for RancherOS: `rancheros-vmware.iso`. + +| Parameter | Required | Description | +|:------------------------|:--------:|:------------------------------------------------------------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Cloud Init | | URL of a [RancherOS cloud-config]({{}}/os/v1.x/en/configuration/) file to provision VMs with. This file allows further customization of the RancherOS operating system, such as network configuration, DNS servers, or system daemons.| +| OS ISO URL | * | URL of a RancherOS vSphere ISO file to boot the VMs from. You can find URLs for specific versions in the [Rancher OS GitHub Repo](https://github.com/rancher/os). | +| Configuration Parameters | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + + +# Node Tags and Custom Attributes + +These attributes allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +Optionally, you can: + +- Provide a set of configuration parameters (instance-options) for the VMs. +- Assign labels to the VMs that can be used as a base for scheduling rules in the cluster. +- Customize the configuration of the Docker daemon on the VMs that will be created. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# Cloud Init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +You may specify the URL of a RancherOS cloud-config.yaml file in the the **Cloud Init** field. Refer to the [RancherOS Documentation](https://rancher.com/docs/os/v1.x/en/configuration/#cloud-config) for details on the supported configuration directives. Note that the URL must be network accessible from the VMs created by the template. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md new file mode 100644 index 0000000000..a9d55fe1e5 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3.md @@ -0,0 +1,91 @@ +--- +title: vSphere Node Template Configuration in Rancher v2.3.3 +shortTitle: v2.3.3 +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/v2.3.3/ +--- +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Networks](#networks) +- [Node tags and custom attributes](#node-tags-and-custom-attributes) +- [cloud-init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../../user-settings/manage-cloud-credentials.md) | + +Your cloud credential has these fields: + +| Credential Field | Description | +|-----------------|--------------| +| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | Optional: configure configure the port of the vCenter or ESXi server. | +| Username and password | Enter your vSphere login username and password. | + +# Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. + +The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. + +| Field | Required | Explanation | +|---------|---------------|-----------| +| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. | +| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | +| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. | +| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. 
+ +| Parameter | Required | Description | +|:----------------|:--------:|:-----------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | +| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | +| Networks | | Name(s) of the network to attach the VM to. | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + + +### About VM Creation Methods + +In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). + +The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). + +Choose the way that the VM will be created: + +- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. +- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates.** +- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. +- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). Note that this URL must be accessible from the nodes running your Rancher server installation. + +# Networks + +The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. + +# Node Tags and Custom Attributes + +Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +For tags, all your vSphere tags will show up as options to select from in your node template. 
+ +In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# cloud-init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. + +Note that cloud-init is not supported when using the ISO creation method. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md new file mode 100644 index 0000000000..5b64ad1eea --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -0,0 +1,404 @@ +--- +title: RKE Cluster Configuration Reference +weight: 2250 +--- + +When Rancher installs Kubernetes, it uses [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) as the Kubernetes distribution. + +This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. + +You can configure the Kubernetes options one of two ways: + +- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +In Rancher v2.0.0-v2.2.x, the RKE cluster config file in Rancher is identical to the [cluster config file for the Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/config-options/), which is the tool Rancher uses to provision clusters. In Rancher v2.3.0, the RKE information is still included in the config file, but it is separated from other options, so that the RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the section about the [cluster config file.](#cluster-config-file) + +This section is a cluster configuration reference, covering the following topics: + +- [Rancher UI Options](#rancher-ui-options) + - [Kubernetes version](#kubernetes-version) + - [Network provider](#network-provider) + - [Kubernetes cloud providers](#kubernetes-cloud-providers) + - [Private registries](#private-registries) + - [Authorized cluster endpoint](#authorized-cluster-endpoint) + - [Node pools](#node-pools) +- [Advanced Options](#advanced-options) + - [NGINX Ingress](#nginx-ingress) + - [Node port range](#node-port-range) + - [Metrics server monitoring](#metrics-server-monitoring) + - [Pod security policy support](#pod-security-policy-support) + - [Docker version on nodes](#docker-version-on-nodes) + - [Docker root directory](#docker-root-directory) + - [Recurring etcd snapshots](#recurring-etcd-snapshots) +- [Cluster config file](#cluster-config-file) + - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0) + - [Config file structure in Rancher v2.0.0-v2.2.x](#config-file-structure-in-rancher-v2-0-0-v2-2-x) + - [Default DNS provider](#default-dns-provider) +- [Rancher specific parameters](#rancher-specific-parameters) + +# Rancher UI Options + +When creating a cluster using one of the options described in [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can configure basic Kubernetes options using the **Cluster Options** section. + +### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +### Network Provider + +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ](../../../faq/container-network-interface-providers.md). + +>**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications. + +Out of the box, Rancher is compatible with the following network providers: + +- [Canal](https://github.com/projectcalico/canal) +- [Flannel](https://github.com/coreos/flannel#flannel) +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Weave](https://github.com/weaveworks/weave) (Available as of v2.2.0) + +**Notes on Canal:** + +In v2.0.0 - v2.0.4 and v2.0.6, Canal with network isolation was the default option for these clusters. With network isolation automatically enabled, it prevented any pod communication between [projects](k8s-in-rancher/projects-and-namespaces/). + +As of v2.0.7, if you use Canal, you also have the option of using **Project Network Isolation**, which will enable or disable communication between pods in different [projects](k8s-in-rancher/projects-and-namespaces/).
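If you manage clusters through the config file described later in this page, Project Network Isolation corresponds to the `enable_network_policy` setting listed under [Rancher specific parameters](#rancher-specific-parameters). A minimal sketch:

```yaml
# Cluster config file snippet: enables Project Network Isolation (Canal).
enable_network_policy: true
```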
+ +>**Attention Rancher v2.0.0 - v2.0.6 Users** +> +>- In previous Rancher releases, Canal isolates project network communications with no option to disable it. If you are using any of these Rancher releases, be aware that using Canal prevents all communication between pods in different projects. +>- If you have clusters using Canal and are upgrading to v2.0.7, those clusters enable Project Network Isolation by default. If you want to disable Project Network Isolation, edit the cluster and disable the option. + +**Notes on Flannel:** + +In v2.0.5, this was the default option, which did not prevent any network isolation between projects. + +**Notes on Weave:** + +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File](rke1-cluster-configuration.md#cluster-config-file) and the [Weave Network Plug-in Options](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). + +### Kubernetes Cloud Providers + +You can configure a [Kubernetes cloud provider](cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage](k8s-in-rancher/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. + +>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. + +If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: + +### Private registries + +_Available as of v2.2.0_ + +The cluster-level private registry configuration is only used for provisioning clusters. + +There are two main ways to set up private registries in Rancher: by setting up the [global default registry](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. + +The private registry configuration option tells Rancher where to pull the [system images](https://rancher.com/docs/rke/latest/en/config-options/system-images/) or [addon images](https://rancher.com/docs/rke/latest/en/config-options/add-ons/) that will be used in your cluster. + +- **System images** are components needed to maintain the Kubernetes cluster. 
+- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. + +See the [RKE documentation on private registries](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. + +### Authorized Cluster Endpoint + +_Available as of v2.2.0_ + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. + +> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE](rke1-cluster-configuration.md#authorized-cluster-endpoint). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are imported into Rancher; it is available only on Rancher-launched Kubernetes clusters. + +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +### Node Pools + +For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +# Advanced Options + +The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** + +### NGINX Ingress + +Option to enable or disable the [NGINX ingress controller](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/). + +### Node Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. + +### Metrics Server Monitoring + +Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). + +### Pod Security Policy Support + +Option to enable and select a default [Pod Security Policy](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). You must have an existing Pod Security Policy configured before you can use this option. + +### Docker Version on Nodes + +Option to require [a supported Docker version](../../../pages-for-subheaders/installation-requirements.md) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. + +### Docker Root Directory + +If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. 
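If you maintain the cluster configuration as YAML (see [Cluster Config File](#cluster-config-file) below), the same setting is expressed with the `docker_root_dir` key. A one-line sketch, using an example path:

```yaml
# Example only -- use the root directory actually configured on your nodes.
docker_root_dir: /opt/docker
```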
+ +### Recurring etcd Snapshots + +Option to enable or disable [recurring etcd snapshots](https://rancher.com/docs/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). + +# Cluster Config File + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available](https://rancher.com/docs/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. + +>**Note:** In Rancher v2.0.5 and v2.0.6, the names of services in the Config File (YAML) should contain underscores only: `kube_api` and `kube_controller`. + +- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. +- To read from an existing RKE file, click **Read from a file**. + +![image](/img/cluster-options-yaml.png) + +The structure of the config file is different depending on your version of Rancher. Below are example config files for Rancher v2.0.0-v2.2.x and for Rancher v2.3.0+. + +### Config File Structure in Rancher v2.3.0+ + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,](https://rancher.com/docs/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +
    + Example Cluster Config File for Rancher v2.3.0+ + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +
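As a further illustration of the nesting, a cloud provider configured through the config file (see [Kubernetes Cloud Providers](#kubernetes-cloud-providers) above) also sits inside the `rancher_kubernetes_engine_config` block. A minimal sketch, with `aws` used only as an example provider name:

```yaml
rancher_kubernetes_engine_config:
  # Cloud provider options follow the RKE cloud provider schema linked above.
  cloud_provider:
    name: aws
```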
    + +### Config File Structure in Rancher v2.0.0-v2.2.x + +An example cluster config file is included below. + +
    + Example Cluster Config File for Rancher v2.0.0-v2.2.x + +```yaml +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.15.3-rancher3-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 +ssh_agent_auth: false +``` +
    + +### Default DNS provider + +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider](https://rancher.com/docs/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. + +| Rancher version | Kubernetes version | Default DNS provider | +|-------------|--------------------|----------------------| +| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | +| v2.2.5 and higher | v1.13.x and lower | kube-dns | +| v2.2.4 and lower | any | kube-dns | + +# Rancher specific parameters + +_Available as of v2.2.0_ + +Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): + +### docker_root_dir + +See [Docker Root Directory](#docker-root-directory). + +### enable_cluster_monitoring + +Option to enable or disable [Cluster Monitoring](monitoring-alerting/legacy/monitoring/cluster-monitoring/). + +### enable_network_policy + +Option to enable or disable Project Network Isolation. + +### local_cluster_auth_endpoint + +See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). + +Example: + +```yaml +local_cluster_auth_endpoint: + enabled: true + fqdn: "FQDN" + ca_certs: "BASE64_CACERT" +``` + +### Custom Network Plug-in + +_Available as of v2.2.4_ + +You can add a custom network plug-in by using the [user-defined add-on functionality](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md new file mode 100644 index 0000000000..0675c6ea9f --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md @@ -0,0 +1,57 @@ +--- +title: Rancher Agent Options +weight: 2500 +aliases: + - /rancher/v2.0-v2.4/en/admin-settings/agent-options/ + - /rancher/v2.0-v2.4/en/cluster-provisioning/custom-clusters/agent-options +--- + +Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes](../../../../pages-for-subheaders/use-existing-nodes.md) and add the options to the generated `docker run` command when adding a node. 
+ +For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.](../../../../pages-for-subheaders/rancher-manager-architecture.md#3-node-agents) + +## General options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | +| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | +| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | +| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | +| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | +| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. (`--taints key=value:effect`) | + +## Role options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | +| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | +| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | +| `--worker` | `WORKER=true` | Apply the role `worker` to the node | + +## IP address options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | +| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | + +### Dynamic IP address options + +For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. 
| Value | Example | Description | +| ---------- | -------------------- | ----------- | +| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | +| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | +| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | +| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | +| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | +| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | +| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | +| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | +| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | +| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | +| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | +| `packetpublic` | `--address packetpublic` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git a/versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md b/versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md new file mode 100644 index 0000000000..9efc94d203 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/configure-openldap/openldap-config-reference.md @@ -0,0 +1,86 @@ +--- +title: OpenLDAP Configuration Reference +weight: 2 +--- + +This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. + +For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) + +> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users](../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users).
+ +- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) +- [OpenLDAP server configuration](#openldap-server-configuration) +- [User/group schema configuration](#user-group-schema-configuration) + - [User schema configuration](#user-schema-configuration) + - [Group schema configuration](#group-schema-configuration) + +## Background: OpenLDAP Authentication Flow + +1. When a user attempts to log in with their LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. +2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. +3. Once the user has been found, they are authenticated with another LDAP bind request using the user's DN and provided password. +4. Once authentication succeeds, Rancher resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. + +# OpenLDAP Server Configuration + +You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure you have the CA certificate (concatenated with any intermediate certificates) in PEM format at hand. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +
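To make the parameters below more concrete, here is a sketch of plausible values for a hypothetical directory rooted at `dc=example,dc=com`. The keys are simply the UI field names from the following table, not an API schema, and every value is a placeholder:

```yaml
# Hypothetical example values -- adjust to your own directory layout.
Hostname: ldap.example.com
Port: 636                              # 389 for plain LDAP, 636 for LDAPS
TLS: true
Service Account Distinguished Name: cn=rancher-bind,ou=service-accounts,dc=example,dc=com
Service Account Password: <bind password>
User Search Base: ou=people,dc=example,dc=com
Group Search Base: ou=groups,dc=example,dc=com
```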
    OpenLDAP Server Parameters
    + +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the OpenLDAP server | +| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | +| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. | +| Service Account Password | The password for the service account. | +| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| + +# User/Group Schema Configuration + +If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. + +Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. + +If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +### User Schema Configuration + +The table below details the parameters for the user schema configuration. + +
**User Schema Configuration Parameters**
    + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | +| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | +| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | +| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | + +### Group Schema Configuration + +The table below details the parameters for the group schema configuration. + +
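The same approach works for groups: inspecting a single group entry shows which object class it uses and which attribute (for example `member` or `uniqueMember`) lists its members. Again, the values below are placeholders:

```bash
# Show one group object to identify its object class and member attribute.
ldapsearch -x \
  -H ldaps://openldap.example.com:636 \
  -D "cn=admin,dc=example,dc=com" -W \
  -b "ou=groups,dc=example,dc=com" \
  "(cn=devops)" objectClass cn member uniqueMember
```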
**Group Schema Configuration Parameters**
    + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | +| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md new file mode 100644 index 0000000000..dc98a57a81 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/amazon-eks-permissions.md @@ -0,0 +1,432 @@ +--- +title: Creating an EKS Cluster +shortTitle: Amazon EKS +weight: 2110 +aliases: + - /rancher/v2.0-v2.4/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ +--- + +Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). + +- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) + - [Amazon VPC](#amazon-vpc) + - [IAM Policies](#iam-policies) +- [Architecture](#architecture) +- [Create the EKS Cluster](#create-the-eks-cluster) +- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) +- [Troubleshooting](#troubleshooting) +- [AWS Service Events](#aws-service-events) +- [Security and Compliance](#security-and-compliance) +- [Tutorial](#tutorial) +- [Minimum EKS Permissions](#minimum-eks-permissions) + - [Service Role Permissions](#service-role-permissions) + - [VPC Permissions](#vpc-permissions) +- [Syncing](#syncing) + +# Prerequisites in Amazon Web Services + +>**Note** +>Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). + +To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). 
You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). + +### Amazon VPC + +You need to set up an Amazon VPC to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). + +### IAM Policies + +Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. + +1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + +2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. + +3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. + +> **Note:** It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. + +For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). + +# Architecture + +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. + +
**Managing Kubernetes Clusters through Rancher's Authentication Proxy**
    + +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +# Create the EKS Cluster + +Use Rancher to set up and configure your Kubernetes cluster. + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Amazon EKS**. + +1. Enter a **Cluster Name.** + +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) + +1. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +# EKS Cluster Configuration Reference + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Access Key | Enter the access key that you created for your IAM policy. | +| Secret Key | Enter the secret key that you created for your IAM policy. | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Public IP for Worker Nodes + + + +Your selection for this option determines what options are available for **VPC & Subnet**. + +Option | Description +-------|------------ +Yes | When your cluster nodes are provisioned, they're assigned a both a private and public IP address. +No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

    If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. + +### VPC & Subnet + + + +The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) + +Option | Description + -------|------------ + Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. + Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + + +If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. + +
+ **Click to expand** + +If you're using **Custom: Choose from your existing VPC and Subnets**: + +(If you're using **Standard**, skip to the [instance options](#instance-options).) + +1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. + +1. From the drop-down that displays, choose a VPC. + +1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + +1. Click **Next: Select Security Group**. + +
+ +If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allows your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. + +
+ **Click to expand** + +Follow the steps below. + +>**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). + +1. From the drop-down that displays, choose a VPC. + +1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + +
    + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Instance Options + + + +Instance type and size of your worker nodes affects how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. + +Option | Description +-------|------------ +Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. +Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. +Desired ASG Size | The number of instances that your cluster will provision. +User Data | Custom commands can to be passed to perform automated configuration tasks **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ + +# Troubleshooting + +If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) + +If an unauthorized error is returned while attempting to modify or import the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) + +For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). + +# AWS Service Events + +To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). + +# Security and Compliance + +By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not be imported in Rancher unless the credentials used to import the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. 
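As a rough sketch of what that looks like in practice — assuming you still have `kubectl` access through the credentials that created the cluster — you can inspect the ConfigMap and add the extra IAM user or role under `mapUsers` or `mapRoles`:

```bash
# View the current IAM-to-Kubernetes mappings
kubectl -n kube-system get configmap aws-auth -o yaml

# Edit the ConfigMap to add the additional IAM user or role
kubectl -n kube-system edit configmap aws-auth
```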
For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). + +For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). + +# Tutorial + +This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. + +# Minimum EKS Permissions + +Documented here is a minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. Additional permissions are required for Rancher to provision the `Service Role` and `VPC` resources. Optionally these resources can be created **before** the cluster creation and will be selectable when defining the cluster configuration. + +Resource | Description +---------|------------ +Service Role | The service role provides Kubernetes the permissions it requires to manage resources on your behalf. Rancher can create the service role with the following [Service Role Permissions](amazon-eks-permissions.md#service-role-permissions). +VPC | Provides isolated network resources utilised by EKS and worker nodes. Rancher can create the VPC resources with the following [VPC Permissions](amazon-eks-permissions.md#vpc-permissions). + + +Resource targeting uses `*` as the ARN of many of the resources created cannot be known before creating the EKS cluster in Rancher. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EC2Permisssions", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances", + "ec2:RevokeSecurityGroupIngress", + "ec2:RevokeSecurityGroupEgress", + "ec2:DescribeVpcs", + "ec2:DescribeTags", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeRouteTables", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeKeyPairs", + "ec2:DescribeInternetGateways", + "ec2:DescribeImages", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeAccountAttributes", + "ec2:DeleteTags", + "ec2:DeleteSecurityGroup", + "ec2:DeleteKeyPair", + "ec2:CreateTags", + "ec2:CreateSecurityGroup", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateLaunchTemplate", + "ec2:CreateKeyPair", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:AuthorizeSecurityGroupEgress" + ], + "Resource": "*" + }, + { + "Sid": "CloudFormationPermisssions", + "Effect": "Allow", + "Action": [ + "cloudformation:ListStacks", + "cloudformation:ListStackResources", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStackResource", + "cloudformation:DeleteStack", + "cloudformation:CreateStackSet", + "cloudformation:CreateStack" + ], + "Resource": "*" + }, + { + "Sid": "IAMPermissions", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListInstanceProfilesForRole", + "iam:ListInstanceProfiles", + "iam:ListAttachedRolePolicies", + "iam:GetRole", + "iam:GetInstanceProfile", + "iam:DetachRolePolicy", + "iam:DeleteRole", + "iam:CreateRole", + "iam:AttachRolePolicy" + ], + "Resource": "*" + }, + { + "Sid": "KMSPermisssions", + "Effect": "Allow", + "Action": "kms:ListKeys", + "Resource": "*" + }, + { + "Sid": "EKSPermisssions", + "Effect": "Allow", + "Action": [ + "eks:UpdateNodegroupVersion", + "eks:UpdateNodegroupConfig", + "eks:UpdateClusterVersion", + "eks:UpdateClusterConfig", + "eks:UntagResource", + "eks:TagResource", + "eks:ListUpdates", + "eks:ListTagsForResource", + "eks:ListNodegroups", + "eks:ListFargateProfiles", + "eks:ListClusters", + "eks:DescribeUpdate", + "eks:DescribeNodegroup", + "eks:DescribeFargateProfile", + "eks:DescribeCluster", + "eks:DeleteNodegroup", + "eks:DeleteFargateProfile", + "eks:DeleteCluster", + "eks:CreateNodegroup", + "eks:CreateFargateProfile", + "eks:CreateCluster" + ], + "Resource": "*" + } + ] +} +``` + +### Service Role Permissions + +Rancher will create a service role with the following trust policy: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Effect": "Allow", + "Sid": "" + } + ] +} +``` + +This role will also have two role policy attachments with the following policies ARNs: + +``` +arn:aws:iam::aws:policy/AmazonEKSClusterPolicy +arn:aws:iam::aws:policy/AmazonEKSServicePolicy +``` + +Permissions required for Rancher to create service role on users behalf during the EKS cluster creation process. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "IAMPermisssions", + "Effect": "Allow", + "Action": [ + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreateRole", + "iam:CreateServiceLinkedRole", + "iam:DeleteInstanceProfile", + "iam:DeleteRole", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile" + ], + "Resource": "*" + } + ] +} +``` + +### VPC Permissions + +Permissions required for Rancher to create VPC and associated resources. + +```json +{ + "Sid": "VPCPermissions", + "Effect": "Allow", + "Action": [ + "ec2:ReplaceRoute", + "ec2:ModifyVpcAttribute", + "ec2:ModifySubnetAttribute", + "ec2:DisassociateRouteTable", + "ec2:DetachInternetGateway", + "ec2:DescribeVpcs", + "ec2:DeleteVpc", + "ec2:DeleteTags", + "ec2:DeleteSubnet", + "ec2:DeleteRouteTable", + "ec2:DeleteRoute", + "ec2:DeleteInternetGateway", + "ec2:CreateVpc", + "ec2:CreateSubnet", + "ec2:CreateSecurityGroup", + "ec2:CreateRouteTable", + "ec2:CreateRoute", + "ec2:CreateInternetGateway", + "ec2:AttachInternetGateway", + "ec2:AssociateRouteTable" + ], + "Resource": "*" +} +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/feature-flags.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md new file mode 100644 index 0000000000..16663dd3c1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/helm-chart-options.md @@ -0,0 +1,265 @@ +--- +title: Rancher Helm Chart Options +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/ + - /rancher/v2.0-v2.4/en/installation/options/chart-options/ + - /rancher/v2.0-v2.4/en/installation/options/helm2/helm-rancher/chart-options/ + - /rancher/v2.0-v2.4/en/installation/resources/chart-options +--- + +This page is a configuration reference for the Rancher Helm chart. 
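As a quick orientation, the options documented on this page are passed to Helm with `--set` flags (or a values file) at install or upgrade time. The example below is only a sketch using Helm 3 syntax; the repository name, hostname, and email address are placeholders:

```bash
helm install rancher rancher-stable/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set ingress.tls.source=letsEncrypt \
  --set letsEncrypt.email=admin@example.com
```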
+ +For help choosing a Helm chart version, refer to [this page.](../../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) + +For information on enabling experimental features, refer to [this page.](../../pages-for-subheaders/enable-experimental-features.md) + +- [Common Options](#common-options) +- [Advanced Options](#advanced-options) +- [API Audit Log](#api-audit-log) +- [Setting Extra Environment Variables](#setting-extra-environment-variables) +- [TLS Settings](#tls-settings) +- [Customizing your Ingress](#customizing-your-ingress) +- [HTTP Proxy](#http-proxy) +- [Additional Trusted CAs](#additional-trusted-cas) +- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) +- [External TLS Termination](#external-tls-termination) + +### Common Options + +| Option | Default Value | Description | +| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | +| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | +| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | +| `letsEncrypt.email` | " " | `string` - Your email address | +| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | +| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | + +
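For longer option lists it is often easier to keep the settings in a values file instead of repeating `--set` flags. A minimal, hypothetical example using some of the common options above (the TLS secret implied by `ingress.tls.source: secret` must still be created separately):

```bash
cat > rancher-values.yaml <<'EOF'
hostname: rancher.example.com
ingress:
  tls:
    source: secret
privateCA: true
EOF

helm install rancher rancher-stable/rancher \
  --namespace cattle-system \
  -f rancher-values.yaml
```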
    + +### Advanced Options + +| Option | Default Value | Description | +| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | +| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. In v2.5.0, the `restrictedAdmin` option is used to prevent users from modifying the local cluster._ | +| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | +| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | +| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](installation/api-auditing) level. 0 is off. [0-3] | +| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | +| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs _Note: Available as of v2.2.0_ | +| `certmanager.version` | "" | `string` - set cert-manager compatibility | +| `debug` | false | `bool` - set debug flag on rancher server | +| `extraEnv` | [] | `list` - set additional environment variables for Rancher _Note: Available as of v2.2.0_ | +| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | +| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. _Note: Available as of v2.0.15, v2.1.10 and v2.2.4_ | +| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | +| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. 
| | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma separated list of hostnames or ip address not to use the proxy | | +| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | +| `rancherImage` | "rancher/rancher" | `string` - rancher image source | +| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | +| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | +| `replicas` | 3 | `int` - Number of replicas of Rancher pods | +| `resources` | {} | `map` - rancher pod resource requests & limits | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ | +| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | + + + +### API Audit Log + +Enabling the [API Audit Log](installation/api-auditing/). + +You can collect this log as you would any container log. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the `System` Project on the Rancher server cluster. + +```plain +--set auditLog.level=1 +``` + +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable the [Logging service under Rancher Tools](cluster-admin/tools/logging/) for the Rancher server cluster or System Project. + +Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. + +### Setting Extra Environment Variables + +You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +### TLS Settings + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as minimum accepted TLS version: + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +See [TLS settings](admin-settings/tls-settings) for more information and options. + +### Import `local` Cluster + +By default Rancher server will detect and import the `local` cluster it's running on. User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. + +> **Important if you are considering upgrading to Rancher v2.5:** If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. 
In Rancher v2.5, the restrictedAdmin option is used to prevent users from modifying the local cluster. + +If this is a concern in your environment you can set this option to "false" on your initial install. + +This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. + +```plain +--set addLocal="false" +``` + +### Customizing your Ingress + +To customize or use a different ingress with Rancher server you can set your own Ingress annotations. + +Example on setting a custom certificate issuer: + +```plain +--set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name +``` + +Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. + +```plain +--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' +``` + +### HTTP Proxy + +Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. + +Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. + +```plain +--set proxy="http://:@:/" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" +``` + +### Additional Trusted CAs + +If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. + +```plain +--set additionalTrustedCAs=true +``` + +Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. + +```plain +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem +``` + +### Private Registry and Air Gap Installs + +For details on installing Rancher with a private registry, see: + +- [Air Gap: Docker Install](installation/air-gap-single-node/) +- [Air Gap: Kubernetes Install](installation/air-gap-high-availability/) + +# External TLS Termination + +We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. + +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. + +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](installation/resources/encryption/tls-secrets/) to add the CA cert for Rancher. + +Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. 
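As a sketch of the Helm side of this setup (the release name and hostname are placeholders, and any other options you set at install time should be repeated or preserved with `--reuse-values`):

```bash
helm upgrade rancher rancher-stable/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set tls=external
```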
+ +### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: 'true' +``` + +### Required Headers + +- `Host` +- `X-Forwarded-Proto` +- `X-Forwarded-Port` +- `X-Forwarded-For` + +### Recommended Timeouts + +- Read Timeout: `1800 seconds` +- Write Timeout: `1800 seconds` +- Connect Timeout: `30 seconds` + +### Health Checks + +Rancher will respond `200` to health checks on the `/healthz` endpoint. + +### Example NGINX config + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server IP_NODE_1:80; + server IP_NODE_2:80; + server IP_NODE_3:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md new file mode 100644 index 0000000000..7b076dfc82 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/installation-references/tls-settings.md @@ -0,0 +1,36 @@ +--- +title: TLS Settings +weight: 3 +aliases: + - /rancher/v2.0-v2.4/en/installation/options/tls-settings/ + - /rancher/v2.0-v2.4/en/admin-settings/tls-settings + - /rancher/v2.0-v2.4/en/installation/resources/encryption/tls-settings +--- + +In Rancher v2.1.7, the default TLS configuration changed to only accept TLS 1.2 and secure TLS cipher suites. TLS 1.3 and TLS 1.3 exclusive cipher suites are not supported. 
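For reference, on a single-node Docker installation these TLS settings are passed as environment variables on the Rancher container. The example below is illustrative only — the image tag and published ports are placeholders, and the linked pages below cover the details:

```bash
# Example: temporarily allow older clients by lowering the minimum TLS version.
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e CATTLE_TLS_MIN_VERSION="1.0" \
  rancher/rancher:latest
```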
+ +# Configuring TLS settings + +The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. + +- [TLS settings in Docker options](../single-node-rancher-in-docker/advanced-options.md#tls-settings) + +- [TLS settings in Helm chart options](helm-chart-options.md#tls-settings) + +# TLS Environment Variables + +| Parameter | Description | Default | Available options | +|-----|-----|-----|-----| +| `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2` | +| `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`<br/>`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`<br/>`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`<br/>`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`<br/>
    `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) | + + +# Legacy configuration + +If you need to configure TLS the same way as it was before Rancher v2.1.7, please use the following settings: + + +| Parameter | Legacy value | +|-----|-----| +| `CATTLE_TLS_MIN_VERSION` | `1.0` | +| `CATTLE_TLS_CIPHERS` | `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,`
`TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,`<br/>`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,`<br/>`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,`<br/>`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,`<br/>`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,`<br/>`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,`<br/>`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,`<br/>`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,`<br/>`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,`<br/>`TLS_RSA_WITH_AES_128_GCM_SHA256,`<br/>`TLS_RSA_WITH_AES_256_GCM_SHA384,`<br/>`TLS_RSA_WITH_AES_128_CBC_SHA,`<br/>`TLS_RSA_WITH_AES_256_CBC_SHA,`<br/>`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,`<br/>
    `TLS_RSA_WITH_3DES_EDE_CBC_SHA` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md b/versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md new file mode 100644 index 0000000000..c6813e7ba0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/kubernetes-concepts.md @@ -0,0 +1,72 @@ +--- +title: Kubernetes Concepts +weight: 4 +--- + +This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified interview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) + +This section covers the following topics: + +- [About Docker](#about-docker) +- [About Kubernetes](#about-kubernetes) +- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) +- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) + - [etcd Nodes](#etcd-nodes) + - [Controlplane Nodes](#controlplane-nodes) + - [Worker Nodes](#worker-nodes) +- [About Helm](#about-helm) + +# About Docker + +Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. + +>**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. + +# About Kubernetes + +Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. + +# What is a Kubernetes Cluster? + +A cluster is a group of computers that work together as a single system. + +A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. + +# Roles for Nodes in Kubernetes Clusters + +Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. + +A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. + +### etcd Nodes + +Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. + +The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. + +The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. 
+ +Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. + +Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. + +### Controlplane Nodes + +Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although three or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. + +### Worker Nodes + +Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: + +- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. +- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. + +Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads](../pages-for-subheaders/workloads-and-pods.md). + +# About Helm + +For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. + +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). + +For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) diff --git a/content/rancher/v2.0-v2.4/en/pipelines/concepts/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/concepts.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/pipelines/concepts/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/pipelines/concepts.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md new file mode 100644 index 0000000000..8dcbcef9c0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/configure-persistent-data.md @@ -0,0 +1,91 @@ +--- +title: Configuring Persistent Data for Pipeline Components +weight: 600 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/storage +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. 
If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +>**Prerequisites (for both parts A and B):** +> +>[Persistent volumes](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) must be available for the cluster. + +### A. Configuring Persistent Data for Docker Registry + +1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** In versions before v2.3.0, select the **Workloads** tab. + +1. Find the `docker-registry` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. + +1. Click **Upgrade**. + +### B. Configuring Persistent Data for Minio + +1. From the project view, click **Resources > Workloads.** (In versions before v2.3.0, click the **Workloads** tab.) Find the `minio` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. + +1. Click **Upgrade**. 
+ +**Result:** Persistent storage is configured for your pipeline components. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md new file mode 100644 index 0000000000..8321430b7a --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-repositories.md @@ -0,0 +1,77 @@ +--- +title: Example Repositories +weight: 500 +aliases: + - /rancher/v2.0-v2.4/en/tools/pipelines/quick-start-guide/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example-repos +--- + +Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: + +- Go +- Maven +- php + +> **Note:** The example repositories are only available if you have not [configured a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md). + +To start using these example repositories, + +1. [Enable the example repositories](#1-enable-the-example-repositories) +2. [View the example pipeline](#2-view-the-example-pipeline) +3. [Run the example pipeline](#3-run-the-example-pipeline) + +### 1. Enable the Example Repositories + +By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Click **Configure Repositories**. + + **Step Result:** A list of example repositories displays. + + >**Note:** Example repositories only display if you haven't fetched your own repos. + +1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. + +**Results:** + +- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. + +- The following workloads are deployed to a new namespace: + + - `docker-registry` + - `jenkins` + - `minio` + +### 2. View the Example Pipeline + +After enabling an example repository, review the pipeline to see how it is set up. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Find the example repository, select the vertical **⋮**. There are two ways to view the pipeline: + * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. + * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. + +### 3. Run the Example Pipeline + +After enabling an example repository, run the pipeline to see how it works. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. Find the example repository, select the vertical **⋮ > Run**. 
+ + >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. + +**Result:** The pipeline runs. You can see the results in the logs. + +### What's Next? + +For detailed information about setting up your own pipeline for your repository, [configure a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md), enable a repository and finally configure your pipeline. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md new file mode 100644 index 0000000000..02ba54ce73 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/example-yaml.md @@ -0,0 +1,75 @@ +--- +title: Example YAML File +weight: 501 +aliases: + - /rancher/v2.0-v2.4/en/tools/pipelines/reference/ + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/example +--- + +Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. + +In the [pipeline configuration reference](k8s-in-rancher/pipelines/config), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. + +Below is a full example `rancher-pipeline.yml` for those who want to jump right in. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} + # Set environment variables in container for the step + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 + # Set environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com + - name: Deploy some workloads + steps: + - applyYamlConfig: + path: ./deployment.yaml +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +# timeout in minutes +timeout: 30 +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md new file mode 100644 index 0000000000..65436b10d0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/pipeline-configuration.md @@ -0,0 +1,660 @@ +--- +title: Pipeline Configuration Reference +weight: 1 +aliases: + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/config +--- + +In this section, you'll learn how to configure pipelines. 
+ +- [Step Types](#step-types) +- [Step Type: Run Script](#step-type-run-script) +- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) +- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) +- [Step Type: Deploy YAML](#step-type-deploy-yaml) +- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) +- [Notifications](#notifications) +- [Timeouts](#timeouts) +- [Triggers and Trigger Rules](#triggers-and-trigger-rules) +- [Environment Variables](#environment-variables) +- [Secrets](#secrets) +- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) +- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) + - [Executor Quota](#executor-quota) + - [Resource Quota for Executors](#resource-quota-for-executors) + - [Custom CA](#custom-ca) +- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) +- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) + +# Step Types + +Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. + +Step types include: + +- [Run Script](#step-type-run-script) +- [Build and Publish Images](#step-type-build-and-publish-images) +- [Publish Catalog Template](#step-type-publish-catalog-template) +- [Deploy YAML](#step-type-deploy-yaml) +- [Deploy Catalog App](#step-type-deploy-catalog-app) + + + +### Configuring Steps By UI + +If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. + +1. Add stages to your pipeline execution by clicking **Add Stage**. + + 1. Enter a **Name** for each stage of your pipeline. + 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. + +1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. + +### Configuring Steps by YAML + +For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com +``` +# Step Type: Run Script + +The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configuring Script by UI + +1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. + +1. Click **Add**. 
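+
+For illustration, the sketch below (not taken from an official example; the image and commands are placeholders) shows a script step that echoes execution metadata from the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) before running a build:
+
+```yaml
+# sketch: a script step that uses pipeline variables
+stages:
+- name: Build something
+  steps:
+  - runScriptConfig:
+      image: golang
+      shellScript: |-
+        echo "Building commit ${CICD_GIT_COMMIT} on branch ${CICD_GIT_BRANCH}"
+        go build
+```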
+ +### Configuring Script by YAML +```yaml +# example +stages: +- name: Build something + steps: + - runScriptConfig: + image: golang + shellScript: go build +``` +# Step Type: Build and Publish Images + +_Available as of Rancher v2.1.0_ + +The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. + +The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. + +### Configuring Building and Publishing Images by UI +1. From the **Step Type** drop-down, choose **Build and Publish**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | + Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | + Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | + Build Context
    (**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). + +### Configuring Building and Publishing Images by YAML + +You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: + +Variable Name | Description +------------------------|------------------------------------------------------------ +PLUGIN_DRY_RUN | Disable docker push +PLUGIN_DEBUG | Docker daemon executes in debug mode +PLUGIN_MIRROR | Docker daemon registry mirror +PLUGIN_INSECURE | Docker daemon allows insecure registries +PLUGIN_BUILD_ARGS | Docker build args, a comma separated list + +
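+
+As a further illustration, the sketch below (values are placeholders, not from an official example) passes build arguments and a registry mirror to the Docker daemon for this step type:
+
+```yaml
+# sketch: passing Docker build args and a daemon registry mirror
+stages:
+- name: Publish Image
+  steps:
+  - publishImageConfig:
+      dockerfilePath: ./Dockerfile
+      buildContext: .
+      tag: repo/app:v1
+    env:
+      # comma-separated list of build args, as described in the table above
+      PLUGIN_BUILD_ARGS: "HTTP_PROXY=http://proxy.example.com,APP_VERSION=1.0.0"
+      PLUGIN_MIRROR: "https://mirror.example.com"
+```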
    + +```yaml +# This example shows an environment variable being used +# in the Publish Image step. This variable allows you to +# publish an image to an insecure registry: + +stages: +- name: Publish Image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + pushRemote: true + registry: example.com + env: + PLUGIN_INSECURE: "true" +``` + +# Step Type: Publish Catalog Template + +_Available as of v2.2.0_ + +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a [git hosted chart repository](catalog/custom/). It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. + +### Configuring Publishing a Catalog Template by UI + +1. From the **Step Type** drop-down, choose **Publish Catalog Template**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | + Catalog Template Name | The name of the template. For example, wordpress. | + Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | + Protocol | You can choose to publish via HTTP(S) or SSH protocol. | + Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | + Git URL | The Git URL of the chart repository that the template will be published to. | + Git Branch | The Git branch of the chart repository that the template will be published to. | + Author Name | The author name used in the commit message. | + Author Email | The author email used in the commit message. | + + +### Configuring Publishing a Catalog Template by YAML + +You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: + +* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. +* CatalogTemplate: The name of the template. +* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. +* GitUrl: The git URL of the chart repository that the template will be published to. +* GitBranch: The git branch of the chart repository that the template will be published to. +* GitAuthor: The author name used in the commit message. +* GitEmail: The author email used in the commit message. +* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. + +```yaml +# example +stages: +- name: Publish Wordpress Template + steps: + - publishCatalogConfig: + path: ./charts/wordpress/latest + catalogTemplate: wordpress + version: ${CICD_GIT_TAG} + gitUrl: git@github.com:myrepo/charts.git + gitBranch: master + gitAuthor: example-user + gitEmail: user@example.com + envFrom: + - sourceName: publish-keys + sourceKey: DEPLOY_KEY +``` + +# Step Type: Deploy YAML + +This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configure Deploying YAML by UI + +1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. + +1. Enter the **YAML Path**, which is the path to the manifest file in the source code. + +1. Click **Add**. + +### Configure Deploying YAML by YAML + +```yaml +# example +stages: +- name: Deploy + steps: + - applyYamlConfig: + path: ./deployment.yaml +``` + +# Step Type :Deploy Catalog App + +_Available as of v2.2.0_ + +The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. + +### Configure Deploying Catalog App by UI + +1. From the **Step Type** drop-down, choose **Deploy Catalog App**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Catalog | The catalog from which the app template will be used. | + Template Name | The name of the app template. For example, wordpress. | + Template Version | The version of the app template you want to deploy. | + Namespace | The target namespace where you want to deploy the app. | + App Name | The name of the app you want to deploy. | + Answers | Key-value pairs of answers used to deploy the app. | + + +### Configure Deploying Catalog App by YAML + +You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: + +* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. +* Version: The version of the template you want to deploy. +* Answers: Key-value pairs of answers used to deploy the app. +* Name: The name of the app you want to deploy. +* TargetNamespace: The target namespace where you want to deploy the app. + +```yaml +# example +stages: +- name: Deploy App + steps: + - applyAppConfig: + catalogTemplate: cattle-global-data:library-mysql + version: 0.3.8 + answers: + persistence.enabled: "false" + name: testmysql + targetNamespace: test +``` + +# Timeouts + +By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. + +### Configuring Timeouts by UI + +Enter a new value in the **Timeout** field. 
+ +### Configuring Timeouts by YAML + +In the `timeout` section, enter the timeout value in minutes. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +# timeout in minutes +timeout: 30 +``` + +# Notifications + +You can enable notifications to any [notifiers](../../explanations/integrations-in-rancher/notifiers.md) based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers](monitoring-alerting/legacy/notifiers/) so it will be easy to add recipients immediately. + +### Configuring Notifications by UI + +_Available as of v2.2.0_ + +1. Within the **Notification** section, turn on notifications by clicking **Enable**. + +1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. + +1. If you don't have any existing [notifiers](../../explanations/integrations-in-rancher/notifiers.md), Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions](monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. + + > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. + +1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. + +### Configuring Notifications by YAML +_Available as of v2.2.0_ + +In the `notification` section, you will provide the following information: + +* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. + * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. + * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. +* **Condition:** Select which conditions of when you want the notification to be sent. +* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. 
+ +```yaml +# Example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` + +# Triggers and Trigger Rules + +After you configure a pipeline, you can trigger it using different methods: + +- **Manually:** + + After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. + +- **Automatically:** + + When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. + + To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. + +Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: + +- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. + +- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. + +If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. + +Wildcard character (`*`) expansion is supported in `branch` conditions. + +This section covers the following topics: + +- [Configuring pipeline triggers](#configuring-pipeline-triggers) +- [Configuring stage triggers](#configuring-stage-triggers) +- [Configuring step triggers](#configuring-step-triggers) +- [Configuring triggers by YAML](#configuring-triggers-by-yaml) + +### Configuring Pipeline Triggers + +1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. Click on **Show Advanced Options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. + + 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. + + 1. **Optional:** Add more branches that trigger a build. + +1. Click **Done.** + +### Configuring Stage Triggers + +1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. 
Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. + +1. Click **Show advanced options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the stage. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the stage and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the stage. | + | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + +### Configuring Step Triggers + +1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. + +1. Click **Show advanced options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the step. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the step and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the step. | + | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + + +### Configuring Triggers by YAML + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +``` + +# Environment Variables + +When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. + +### Configuring Environment Variables by UI + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. + +1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. + +1. Click **Show advanced options**. + +1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. + +1. Add your environment variable(s) into either the script or file. + +1. Click **Save**. + +### Configuring Environment Variables by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 +``` + +# Secrets + +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md). + +### Prerequisite +Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. +
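+
+For example, a secret like the following could be created and then referenced from a step with `envFrom`, as shown in the YAML example below (the names and value here are placeholders):
+
+```yaml
+# sketch: a Kubernetes secret that a pipeline step can consume via envFrom
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-secret
+  namespace: my-pipeline-namespace   # placeholder: use your project/pipeline namespace
+type: Opaque
+stringData:
+  secret-key: my-secret-value
+```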
    + +>**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). + +### Configuring Secrets by UI + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** In versions before v2.3.0, click **Workloads > Pipelines.** + +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. + +1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. + +1. Click **Show advanced options**. + +1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. + +1. Click **Save**. + +### Configuring Secrets by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${ALIAS_ENV} + # environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV +``` + +# Pipeline Variable Substitution Reference + +For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. + +Variable Name | Description +------------------------|------------------------------------------------------------ +`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). +`CICD_GIT_URL` | URL of the Git repository. +`CICD_GIT_COMMIT` | Git commit ID being executed. +`CICD_GIT_BRANCH` | Git branch of this event. +`CICD_GIT_REF` | Git reference specification of this event. +`CICD_GIT_TAG` | Git tag name, set on tag event. +`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). +`CICD_PIPELINE_ID` | Rancher ID for the pipeline. +`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. +`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. +`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. +`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
    [Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) + +# Global Pipeline Execution Settings + +After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar. In versions before v2.2.0, you can select **Resources > Pipelines**. + +- [Executor Quota](#executor-quota) +- [Resource Quota for Executors](#resource-quota-for-executors) +- [Custom CA](#custom-ca) + +### Executor Quota + +Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. + +### Resource Quota for Executors + +_Available as of v2.2.0_ + +Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. + +Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. + +To configure compute resources for pipeline-step containers: + +You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. + +In a step, you will provide the following information: + +* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. +* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. +* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. +* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi +``` + +>**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. + +### Custom CA + +_Available as of v2.2.0_ + +If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. + +1. Click **Edit cacerts**. + +1. Paste in the CA root certificates and click **Save cacerts**. + +**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. + +# Persistent Data for Pipeline Components + +The internal Docker registry and the Minio workloads use ephemeral volumes by default. 
This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +For details on setting up persistent storage for pipelines, refer to [this page.](k8s-in-rancher/pipelines/storage) + +# Example rancher-pipeline.yml + +An example pipeline configuration file is on [this page.](k8s-in-rancher/pipelines/example) diff --git a/versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md new file mode 100644 index 0000000000..18001af57c --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/pipelines/v2.0.x.md @@ -0,0 +1,128 @@ +--- +title: v2.0.x Pipeline Documentation +weight: 9000 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/pipelines/docs-for-v2.0.x + - /rancher/v2.0-v2.4/en/project-admin/pipelines/docs-for-v2.0.x + - /rancher/v2.0-v2.4/en/k8s-in-rancher/pipelines/docs-for-v2.0.x + - /rancher/v2.x/en/pipelines/docs-for-v2.0.x/ +--- + +>**Note:** This section describes the pipeline feature as implemented in Rancher v2.0.x. If you are using Rancher v2.1 or later, where pipelines have been significantly improved, please refer to the new documentation for [v2.1 or later](k8s-in-rancher/pipelines/). + + + +Pipelines help you automate the software delivery process. You can integrate Rancher with GitHub to create a pipeline. + +You can set up your pipeline to run a series of stages and steps to test your code and deploy it. + +
+- **Pipelines:** Contain a series of stages and steps. Out-of-the-box, the pipelines feature supports fan-out and fan-in capabilities.
+- **Stages:** Executed sequentially. The next stage will not execute until all of the steps within the stage execute.
+- **Steps:** Executed in parallel within a stage.
    +
    + +## Enabling CI Pipelines + +1. Select cluster from drop down. + +2. Under tools menu select pipelines. + +3. Follow instructions for setting up github auth on page. + + +## Creating CI Pipelines + +1. Go to the project you want this pipeline to run in. + +2. Click **Resources > Pipelines.** In versions before v2.3.0,click **Workloads > Pipelines.** + +4. Click Add pipeline button. + +5. Enter in your repository name (Autocomplete should help zero in on it quickly). + +6. Select Branch options. + + - Only the branch {BRANCH NAME}: Only events triggered by changes to this branch will be built. + + - Everything but {BRANCH NAME}: Build any branch that triggered an event EXCEPT events from this branch. + + - All branches: Regardless of the branch that triggered the event always build. + + >**Note:** If you want one path for master, but another for PRs or development/test/feature branches, create two separate pipelines. + +7. Select the build trigger events. By default, builds will only happen by manually clicking build now in Rancher UI. + + - Automatically build this pipeline whenever there is a git commit. (This respects the branch selection above) + + - Automatically build this pipeline whenever there is a new PR. + + - Automatically build the pipeline. (Allows you to configure scheduled builds similar to Cron) + +8. Click Add button. + + By default, Rancher provides a three stage pipeline for you. It consists of a build stage where you would compile, unit test, and scan code. The publish stage has a single step to publish a docker image. + + +8. Add a name to the pipeline in order to complete adding a pipeline. + +9. Click on the ‘run a script’ box under the ‘Build’ stage. + + Here you can set the image, or select from pre-packaged envs. + +10. Configure a shell script to run inside the container when building. + +11. Click Save to persist the changes. + +12. Click the “publish an image’ box under the “Publish” stage. + +13. Set the location of the Dockerfile. By default it looks in the root of the workspace. Instead, set the build context for building the image relative to the root of the workspace. + +14. Set the image information. + + The registry is the remote registry URL. It is defaulted to Docker hub. + Repository is the `/` in the repository. + +15. Select the Tag. You can hard code a tag like ‘latest’ or select from a list of available variables. + +16. If this is the first time using this registry, you can add the username/password for pushing the image. You must click save for the registry credentials AND also save for the modal. + + + + +## Creating a New Stage + +1. To add a new stage the user must click the ‘add a new stage’ link in either create or edit mode of the pipeline view. + +2. Provide a name for the stage. + +3. Click save. + + +## Creating a New Step + +1. Go to create / edit mode of the pipeline. + +2. Click “Add Step” button in the stage that you would like to add a step in. + +3. 
Fill out the form as detailed above + + +## Environment Variables + +For your convenience the following environment variables are available in your build steps: + +Variable Name | Description +------------------------|------------------------------------------------------------ +CICD_GIT_REPO_NAME | Repository Name (Stripped of Github Organization) +CICD_PIPELINE_NAME | Name of the pipeline +CICD_GIT_BRANCH | Git branch of this event +CICD_TRIGGER_TYPE | Event that triggered the build +CICD_PIPELINE_ID | Rancher ID for the pipeline +CICD_GIT_URL | URL of the Git repository +CICD_EXECUTION_SEQUENCE | Build number of the pipeline +CICD_EXECUTION_ID | Combination of {CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE} +CICD_GIT_COMMIT | Git commit ID being executed. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md new file mode 100644 index 0000000000..c45b2b9638 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-cluster-tools.md @@ -0,0 +1,71 @@ +--- +title: Tools for Logging, Monitoring, and More +weight: 2033 +aliases: + - /rancher/v2.0-v2.4/en/toolcluster-admin/tools/notifiers-and-alerts/ +--- + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + + + +- [Logging](#logging) +- [Monitoring](#monitoring) +- [Alerts](#alerts) +- [Notifiers](#notifiers) +- [Istio](#istio) +- [OPA Gatekeeper](#opa-gatekeeper) +- [CIS Scans](#cis-scans) + + + + +# Logging + +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debugg and troubleshoot problems + +Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. + +Refer to the logging documentation [here.](../pages-for-subheaders/cluster-logging.md) + +# Monitoring + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. + +For details, refer to [Monitoring.](../pages-for-subheaders/cluster-monitoring.md) +# Alerts + +After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. + +Alerts are rules that trigger notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. + +For details, refer to [Alerts.](../pages-for-subheaders/cluster-alerts.md) +# Notifiers + +Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. 
+ +For details, refer to [Notifiers.](../explanations/integrations-in-rancher/notifiers.md) +# Istio + +_Available as of v2.3_ + +[Istio](https:cluster-admin/tools/istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. + +Refer to the Istio documentation [here.](../pages-for-subheaders/istio.md) + +# OPA Gatekeeper + +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](../explanations/integrations-in-rancher/opa-gatekeeper.md) + + +# CIS Scans + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. + +Refer to the CIS scan documentation [here.](../pages-for-subheaders/cis-scans.md) \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md new file mode 100644 index 0000000000..010866535e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/architecture-recommendations.md @@ -0,0 +1,121 @@ +--- +title: Architecture Recommendations +weight: 3 +--- + +Kubernetes cluster. If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the cluster running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) + +This section covers the following topics: + +- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) +- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) +- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) +- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) +- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) +- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) + +# Separation of Rancher and User Clusters + +A user cluster is a downstream Kubernetes cluster that runs your apps and services. + +If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. + +In Kubernetes installations of Rancher, the Rancher server cluster should also be separate from the user clusters. + +![Separation of Rancher Server from User Clusters](/img/rancher-architecture-separation-of-rancher-server.svg) + +# Why HA is Better for Rancher in Production + +We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. 
+ +We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. + +As of v2.4, Rancher needs to be installed on either a high-availability [RKE (Rancher Kubernetes Engine)](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster, or a high-availability [K3s (Lightweight Kubernetes)](https://rancher.com/docs/k3s/latest/en/) Kubernetes cluster. Both RKE and K3s are fully certified Kubernetes distributions. + +Rancher versions before v2.4 need to be installed on an RKE cluster. + +### K3s Kubernetes Cluster Installations + +If you are installing Rancher v2.4 for the first time, we recommend installing it on a K3s Kubernetes cluster. One main advantage of this K3s architecture is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. + +The option to install Rancher on a K3s cluster is a feature introduced in Rancher v2.4. K3s is easy to install, with half the memory of Kubernetes, all in a binary less than 100 MB. + +
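+
+As a rough sketch of this architecture (not an official example; the endpoint, token, and hostname are placeholders, and older K3s releases take the same settings as CLI flags such as `--datastore-endpoint` instead of a config file), each K3s server node could point at a shared external database:
+
+```yaml
+# sketch: /etc/rancher/k3s/config.yaml on each K3s server node
+datastore-endpoint: "mysql://username:password@tcp(db.example.com:3306)/k3s"
+token: "shared-cluster-secret"
+tls-san:
+  - "rancher.example.com"
+```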
+*Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server*
    +![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) + +### RKE Kubernetes Cluster Installations + +If you are installing Rancher before v2.4, you will need to install Rancher on an RKE cluster, in which the cluster data is stored on each node with the etcd role. As of Rancher v2.4, there is no migration path to transition the Rancher server from an RKE cluster to a K3s cluster. All versions of the Rancher server, including v2.4+, can be installed on an RKE cluster. + +In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. + +
+*Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server*
    +![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) + +# Recommended Load Balancer Configuration for Kubernetes Installations + +We recommend the following configurations for the load balancer and Ingress controllers: + +* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
+*Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers*
    +![Rancher HA](/img/ha/rancher2ha.svg) + +# Environment for Kubernetes Installations + +It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. + +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +It is not recommended to install Rancher on top of a managed Kubernetes service such as Amazon’s EKS or Google Kubernetes Engine. These hosted Kubernetes solutions do not expose etcd to a degree that is manageable for Rancher, and their customizations can interfere with Rancher operations. + +# Recommended Node Roles for Kubernetes Installations + +Our recommendations for the roles of each node differ depending on whether Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. + +### K3s Cluster Roles + +In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. + +For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. + +### RKE Cluster Roles + +If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. + +### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters + +Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. + +Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. + +For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. + +![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters](/img/rancher-architecture-node-roles.svg) + +RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. + +We recommend that downstream user clusters should have at least: + +- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available +- **Two nodes with only the controlplane role** to make the master component highly available +- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services + +With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. 
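+
+For reference, a minimal `cluster.yml` sketch for such a Rancher server cluster might look like the following (addresses and SSH user are placeholders):
+
+```yaml
+# sketch: three RKE nodes, each with all three roles
+nodes:
+  - address: 172.16.0.1
+    user: rancher
+    role: [controlplane, worker, etcd]
+  - address: 172.16.0.2
+    user: rancher
+    role: [controlplane, worker, etcd]
+  - address: 172.16.0.3
+    user: rancher
+    role: [controlplane, worker, etcd]
+```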
+ +Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. + +For more best practices for downstream clusters, refer to the [production checklist](../../pages-for-subheaders/checklist-for-production-ready-clusters.md) or our [best practices guide.](../../pages-for-subheaders/best-practices.md) + +# Architecture for an Authorized Cluster Endpoint + +If you are using an [authorized cluster endpoint,](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. + +If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files](k8s-in-rancher/kubeconfig/) and [API keys](../user-settings/api-keys.md#creating-an-api-key) for more information. \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-manager-architecture/rancher-server-and-components.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md new file mode 100644 index 0000000000..b101398ccc --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-alerts.md @@ -0,0 +1,252 @@ +--- +title: Project Alerts +weight: 2526 +aliases: + - /rancher/v2.0-v2.4/en/project-admin/tools/alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/legacy/alerts/project-alerts + - /rancher/v2.0-v2.4/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts + - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-alerts/project-alerts/ +--- + +To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. When an event occurs, your alert is triggered, and you are sent a notification. You can then, if necessary, follow up with corrective actions. + +Notifiers and alerts are built on top of the [Prometheus Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 
Leveraging these tools, Rancher can notify [cluster owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) and [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) of events they need to address. + +Before you can receive alerts, one or more [notifier](../../explanations/integrations-in-rancher/notifiers.md) must be configured at the cluster level. + +Only [administrators](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can manage project alerts. + +This section covers the following topics: + +- [Alerts scope](#alerts-scope) +- [Default project-level alerts](#default-project-level-alerts) +- [Adding project alerts](#adding-project-alerts) +- [Managing project alerts](#managing-project-alerts) +- [Project Alert Rule Configuration](#project-alert-rule-configuration) + - [Pod Alerts](#pod-alerts) + - [Workload Alerts](#workload-alerts) + - [Workload Selector Alerts](#workload-selector-alerts) + - [Metric Expression Alerts](#metric-expression-alerts) + + +# Alerts Scope + +The scope for alerts can be set at either the [cluster level](cluster-admin/tools/alerts/) or project level. + +At the project level, Rancher monitors specific deployments and sends alerts for: + +* Deployment availability +* Workloads status +* Pod status +* The Prometheus expression cross the thresholds + +# Default Project-level Alerts + +When you enable monitoring for the project, some project-level alerts are provided. You can receive these alerts if a [notifier](../../explanations/integrations-in-rancher/notifiers.md) for them is configured at the cluster level. + +| Alert | Explanation | +|-------|-------------| +| Less than half workload available | A critical alert is triggered if less than half of a workload is available, based on workloads where the key is `app` and the value is `workload`. | +| Memory usage close to the quota | A warning alert is triggered if the workload's memory usage exceeds the memory resource quota that is set for the workload. You can see the memory limit in the Rancher UI if you go to the workload under the **Security & Host Config** tab. | + +For information on other default alerts, refer to the section on [cluster-level alerts.](cluster-admin/tools/alerts/default-alerts) + +# Adding Project Alerts + +>**Prerequisite:** Before you can receive project alerts, you must add a notifier. + +1. From the **Global** view, navigate to the project that you want to configure project alerts for. Select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**. + +1. Click **Add Alert Group**. + +1. Enter a **Name** for the alert that describes its purpose, you could group alert rules for the different purpose. + +1. Based on the type of alert you want to create, fill out the form. 
For help, refer to the [configuration](#project-alert-rule-configuration) section below. + +1. Continue adding more alert rules to the group. + +1. Finally, choose the [notifiers](../../explanations/integrations-in-rancher/notifiers.md) that send you alerts. + + - You can set up multiple notifiers. + - You can change notifier recipients on the fly. + +1. Click **Create.** + +**Result:** Your alert is configured. A notification is sent when the alert is triggered. + + +# Managing Project Alerts + +To manage project alerts, browse to the project that alerts you want to manage. Then select **Tools > Alerts**. In versions before v2.2.0, you can choose **Resources > Alerts**. You can: + +- Deactivate/Reactive alerts +- Edit alert settings +- Delete unnecessary alerts +- Mute firing alerts +- Unmute muted alerts + + +# Project Alert Rule Configuration + +- [Pod Alerts](#pod-alerts) +- [Workload Alerts](#workload-alerts) +- [Workload Selector Alerts](#workload-selector-alerts) +- [Metric Expression Alerts](#metric-expression-alerts) + +# Pod Alerts + +This alert type monitors for the status of a specific pod. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Select the **Pod** option, and then select a pod from the drop-down. + +### Is + +Select a pod status that triggers an alert: + +- **Not Running** +- **Not Scheduled** +- **Restarted <x> times within the last <x> Minutes** + +### Send a + +Select the urgency level of the alert. The options are: + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on pod state. For example, select **Info** for Job pod which stop running after job finished. However, if an important pod isn't scheduled, it may affect operations, so choose **Critical**. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. + +You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. +- **Group Interval Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 30 seconds. +- **Repeat Wait Time**: How long to wait before sending an alert that has been added to a group which contains already fired alerts, default to 1 hour. + +# Workload Alerts + +This alert type monitors for the availability of a workload. + +Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI. + +### When a + +Choose the **Workload** option. Then choose a workload from the drop-down. + +### Is + +Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage. + +### Send a + +Select the urgency level of the alert. + +- **Critical**: Most urgent +- **Warning**: Normal urgency +- **Info**: Least urgent + +Select the urgency level of the alert based on the percentage you choose and the importance of the workload. + +### Advanced Options + +By default, the below options will apply to all alert rules within the group. + +You can disable these advanced options when configuring a specific rule. + +- **Group Wait Time**: How long to wait to buffer alerts of the same group before sending initially, default to 30 seconds. 
+- **Group Interval Time**: How long to wait before sending a notification about new alerts added to a group that has already fired. Defaults to 30 seconds.
+- **Repeat Wait Time**: How long to wait before re-sending a notification for an alert that has already fired. Defaults to 1 hour.
+
+# Workload Selector Alerts
+
+This alert type monitors the availability of all workloads that carry a label you specify.
+
+Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI.
+
+### When a
+
+Select the **Workload Selector** option, and then click **Add Selector** to enter the key-value pair for a label. The label should be applied to one or more of your workloads. If the availability of any selected workload drops below your specification, an alert is triggered.
+
+### Is
+
+Choose an availability percentage using the slider. The alert is triggered when the workload's availability on your cluster nodes drops below the set percentage.
+
+### Send a
+
+Select the urgency level of the alert.
+
+- **Critical**: Most urgent
+- **Warning**: Normal urgency
+- **Info**: Least urgent
+
+Choose the urgency level based on the percentage you set and the importance of the workloads.
+
+### Advanced Options
+
+By default, the options below apply to all alert rules within the group.
+
+You can disable these advanced options when configuring a specific rule.
+
+- **Group Wait Time**: How long to buffer alerts of the same group before sending the initial notification. Defaults to 30 seconds.
+- **Group Interval Time**: How long to wait before sending a notification about new alerts added to a group that has already fired. Defaults to 30 seconds.
+- **Repeat Wait Time**: How long to wait before re-sending a notification for an alert that has already fired. Defaults to 1 hour.
+
+# Metric Expression Alerts
+_Available as of v2.2.4_
+
+If you enable [project monitoring](../../pages-for-subheaders/project-tools.md#monitoring), this alert type evaluates a Prometheus expression and triggers an alert when the returned value crosses a threshold that you define.
+
+Each of the below sections corresponds to a part of the alert rule configuration section in the Rancher UI.
+
+### When A
+
+Enter or select an **Expression**. The dropdown shows the metrics that Prometheus provides, including:
+
+- [**Container**](https://github.com/google/cadvisor)
+- [**Kubernetes Resources**](https://github.com/kubernetes/kube-state-metrics)
+- **Customize**
+- [**Project Level Grafana**](http://docs.grafana.org/administration/metrics/)
+- **Project Level Prometheus**
+
+### Is
+
+Choose a comparison:
+
+- **Equal**: Trigger the alert when the expression value is equal to the threshold.
+- **Not Equal**: Trigger the alert when the expression value is not equal to the threshold.
+- **Greater Than**: Trigger the alert when the expression value is greater than the threshold.
+- **Less Than**: Trigger the alert when the expression value is less than the threshold.
+- **Greater or Equal**: Trigger the alert when the expression value is greater than or equal to the threshold.
+- **Less or Equal**: Trigger the alert when the expression value is less than or equal to the threshold.
+
+If applicable, enter the threshold value that the expression is compared against.
+
+### For
+
+Select a duration. The alert is triggered only when the expression value stays across the threshold for longer than the configured duration.
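+
+To illustrate how the expression, comparison, threshold, and duration fit together, the following is a hypothetical expression only (the `my-project` namespace value and the metric pairing are illustrative, not something the UI fills in for you). It combines the cAdvisor and kube-state-metrics series listed above to express container memory usage as a percentage of the configured limit; pairing it with **Greater Than**, a threshold of `90`, and a duration of five minutes would alert when memory stays above 90% of the limit for that long.
+
+```
+sum(container_memory_working_set_bytes{namespace="my-project"})
+  / sum(kube_pod_container_resource_limits_memory_bytes{namespace="my-project"}) * 100
+```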
+
+### Send a
+
+Select the urgency level of the alert.
+
+- **Critical**: Most urgent
+- **Warning**: Normal urgency
+- **Info**: Least urgent
+
+Choose the urgency level based on the alert's impact on operations. For example, if an expression tracks container memory usage as a percentage of the limit, crossing 60% might only warrant **Info**, while crossing 95% warrants **Critical**.
+
+### Advanced Options
+
+By default, the options below apply to all alert rules within the group. You can disable these advanced options when configuring a specific rule.
+
+- **Group Wait Time**: How long to buffer alerts of the same group before sending the initial notification. Defaults to 30 seconds.
+- **Group Interval Time**: How long to wait before sending a notification about new alerts added to a group that has already fired. Defaults to 30 seconds.
+- **Repeat Wait Time**: How long to wait before re-sending a notification for an alert that has already fired. Defaults to 1 hour.
\ No newline at end of file
diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md
new file mode 100644
index 0000000000..ecf521ad4d
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-project-tools/project-logging.md
@@ -0,0 +1,115 @@
+---
+title: Project Logging
+shortTitle: Project Logging
+weight: 2527
+aliases:
+  - /rancher/v2.0-v2.4/en/project-admin/tools/logging
+  - /rancher/v2.0-v2.4/en/logging/legacy/project-logging
+  - /rancher/v2.0-v2.4/en/logging/v2.0.x-v2.4.x/project-logging
+  - /rancher/v2.x/en/logging/v2.0.x-v2.4.x/project-logging/
+  - /rancher/v2.x/en/monitoring-alerting/v2.0.x-v2.4.x/cluster-monitoring/project-monitoring/
+---
+
+Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters.
+
+For background information about how logging integrations work, refer to the [cluster administration section](logging/v2.0.x-v2.4.x/cluster-logging/#how-logging-integrations-work).
+
+Rancher supports the following services:
+
+- Elasticsearch
+- Splunk
+- Kafka
+- Syslog
+- Fluentd
+
+>**Note:** You can only configure one logging service per cluster or per project.
+
+Only [administrators](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure Rancher to send Kubernetes logs to a logging service.
+
+# Requirements
+
+The Docker daemon on each node in the cluster should be [configured](https://docs.docker.com/config/containers/logging/configure/) with the (default) log-driver: `json-file`. You can check the log-driver by running the following command:
+
+```
+$ docker info | grep 'Logging Driver'
+Logging Driver: json-file
+```
+
+# Advantages
+
+Setting up a logging service to collect logs from your cluster/project has several advantages:
+
+- Logs errors and warnings in your Kubernetes infrastructure to a stream.
The stream informs you of events like a container crashing, a pod eviction, or a node dying. +- Allows you to capture and analyze the state of your cluster and look for trends in your environment using the log stream. +- Helps you when troubleshooting or debugging. +- Saves your logs to a safe location outside of your cluster, so that you can still access them even if your cluster encounters issues. + +# Logging Scope + +You can configure logging at either cluster level or project level. + +- [Cluster logging](cluster-admin/tools/logging/) writes logs for every pod in the cluster, i.e. in all the projects. For [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md), it also writes logs for all the Kubernetes system components. + +- Project logging writes logs for every pod in that particular project. + +Logs that are sent to your logging service are from the following locations: + + - Pod logs stored at `/var/log/containers`. + + - Kubernetes system components logs stored at `/var/lib/rancher/rke/logs/`. + +# Enabling Project Logging + +1. From the **Global** view, navigate to the project that you want to configure project logging. + +1. Select **Tools > Logging** in the navigation bar. In versions before v2.2.0, you can choose **Resources > Logging**. + +1. Select a logging service and enter the configuration. Refer to the specific service for detailed configuration. Rancher supports the following services: + + - [Elasticsearch](cluster-admin/tools/logging/elasticsearch/) + - [Splunk](cluster-admin/tools/logging/splunk/) + - [Kafka](cluster-admin/tools/logging/kafka/) + - [Syslog](cluster-admin/tools/logging/syslog/) + - [Fluentd](cluster-admin/tools/logging/fluentd/) + +1. (Optional) Instead of using the UI to configure the logging services, you can enter custom advanced configurations by clicking on **Edit as File**, which is located above the logging targets. This link is only visible after you select a logging service. + + - With the file editor, enter raw fluentd configuration for any logging service. Refer to the documentation for each logging service on how to setup the output configuration. + + - [Elasticsearch Documentation](https://github.com/uken/fluent-plugin-elasticsearch) + - [Splunk Documentation](https://github.com/fluent/fluent-plugin-splunk) + - [Kafka Documentation](https://github.com/fluent/fluent-plugin-kafka) + - [Syslog Documentation](https://github.com/dlackty/fluent-plugin-remote_syslog) + - [Fluentd Documentation](https://docs.fluentd.org/v1.0/articles/out_forward) + + - If the logging service is using TLS, you also need to complete the **SSL Configuration** form. + 1. Provide the **Client Private Key** and **Client Certificate**. You can either copy and paste them or upload them by using the **Read from a file** button. + + - You can use either a self-signed certificate or one provided by a certificate authority. + + - You can generate a self-signed certificate using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 2. If you are using a self-signed certificate, provide the **CA Certificate PEM**. + +1. (Optional) Complete the **Additional Logging Configuration** form. + + 1. 
**Optional:** Use the **Add Field** button to add custom log fields to your logging configuration. These fields are key value pairs (such as `foo=bar`) that you can use to filter the logs from another system. + + 1. Enter a **Flush Interval**. This value determines how often [Fluentd](https://www.fluentd.org/) flushes data to the logging server. Intervals are measured in seconds. + + 1. **Include System Log**. The logs from pods in system project and RKE components will be sent to the target. Uncheck it to exclude the system logs. + +1. Click **Test**. Rancher sends a test log to the service. + + > **Note:** This button is replaced with _Dry Run_ if you are using the custom configuration editor. In this case, Rancher calls the fluentd dry run command to validate the configuration. + +1. Click **Save**. + +**Result:** Rancher is now configured to send logs to the selected service. Log into the logging service so that you can start viewing the logs. + +# Related Links + +[Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/) diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md new file mode 100644 index 0000000000..2be7b2deef --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark.md @@ -0,0 +1,1177 @@ +--- +title: Hardening Guide v2.1 +weight: 104 +aliases: + - /rancher/v2.0-v2.4/en/security/hardening-2.1 + - /rancher/v2.x/en/security/rancher-2.1/hardening-2.1/ +--- + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.1.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.1 | Rancher v2.1.x | Benchmark v1.3.0 | Kubernetes 1.11 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.1.x/Rancher_Hardening_Guide.pdf) + +For more detail on how a hardened cluster scores against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.1.x](security/benchmark-2.1/). + +### Profile Definitions + +The following profile definitions agree with the CIS benchmarks for Kubernetes. + +A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. 
+ +#### Level 1 + +Items in this profile intend to: + +- offer practical advice appropriate for the environment; +- deliver an obvious security benefit; and +- not alter the functionality or utility of the environment beyond an acceptable margin + +#### Level 2 + +Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: + +- are intended for use in environments or use cases where security is paramount +- act as a defense in depth measure +- may negatively impact the utility or performance of the technology + +--- + +## 1.1 - Rancher HA Kubernetes cluster host configuration + +### 1.1.1 - Configure default sysctl settings on all hosts + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure sysctl settings to match what the kubelet would set if allowed. + +**Rationale** + +We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. + +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +kernel.panic=10 +kernel.panic_on_oops=1 +``` + +- Run `sysctl -p` to enable the settings. + +### 1.1.2 - Install the encryption provider configuration on all control plane nodes + +**Profile Applicability** + +- Level 1 + +**Description** + +Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: + +**Rationale** + +This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. + +This supports the following controls: + +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) + +**Audit** + +On the control plane hosts for the Rancher HA cluster run: + +``` bash +stat /etc/kubernetes/encryption.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: v1 +kind: EncryptionConfig +resources: +- resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. + +**Remediation** + +- Generate a key and an empty configuration file: + +``` bash +head -c 32 /dev/urandom | base64 -i - +touch /etc/kubernetes/encryption.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /etc/kubernetes/encryption.yaml +chmod 0600 /etc/kubernetes/encryption.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: v1 +kind: EncryptionConfig +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `secret` is the 32-byte base64-encoded string generated in the first step. 
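+
+If you prefer to script the remediation, the following is a sketch only (it assumes a Linux host with `bash` and the paths used in this section); it generates the key and writes the finished file in one pass:
+
+``` bash
+# Sketch: generate the key and write /etc/kubernetes/encryption.yaml in one step
+ENC_KEY=$(head -c 32 /dev/urandom | base64)
+cat > /etc/kubernetes/encryption.yaml <<EOF
+apiVersion: v1
+kind: EncryptionConfig
+resources:
+- resources:
+  - secrets
+  providers:
+  - aescbc:
+      keys:
+      - name: key1
+        secret: ${ENC_KEY}
+  - identity: {}
+EOF
+chown root:root /etc/kubernetes/encryption.yaml
+chmod 0600 /etc/kubernetes/encryption.yaml
+```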
+ +### 1.1.3 - Install the audit log configuration on all control plane nodes. + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. + +**Rationale** + +The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. + +This supports the following controls: + +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) + +**Audit** + +On each control plane node, run: + +``` bash +stat /etc/kubernetes/audit.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /etc/kubernetes/audit.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /etc/kubernetes/audit.yaml +chmod 0600 /etc/kubernetes/audit.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +### 1.1.4 - Place Kubernetes event limit configuration on each control plane host + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. + +**Rationale** + +Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
+ +This supports the following control: + +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Audit** + +On nodes with the `controlplane` role run: + +``` bash +stat /etc/kubernetes/admission.yaml +stat /etc/kubernetes/event.yaml +``` + +For each file, ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` + +For `admission.yaml` ensure that the file contains: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /etc/kubernetes/event.yaml +``` + +For `event.yaml` ensure that the file contains: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 500 + burst: 5000 +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /etc/kubernetes/admission.yaml +touch /etc/kubernetes/event.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /etc/kubernetes/admission.yaml +chown root:root /etc/kubernetes/event.yaml +chmod 0600 /etc/kubernetes/admission.yaml +chmod 0600 /etc/kubernetes/event.yaml +``` + +- For `admission.yaml` set the contents to: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /etc/kubernetes/event.yaml +``` + +- For `event.yaml` set the contents to: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 500 + burst: 5000 +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix A. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. + +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) +- 1.1.9 - Ensure that the `--repair-malformed-updates` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--repair-malformed-updates=false +--service-account-lookup=true +--enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" +--experimental-encryption-provider-config=/etc/kubernetes/encryption.yaml +--admission-control-config-file=/etc/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=5 +--audit-log-maxbackup=5 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/etc/kubernetes/audit.yaml +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + repair-malformed-updates: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml + admission-control-config-file: "/etc/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /etc/kubernetes/audit.yaml + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.3 - Configure 
scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + … + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. + +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive PodSecurityPolicy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. 
+ +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole psp:restricted +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding psp:restricted +``` + +- Verify the restricted PSP is present. + +``` bash +kubectl get psp restricted +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + 
kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. + +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- Upgrade to Rancher v2.1.2 via the Helm chart. While performing the upgrade, provide the following installation flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local administrator password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local administrator password should be changed from the default. + +**Rationale** + +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. 
+ +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. + +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
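+
+For auditors who prefer the API over the UI, a query in the same spirit as the script in section 3.3.1 can list the drivers that are currently active. Treat the `/v3/nodedrivers` endpoint name and the `state` field below as assumptions to verify against your Rancher version; the token and Rancher server URL placeholders are illustrative.
+
+``` bash
+# Hypothetical check: list node drivers the Rancher API reports as active
+curl -sk -u 'token-<id>:<secret>' https://<RANCHER_URL>/v3/nodedrivers \
+  | jq -r '.data[] | select(.state == "active") | .name'
+```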
+ +--- + +## Appendix A - Complete RKE `cluster.yml` Example + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] + +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "1800s" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + repair-malformed-updates: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + experimental-encryption-provider-config: /etc/kubernetes/encryption.yaml + admission-control-config-file: "/etc/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /etc/kubernetes/audit.yaml + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/security/rancher-2.1/benchmark-2.1/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md new file mode 100644 index 0000000000..9d94cad815 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark.md @@ -0,0 +1,1231 @@ +--- +title: Hardening Guide v2.2 +weight: 103 +aliases: + - /rancher/v2.0-v2.4/en/security/hardening-2.2 + - /rancher/v2.x/en/security/rancher-2.2/hardening-2.2/ +--- + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.2.x. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.2 | Rancher v2.2.x | Benchmark v1.4.1, 1.4.0 | Kubernetes 1.13 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.2.x/Rancher_Hardening_Guide.pdf) + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.2.x](security/benchmark-2.2/). + +### Profile Definitions + +The following profile definitions agree with the CIS benchmarks for Kubernetes. + +A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. 
+ +#### Level 1 + +Items in this profile intend to: + +- offer practical advice appropriate for the environment; +- deliver an obvious security benefit; and +- not alter the functionality or utility of the environment beyond an acceptable margin + +#### Level 2 + +Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: + +- are intended for use in environments or use cases where security is paramount +- act as a defense in depth measure +- may negatively impact the utility or performance of the technology + +--- + +## 1.1 - Rancher HA Kubernetes cluster host configuration + +### 1.1.1 - Configure default sysctl settings on all hosts + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure sysctl settings to match what the kubelet would set if allowed. + +**Rationale** + +We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. + +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +kernel.panic=10 +kernel.panic_on_oops=1 +``` + +- Run `sysctl -p` to enable the settings. + +### 1.1.2 - Install the encryption provider configuration on all control plane nodes + +**Profile Applicability** + +- Level 1 + +**Description** + +Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: + +**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` + +**Rationale** + +This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. + +This supports the following controls: + +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) + +**Audit** + +On the control plane hosts for the Rancher HA cluster run: + +``` bash +stat /opt/kubernetes/encryption.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. 
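+
+As a convenience, the mode and ownership checks above can be collapsed into a single command; this assumes GNU `stat` is available on the host:
+
+``` bash
+# Should print: 600 root:root
+stat -c '%a %U:%G' /opt/kubernetes/encryption.yaml
+```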
+ +**Remediation** + +- Generate a key and an empty configuration file: + +``` bash +head -c 32 /dev/urandom | base64 -i - +touch /opt/kubernetes/encryption.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/encryption.yaml +chmod 0600 /opt/kubernetes/encryption.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: v1 +kind: EncryptionConfig +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `secret` is the 32-byte base64-encoded string generated in the first step. + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.3 - Install the audit log configuration on all control plane nodes. + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. + +**Rationale** + +The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. + +This supports the following controls: + +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) + +**Audit** + +On each control plane node, run: + +``` bash +stat /opt/kubernetes/audit.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/audit.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/audit.yaml +chmod 0600 /opt/kubernetes/audit.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.4 - Place Kubernetes event limit configuration on each control plane host + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. + +**Rationale** + +Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
+ +This supports the following control: + +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Audit** + +On nodes with the `controlplane` role run: + +``` bash +stat /opt/kubernetes/admission.yaml +stat /opt/kubernetes/event.yaml +``` + +For each file, ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` + +For `admission.yaml` ensure that the file contains: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +For `event.yaml` ensure that the file contains: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/admission.yaml +touch /opt/kubernetes/event.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/admission.yaml +chown root:root /opt/kubernetes/event.yaml +chmod 0600 /opt/kubernetes/admission.yaml +chmod 0600 /opt/kubernetes/event.yaml +``` + +- For `admission.yaml` set the contents to: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +- For `event.yaml` set the contents to: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix A. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
+ +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + extra_args: + authorization-mode: "Webhook" + streaming-connection-idle-timeout: "" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling argument` is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" +--encryption-provider-config=/opt/kubernetes/encryption.yaml +--admission-control-config-file=/opt/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=5 +--audit-log-maxbackup=5 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/opt/kubernetes/audit.yaml +--tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: 
"json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + … + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole psp:restricted +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding psp:restricted +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. 
+ +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.2.x, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local administrator password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local administrator password should be changed from the default. + +**Rationale** + +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. 
+ +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. 
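+ +As with the administrator-privileges script in section 3.3.1, this audit can also be approached through the Rancher API. The sketch below is illustrative only: the `/v3/nodedrivers` endpoint and the `name`/`active` fields should be verified against the API of your Rancher version, and `<RANCHER_URL>`, `token-<id>` and `<secret>` are placeholders for your server URL and an API token; the _Node Drivers_ page in the UI remains the authoritative place to audit and disable drivers. + +``` bash +#!/bin/bash +# Illustrative sketch: list node drivers and whether they are active. +# Verify the endpoint and field names against your Rancher version before relying on this. +curl -sk -u 'token-<id>:<secret>' https://<RANCHER_URL>/v3/nodedrivers | jq -r '.data[] | "\(.name) \(.active)"' +```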
+ +--- + +## Appendix A - Complete RKE `cluster.yml` Example + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] + +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "1800s" + authorization-mode: "Webhook" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-api: + pod_security_policy: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + encryption-provider-config: /opt/kubernetes/encryption.yaml + admission-control-config-file: "/opt/kubernetes/admission.yaml" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/var/log/kube-audit:/var/log/kube-audit" + - "/opt/kubernetes:/opt/kubernetes" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +addons: | + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + 
metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/security/rancher-2.2/benchmark-2.2/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md new file mode 100644 index 0000000000..ddf33d55f9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md @@ -0,0 +1,1546 @@ +--- +title: Hardening Guide v2.3 +weight: 102 +aliases: + - /rancher/v2.0-v2.4/en/security/hardening-2.3 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/hardening-2.3/ +--- +This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.0-v2.3.2. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. 
+ +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.x/Rancher_Hardening_Guide.pdf) + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.x](security/benchmark-2.3/). + +### Profile Definitions + +The following profile definitions agree with the CIS benchmarks for Kubernetes. + +A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance. + +#### Level 1 + +Items in this profile intend to: + +- offer practical advice appropriate for the environment; +- deliver an obvious security benefit; and +- not alter the functionality or utility of the environment beyond an acceptable margin + +#### Level 2 + +Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics: + +- are intended for use in environments or use cases where security is paramount +- act as a defense in depth measure +- may negatively impact the utility or performance of the technology + +--- + +## 1.1 - Rancher HA Kubernetes cluster host configuration + +(See Appendix A. for full ubuntu `cloud-config` example) + +### 1.1.1 - Configure default sysctl settings on all hosts + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure sysctl settings to match what the kubelet would set if allowed. + +**Rationale** + +We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually. + +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `vm.panic_on_oom = 0` + +``` bash +sysctl vm.panic_on_oom +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +- Verify `kernel.keys.root_maxkeys = 1000000` + +``` bash +sysctl kernel.keys.root_maxkeys +``` + +- Verify `kernel.keys.root_maxbytes = 25000000` + +``` bash +sysctl kernel.keys.root_maxbytes +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +vm.panic_on_oom=0 +kernel.panic=10 +kernel.panic_on_oops=1 +kernel.keys.root_maxkeys=1000000 +kernel.keys.root_maxbytes=25000000 +``` + +- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. 
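+ +As an optional convenience (this is a sketch, not part of the CIS audit procedure itself), the six expected values from this section can be checked in one pass on each host using only the standard `sysctl` utility: + +``` bash +# Sketch: compare each kubelet-related sysctl against the value expected by this guide. +while read -r key expected; do + actual="$(sysctl -n "$key")" + if [ "$actual" = "$expected" ]; then echo "OK $key=$actual"; else echo "FAIL $key=$actual (expected $expected)"; fi +done <<'EOF' +vm.overcommit_memory 1 +vm.panic_on_oom 0 +kernel.panic 10 +kernel.panic_on_oops 1 +kernel.keys.root_maxkeys 1000000 +kernel.keys.root_maxbytes 25000000 +EOF +```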
+ +### 1.1.2 - Install the encryption provider configuration on all control plane nodes + +**Profile Applicability** + +- Level 1 + +**Description** + +Create a Kubernetes encryption configuration file on each of the RKE nodes that will be provisioned with the `controlplane` role: + +**NOTE:** The `--experimental-encryption-provider-config` flag in Kubernetes 1.13+ is actually `--encryption-provider-config` + +**Rationale** + +This configuration file will ensure that the Rancher RKE cluster encrypts secrets at rest, which Kubernetes does not do by default. + +This supports the following controls: + +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) + +**Audit** + +On the control plane hosts for the Rancher HA cluster run: + +``` bash +stat /opt/kubernetes/encryption.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `aescbc` is the key type, and `secret` is populated with a 32-byte base64 encoded string. + +**Remediation** + +- Generate a key and an empty configuration file: + +``` bash +head -c 32 /dev/urandom | base64 -i - +touch /opt/kubernetes/encryption.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/encryption.yaml +chmod 0600 /opt/kubernetes/encryption.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: v1 +kind: EncryptionConfig +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: <32-byte base64 encoded string> + - identity: {} +``` + +Where `secret` is the 32-byte base64-encoded string generated in the first step. + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.3 - Install the audit log configuration on all control plane nodes. + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes audit logging on each of the control plane nodes in the cluster. + +**Rationale** + +The Kubernetes API has audit logging capability that is the best way to track actions in the cluster. 
+ +This supports the following controls: + +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to false (Scored) + +**Audit** + +On each control plane node, run: + +``` bash +stat /opt/kubernetes/audit.yaml +``` + +Ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` +- The file contains: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/audit.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/audit.yaml +chmod 0600 /opt/kubernetes/audit.yaml +``` + +- Set the contents to: + +``` yaml +apiVersion: audit.k8s.io/v1beta1 +kind: Policy +rules: +- level: Metadata +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.1.4 - Place Kubernetes event limit configuration on each control plane host + +**Profile Applicability** + +- Level 1 + +**Description** + +Place the configuration file for Kubernetes event limit configuration on each of the control plane nodes in the cluster. + +**Rationale** + +Set up the `EventRateLimit` admission control plugin to prevent clients from overwhelming the API server. The settings below are intended as an initial value and may need to be adjusted for larger clusters. 
+ +This supports the following control: + +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) + +**Audit** + +On nodes with the `controlplane` role run: + +``` bash +stat /opt/kubernetes/admission.yaml +stat /opt/kubernetes/event.yaml +``` + +For each file, ensure that: + +- The file is present +- The file mode is `0600` +- The file owner is `root:root` + +For `admission.yaml` ensure that the file contains: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +For `event.yaml` ensure that the file contains: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**Remediation** + +On nodes with the `controlplane` role: + +- Generate an empty configuration file: + +``` bash +touch /opt/kubernetes/admission.yaml +touch /opt/kubernetes/event.yaml +``` + +- Set the file ownership to `root:root` and the permissions to `0600` + +``` bash +chown root:root /opt/kubernetes/admission.yaml +chown root:root /opt/kubernetes/event.yaml +chmod 0600 /opt/kubernetes/admission.yaml +chmod 0600 /opt/kubernetes/event.yaml +``` + +- For `admission.yaml` set the contents to: + +``` yaml +apiVersion: apiserver.k8s.io/v1alpha1 +kind: AdmissionConfiguration +plugins: +- name: EventRateLimit + path: /opt/kubernetes/event.yaml +``` + +- For `event.yaml` set the contents to: + +``` yaml +apiVersion: eventratelimit.admission.k8s.io/v1alpha1 +kind: Configuration +limits: +- type: Server + qps: 5000 + burst: 20000 +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory has permissions of 700 or more restrictive. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. + +**Audit** + +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %a /var/lib/rancher/etcd +``` + +Verify that the permissions are `700` or more restrictive. + +**Remediation** + +Follow the steps as documented in [1.4.12](#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. + +### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. + +**Audit** + +On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). 
For example, + +``` bash +stat -c %U:%G /var/lib/rancher/etcd +``` + +Verify that the ownership is set to `etcd:etcd`. + +**Remediation** + +- On the etcd server node(s) add the `etcd` user: + +``` bash +useradd etcd +``` + +Record the uid/gid: + +``` bash +id etcd +``` + +- Add the following to the RKE `cluster.yml` etcd section under `services`: + +``` yaml +services: + etcd: + uid: + gid: +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix B. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. + +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + extra_args: + authorization-mode: "Webhook" + streaming-connection-idle-timeout: "" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. 
+Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. + +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--experimental-encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins= "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" +--encryption-provider-config=/opt/kubernetes/encryption.yaml +--admission-control-config-file=/opt/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=5 +--audit-log-maxbackup=5 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/opt/kubernetes/audit.yaml +--tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube-api: + pod_security_policy: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: 
"ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + audit-log-maxsize: "100" + audit-log-format: "json" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + … + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole psp:restricted +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding psp:restricted +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. 
+ +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.3.x, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local administrator password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local administrator password should be changed from the default. + +**Rationale** + +The default administrator password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. 
+ +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. + +--- + +## Appendix A - Complete ubuntu `cloud-config` Example + +`cloud-config` file to automate hardening manual steps on nodes deployment. 
+ +``` +#cloud-config +bootcmd: +- apt-get update +- apt-get install -y apt-transport-https +apt: + sources: + docker: + source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" + keyid: 0EBFCD88 +packages: +- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] +- jq +write_files: +# 1.1.1 - Configure default sysctl settings on all hosts +- path: /etc/sysctl.d/90-kubelet.conf + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxkeys=1000000 + kernel.keys.root_maxbytes=25000000 +# 1.1.2 encription provider +- path: /opt/kubernetes/encryption.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: apiserver.config.k8s.io/v1 + kind: EncryptionConfiguration + resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: QRCexFindur3dzS0P/UmHs5xA6sKu58RbtWOQFarfh4= + - identity: {} +# 1.1.3 audit log +- path: /opt/kubernetes/audit.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + - level: Metadata +# 1.1.4 event limit +- path: /opt/kubernetes/admission.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: apiserver.k8s.io/v1alpha1 + kind: AdmissionConfiguration + plugins: + - name: EventRateLimit + path: /opt/kubernetes/event.yaml +- path: /opt/kubernetes/event.yaml + owner: root:root + permissions: '0600' + content: | + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000 +# 1.4.12 etcd user +groups: + - etcd +users: + - default + - name: etcd + gecos: Etcd user + primary_group: etcd + homedir: /var/lib/etcd +# 1.4.11 etcd data dir +runcmd: + - chmod 0700 /var/lib/etcd + - usermod -G docker -a ubuntu + - sysctl -p /etc/sysctl.d/90-kubelet.conf +``` + +## Appendix B - Complete RKE `cluster.yml` Example + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] + +services: + kubelet: + extra_args: + streaming-connection-idle-timeout: "1800s" + authorization-mode: "Webhook" + protect-kernel-defaults: "true" + make-iptables-util-chains: "true" + event-qps: "0" + anonymous-auth: "false" + feature-gates: "RotateKubeletServerCertificate=true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + generate_serving_certificate: true + kube-api: + pod_security_policy: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: "false" + profiling: "false" + service-account-lookup: "true" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + audit-log-path: "/var/log/kube-audit/audit-log.json" + audit-log-maxage: "5" + audit-log-maxbackup: "5" + 
audit-log-maxsize: "100" + audit-log-format: "json" + audit-policy-file: /opt/kubernetes/audit.yaml + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + services: + etcd: + uid: 1001 + gid: 1001 +addons: | + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: extensions/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +## Appendix C - Complete RKE Template Example + +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false 
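+# Optional Rancher cluster features (alerting, monitoring, network policy) are
+# left disabled in this example; enable them according to organizational policy.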
+enable_cluster_monitoring: false +enable_network_policy: false +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + ignore_docker_version: true +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1001 + retention: 72h + snapshot: false + uid: 1001 + kube_api: + always_pull_images: false + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: 'false' + audit-log-format: json + audit-log-maxage: '5' + audit-log-maxbackup: '5' + audit-log-maxsize: '100' + audit-log-path: /var/log/kube-audit/audit-log.json + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.0/benchmark-2.3/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md diff --git 
a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md
new file mode 100644
index 0000000000..e142b73e32
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md
@@ -0,0 +1,2051 @@
+---
+title: Hardening Guide v2.3.3
+weight: 101
+aliases:
+  - /rancher/v2.0-v2.4/en/security/hardening-2.3.3
+  - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/hardening-2.3.3/
+---
+
+This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.3. It outlines the configurations and controls required to address the Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+> This hardening guide describes how to secure the nodes in your cluster. It is recommended to follow this guide before installing Kubernetes.
+
+This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
+
+Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version
+------------------------|----------------|-----------------------|------------------
+Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16
+
+[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.3/Rancher_Hardening_Guide.pdf)
+
+For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide v2.3.3](security/benchmark-2.3.3/).
+
+### Profile Definitions
+
+The following profile definitions agree with the CIS benchmarks for Kubernetes.
+
+A profile is a set of configurations that provide a certain amount of hardening. Generally, the more hardened an environment is, the more it affects performance.
+
+#### Level 1
+
+Items in this profile intend to:
+
+- offer practical advice appropriate for the environment;
+- deliver an obvious security benefit; and
+- not alter the functionality or utility of the environment beyond an acceptable margin
+
+#### Level 2
+
+Items in this profile extend the “Level 1” profile and exhibit one or more of the following characteristics:
+
+- are intended for use in environments or use cases where security is paramount
+- act as a defense in depth measure
+- may negatively impact the utility or performance of the technology
+
+---
+
+## 1.1 - Rancher RKE Kubernetes cluster host configuration
+
+(See Appendix A for a full Ubuntu `cloud-config` example.)
+
+### 1.1.1 - Configure default sysctl settings on all hosts
+
+**Profile Applicability**
+
+- Level 1
+
+**Description**
+
+Configure sysctl settings to match what the kubelet would set if allowed.
+
+**Rationale**
+
+We recommend that users launch the kubelet with the `--protect-kernel-defaults` option. The settings that the kubelet initially attempts to change can be set manually.
+ +This supports the following control: + +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) + +**Audit** + +- Verify `vm.overcommit_memory = 1` + +``` bash +sysctl vm.overcommit_memory +``` + +- Verify `vm.panic_on_oom = 0` + +``` bash +sysctl vm.panic_on_oom +``` + +- Verify `kernel.panic = 10` + +``` bash +sysctl kernel.panic +``` + +- Verify `kernel.panic_on_oops = 1` + +``` bash +sysctl kernel.panic_on_oops +``` + +- Verify `kernel.keys.root_maxkeys = 1000000` + +``` bash +sysctl kernel.keys.root_maxkeys +``` + +- Verify `kernel.keys.root_maxbytes = 25000000` + +``` bash +sysctl kernel.keys.root_maxbytes +``` + +**Remediation** + +- Set the following parameters in `/etc/sysctl.d/90-kubelet.conf` on all nodes: + +``` plain +vm.overcommit_memory=1 +vm.panic_on_oom=0 +kernel.panic=10 +kernel.panic_on_oops=1 +kernel.keys.root_maxkeys=1000000 +kernel.keys.root_maxbytes=25000000 +``` + +- Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. + +### 1.4.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory has permissions of 700 or more restrictive. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should not be readable or writable by any group members or the world. + +**Audit** + +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir` , +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %a /var/lib/etcd +``` + +Verify that the permissions are `700` or more restrictive. + +**Remediation** + +Follow the steps as documented in [1.4.12](#1-4-12-ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd) remediation. + +### 1.4.12 - Ensure that the etcd data directory ownership is set to `etcd:etcd` + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that the etcd data directory ownership is set to `etcd:etcd`. + +**Rationale** + +etcd is a highly-available key-value store used by Kubernetes deployments for persistent storage of all of its REST API objects. This data directory should be protected from any unauthorized reads or writes. It should be owned by `etcd:etcd`. + +**Audit** + +On a etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). For example, + +``` bash +stat -c %U:%G /var/lib/etcd +``` + +Verify that the ownership is set to `etcd:etcd`. + +**Remediation** + +- On the etcd server node(s) add the `etcd` user: + +``` bash +useradd -c "Etcd user" -d /var/lib/etcd etcd +``` + +Record the uid/gid: + +``` bash +id etcd +``` + +- Add the following to the RKE `cluster.yml` etcd section under `services`: + +``` yaml +services: + etcd: + uid: + gid: +``` + +## 2.1 - Rancher HA Kubernetes Cluster Configuration via RKE + +(See Appendix B. for full RKE `cluster.yml` example) + +### 2.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. 
+ +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. + +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy +--encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml +--admission-control-config-file=/etc/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=30 +--audit-log-maxbackup=10 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/etc/kubernetes/audit-policy.yaml +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" +``` + +For k8s 1.14 `enable-admission-plugins` should be + +``` yaml + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 2.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 2.1.5 - Configure addons and PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole restricted-clusterrole +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +kubectl get clusterrolebinding restricted-clusterrolebinding +``` + +- Verify the restricted PSP is present. 
+ +``` bash +kubectl get psp restricted-psp +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +addons: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted-psp + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: restricted-clusterrole + rules: + - apiGroups: + - extensions + resourceNames: + - restricted-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: restricted-clusterrolebinding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: restricted-clusterrole + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +## 3.1 - Rancher Management Control Plane Installation + +### 3.1.1 - Disable the local cluster option + +**Profile Applicability** + +- Level 2 + +**Description** + +When deploying Rancher, disable the local cluster option on the Rancher Server. + +**NOTE:** This requires Rancher v2.1.2 or above. + +**Rationale** + +Having access to the local cluster from the Rancher UI is convenient for troubleshooting and debugging; however, if the local cluster is enabled in the Rancher UI, a user has access to all elements of the system, including the Rancher management server itself. Disabling the local cluster is a defense in depth measure and removes the possible attack vector from the Rancher UI and API. + +**Audit** + +- Verify the Rancher deployment has the `--add-local=false` option set. 
+ +``` bash +kubectl get deployment rancher -n cattle-system -o yaml |grep 'add-local' +``` + +- In the Rancher UI go to _Clusters_ in the _Global_ view and verify that no `local` cluster is present. + +**Remediation** + +- While upgrading or installing Rancher 2.3.3 or above, provide the following flag: + +``` text +--set addLocal="false" +``` + +### 3.1.2 - Enable Rancher Audit logging + +**Profile Applicability** + +- Level 1 + +**Description** + +Enable Rancher’s built-in audit logging capability. + +**Rationale** + +Tracking down what actions were performed by users in Rancher can provide insight during post mortems, and if monitored proactively can be used to quickly detect malicious actions. + +**Audit** + +- Verify that the audit log parameters were passed into the Rancher deployment. + +``` +kubectl get deployment rancher -n cattle-system -o yaml | grep auditLog +``` + +- Verify that the log is going to the appropriate destination, as set by +`auditLog.destination` + + - `sidecar`: + + 1. List pods: + + ``` bash + kubectl get pods -n cattle-system + ``` + + 2. Tail logs: + + ``` bash + kubectl logs -n cattle-system -c rancher-audit-log + ``` + + - `hostPath` + + 1. On the worker nodes running the Rancher pods, verify that the log files are being written to the destination indicated in `auditlog.hostPath`. + +**Remediation** + +Upgrade the Rancher server installation using Helm, and configure the audit log settings. The instructions for doing so can be found in the reference section below. + +#### Reference + +- + +## 3.2 - Rancher Management Control Plane Authentication + +### 3.2.1 - Change the local admin password from the default value + +**Profile Applicability** + +- Level 1 + +**Description** + +The local admin password should be changed from the default. + +**Rationale** + +The default admin password is common across all Rancher installations and should be changed immediately upon startup. + +**Audit** + +Attempt to login into the UI with the following credentials: + - Username: admin + - Password: admin + +The login attempt must not succeed. + +**Remediation** + +Change the password from `admin` to a password that meets the recommended password standards for your organization. + +### 3.2.2 - Configure an Identity Provider for Authentication + +**Profile Applicability** + +- Level 1 + +**Description** + +When running Rancher in a production environment, configure an identity provider for authentication. + +**Rationale** + +Rancher supports several authentication backends that are common in enterprises. It is recommended to tie Rancher into an external authentication system to simplify user and group access in the Rancher cluster. Doing so assures that access control follows the organization's change management process for user accounts. + +**Audit** + +- In the Rancher UI, select _Global_ +- Select _Security_ +- Select _Authentication_ +- Ensure the authentication provider for your environment is active and configured correctly + +**Remediation** + +Configure the appropriate authentication provider for your Rancher installation according to the documentation found at the link in the reference section below. + +#### Reference + +- + +## 3.3 - Rancher Management Control Plane RBAC + +### 3.3.1 - Ensure that administrator privileges are only granted to those who require them + +**Profile Applicability** + +- Level 1 + +**Description** + +Restrict administrator access to only those responsible for managing and operating the Rancher server. 
+ +**Rationale** + +The `admin` privilege level gives the user the highest level of access to the Rancher server and all attached clusters. This privilege should only be granted to a few people who are responsible for the availability and support of Rancher and the clusters that it manages. + +**Audit** + +The following script uses the Rancher API to show users with administrator privileges: + +``` bash +#!/bin/bash +for i in $(curl -sk -u 'token-:' https:///v3/users|jq -r .data[].links.globalRoleBindings); do + +curl -sk -u 'token-:' $i| jq '.data[] | "\(.userId) \(.globalRoleId)"' + +done + +``` + +The `admin` role should only be assigned to users that require administrative privileges. Any role that is not `admin` or `user` should be audited in the RBAC section of the UI to ensure that the privileges adhere to policies for global access. + +The Rancher server permits customization of the default global permissions. We recommend that auditors also review the policies of any custom global roles. + +**Remediation** + +Remove the `admin` role from any user that does not require administrative privileges. + +## 3.4 - Rancher Management Control Plane Configuration + +### 3.4.1 - Ensure only approved node drivers are active + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure that node drivers that are not needed or approved are not active in the Rancher console. + +**Rationale** + +Node drivers are used to provision compute nodes in various cloud providers and local IaaS infrastructure. For convenience, popular cloud providers are enabled by default. If the organization does not intend to use these or does not allow users to provision resources in certain providers, the drivers should be disabled. This will prevent users from using Rancher resources to provision the nodes. + +**Audit** + +- In the Rancher UI select _Global_ +- Select _Node Drivers_ +- Review the list of node drivers that are in an _Active_ state. + +**Remediation** + +If a disallowed node driver is active, visit the _Node Drivers_ page under _Global_ and disable it. + +## 4.1 - Rancher Kubernetes Custom Cluster Configuration via RKE + +(See Appendix C. for full RKE template example) + +### 4.1.1 - Configure kubelet options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure Kubelet options are configured to match CIS controls. + +**Rationale** + +To pass the following controls in the CIS benchmark, ensure the appropriate flags are passed to the Kubelet. 
+ +- 2.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 2.1.2 - Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) +- 2.1.6 - Ensure that the `--streaming-connection-idle-timeout` argument is not set to 0 (Scored) +- 2.1.7 - Ensure that the `--protect-kernel-defaults` argument is set to true (Scored) +- 2.1.8 - Ensure that the `--make-iptables-util-chains` argument is set to true (Scored) +- 2.1.10 - Ensure that the `--event-qps` argument is set to 0 (Scored) +- 2.1.13 - Ensure that the `RotateKubeletServerCertificate` argument is set to true (Scored) +- 2.1.14 - Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Scored) + +**Audit** + +Inspect the Kubelet containers on all hosts and verify that they are running with the following options: + +- `--streaming-connection-idle-timeout=` +- `--authorization-mode=Webhook` +- `--protect-kernel-defaults=true` +- `--make-iptables-util-chains=true` +- `--event-qps=0` +- `--anonymous-auth=false` +- `--feature-gates="RotateKubeletServerCertificate=true"` +- `--tls-cipher-suites="TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"` + +**Remediation** + +- Add the following to the RKE `cluster.yml` kubelet section under `services`: + +``` yaml +services: + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" +``` + + Where `` is in a form like `1800s`. + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.2 - Configure kube-api options + +**Profile Applicability** + +- Level 1 + +**Description** + +Ensure the RKE configuration is set to deploy the `kube-api` service with the options required for controls. + +**NOTE:** + +Enabling the `AlwaysPullImages` admission control plugin can cause degraded performance due to overhead of always pulling images. +Enabling the `DenyEscalatingExec` admission control plugin will prevent the 'Launch kubectl' functionality in the UI from working. + +**Rationale** + +To pass the following controls for the kube-api server ensure RKE configuration passes the appropriate options. 
+ +- 1.1.1 - Ensure that the `--anonymous-auth` argument is set to false (Scored) +- 1.1.8 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.1.11 - Ensure that the admission control plugin `AlwaysPullImages` is set (Scored) +- 1.1.12 - Ensure that the admission control plugin `DenyEscalatingExec` is set (Scored) +- 1.1.14 - Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) +- 1.1.15 - Ensure that the `--audit-log-path` argument is set as appropriate (Scored) +- 1.1.16 - Ensure that the `--audit-log-maxage` argument is set as appropriate (Scored) +- 1.1.17 - Ensure that the `--audit-log-maxbackup` argument is set as appropriate (Scored) +- 1.1.18 - Ensure that the `--audit-log-maxsize` argument is set as appropriate (Scored) +- 1.1.23 - Ensure that the `--service-account-lookup` argument is set to true (Scored) +- 1.1.24 - Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) +- 1.1.30 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Not Scored) +- 1.1.34 - Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) +- 1.1.35 - Ensure that the encryption provider is set to `aescbc` (Scored) +- 1.1.36 - Ensure that the admission control plugin `EventRateLimit` is set (Scored) +- 1.1.37 - Ensure that the `AdvancedAuditing` argument is not set to `false` (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-apiserver` containers: + + ``` bash + docker inspect kube-apiserver + ``` + +- Look for the following options in the command section of the output: + +``` text +--anonymous-auth=false +--profiling=false +--service-account-lookup=true +--enable-admission-plugins=ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy +--encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml +--admission-control-config-file=/etc/kubernetes/admission.yaml +--audit-log-path=/var/log/kube-audit/audit-log.json +--audit-log-maxage=30 +--audit-log-maxbackup=10 +--audit-log-maxsize=100 +--audit-log-format=json +--audit-policy-file=/etc/kubernetes/audit-policy.yaml +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +- In the `volume` section of the output ensure the bind mount is present: + +``` text +/var/log/kube-audit:/var/log/kube-audit +``` + +**Remediation** + +- In the RKE `cluster.yml` add the following directives to the `kube-api` section under `services`: + +``` yaml +services: + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: 
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" +``` + +For k8s 1.14 `enable-admission-plugins` should be + +``` yaml + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +**NOTE:** + +Files that are placed in `/opt/kubernetes` need to be mounted in using the `extra_binds` functionality in RKE. + +### 4.1.3 - Configure scheduler options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate options for the Kubernetes scheduling service. + +**NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls on the CIS benchmark, the command line options should be set on the Kubernetes scheduler. + +- 1.2.1 - Ensure that the `--profiling` argument is set to `false` (Scored) +- 1.2.2 - Ensure that the `--address` argument is set to `127.0.0.1` (Scored) + +**Audit** + +- On nodes with the `controlplane` role: inspect the `kube-scheduler` containers: + +``` bash +docker inspect kube-scheduler +``` + +- Verify the following options are set in the `command` section. + +``` text +--profiling=false +--address=127.0.0.1 +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.4 - Configure controller options + +**Profile Applicability** + +- Level 1 + +**Description** + +Set the appropriate arguments on the Kubernetes controller manager. + +5*NOTE:** Setting `--address` to `127.0.0.1` will prevent Rancher cluster monitoring from scraping this endpoint. + +**Rationale** + +To address the following controls the options need to be passed to the Kubernetes controller manager. 
+ +- 1.3.1 - Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) +- 1.3.2 - Ensure that the `--profiling` argument is set to false (Scored) +- 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Scored) +- 1.3.7 - Ensure that the `--address` argument is set to 127.0.0.1 (Scored) + +**Audit** + +- On nodes with the `controlplane` role inspect the `kube-controller-manager` container: + +``` bash +docker inspect kube-controller-manager +``` + +- Verify the following options are set in the `command` section: + +``` text +--terminated-pod-gc-threshold=1000 +--profiling=false +--address=127.0.0.1 +--feature-gates="RotateKubeletServerCertificate=true" +``` + +**Remediation** + +- In the RKE `cluster.yml` file ensure the following options are set: + +``` yaml +services: + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" +``` + +- Reconfigure the cluster: + +``` bash +rke up --config cluster.yml +``` + +### 4.1.5 - Check PSPs + +**Profile Applicability** + +- Level 1 + +**Description** + +Configure a restrictive pod security policy (PSP) as the default and create role bindings for system level services to use the less restrictive default PSP. + +**Rationale** + +To address the following controls, a restrictive default PSP needs to be applied as the default. Role bindings need to be in place to allow system services to still function. + +- 1.7.1 - Do not admit privileged containers (Not Scored) +- 1.7.2 - Do not admit containers wishing to share the host process ID namespace (Not Scored) +- 1.7.3 - Do not admit containers wishing to share the host IPC namespace (Not Scored) +- 1.7.4 - Do not admit containers wishing to share the host network namespace (Not Scored) +- 1.7.5 - Do not admit containers with `allowPrivilegeEscalation` (Not Scored) +- 1.7.6 - Do not admit root containers (Not Scored) +- 1.7.7 - Do not admit containers with dangerous capabilities (Not Scored) + +**Audit** + +- Verify that the `cattle-system` namespace exists: + +``` bash +kubectl get ns |grep cattle +``` + +- Verify that the roles exist: + +``` bash +kubectl get role default-psp-role -n ingress-nginx +kubectl get role default-psp-role -n cattle-system +kubectl get clusterrole restricted-clusterrole +``` + +- Verify the bindings are set correctly: + +``` bash +kubectl get rolebinding -n ingress-nginx default-psp-rolebinding +kubectl get rolebinding -n cattle-system default-psp-rolebinding +``` + +- Verify the restricted PSP is present. + +``` bash +kubectl get psp restricted-psp +``` + +--- + +## Appendix A - Complete ubuntu `cloud-config` Example + +`cloud-config` file to automate hardening manual steps on nodes deployment. 
+ +``` +#cloud-config +bootcmd: +- apt-get update +- apt-get install -y apt-transport-https +apt: + sources: + docker: + source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable" + keyid: 0EBFCD88 +packages: +- [docker-ce, '5:19.03.5~3-0~ubuntu-bionic'] +- jq +write_files: +# 1.1.1 - Configure default sysctl settings on all hosts +- path: /etc/sysctl.d/90-kubelet.conf + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxkeys=1000000 + kernel.keys.root_maxbytes=25000000 +# 1.4.12 etcd user +groups: + - etcd +users: + - default + - name: etcd + gecos: Etcd user + primary_group: etcd + homedir: /var/lib/etcd +# 1.4.11 etcd data dir +runcmd: + - chmod 0700 /var/lib/etcd + - usermod -G docker -a ubuntu + - sysctl -p /etc/sysctl.d/90-kubelet.conf +``` + +## Appendix B - Complete RKE `cluster.yml` Example + +Before apply, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user ids that were created on etcd nodes. + +
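+The etcd `uid`/`gid` values in the examples below use the placeholder `1000`. To confirm the real IDs, run `id etcd` on an etcd node (the user is created in section 1.4.12) and substitute the reported values under `services.etcd`, as in this minimal sketch:
+
+``` yaml
+services:
+  etcd:
+    # Replace with the uid/gid reported by `id etcd` on the etcd nodes
+    uid: 1000
+    gid: 1000
+```
+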
    + RKE yaml for k8s 1.14 + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +authorization: {} +bastion_host: + ssh_agent_auth: false +cloud_provider: {} +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.14.9-rancher1-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +restore: + restore: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube-api: + always_pull_images: true + audit_log: + enabled: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: 'false' + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube-controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + protect-kernel-defaults: 'true' + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: {} + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' +ssh_agent_auth: false +``` + +
    + +
    + RKE yaml for k8s 1.15 + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.15.6-rancher1-2 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +ssh_agent_auth: false +``` + +
    + +
    + RKE yaml for k8s 1.16 + +``` yaml +nodes: +- address: 18.191.190.205 + internal_address: 172.31.24.213 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.203 + internal_address: 172.31.24.203 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +- address: 18.191.190.10 + internal_address: 172.31.24.244 + user: ubuntu + role: [ "controlplane", "etcd", "worker" ] +addon_job_timeout: 30 +authentication: + strategy: x509 +ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# +ingress: + provider: nginx +kubernetes_version: v1.16.3-rancher1-1 +monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# +network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# +services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" +ssh_agent_auth: false +``` + +
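+Whichever of the three examples above matches your Kubernetes version, it is applied the same way as elsewhere in this guide: run `rke up` against the file and confirm the nodes register. A minimal sketch, assuming the file is saved as `cluster.yml` and that RKE writes its kubeconfig to the default `kube_config_cluster.yml`:
+
+``` bash
+# Provision or reconfigure the cluster from the hardened configuration
+rke up --config cluster.yml
+
+# Verify the nodes are up using the kubeconfig generated by RKE
+kubectl --kubeconfig kube_config_cluster.yml get nodes
+```
+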
+ 
+## Appendix C - Complete RKE Template Example
+
+Before applying, replace `rancher_kubernetes_engine_config.services.etcd.gid` and `rancher_kubernetes_engine_config.services.etcd.uid` with the proper etcd group and user IDs that were created on the etcd nodes; a short illustration follows.
+
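+A minimal sketch of that substitution, assuming the etcd user and group were created with uid and gid `52034` (the values used by the `groupadd`/`useradd` commands in the hardening guide); use whatever IDs actually exist on your etcd nodes:
+
+``` yaml
+rancher_kubernetes_engine_config:
+  services:
+    etcd:
+      # Example values only: match the uid/gid of the etcd user on your etcd nodes
+      gid: 52034
+      uid: 52034
+```
+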
    + RKE template for k8s 1.14 + +``` yaml +# +# Cluster Config +# +answers: {} +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: false +name: test-35378 +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + authorization: {} + bastion_host: + ssh_agent_auth: false + cloud_provider: {} + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.14.9-rancher1-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal + restore: + restore: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube-api: + always_pull_images: true + audit_log: + enabled: true + event_rate_limit: + enabled: true + extra_args: + anonymous-auth: 'false' + enable-admission-plugins: >- + ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,PodSecurityPolicy,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,EventRateLimit + profiling: 'false' + service-account-lookup: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: + - '/opt/kubernetes:/opt/kubernetes' + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube-controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + protect-kernel-defaults: 'true' + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: {} + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +
    + +
    + RKE template for k8s 1.15 + +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.6-rancher1-2 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +
    + +
    + RKE template for k8s 1.16 + +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.16.3-rancher1-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 1000 + retention: 72h + snapshot: false + uid: 1000 + kube_api: + always_pull_images: true + pod_security_policy: true + service_node_port_range: 30000-32767 + event_rate_limit: + enabled: true + audit_log: + enabled: true + secrets_encryption_config: + enabled: true + extra_args: + anonymous-auth: "false" + enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" + profiling: "false" + service-account-lookup: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: + - "/opt/kubernetes:/opt/kubernetes" + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + kube-controller: + extra_args: + profiling: "false" + address: "127.0.0.1" + terminated-pod-gc-threshold: "1000" + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + extra_args: + profiling: "false" + address: "127.0.0.1" + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +
diff --git a/content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md
similarity index 100%
rename from content/rancher/v2.0-v2.4/en/security/rancher-2.3.x/rancher-v2.3.3/benchmark-2.3.3/_index.md
rename to versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md
diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md
new file mode 100644
index 0000000000..cafaf6f8b6
--- /dev/null
+++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md
@@ -0,0 +1,716 @@
+---
+title: Hardening Guide v2.3.5
+weight: 100
+aliases:
+  - /rancher/v2.0-v2.4/en/security/hardening-2.3.5
+  - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/hardening-2.3.5/
+---
+
+This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes.
+
+This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher:
+
+Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version
+------------------------|----------------|-----------------------|------------------
+Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15
+
+
+[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Hardening_Guide.pdf)
+
+### Overview
+
+This document provides prescriptive guidance for hardening a production installation of Rancher v2.3.5 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Internet Security (CIS).
+
+For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.3.5](security/benchmark-2.3.5/).
+
+#### Known Issues
+
+- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only a public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes.
+- When setting the `default_pod_security_policy_template_id:` to `restricted`, Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 check 5.1.5 requires that the default service accounts have no roles or cluster roles bound to them apart from the defaults. In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments. A quick way to audit this is sketched below. 
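+A minimal sketch, using the same `kubectl` and `jq` tooling as the audit scripts later in this guide, for listing bindings that reference a `default` service account (these commands and their output format are illustrative and are not part of the official CIS audit):
+
+```
+# List RoleBindings whose subjects include a "default" ServiceAccount
+kubectl get rolebindings --all-namespaces -o json \
+  | jq -r '.items[]
+      | select([.subjects[]? | select(.kind == "ServiceAccount" and .name == "default")] | length > 0)
+      | "RoleBinding \(.metadata.namespace)/\(.metadata.name)"'
+
+# List ClusterRoleBindings whose subjects include a "default" ServiceAccount
+kubectl get clusterrolebindings -o json \
+  | jq -r '.items[]
+      | select([.subjects[]? | select(.kind == "ServiceAccount" and .name == "default")] | length > 0)
+      | "ClusterRoleBinding \(.metadata.name)"'
+```
+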
+
+### Configure Kernel Runtime Parameters
+
+The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
+
+```
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+kernel.panic=10
+kernel.panic_on_oops=1
+kernel.keys.root_maxbytes=25000000
+```
+
+Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
+
+### Configure `etcd` user and group
+A user account and group for the **etcd** service must be set up before installing RKE. The **uid** and **gid** of the **etcd** user are used in the RKE **config.yml** to set the proper permissions for files and directories at installation time.
+
+#### Create the `etcd` user and group
+To create the **etcd** user and group, run the following console commands.
+
+```
+groupadd --gid 52034 etcd
+useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
+```
+
+Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
+
+``` yaml
+services:
+  etcd:
+    gid: 52034
+    uid: 52034
+```
+
+#### Set `automountServiceAccountToken` to `false` for `default` service accounts
+Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
+
+For each namespace the **default** service account must include this value:
+
+```
+automountServiceAccountToken: false
+```
+
+Save the following YAML to a file called `account_update.yaml`:
+
+``` yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: default
+automountServiceAccountToken: false
+```
+
+Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions.
+
+```
+#!/bin/bash -e
+
+for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
+  kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
+done
+```
+
+Execute this script to apply `account_update.yaml` to the **default** service account in each namespace.
+
+### Ensure that all Namespaces have Network Policies defined
+
+Running different applications on the same Kubernetes cluster creates a risk of one
+compromised application attacking a neighboring application. Network segmentation is
+important to ensure that containers can communicate only with those they are supposed
+to. A network policy is a specification of how selections of pods are allowed to
+communicate with each other and other network endpoints.
+
+Network Policies are namespace scoped. When a network policy is introduced to a given
+namespace, all traffic not allowed by the policy is denied. However, if there are no network
+policies in a namespace, all traffic will be allowed into and out of the pods in that
+namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled.
+This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement.
+Additional information about CNI providers can be found
+[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
+
+Once a CNI provider is enabled on a cluster, a default network policy can be applied. 
For reference purposes, a
+**permissive** example is provided below. If you want to allow all traffic to all pods in a namespace
+(even if policies are added that cause some pods to be treated as “isolated”),
+you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as
+`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
+about network policies can be found on the Kubernetes site.
+
+> This `NetworkPolicy` is not recommended for production use.
+
+``` yaml
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-allow-all
+spec:
+  podSelector: {}
+  ingress:
+  - {}
+  egress:
+  - {}
+  policyTypes:
+  - Ingress
+  - Egress
+```
+
+Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to
+`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions.
+
+```
+#!/bin/bash -e
+
+for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
+  kubectl apply -f default-allow-all.yaml -n ${namespace}
+done
+```
+Execute this script to apply the **permissive** `NetworkPolicy` in `default-allow-all.yaml` to all namespaces.
+
+### Reference Hardened RKE `cluster.yml` configuration
+The reference `cluster.yml` is used by the RKE CLI and provides the configuration needed to achieve a hardened install
+of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is
+provided with additional details about the configuration items.
+
+``` yaml
+# If you intend to deploy Kubernetes in an air-gapped environment,
+# please consult the documentation on how to configure custom RKE images. 
+kubernetes_version: "v1.15.9-rancher1-1" +enable_network_policy: true +default_pod_security_policy_template_id: "restricted" +services: + etcd: + uid: 52034 + gid: 52034 + kube-api: + pod_security_policy: true + secrets_encryption_config: + enabled: true + audit_log: + enabled: true + admission_configuration: + event_rate_limit: + enabled: true + kube-controller: + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: [] + extra_env: [] + cluster_domain: "" + infra_container_image: "" + cluster_dns_server: "" + fail_swap_on: false + kubeproxy: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] +network: + plugin: "" + options: {} + mtu: 0 + node_selector: {} +authentication: + strategy: "" + sans: [] + webhook: null +addons: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: 
ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + +addons_include: [] +system_images: + etcd: "" + alpine: "" + nginx_proxy: "" + cert_downloader: "" + kubernetes_services_sidecar: "" + kubedns: "" + dnsmasq: "" + kubedns_sidecar: "" + kubedns_autoscaler: "" + coredns: "" + coredns_autoscaler: "" + kubernetes: "" + flannel: "" + flannel_cni: "" + calico_node: "" + calico_cni: "" + calico_controllers: "" + calico_ctl: "" + calico_flexvol: "" + canal_node: "" + canal_cni: "" + canal_flannel: "" + canal_flexvol: "" + weave_node: "" + weave_cni: "" + pod_infra_container: "" + ingress: "" + ingress_backend: "" + metrics_server: "" + windows_pod_infra_container: "" +ssh_key_path: "" +ssh_cert_path: "" +ssh_agent_auth: false +authorization: + mode: "" + options: {} +ignore_docker_version: false +private_registries: [] +ingress: + provider: "" + options: {} + node_selector: {} + extra_args: {} + dns_policy: "" + extra_envs: [] + extra_volumes: [] + extra_volume_mounts: [] +cluster_name: "" +prefix_path: "" +addon_job_timeout: 0 +bastion_host: + address: "" + port: "" + user: "" + ssh_key: "" + ssh_key_path: "" + ssh_cert: "" + ssh_cert_path: "" +monitoring: + provider: "" + options: {} + node_selector: {} +restore: + restore: false + snapshot_name: "" +dns: null +``` + +### Reference Hardened RKE Template configuration + +The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. +RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher +[documentaion](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation) for additional installation and RKE Template details. 
+ +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + addons: |- + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + ignore_docker_version: true + kubernetes_version: v1.15.9-rancher1-1 +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To 
specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + mtu: 0 + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 52034 + retention: 72h + snapshot: false + uid: 52034 + kube_api: + always_pull_images: false + audit_log: + enabled: true + event_rate_limit: + enabled: true + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + generate_serving_certificate: true + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: + +The reference **cloud-config** is generally used in cloud infrastructure environments to allow for +configuration management of compute instances. The reference config configures Ubuntu operating system level settings +needed before installing kubernetes. 
+ +``` yaml +#cloud-config +packages: + - curl + - jq +runcmd: + - sysctl -w vm.overcommit_memory=1 + - sysctl -w kernel.panic=10 + - sysctl -w kernel.panic_on_oops=1 + - curl https://releases.rancher.com/install-docker/18.09.sh | sh + - usermod -aG docker ubuntu + - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done + - addgroup --gid 52034 etcd + - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +write_files: + - path: /etc/sysctl.d/kubelet.conf + owner: root:root + permissions: "0644" + content: | + vm.overcommit_memory=1 + kernel.panic=10 + kernel.panic_on_oops=1 +``` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md new file mode 100644 index 0000000000..f6331e1176 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md @@ -0,0 +1,2269 @@ +--- +title: CIS Benchmark Rancher Self-Assessment Guide - v2.3.5 +weight: 205 +aliases: + - /rancher/v2.0-v2.4/en/security/benchmark-2.3.5 + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/benchmark-2.3.5/ +--- + +### CIS Kubernetes Benchmark v1.5 - Rancher v2.3.5 with Kubernetes v1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.3.5/Rancher_Benchmark_Assessment.pdf) + +#### Overview + +This document is a companion to the Rancher v2.3.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. 
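+For example, because configuration is passed as container arguments, the effective settings of a component can be read either from the container metadata or from the host process list (the same `ps` pattern used by the audits below). A sketch, assuming RKE's default container names such as `kube-apiserver`:
+
+```
+# Show the arguments the kube-apiserver container was started with
+docker inspect kube-apiserver --format '{{ range .Args }}{{ println . }}{{ end }}'
+
+# Equivalent view from the host process list
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+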
+ 
+Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing.
+When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with a valid config) tools, which are required for testing and evaluating the test results.
+
+> NOTE: only scored tests are covered in this guide.
+
+### Controls
+
+---
+## 1 Master Node Security Configuration
+### 1.1 Master Node Configuration Files
+
+#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
+
+#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
+
+#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
+
+#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
+
+#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
+
+#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
+
+#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
+
+#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
+
+#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`,
+from the below command:
+
+``` bash
+ps -ef | grep etcd
+```
+
+Run the below command (based on the etcd data directory found above). 
For example, + +``` bash +chmod 700 /var/lib/etcd +``` + +**Audit Script:** 1.1.11.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a +``` + +**Audit Execution:** + +``` +./1.1.11.sh etcd +``` + +**Expected result**: + +``` +'700' is equal to '700' +``` + +#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) + +**Result:** PASS + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). +For example, +``` bash +chown etcd:etcd /var/lib/etcd +``` + +**Audit Script:** 1.1.12.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G +``` + +**Audit Execution:** + +``` +./1.1.12.sh etcd +``` + +**Expected result**: + +``` +'etcd:etcd' is present +``` + +#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, + +``` bash +chown -R root:root /etc/kubernetes/ssl +``` + +**Audit:** + +``` +stat -c %U:%G /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'root:root' is present +``` + +#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 644 /etc/kubernetes/ssl +``` + +**Audit Script:** check_files_permissions.sh + +``` +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit +``` + +**Audit Execution:** + +``` +./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' +``` + +**Expected result**: + +``` +'true' is present +``` + +#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 600 /etc/kubernetes/ssl/certs/serverca +``` + +**Audit Script:** 1.1.21.sh + +``` +#!/bin/bash -e +check_dir=${1:-/etc/kubernetes/ssl} + +for file in $(find ${check_dir} -name "*key.pem"); do + file_permission=$(stat -c %a ${file}) + if [[ "${file_permission}" == "600" ]]; then + continue + else + echo "FAIL: ${file} ${file_permission}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./1.1.21.sh /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 1.2 API Server + +#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--basic-auth-file' is not present +``` + +#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--token-auth-file=` parameter. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--token-auth-file' is not present +``` + +#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--kubelet-https` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-https' is present OR '--kubelet-https' is not present +``` + +#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +kubelet client certificate and key parameters as below. + +``` bash +--kubelet-client-certificate= +--kubelet-client-key= +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-certificate-authority' is present +``` + +#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. +One such example could be as below. + +``` bash +--authorization-mode=RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' not have 'AlwaysAllow' +``` + +#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
+ +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'Node' +``` + +#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, +for example: + +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'RBAC' +``` + +#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a +value that does not include `AlwaysAdmit`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and ensure that the `--disable-admission-plugins` parameter is set to a +value that does not include `ServiceAccount`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--disable-admission-plugins` parameter to +ensure it does not include `NamespaceLifecycle`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `PodSecurityPolicy`: + +``` bash +--enable-admission-plugins=...,PodSecurityPolicy,... +``` + +Then restart the API Server. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `NodeRestriction`. + +``` bash +--enable-admission-plugins=...,NodeRestriction,... +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--insecure-bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--insecure-bind-address' is not present +``` + +#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--insecure-port=0 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--secure-port` parameter or +set it to a different **(non-zero)** desired port. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +6443 is greater than 0 OR '--secure-port' is not present +``` + +#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. 
+ +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-path` parameter to a suitable path and +file where you would like audit logs to be written, for example: + +``` bash +--audit-log-path=/var/log/apiserver/audit.log +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--audit-log-path' is present +``` + +#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: + +``` bash +--audit-log-maxage=30 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +30 is greater or equal to 30 +``` + +#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate +value. + +``` bash +--audit-log-maxbackup=10 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +10 is greater or equal to 10 +``` + +#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. +For example, to set it as `100` **MB**: + +``` bash +--audit-log-maxsize=100 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +100 is greater or equal to 100 +``` + +#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +and set the below parameter as appropriate and if needed. +For example, + +``` bash +--request-timeout=300s +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--request-timeout' is not present OR '--request-timeout' is present +``` + +#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--service-account-lookup=true +``` + +Alternatively, you can delete the `--service-account-lookup` parameter from this file so +that the default takes effect. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--service-account-key-file` parameter +to the public key file for service accounts: + +``` bash +`--service-account-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-key-file' is present +``` + +#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the **etcd** certificate and **key** file parameters. + +``` bash +`--etcd-certfile=` +`--etcd-keyfile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the TLS certificate and private key file parameters. + +``` bash +`--tls-cert-file=` +`--tls-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the client certificate authority file. + +``` bash +`--client-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the etcd certificate authority file parameter. + +``` bash +`--etcd-cafile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-cafile' is present +``` + +#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. 
+Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml`
+on the master node and set the `--encryption-provider-config` parameter to the path of that file:
+
+``` bash
+--encryption-provider-config=
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected result**:
+
+```
+'--encryption-provider-config' is present
+```
+
+#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Follow the Kubernetes documentation and configure an `EncryptionConfig` file.
+In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider.
+
+**Audit Script:** 1.2.34.sh
+
+```
+#!/bin/bash -e
+
+check_file=${1}
+
+if grep -q -E 'aescbc|kms|secretbox' ${check_file}; then
+  echo "--pass"
+  exit 0
+else
+  echo "fail: no encryption provider found in ${check_file}"
+  exit 1
+fi
+```
+
+**Audit Execution:**
+
+```
+./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml
+```
+
+**Expected result**:
+
+```
+'--pass' is present
+```
+
+### 1.3 Controller Manager
+
+#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold,
+for example:
+
+``` bash
+--terminated-pod-gc-threshold=10
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'--terminated-pod-gc-threshold' is present
+```
+
+#### 1.3.2 Ensure that the `--profiling` argument is set to `false` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the below parameter.
+
+``` bash
+--profiling=false
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'false' is equal to 'false'
+```
+
+#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the below parameter.
+
+``` bash
+--use-service-account-credentials=true
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'true' is not equal to 'false'
+```
+
+#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the `--service-account-private-key-file` parameter
+to the private key file for service accounts.
+ +``` bash +`--service-account-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-private-key-file' is present +``` + +#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. + +``` bash +`--root-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--root-ca-file' is present +``` + +#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' +``` + +#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' is present OR '--bind-address' is not present +``` + +### 1.4 Scheduler + +#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file +on the master node and set the below parameter. + +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' is present OR '--bind-address' is not present +``` + +## 2 Etcd Node Configuration +### 2 Etcd Node Configuration Files + +#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` +on the master node and set the below parameters. 
+ +``` bash +`--cert-file=` +`--key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--cert-file' is present AND '--key-file' is present +``` + +#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--client-cert-auth="true" +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--auto-tls` parameter or set it to `false`. + +``` bash + --auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the +master node and set the below parameters. + +``` bash +`--peer-client-file=` +`--peer-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-cert-file' is present AND '--peer-key-file' is present +``` + +#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--peer-client-cert-auth=true +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--peer-auto-tls` parameter or set it to `false`. + +``` bash +--peer-auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +## 3 Control Plane Configuration +### 3.2 Logging + +#### 3.2.1 Ensure that a minimal audit policy is created (Scored) + +**Result:** PASS + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit Script:** 3.2.1.sh + +``` +#!/bin/bash -e + +api_server_bin=${1} + +/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep +``` + +**Audit Execution:** + +``` +./3.2.1.sh kube-apiserver +``` + +**Expected result**: + +``` +'--audit-policy-file' is present +``` + +## 4 Worker Node Security Configuration +### 4.1 Worker Node Configuration Files + +#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time.
+
+#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time.
+
+#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'root:root' is present
+```
+
+#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'root:root' is equal to 'root:root'
+```
+
+#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the following command to modify the file permissions of the certificate authorities file referenced by `--client-ca-file`.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kube-ca.pem
+```
+
+**Audit:**
+
+```
+stat -c %a /etc/kubernetes/ssl/kube-ca.pem
+```
+
+**Expected result**:
+
+```
+'644' is equal to '644' OR '640' is present OR '600' is present
+```
+
+#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the following command to modify the ownership of the `--client-ca-file`.
+ +``` bash +chown root:root +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` + +#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +### 4.2 Kubelet + +#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to +`false`. +If using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--anonymous-auth=false +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If +using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +--authorization-mode=Webhook +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'Webhook' not have 'AlwaysAllow' +``` + +#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +`--client-ca-file=` +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
+If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--read-only-port=0 +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than `0`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--streaming-connection-idle-timeout=5m +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--protect-kernel-defaults=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove the `--make-iptables-util-chains` argument from the +`KUBELET_SYSTEM_PODS_ARGS` variable. +Based on your system, restart the kubelet service. For example: + +```bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' OR '--make-iptables-util-chains' is not present +``` + +#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
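+
+Those run-time arguments can still be reviewed on a worker node. A hedged sketch, reusing the `ps` approach from the other kubelet audits in this section:
+
+``` bash
+# Sketch only: list any TLS serving certificate arguments the kubelet was started with.
+/bin/ps -C kubelet -o args= | tr ' ' '\n' | grep -E '^--tls-(cert|private-key)-file' \
+  || echo "no --tls-cert-file / --tls-private-key-file arguments found"
+```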
+ +#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` +variable. +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--rotate-certificates' is present OR '--rotate-certificates' is not present +``` + +#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` +on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +## 5 Kubernetes Policies +### 5.1 RBAC and Service Accounts + +#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) + +**Result:** PASS + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value + +``` bash +automountServiceAccountToken: false +``` + +**Audit Script:** 5.1.5.sh + +``` +#!/bin/bash + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" + +if [[ "${accounts}" != "" ]]; then + echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" + exit 1 +fi + +default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" + +if [[ "${default_binding}" -gt 0 ]]; then + echo "fail: default service accounts have non default bindings" + exit 1 +fi + +echo "--pass" +exit 0 +``` + +**Audit Execution:** + +``` +./5.1.5.sh +``` + +**Expected result**: + +``` +'--pass' is present +``` + +### 5.2 Pod Security Policies + +#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostPID` field is omitted or set to `false`. 
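+
+As a quick way to see which policies would fail this control, a hedged sketch that lists every PodSecurityPolicy together with its effective `hostPID` value (an omitted field counts as `false`), using the same kubeconfig as the audit below:
+
+``` bash
+# Sketch only: print each PSP name and its effective hostPID setting.
+kubectl --kubeconfig=/root/.kube/config get psp -o json \
+  | jq -r '.items[] | "\(.metadata.name) hostPID=\(.spec.hostPID // false)"'
+```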
+ +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostIPC` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostNetwork` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +### 5.3 Network Policies and CNI + +#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create `NetworkPolicy` objects as you need them. + +**Audit Script:** 5.3.2.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [ ${policy_count} -eq 0 ]; then + echo "fail: ${namespace}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./5.3.2.sh +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 5.6 General Policies + +#### 5.6.4 The default namespace should not be used (Scored) + +**Result:** PASS + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + +**Audit Script:** 5.6.4.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [[ $? 
-gt 0 ]]; then + echo "fail: kubectl failed" + exit 1 +fi + +default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) + +echo "--count=${default_resources}" +``` + +**Audit Execution:** + +``` +./5.6.4.sh +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md new file mode 100644 index 0000000000..2b3ee57ba7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md @@ -0,0 +1,723 @@ +--- +title: Hardening Guide v2.4 +weight: 99 +aliases: + - /rancher/v2.0-v2.4/en/security/hardening-2.4 + - /rancher/v2.x/en/security/rancher-2.4/hardening-2.4/ +--- + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.4 | Rancher v2.4 | Benchmark v1.5 | Kubernetes 1.15 + + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Hardening_Guide.pdf) + +### Overview + +This document provides prescriptive guidance for hardening a production installation of Rancher v2.4 with Kubernetes v1.15. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS Benchmark Rancher Self-Assessment Guide - Rancher v2.4](security/benchmark-2.4/). + +#### Known Issues + +- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. +- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. In addition the default service accounts should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +### Configure Kernel Runtime Parameters + +The following `sysctl` configuration is recommended for all nodes type in the cluster. 
Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: + +``` +vm.overcommit_memory=1 +vm.panic_on_oom=0 +kernel.panic=10 +kernel.panic_on_oops=1 +kernel.keys.root_maxbytes=25000000 +``` + +Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. + +### Configure `etcd` user and group +A user account and group for the **etcd** service is required to be setup before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. + +#### create `etcd` user and group +To create the **etcd** group run the following console commands. + +The commands below use `52034` for **uid** and **gid** are for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`. + +``` +groupadd --gid 52034 etcd +useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +``` + +Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: + +``` yaml +services: + etcd: + gid: 52034 + uid: 52034 +``` + +#### Set `automountServiceAccountToken` to `false` for `default` service accounts +Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +For each namespace including **default** and **kube-system** on a standard RKE install the **default** service account must include this value: + +``` +automountServiceAccountToken: false +``` + +Save the following yaml to a file called `account_update.yaml` + +``` yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default +automountServiceAccountToken: false +``` + +Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. + +``` +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" +done +``` + +### Ensure that all Namespaces have Network Policies defined + +Running different applications on the same Kubernetes cluster creates a risk of one +compromised application attacking a neighboring application. Network segmentation is +important to ensure that containers can communicate only with those they are supposed +to. A network policy is a specification of how selections of pods are allowed to +communicate with each other and other network endpoints. + +Network Policies are namespace scoped. When a network policy is introduced to a given +namespace, all traffic not allowed by the policy is denied. However, if there are no network +policies in a namespace all traffic will be allowed into and out of the pods in that +namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. +This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. 
+Additional information about CNI providers can be found +[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) + +Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a +**permissive** example is provide below. If you want to allow all traffic to all pods in a namespace +(even if policies are added that cause some pods to be treated as “isolated”), +you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as +`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) +about network policies can be found on the Kubernetes site. + +> This `NetworkPolicy` is not recommended for production use + +``` yaml +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-allow-all +spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress +``` + +Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to +`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. + +``` +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + kubectl apply -f default-allow-all.yaml -n ${namespace} +done +``` +Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. + +### Reference Hardened RKE `cluster.yml` configuration +The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install +of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is +provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes + + +``` yaml +# If you intend to deploy Kubernetes in an air-gapped environment, +# please consult the documentation on how to configure custom RKE images. 
+kubernetes_version: "v1.15.9-rancher1-1" +enable_network_policy: true +default_pod_security_policy_template_id: "restricted" +# the nodes directive is required and will vary depending on your environment +# documentation for node configuration can be found here: +# https://rancher.com/docs/rke/latest/en/config-options/nodes +nodes: +services: + etcd: + uid: 52034 + gid: 52034 + kube-api: + pod_security_policy: true + secrets_encryption_config: + enabled: true + audit_log: + enabled: true + admission_configuration: + event_rate_limit: + enabled: true + kube-controller: + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: [] + extra_env: [] + cluster_domain: "" + infra_container_image: "" + cluster_dns_server: "" + fail_swap_on: false + kubeproxy: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] +network: + plugin: "" + options: {} + mtu: 0 + node_selector: {} +authentication: + strategy: "" + sans: [] + webhook: null +addons: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: 
rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + +addons_include: [] +system_images: + etcd: "" + alpine: "" + nginx_proxy: "" + cert_downloader: "" + kubernetes_services_sidecar: "" + kubedns: "" + dnsmasq: "" + kubedns_sidecar: "" + kubedns_autoscaler: "" + coredns: "" + coredns_autoscaler: "" + kubernetes: "" + flannel: "" + flannel_cni: "" + calico_node: "" + calico_cni: "" + calico_controllers: "" + calico_ctl: "" + calico_flexvol: "" + canal_node: "" + canal_cni: "" + canal_flannel: "" + canal_flexvol: "" + weave_node: "" + weave_cni: "" + pod_infra_container: "" + ingress: "" + ingress_backend: "" + metrics_server: "" + windows_pod_infra_container: "" +ssh_key_path: "" +ssh_cert_path: "" +ssh_agent_auth: false +authorization: + mode: "" + options: {} +ignore_docker_version: false +private_registries: [] +ingress: + provider: "" + options: {} + node_selector: {} + extra_args: {} + dns_policy: "" + extra_envs: [] + extra_volumes: [] + extra_volume_mounts: [] +cluster_name: "" +prefix_path: "" +addon_job_timeout: 0 +bastion_host: + address: "" + port: "" + user: "" + ssh_key: "" + ssh_key_path: "" + ssh_cert: "" + ssh_cert_path: "" +monitoring: + provider: "" + options: {} + node_selector: {} +restore: + restore: false + snapshot_name: "" +dns: null +``` + +### Reference Hardened RKE Template configuration + +The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. +RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher +[documentaion](https://rancher.com/docs/rancher/v2.0-v2.4/en/installation) for additional installation and RKE Template details. 
+ +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + addons: |- + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + ignore_docker_version: true + kubernetes_version: v1.15.9-rancher1-1 +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To 
specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + mtu: 0 + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 52034 + retention: 72h + snapshot: false + uid: 52034 + kube_api: + always_pull_images: false + audit_log: + enabled: true + event_rate_limit: + enabled: true + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + generate_serving_certificate: true + scheduler: + extra_args: + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: + +The reference **cloud-config** is generally used in cloud infrastructure environments to allow for +configuration management of compute instances. The reference config configures Ubuntu operating system level settings +needed before installing kubernetes. 
+ +``` yaml +#cloud-config +packages: + - curl + - jq +runcmd: + - sysctl -w vm.overcommit_memory=1 + - sysctl -w kernel.panic=10 + - sysctl -w kernel.panic_on_oops=1 + - curl https://releases.rancher.com/install-docker/18.09.sh | sh + - usermod -aG docker ubuntu + - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done + - addgroup --gid 52034 etcd + - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +write_files: + - path: /etc/sysctl.d/kubelet.conf + owner: root:root + permissions: "0644" + content: | + vm.overcommit_memory=1 + kernel.panic=10 + kernel.panic_on_oops=1 +``` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md new file mode 100644 index 0000000000..6c9debdaf1 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md @@ -0,0 +1,2269 @@ +--- +title: CIS Benchmark Rancher Self-Assessment Guide - v2.4 +weight: 204 +aliases: + - /rancher/v2.0-v2.4/en/security/benchmark-2.4 + - /rancher/v2.x/en/security/rancher-2.4/benchmark-2.4/ +--- + +### CIS Kubernetes Benchmark v1.5 - Rancher v2.4 with Kubernetes v1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.4/Rancher_Benchmark_Assessment.pdf) + +#### Overview + +This document is a companion to the Rancher v2.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.4 | Rancher v2.4 | Hardening Guide v2.4 | Kubernetes v1.15 | Benchmark v1.5 + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. 
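+
+Two command shapes recur throughout this guide, both taken from audits later in the document: process-argument checks for the control plane components, and `docker inspect` checks for file-level controls on the containers RKE creates.
+
+```
+# Argument-style audit, e.g. for the API server:
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+
+# Container-style audit, e.g. to locate the etcd data directory bind mount:
+docker inspect etcd | jq -r '.[].HostConfig.Binds[]'
+```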
+When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. + +> NOTE: only scored tests are covered in this guide. + +### Controls + +--- +## 1 Master Node Security Configuration +### 1.1 Master Node Configuration Files + +#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. + +#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time. + +#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. + +#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). 
For example, + +``` bash +chmod 700 /var/lib/etcd +``` + +**Audit Script:** 1.1.11.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a +``` + +**Audit Execution:** + +``` +./1.1.11.sh etcd +``` + +**Expected result**: + +``` +'700' is equal to '700' +``` + +#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) + +**Result:** PASS + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). +For example, +``` bash +chown etcd:etcd /var/lib/etcd +``` + +**Audit Script:** 1.1.12.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G +``` + +**Audit Execution:** + +``` +./1.1.12.sh etcd +``` + +**Expected result**: + +``` +'etcd:etcd' is present +``` + +#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, + +``` bash +chown -R root:root /etc/kubernetes/ssl +``` + +**Audit:** + +``` +stat -c %U:%G /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'root:root' is present +``` + +#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 644 /etc/kubernetes/ssl +``` + +**Audit Script:** check_files_permissions.sh + +``` +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit +``` + +**Audit Execution:** + +``` +./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' +``` + +**Expected result**: + +``` +'true' is present +``` + +#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 600 /etc/kubernetes/ssl/certs/serverca +``` + +**Audit Script:** 1.1.21.sh + +``` +#!/bin/bash -e +check_dir=${1:-/etc/kubernetes/ssl} + +for file in $(find ${check_dir} -name "*key.pem"); do + file_permission=$(stat -c %a ${file}) + if [[ "${file_permission}" == "600" ]]; then + continue + else + echo "FAIL: ${file} ${file_permission}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./1.1.21.sh /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 1.2 API Server + +#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--basic-auth-file' is not present +``` + +#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--token-auth-file=` parameter. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--token-auth-file' is not present +``` + +#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--kubelet-https` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-https' is present OR '--kubelet-https' is not present +``` + +#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +kubelet client certificate and key parameters as below. + +``` bash +--kubelet-client-certificate= +--kubelet-client-key= +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-certificate-authority' is present +``` + +#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. +One such example could be as below. + +``` bash +--authorization-mode=RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' not have 'AlwaysAllow' +``` + +#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
+ +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'Node' +``` + +#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, +for example: + +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'RBAC' +``` + +#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a +value that does not include `AlwaysAdmit`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and ensure that the `--disable-admission-plugins` parameter is set to a +value that does not include `ServiceAccount`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--disable-admission-plugins` parameter to +ensure it does not include `NamespaceLifecycle`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `PodSecurityPolicy`: + +``` bash +--enable-admission-plugins=...,PodSecurityPolicy,... +``` + +Then restart the API Server. 
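+
+Before the plugin is enforced, it can help to confirm that at least one `PodSecurityPolicy` object already exists in the cluster, since enabling the plugin with no policies in place will prevent new pods from being admitted. A minimal check, assuming the same kubeconfig location used by the audit scripts in this guide:
+
+``` bash
+# An empty list here means new pods would be rejected once PodSecurityPolicy is enforced.
+kubectl --kubeconfig=/root/.kube/config get podsecuritypolicies
+```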
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `NodeRestriction`. + +``` bash +--enable-admission-plugins=...,NodeRestriction,... +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--insecure-bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--insecure-bind-address' is not present +``` + +#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--insecure-port=0 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--secure-port` parameter or +set it to a different **(non-zero)** desired port. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +6443 is greater than 0 OR '--secure-port' is not present +``` + +#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. 
+ +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-path` parameter to a suitable path and +file where you would like audit logs to be written, for example: + +``` bash +--audit-log-path=/var/log/apiserver/audit.log +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--audit-log-path' is present +``` + +#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: + +``` bash +--audit-log-maxage=30 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +30 is greater or equal to 30 +``` + +#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate +value. + +``` bash +--audit-log-maxbackup=10 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +10 is greater or equal to 10 +``` + +#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. +For example, to set it as `100` **MB**: + +``` bash +--audit-log-maxsize=100 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +100 is greater or equal to 100 +``` + +#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +and set the below parameter as appropriate and if needed. +For example, + +``` bash +--request-timeout=300s +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--request-timeout' is not present OR '--request-timeout' is present +``` + +#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--service-account-lookup=true +``` + +Alternatively, you can delete the `--service-account-lookup` parameter from this file so +that the default takes effect. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--service-account-key-file` parameter +to the public key file for service accounts: + +``` bash +`--service-account-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-key-file' is present +``` + +#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the **etcd** certificate and **key** file parameters. + +``` bash +`--etcd-certfile=` +`--etcd-keyfile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the TLS certificate and private key file parameters. + +``` bash +`--tls-cert-file=` +`--tls-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the client certificate authority file. + +``` bash +`--client-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the etcd certificate authority file parameter. + +``` bash +`--etcd-cafile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-cafile' is present +``` + +#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. 
+Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml`
+on the master node and set the `--encryption-provider-config` parameter to the path of that file:
+
+``` bash
+--encryption-provider-config=
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected result**:
+
+```
+'--encryption-provider-config' is present
+```
+
+#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Follow the Kubernetes documentation and configure an `EncryptionConfig` file.
+In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider.
+
+**Audit Script:** 1.2.34.sh
+
+```
+#!/bin/bash -e
+
+check_file=${1}
+
+# Pass if the encryption configuration uses one of the supported providers.
+if grep -q -E 'aescbc|kms|secretbox' ${check_file}; then
+  echo "--pass"
+  exit 0
+else
+  echo "fail: no supported encryption provider found in ${check_file}"
+  exit 1
+fi
+```
+
+**Audit Execution:**
+
+```
+./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml
+```
+
+**Expected result**:
+
+```
+'--pass' is present
+```
+
+### 1.3 Controller Manager
+
+#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold,
+for example:
+
+``` bash
+--terminated-pod-gc-threshold=10
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'--terminated-pod-gc-threshold' is present
+```
+
+#### 1.3.2 Ensure that the `--profiling` argument is set to `false` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the below parameter.
+
+``` bash
+--profiling=false
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'false' is equal to 'false'
+```
+
+#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the below parameter.
+
+``` bash
+--use-service-account-credentials=true
+```
+
+**Audit:**
+
+```
+/bin/ps -ef | grep kube-controller-manager | grep -v grep
+```
+
+**Expected result**:
+
+```
+'true' is not equal to 'false'
+```
+
+#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml`
+on the master node and set the `--service-account-private-key-file` parameter
+to the private key file for service accounts.
+ +``` bash +`--service-account-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-private-key-file' is present +``` + +#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. + +``` bash +`--root-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--root-ca-file' is present +``` + +#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' +``` + +#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' is present OR '--bind-address' is not present +``` + +### 1.4 Scheduler + +#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file +on the master node and set the below parameter. + +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' is present OR '--bind-address' is not present +``` + +## 2 Etcd Node Configuration +### 2 Etcd Node Configuration Files + +#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` +on the master node and set the below parameters. 
+ +``` bash +`--cert-file=` +`--key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--cert-file' is present AND '--key-file' is present +``` + +#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--client-cert-auth="true" +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--auto-tls` parameter or set it to `false`. + +``` bash + --auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the +master node and set the below parameters. + +``` bash +`--peer-client-file=` +`--peer-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-cert-file' is present AND '--peer-key-file' is present +``` + +#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--peer-client-cert-auth=true +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--peer-auto-tls` parameter or set it to `false`. + +``` bash +--peer-auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +## 3 Control Plane Configuration +### 3.2 Logging + +#### 3.2.1 Ensure that a minimal audit policy is created (Scored) + +**Result:** PASS + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit Script:** 3.2.1.sh + +``` +#!/bin/bash -e + +api_server_bin=${1} + +/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep +``` + +**Audit Execution:** + +``` +./3.2.1.sh kube-apiserver +``` + +**Expected result**: + +``` +'--audit-policy-file' is present +``` + +## 4 Worker Node Security Configuration +### 4.1 Worker Node Configuration Files + +#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time.
+
+#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time.
+
+#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'root:root' is present
+```
+
+#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present
+```
+
+#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+
+``` bash
+chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml
+```
+
+**Audit:**
+
+```
+/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi'
+```
+
+**Expected result**:
+
+```
+'root:root' is equal to 'root:root'
+```
+
+#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the following command to modify the file permissions of the `--client-ca-file`.
+For example,
+
+``` bash
+chmod 644 /etc/kubernetes/ssl/kube-ca.pem
+```
+
+**Audit:**
+
+```
+stat -c %a /etc/kubernetes/ssl/kube-ca.pem
+```
+
+**Expected result**:
+
+```
+'644' is equal to '644' OR '640' is present OR '600' is present
+```
+
+#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+Run the following command to modify the ownership of the `--client-ca-file`.
+ +``` bash +chown root:root +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` + +#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +### 4.2 Kubelet + +#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to +`false`. +If using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--anonymous-auth=false +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If +using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +--authorization-mode=Webhook +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'Webhook' not have 'AlwaysAllow' +``` + +#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +`--client-ca-file=` +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
+If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--read-only-port=0 +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than `0`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--streaming-connection-idle-timeout=5m +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--protect-kernel-defaults=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove the `--make-iptables-util-chains` argument from the +`KUBELET_SYSTEM_PODS_ARGS` variable. +Based on your system, restart the kubelet service. For example: + +```bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' OR '--make-iptables-util-chains' is not present +``` + +#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
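+
+Although there is no kubelet configuration file to inspect, the equivalent TLS arguments can still be checked against the running container. A quick sketch, assuming `kubelet` is the container name RKE assigns and that `jq` is available, as elsewhere in this guide:
+
+``` bash
+# Show any TLS certificate/key flags passed to the kubelet container at run time.
+docker inspect kubelet | jq -r '.[0].Args[]' | grep -E -- '--tls-(cert|private-key)-file' || echo "no kubelet TLS flags set"
+```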
+ +#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` +variable. +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--rotate-certificates' is present OR '--rotate-certificates' is not present +``` + +#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` +on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +## 5 Kubernetes Policies +### 5.1 RBAC and Service Accounts + +#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) + +**Result:** PASS + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value + +``` bash +automountServiceAccountToken: false +``` + +**Audit Script:** 5.1.5.sh + +``` +#!/bin/bash + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" + +if [[ "${accounts}" != "" ]]; then + echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" + exit 1 +fi + +default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" + +if [[ "${default_binding}" -gt 0 ]]; then + echo "fail: default service accounts have non default bindings" + exit 1 +fi + +echo "--pass" +exit 0 +``` + +**Audit Execution:** + +``` +./5.1.5.sh +``` + +**Expected result**: + +``` +'--pass' is present +``` + +### 5.2 Pod Security Policies + +#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostPID` field is omitted or set to `false`. 
+ +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostIPC` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostNetwork` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +### 5.3 Network Policies and CNI + +#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create `NetworkPolicy` objects as you need them. + +**Audit Script:** 5.3.2.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [ ${policy_count} -eq 0 ]; then + echo "fail: ${namespace}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./5.3.2.sh +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 5.6 General Policies + +#### 5.6.4 The default namespace should not be used (Scored) + +**Result:** PASS + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + +**Audit Script:** 5.6.4.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [[ $? 
-gt 0 ]]; then + echo "fail: kubectl failed" + exit 1 +fi + +default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) + +echo "--count=${default_resources}" +``` + +**Audit Execution:** + +``` +./5.6.4.sh +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + diff --git a/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md new file mode 100644 index 0000000000..9d93aee20d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/rancher-security/security-advisories-and-cves.md @@ -0,0 +1,20 @@ +--- +title: Rancher CVEs and Resolutions +weight: 300 +--- + +Rancher is committed to informing the community of security issues in our products. Rancher will publish CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. + +| ID | Description | Date | Resolution | +|----|-------------|------|------------| +| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server, i.e. local server, and return the requested information. You are vulnerable if you are running any Rancher 2.x version. Only valid Rancher users who have some level of permission on the cluster can perform the request. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher where users were granted access to resources regardless of the resource's API group. For example Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. You are vulnerable if you are running any Rancher 2.x version. The extent of the exploit increases if there are other matching CRD resources installed in the cluster. There is no direct mitigation besides upgrading to the patched versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. 
Any Rancher user that was logged-in and aware of a cloud credential ID that was valid for a given cloud provider could make requests against that cloud provider's API through the proxy API, and the cloud credential would be attached. You are vulnerable if you are running any Rancher 2.2.0 or above and use cloud credentials. The exploit is limited to valid Rancher users. There is no direct mitigation besides upgrading to the patched versions. You can limit wider exposure by ensuring all Rancher users are trusted. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9), [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions](upgrades/rollbacks/). | +| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | +| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. | 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | +| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. 
| 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires a victim to be logged into a Rancher server and then to access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | +| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a project member with access to edit role bindings to assign themselves or others a cluster-level role, granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data from IPs reachable from system service containers used by Rancher. This includes, but is not limited to, services such as cloud provider metadata services. Although Rancher allows users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva.
| 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/example-yaml/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/rke1-template-example-yaml.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/example-yaml/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/rke1-template-example-yaml.md diff --git a/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md new file mode 100644 index 0000000000..538d1be925 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/advanced-options.md @@ -0,0 +1,102 @@ +--- +title: Advanced Options for Docker Installs +weight: 5 +--- + +When installing Rancher, there are several [advanced options](installation/options/) that can be enabled: + +- [Custom CA Certificate](#custom-ca-certificate) +- [API Audit Log](#api-audit-log) +- [TLS Settings](#tls-settings) +- [Air Gap](#air-gap) +- [Persistent Data](#persistent-data) +- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node) + +### Custom CA Certificate + +If you want to configure Rancher to use a CA root certificate to be used when validating services, you would start the Rancher container sharing the directory that contains the CA root certificate. + +Use the command example to start a Rancher container with your private CA certificates mounted. + +- The volume flag (`-v`) should specify the host directory containing the CA root certificates. +- The environment variable flag (`-e`) in combination with `SSL_CERT_DIR` and directory declares an environment variable that specifies the mounted CA root certificates directory location inside the container. +- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. +- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`. + +The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /host/certs:/container/certs \ + -e SSL_CERT_DIR="/container/certs" \ + rancher/rancher:latest +``` + +### API Audit Log + +The API Audit Log records all the user and system transactions made through Rancher server. + +The API Audit Log writes to `/var/log/auditlog` inside the rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log. + +See [API Audit Log](installation/api-auditing) for more information and options. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /var/log/rancher/auditlog:/var/log/auditlog \ + -e AUDIT_LEVEL=1 \ + rancher/rancher:latest +``` + +### TLS settings + +_Available as of v2.1.7_ + +To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. 
For example, to configure TLS 1.0 as minimum accepted TLS version: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_TLS_MIN_VERSION="1.0" \ + rancher/rancher:latest +``` + +See [TLS settings](admin-settings/tls-settings) for more information and options. + +### Air Gap + +If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + rancher/rancher:latest +``` + +### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. + +If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. + +Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. + +To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: + +``` +docker run -d --restart=unless-stopped \ + -p 8080:80 -p 8443:443 \ + rancher/rancher:latest +``` diff --git a/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md new file mode 100644 index 0000000000..f8884542ba --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md @@ -0,0 +1,42 @@ +--- +title: HTTP Proxy Configuration +weight: 251 +aliases: + - /rancher/v2.0-v2.4/en/installation/proxy-configuration/ + - /rancher/v2.0-v2.4/en/installation/single-node/proxy +--- + +If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. + +Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. + +| Environment variable | Purpose | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | +| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | +| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | + +> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. 
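Before passing these variables to the Rancher container, it can be worth confirming on the host that the values are well formed and that the proxy itself is reachable. The sketch below is illustrative only: the proxy address `http://192.168.10.1:3128` and the test URL are assumed example values, not part of the required configuration.

```
# Assumed example proxy; replace with the address of your own proxy server.
export HTTP_PROXY="http://192.168.10.1:3128"
export HTTPS_PROXY="http://192.168.10.1:3128"
# Keep NO_PROXY uppercase so the CIDR entry (10.0.0.0/8) is honored.
export NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,.svc,.cluster.local"

# Quick reachability check: fetch headers for an external site through the proxy.
curl --proxy "$HTTP_PROXY" -sI https://www.google.com >/dev/null && echo "proxy reachable" || echo "proxy unreachable"
```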
+ +## Docker Installation + +Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. Required values for `NO_PROXY` in a [Docker Installation](installation/single-node-install/) are: + +- `localhost` +- `127.0.0.1` +- `0.0.0.0` +- `10.0.0.0/8` +- `cattle-system.svc` +- `.svc` +- `.cluster.local` + +The example below is based on a proxy server accessible at `http://192.168.0.1:3128`, and excluding usage the proxy when accessing network range `192.168.10.0/24` and every hostname under the domain `example.com`. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e HTTP_PROXY="http://192.168.10.1:3128" \ + -e HTTPS_PROXY="http://192.168.10.1:3128" \ + -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ + rancher/rancher:latest +``` \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/reference-guides/system-tools.md b/versioned_docs/version-2.0-2.4/reference-guides/system-tools.md new file mode 100644 index 0000000000..d177aa1f16 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/system-tools.md @@ -0,0 +1,116 @@ +--- +title: System Tools +weight: 22 +--- + +System Tools is a tool to perform operational tasks on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters or [installations of Rancher on an RKE cluster.](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) The tasks include: + +* Collect logging and system metrics from nodes. +* Remove Kubernetes resources created by Rancher. + +The following commands are available: + +| Command | Description +|---|--- +| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. +| [stats](#stats) | Stream system metrics from nodes. +| [remove](#remove) | Remove Kubernetes resources created by Rancher. + +# Download System Tools + +You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. + +Operating System | Filename +-----------------|----- +MacOS | `system-tools_darwin-amd64` +Linux | `system-tools_linux-amd64` +Windows | `system-tools_windows-amd64.exe` + +After you download the tools, complete the following actions: + +1. Rename the file to `system-tools`. + +1. Give the file executable permissions by running the following command: + + > **Using Windows?** + The file is already an executable, you can skip this step. + + ``` + chmod +x system-tools + ``` + +# Logs + +The logs subcommand will collect log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) or nodes on an [RKE Kubernetes cluster that Rancher is installed on.](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). See [Troubleshooting]({{}}//rancher/v2.0-v2.4/en/troubleshooting/) for a list of core Kubernetes cluster components. + +System Tools will use the provided kubeconfig file to deploy a DaemonSet, that will copy all the logfiles from the core Kubernetes cluster components and add them to a single tar file (`cluster-logs.tar` by default). 
If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. + +### Usage + +``` +./system-tools_darwin-amd64 logs --kubeconfig +``` + +The following are the options for the logs command: + +| Option | Description +| ------------------------------------------------------ | ------------------------------------------------------ +| `--kubeconfig , -c ` | The cluster's kubeconfig file. +| `--output , -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the options defaults to `cluster-logs.tar`. +| `--node , -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. + +# Stats + +The stats subcommand will display system metrics from nodes in [Rancher-launched Kubernetes clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) or nodes in an [RKE Kubernetes cluster that Rancher is installed on.](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). + +System Tools will deploy a DaemonSet, and run a predefined command based on `sar` (System Activity Report) to show system metrics. + +### Usage + +``` +./system-tools_darwin-amd64 stats --kubeconfig +``` + +The following are the options for the stats command: + +| Option | Description +| ------------------------------------------------------ | ------------------------------ +| `--kubeconfig , -c ` | The cluster's kubeconfig file. +| `--node , -n node1` | Specify the nodes to display the system metrics from. If no node is specified, logs from all nodes in the cluster will be displayed. +| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the options defaults to `/usr/bin/sar -u -r -F 1 1`. + +# Remove + +>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd](backups/backups) before executing the command. + +When you install Rancher on a Kubernetes cluster, it will create Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: + +- The Rancher deployment namespace (`cattle-system` by default). +- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates as of v2.1.0. +- Labels, annotations, and finalizers. +- Rancher Deployment. +- Machines, clusters, projects, and user custom resource deployments (CRDs). +- All resources create under the `management.cattle.io` API Group. +- All CRDs created by Rancher v2.x. + +>**Using 2.0.8 or Earlier?** +> +>These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. + +### Usage + +When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. + +>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd](backups/backups) before executing the command. 
+ +``` +./system-tools remove --kubeconfig --namespace +``` + +The following are the options for the `remove` command: + +| Option | Description +| ---------------------------------------------- | ------------ +| `--kubeconfig , -c ` | The cluster's kubeconfig file +| `--namespace , -n cattle-system` | Rancher 2.x deployment namespace (``). If no namespace is defined, the options defaults to `cattle-system`. +| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompt. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md new file mode 100644 index 0000000000..258d048816 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/api-keys.md @@ -0,0 +1,58 @@ +--- +title: API Keys +weight: 7005 +aliases: + - /rancher/v2.0-v2.4/en/concepts/api-keys/ + - /rancher/v2.0-v2.4/en/tasks/user-settings/api-keys/ +--- + +## API Keys and User Authentication + +If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. + +An API key is also required for using Rancher CLI. + +API Keys are composed of four components: + +- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. +- **Access Key:** The token's username. +- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. +- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. + +## Creating an API Key + +1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. + +2. Click **Add Key**. + +3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. + + The API key won't be valid after expiration. Shorter expiration periods are more secure. + + _Available as of v2.4.6_ + Expiration period will be bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max-ttl, API key will be created with max-ttl as the expiration period. + + A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) for more information. + +4. Click **Create**. + + **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. + + Use the **Bearer Token** to authenticate with Rancher CLI. + +5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. + +## What's Next? + +- Enter your API key information into the application that will send requests to the Rancher API. 
+- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. +- API keys are used for API calls and [Rancher CLI](../../pages-for-subheaders/cli-with-rancher.md). + +## Deleting API Keys + +If you need to revoke an API key, delete it. You should delete API keys: + +- That may have been compromised. +- That have expired. + +To delete an API, select the stale key and click **Delete**. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md new file mode 100644 index 0000000000..6d95c0f9ca --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-cloud-credentials.md @@ -0,0 +1,51 @@ +--- +title: Managing Cloud Credentials +weight: 7011 +--- + +_Available as of v2.2.0_ + +When you create a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. + +Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. + +Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers, which are not using them yet. These node drivers will not use cloud credentials. + +You can create cloud credentials in two contexts: + +- [During creation of a node template](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for a cluster. +- In the **User Settings** + +All cloud credentials are bound to the user profile of who created it. They **cannot** be shared across users. + +## Creating a Cloud Credential from User Settings + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1. Click **Add Cloud Credential**. +1. Enter a name for the cloud credential. +1. Select a **Cloud Credential Type** from the drop down. The values of this dropdown is based on the `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) in Rancher. +1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. +1. Click **Create**. + +**Result:** The cloud credential is created and can immediately be used to [create node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates). + +## Updating a Cloud Credential + +When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1. Choose the cloud credential you want to edit and click the **⋮ > Edit**. +1. Update the credential information and click **Save**. 
+ +**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Deleting a Cloud Credential + +Before you can delete a cloud credential, there must not be any node templates associated with it. If you are unable to delete the cloud credential, [delete any node templates](manage-node-templates.md#deleting-a-node-template) that are still associated with that cloud credential. + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1. You can delete cloud credentials individually or in bulk. + + - To delete one individually, choose the cloud credential you want to delete and click **⋮ > Delete**. + - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. +1. Confirm that you want to delete these cloud credentials. diff --git a/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md new file mode 100644 index 0000000000..31777be5f9 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/manage-node-templates.md @@ -0,0 +1,47 @@ +--- +title: Managing Node Templates +weight: 7010 +--- + +When you provision a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: + +- While [provisioning a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). +- At any time, from your [user settings](#creating-a-node-template-from-user-settings). + +When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer use from your user settings. + +## Creating a Node Template from User Settings + +1. From your user settings, select **User Avatar > Node Templates**. +1. Click **Add Template**. +1. Select one of the available cloud providers. Then follow the on-screen instructions to configure the template. + +**Result:** The template is configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Updating a Node Template + +1. From your user settings, select **User Avatar > Node Templates**. +1. Choose the node template that you want to edit and click the **⋮ > Edit**. + + > **Note:** As of v2.2.0, the default `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver that has fields marked as `password` are required to use [cloud credentials](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#cloud-credentials).
If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. + +1. Edit the required information and click **Save**. + +**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. + +## Cloning Node Templates + +When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. + +1. From your user settings, select **User Avatar > Node Templates**. +1. Find the template you want to clone. Then select **⋮ > Clone**. +1. Complete the rest of the form. + +**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Deleting a Node Template + +When you no longer use a node template, you can delete it from your user settings. + +1. From your user settings, select **User Avatar > Node Templates**. +1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/content/rancher/v2.6/en/user-settings/preferences/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/user-settings/user-preferences.md similarity index 100% rename from content/rancher/v2.6/en/user-settings/preferences/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/user-settings/user-preferences.md diff --git a/content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md b/versioned_docs/version-2.0-2.4/reference-guides/v1.6-migration/migration-tools-cli-reference.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/v1.6-migration/run-migration-tool/migration-tools-ref/_index.md rename to versioned_docs/version-2.0-2.4/reference-guides/v1.6-migration/migration-tools-cli-reference.md diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md new file mode 100644 index 0000000000..93db3d9414 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.0/rancher-v2.3.0.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.3.0 +weight: 3 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.0/ +--- + +### Self Assessment Guide + +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3 | Rancher v2.3.0-2.3.2 | Hardening Guide v2.3 | Kubernetes 1.15 | Benchmark v1.4.1 + +### Hardening Guide + +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + 
+Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.3 | Rancher v2.3.0-v2.3.2 | Benchmark v1.4.1 | Kubernetes 1.15 \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md new file mode 100644 index 0000000000..8eb00861b0 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.3/rancher-v2.3.3.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.3.3 +weight: 2 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.3/ +--- + +### Self Assessment Guide + +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3.3 | Rancher v2.3.3 | Hardening Guide v2.3.3 | Kubernetes v1.16 | Benchmark v1.4.1 + +### Hardening Guide + +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.3.3 | Rancher v2.3.3 | Benchmark v1.4.1 | Kubernetes 1.14, 1.15, and 1.16 \ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md new file mode 100644 index 0000000000..b0351ac054 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/security/rancher-2.3.x/rancher-v2.3.5/rancher-v2.3.5.md @@ -0,0 +1,22 @@ +--- +title: Rancher v2.3.5 +weight: 1 +aliases: + - /rancher/v2.x/en/security/rancher-2.3.x/rancher-v2.3.5/ +--- + +### Self Assessment Guide + +This [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark.md) corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark: + +Self Assessment Guide Version | Rancher Version | Hardening Guide Version | Kubernetes Version | CIS Benchmark Version +---------------------------|----------|---------|-------|----- +Self Assessment Guide v2.3.5 | Rancher v2.3.5 | Hardening Guide v2.3.5 | Kubernetes v1.15 | Benchmark v1.5 + +### Hardening Guide + +This hardening [guide](../../../reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark.md) is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +------------------------|----------------|-----------------------|------------------ +Hardening Guide v2.3.5 | Rancher v2.3.5 | Benchmark v1.5 | Kubernetes 1.15 \ No newline at end of file diff 
--git a/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md new file mode 100644 index 0000000000..d709c5be32 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/security/security-scan/security-scan.md @@ -0,0 +1,8 @@ +--- +title: Security Scans +weight: 299 +aliases: + - /rancher/v2.x/en/security/security-scan/ +--- + +The documentation about CIS security scans has moved [here.](cis-scans) diff --git a/versioned_docs/version-2.0-2.4/shared-files/_cluster-capabilities-table.md b/versioned_docs/version-2.0-2.4/shared-files/_cluster-capabilities-table.md new file mode 100644 index 0000000000..24dd38a5b7 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/shared-files/_cluster-capabilities-table.md @@ -0,0 +1,20 @@ +| Action | [Rancher launched Kubernetes Clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) | [Hosted Kubernetes Clusters](../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | [Imported Clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) | +| --- | --- | ---| ---| +| [Using kubectl and a kubeconfig file to Access a Cluster](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) | ✓ | ✓ | ✓ | +| [Managing Cluster Members](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) | ✓ | ✓ | ✓ | +| [Editing and Upgrading Clusters](../pages-for-subheaders/cluster-configuration.md) | ✓ | ✓ | * | +| [Managing Nodes](../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) | ✓ | ✓ | ✓ | +| [Managing Persistent Volumes and Storage Classes](../pages-for-subheaders/create-kubernetes-persistent-storage.md) | ✓ | ✓ | ✓ | +| [Managing Projects, Namespaces and Workloads](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) | ✓ | ✓ | ✓ | +| [Using App Catalogs](../pages-for-subheaders/helm-charts-in-rancher.md/) | ✓ | ✓ | ✓ | +| [Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio)](../reference-guides/rancher-cluster-tools.md) | ✓ | ✓ | ✓ | +| [Cloning Clusters](../how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md)| ✓ | ✓ | | +| [Ability to rotate certificates](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md) | ✓ | | | +| [Ability to back up your Kubernetes Clusters](../how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd.md) | ✓ | | | +| [Ability to recover and restore etcd](../how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd.md) | ✓ | | | +| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) | ✓ | | | +| [Configuring Pod Security Policies](../how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md) | ✓ | | | +| [Running Security Scans](../pages-for-subheaders/cis-scans.md) | ✓ | | | +| [Authorized Cluster Endpoint](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#authorized-cluster-endpoint) | ✓ | | | + +\* Cluster configuration options can't be edited for imported clusters, except for K3s clusters. 
\ No newline at end of file diff --git a/versioned_docs/version-2.0-2.4/shared-files/_common-ports-table.md b/versioned_docs/version-2.0-2.4/shared-files/_common-ports-table.md new file mode 100644 index 0000000000..70749ce397 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/shared-files/_common-ports-table.md @@ -0,0 +1,19 @@ +| Protocol | Port | Description | +|:--------: |:----------------: |---------------------------------------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 179 | Calico BGP Port | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 8443 | Rancher webhook | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | +| TCP | 9443 | Rancher webhook | +| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | kubelet API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-32767 | NodePort port range | diff --git a/versioned_docs/version-2.0-2.4/troubleshooting.md b/versioned_docs/version-2.0-2.4/troubleshooting.md new file mode 100644 index 0000000000..269c396b2e --- /dev/null +++ b/versioned_docs/version-2.0-2.4/troubleshooting.md @@ -0,0 +1,42 @@ +--- +title: Troubleshooting +weight: 26 +--- + +This section contains information to help you troubleshoot issues when using Rancher. + +- [Kubernetes components](pages-for-subheaders/kubernetes-components.md) + + If you need help troubleshooting core Kubernetes cluster components like: + * `etcd` + * `kube-apiserver` + * `kube-controller-manager` + * `kube-scheduler` + * `kubelet` + * `kube-proxy` + * `nginx-proxy` + +- [Kubernetes resources](troubleshooting/other-troubleshooting-tips/kubernetes-resources.md) + + Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. + +- [Networking](troubleshooting/other-troubleshooting-tips/networking.md) + + Steps to troubleshoot networking issues can be found here. + +- [DNS](troubleshooting/other-troubleshooting-tips/dns.md) + + When you experience name resolution issues in your cluster. + +- [Troubleshooting Rancher installed on Kubernetes](troubleshooting/other-troubleshooting-tips/rancher-ha.md) + + If you experience issues with your [Rancher server installed on Kubernetes](pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + +- [Imported clusters](troubleshooting/other-troubleshooting-tips/registered-clusters.md) + + If you experience issues when [Importing Kubernetes Clusters](how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters.md) + +- [Logging](troubleshooting/other-troubleshooting-tips/logging.md) + + Read more about what log levels can be configured and how to configure a log level. 
+ diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md new file mode 100644 index 0000000000..5fc74e0aae --- /dev/null +++ b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -0,0 +1,40 @@ +--- +title: Troubleshooting Controlplane Nodes +weight: 2 +--- + +This section applies to nodes with the `controlplane` role. + +# Check if the Controlplane Containers are Running + +There are three specific containers launched on nodes with the `controlplane` role: + +* `kube-apiserver` +* `kube-controller-manager` +* `kube-scheduler` + +The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver +f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler +bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager +``` + +# Controlplane Container Logging + +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election](../other-troubleshooting-tips/kubernetes-resources.md#kubernetes-leader-election) how to retrieve the current leader. + +The logging of the containers can contain information on what the problem could be. 
+ +``` +docker logs kube-apiserver +docker logs kube-controller-manager +docker logs kube-scheduler +``` \ No newline at end of file diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-components/etcd/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-components/etcd/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md new file mode 100644 index 0000000000..8a91d3cba4 --- /dev/null +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/dns.md @@ -0,0 +1,217 @@ +--- +title: DNS +weight: 103 +--- + +The commands/steps listed on this page can be used to check name resolution issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#default-dns-provider) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. + +### Check if DNS pods are running + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns +``` + +Example output when using CoreDNS: +``` +NAME READY STATUS RESTARTS AGE +coredns-799dffd9c4-6jhlz 1/1 Running 0 76m +``` + +Example output when using kube-dns: +``` +NAME READY STATUS RESTARTS AGE +kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s +``` + +### Check if the DNS service is present with the correct cluster-ip + +``` +kubectl -n kube-system get svc -l k8s-app=kube-dns +``` + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s +``` + +### Check if domain names are resolving + +Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. 
+ +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: kubernetes.default +Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local +pod "busybox" deleted +``` + +Check if external names are resolving (in this example, `www.google.com`) + +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: www.google.com +Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net +Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net +pod "busybox" deleted +``` + +If you want to check resolving of domain names on all of the hosts, execute the following steps: + +1. Save the following file as `ds-dnstest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: dnstest + spec: + selector: + matchLabels: + name: dnstest + template: + metadata: + labels: + name: dnstest + spec: + tolerations: + - operator: Exists + containers: + - image: busybox:1.28 + imagePullPolicy: Always + name: alpine + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + ``` + +2. Launch it using `kubectl create -f ds-dnstest.yml` +3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. +4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). + + ``` + export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" + ``` + +5. When this command has finished running, the output indicating everything is correct is: + + ``` + => Start DNS resolve test + => End DNS resolve test + ``` + +If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. + +Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. + +``` +=> Start DNS resolve test +command terminated with exit code 1 +209.97.182.150 cannot resolve www.google.com +=> End DNS resolve test +``` + +Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. + +### CoreDNS specific + +#### Check CoreDNS logging + +``` +kubectl -n kube-system logs -l k8s-app=kube-dns +``` + +#### Check configuration + +CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. + +``` +kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} +``` + +#### Check upstream nameservers in resolv.conf + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
+ +``` +kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' +``` + +#### Enable query logging + +Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: + +``` +kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - +``` + +All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). + +### kube-dns specific + +#### Check upstream nameservers in kubedns container + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. Since Rancher v2.0.7, we detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). + +Use the following command to check the upstream nameservers used by the kubedns container: + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done +``` + +Example output: +``` +Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x +nameserver 1.1.1.1 +nameserver 8.8.4.4 +``` + +If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: + +* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. +* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): + +``` +services: + kubelet: + extra_args: + resolv-conf: "/run/resolvconf/resolv.conf" +``` + +> **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. + +See [Editing Cluster as YAML](../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: + +``` +kubectl delete pods -n kube-system -l k8s-app=kube-dns +pod "kube-dns-5fd74c7488-6pwsf" deleted +``` + +Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). 
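Once the replacement kube-dns pod is running, it can be useful to repeat the upstream nameserver check from earlier in this section to confirm that the new `resolv-conf` setting was picked up. The sketch below simply re-runs the command shown above; the output should no longer contain an address in the loopback range (`127.0.0.0/8`).

```
kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do
  echo "Pod ${pod} on host ${host}"
  kubectl -n kube-system exec $pod -c kubedns -- cat /etc/resolv.conf
done
```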
+ +If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: + +``` +kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' +``` + +Example output: +``` +upstreamNameservers:["1.1.1.1"] +``` diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md new file mode 100644 index 0000000000..ea701c2c0d --- /dev/null +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -0,0 +1,271 @@ +--- +title: Kubernetes resources +weight: 101 +--- + +The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +- [Nodes](#nodes) + - [Get nodes](#get-nodes) + - [Get node conditions](#get-node-conditions) +- [Kubernetes leader election](#kubernetes-leader-election) + - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) + - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) +- [Ingress controller](#ingress-controller) + - [Pod details](#pod-details) + - [Pod container logs](#pod-container-logs) + - [Namespace events](#namespace-events) + - [Debug logging](#debug-logging) + - [Check configuration](#check-configuration) +- [Rancher agents](#rancher-agents) + - [cattle-node-agent](#cattle-node-agent) + - [cattle-cluster-agent](#cattle-cluster-agent) +- [Jobs and pods](#jobs-and-pods) + - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) + - [Describe pod](#describe-pod) + - [Pod container logs](#pod-container-logs) + - [Describe job](#describe-job) + - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) + - [Evicted pods](#evicted-pods) + - [Job does not complete](#job-does-not-complete) + +# Nodes + +### Get nodes + +Run the command below and check the following: + +- All nodes in your cluster should be listed, make sure there is not one missing. +- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) +- Check if all nodes report the correct version. 
+- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) + + +``` +kubectl get nodes -o wide +``` + +Example output: + +``` +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +``` + +### Get node conditions + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' +``` + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' +``` + +Example output: + +``` +worker-0: DiskPressure:True +``` + +# Kubernetes leader election + +### Kubernetes Controller Manager leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). + +``` +kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +### Kubernetes Scheduler leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). + +``` +kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +# Ingress Controller + +The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. 
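Because the controller runs as a DaemonSet, a quick supplementary check (not part of the original runbook, but a useful first signal) is whether the DaemonSet itself reports the expected number of ready pods, which should equal the number of nodes with the `worker` role. The per-pod checks below remain the primary diagnostics.

```
# DESIRED, CURRENT and READY should all equal the number of worker nodes.
kubectl -n ingress-nginx get daemonset nginx-ingress-controller
```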
+ +Check if the pods are running on all nodes: + +``` +kubectl -n ingress-nginx get pods -o wide +``` + +Example output: + +``` +kubectl -n ingress-nginx get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE +default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 +nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 +nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 +``` + +If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. + +### Pod details + +``` +kubectl -n ingress-nginx describe pods -l app=ingress-nginx +``` + +### Pod container logs + +``` +kubectl -n ingress-nginx logs -l app=ingress-nginx +``` + +### Namespace events + +``` +kubectl -n ingress-nginx get events +``` + +### Debug logging + +To enable debug logging: + +``` +kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' +``` + +### Check configuration + +Retrieve generated configuration in each pod: + +``` +kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done +``` + +# Rancher agents + +Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. + +#### cattle-node-agent + +Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 +cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 +cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 +cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 +cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 +cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 +cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 +``` + +Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: + +``` +kubectl -n cattle-system logs -l app=cattle-agent +``` + +#### cattle-cluster-agent + +Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 +``` + +Check logging of cattle-cluster-agent pod: + +``` +kubectl -n cattle-system logs -l app=cattle-cluster-agent +``` + +# Jobs and Pods + +### Check that pods or jobs have status **Running**/**Completed** + +To check, run the command: + +``` +kubectl get pods --all-namespaces +``` + +If a pod is not in **Running** state, you can dig into the root cause by running: + +### Describe pod + +``` +kubectl describe pod POD_NAME -n NAMESPACE +``` + +### Pod container logs + +``` +kubectl logs POD_NAME -n NAMESPACE +``` + +If a job is not in **Completed** state, you can dig into the root cause by running: + +### Describe job + +``` +kubectl describe job JOB_NAME -n NAMESPACE +``` + +### Logs from the containers of pods 
of the job + +``` +kubectl logs -l job-name=JOB_NAME -n NAMESPACE +``` + +### Evicted pods + +Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). + +Retrieve a list of evicted pods (podname and namespace): + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' +``` + +To delete all evicted pods: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done +``` + +Retrieve a list of evicted pods, scheduled node and the reason: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done +``` + +### Job does not complete + +If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.](istio/v2.3.x-v2.4.x/setup/enable-istio-in-namespace/#excluding-workloads-from-being-injected-with-the-istio-sidecar) + +Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/logging/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/logging/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/logging.md diff --git a/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md new file mode 100644 index 0000000000..7b3a675cde --- /dev/null +++ b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/networking.md @@ -0,0 +1,131 @@ +--- +title: Networking +weight: 102 +--- + +The commands/steps listed on this page can be used to check networking related issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_rancher-cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +### Double check if all the required ports are opened in your (host) firewall + +Double check if all the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. 
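As a quick spot check, you can probe a required TCP port from another node, assuming `nc` (netcat) is available on that node. The kubelet port (`10250/TCP`) is used here purely as an example, and note that UDP ports such as the overlay network port cannot be reliably verified this way:

```
# Run from another node in the cluster; NODE_IP is the address of the node you are checking
nc -z -v -w 3 NODE_IP 10250
```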
+### Check if overlay network is functioning correctly + +The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. + +To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. + +> **Note:** This container [does not support ARM nodes](https://github.com/leodotcloud/swiss-army-knife/issues/18), such as a Raspberry Pi. This will be seen in the pod logs as `exec user process caused: exec format error`. + +1. Save the following file as `overlaytest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: overlaytest + spec: + selector: + matchLabels: + name: overlaytest + template: + metadata: + labels: + name: overlaytest + spec: + tolerations: + - operator: Exists + containers: + - image: rancherlabs/swiss-army-knife + imagePullPolicy: Always + name: overlaytest + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + + ``` + +2. Launch it using `kubectl create -f overlaytest.yml` +3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. +4. Run the following script, from the same location. It will have each `overlaytest` container on every host ping each other: + ``` + #!/bin/bash + echo "=> Start network overlay test" + kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read spod shost + do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read tip thost + do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" + RC=$? + if [ $RC -ne 0 ] + then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost + else echo $shost can reach $thost + fi + done + done + echo "=> End network overlay test" + ``` + +5. When this command has finished running, it will output the state of each route: + + ``` + => Start network overlay test + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 + command terminated with exit code 1 + FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 + cp1 can reach cp1 + cp1 can reach wk1 + command terminated with exit code 1 + FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 + wk1 can reach cp1 + wk1 can reach wk1 + => End network overlay test + ``` + If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. 
This could be because the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened for `wk2`. +6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. + + +### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices + +When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: + +* `websocket: bad handshake` +* `Failed to connect to proxy` +* `read tcp: i/o timeout` + +See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. + +### Resolved issues + +#### Overlay network broken when using Canal/Flannel due to missing node annotations + +| | | +|------------|------------| +| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | +| Resolved in | v2.1.2 | + +To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): + +``` +kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' +``` + +If there is no output, the cluster is not affected. + +#### System namespace pods network connectivity broken + +> **Note:** This applies only to Rancher upgrades from v2.0.6 or earlier to v2.0.7 or later. Upgrades from v2.0.7 to later version are unaffected. + +| | | +|------------|------------| +| GitHub issue | [#15146](https://github.com/rancher/rancher/issues/15146) | + +If pods in system namespaces cannot communicate with pods in other system namespaces, you will need to follow the instructions in [Upgrading to v2.0.7+ — Namespace Migration](upgrades/upgrades/namespace-migration/) to restore connectivity. Symptoms include: + +- NGINX ingress controller showing `504 Gateway Time-out` when accessed. +- NGINX ingress controller logging `upstream timed out (110: Connection timed out) while connecting to upstream` when accessed. 
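A quick way to check for these symptoms from the command line is sketched below; `NODE_IP` and `HOSTNAME` are placeholders for a node running the ingress controller and an ingress host configured in your cluster:

```
# Check the HTTP response code returned by the ingress controller (504 indicates a gateway timeout)
curl -s -o /dev/null -w '%{http_code}\n' -H "Host: HOSTNAME" http://NODE_IP/

# Search the ingress controller logs for upstream timeouts
kubectl -n ingress-nginx logs -l app=ingress-nginx | grep "upstream timed out"
```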
diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/rancherha/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/rancherha/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/rancher-ha.md diff --git a/content/rancher/v2.0-v2.4/en/troubleshooting/imported-clusters/_index.md b/versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/troubleshooting/imported-clusters/_index.md rename to versioned_docs/version-2.0-2.4/troubleshooting/other-troubleshooting-tips/registered-clusters.md diff --git a/versioned_docs/version-2.5/backups/docker-installs/docker-installs.md b/versioned_docs/version-2.5/backups/docker-installs/docker-installs.md new file mode 100644 index 0000000000..dde7afc915 --- /dev/null +++ b/versioned_docs/version-2.5/backups/docker-installs/docker-installs.md @@ -0,0 +1,12 @@ +--- +title: Backup and Restore for Rancher Installed with Docker +shortTitle: Docker Installs +weight: 10 +aliases: + - /rancher/v2.5/en/installation/backups-and-restoration/single-node-backup-and-restoration/ + - /rancher/v2.5/en/backups/v2.5/docker-installs + - /rancher/v2.x/en/backups/v2.5/docker-installs/ +--- + +- [Backups](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) +- [Restores](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/cluster-provisioning/rke-clusters/options/options.md b/versioned_docs/version-2.5/cluster-provisioning/rke-clusters/options/options.md new file mode 100644 index 0000000000..721df23714 --- /dev/null +++ b/versioned_docs/version-2.5/cluster-provisioning/rke-clusters/options/options.md @@ -0,0 +1,327 @@ +--- +title: RKE Cluster Configuration Reference +weight: 2250 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +When Rancher installs Kubernetes, it uses [RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) as the Kubernetes distribution. + +This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. + +You can configure the Kubernetes options one of two ways: + +- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +The RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. 
For more information, see the section about the [cluster config file.](#cluster-config-file) + +This section is a cluster configuration reference, covering the following topics: + +- [Rancher UI Options](#rancher-ui-options) + - [Kubernetes version](#kubernetes-version) + - [Network provider](#network-provider) + - [Project network isolation](#project-network-isolation) + - [Kubernetes cloud providers](#kubernetes-cloud-providers) + - [Private registries](#private-registries) + - [Authorized cluster endpoint](#authorized-cluster-endpoint) + - [Node pools](#node-pools) +- [Advanced Options](#advanced-options) + - [NGINX Ingress](#nginx-ingress) + - [Node port range](#node-port-range) + - [Metrics server monitoring](#metrics-server-monitoring) + - [Pod security policy support](#pod-security-policy-support) + - [Docker version on nodes](#docker-version-on-nodes) + - [Docker root directory](#docker-root-directory) + - [Recurring etcd snapshots](#recurring-etcd-snapshots) + - [Agent Environment Variables](#agent-environment-variables) +- [Cluster config file](#cluster-config-file) + - [Config file structure in Rancher v2.3.0+](#config-file-structure-in-rancher-v2-3-0) + - [Default DNS provider](#default-dns-provider) +- [Rancher specific parameters](#rancher-specific-parameters) + +# Rancher UI Options + +When creating a cluster using one of the options described in [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can configure basic Kubernetes options using the **Cluster Options** section. + +### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +### Network Provider + +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ](../../../faq/container-network-interface-providers.md). + +>**Note:** After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you tear down the entire cluster and all its applications. + +Out of the box, Rancher is compatible with the following network providers: + +- [Canal](https://github.com/projectcalico/canal) +- [Flannel](https://github.com/coreos/flannel#flannel) +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Weave](https://github.com/weaveworks/weave) + + +**Notes on Weave:** + +When Weave is selected as network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File](cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). 
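As a sketch, manually specifying the Weave password in the cluster config file looks roughly like the following; the password value is a placeholder, and the exact field names should be verified against the linked Weave Network Plug-in Options:

```yaml
rancher_kubernetes_engine_config:
  network:
    plugin: weave
    weave_network_provider:
      password: "MY_WEAVE_PASSWORD"
```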
+ +### Project Network Isolation + +Project network isolation is used to enable or disable communication between pods in different projects. + + + + +To enable project network isolation as a cluster option, you will need to use any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + + + + +To enable project network isolation as a cluster option, you will need to use Canal as the CNI. + + + + +### Kubernetes Cloud Providers + +You can configure a [Kubernetes cloud provider](cluster-provisioning/rke-clusters/options/cloud-providers). If you want to use [volumes and storage](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. + +>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. + +If you want to see all the configuration options for a cluster, please click **Show advanced options** on the bottom right. The advanced options are described below: + +### Private registries + +The cluster-level private registry configuration is only used for provisioning clusters. + +There are two main ways to set up private registries in Rancher: by setting up the [global default registry](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. + +The private registry configuration option tells Rancher where to pull the [system images](https://rancher.com/docs/rke/latest/en/config-options/system-images/) or [addon images](https://rancher.com/docs/rke/latest/en/config-options/add-ons/) that will be used in your cluster. + +- **System images** are components needed to maintain the Kubernetes cluster. +- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. + +See the [RKE documentation on private registries](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) for more information on the private registry for components applied during the provisioning of the cluster. + +### Authorized Cluster Endpoint + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. 
+ +> The authorized cluster endpoint is available only in clusters that Rancher has provisioned [using RKE](../../../pages-for-subheaders/rancher-manager-architecture.md#tools-for-provisioning-kubernetes-clusters). It is not available for clusters in hosted Kubernetes providers, such as Amazon's EKS. Additionally, the authorized cluster endpoint cannot be enabled for RKE clusters that are registered with Rancher; it is available only on Rancher-launched Kubernetes clusters. + +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +### Node Pools + +For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +# Advanced Options + +The following options are available when you create clusters in the Rancher UI. They are located under **Advanced Options.** + +### NGINX Ingress + +Option to enable or disable the [NGINX ingress controller](https://rancher.com/docs/rke/latest/en/config-options/add-ons/ingress-controllers/). + +### Node Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. + +### Metrics Server Monitoring + +Option to enable or disable [Metrics Server](https://rancher.com/docs/rke/latest/en/config-options/add-ons/metrics-server/). + +### Pod Security Policy Support + +Option to enable and select a default [Pod Security Policy](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). You must have an existing Pod Security Policy configured before you can use this option. + +### Docker Version on Nodes + +Option to require [a supported Docker version](../../../pages-for-subheaders/installation-requirements.md) installed on the cluster nodes that are added to the cluster, or to allow unsupported Docker versions installed on the cluster nodes. + +### Docker Root Directory + +If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), please specify the correct Docker Root Directory in this option. + +### Recurring etcd Snapshots + +Option to enable or disable [recurring etcd snapshots](https://rancher.com/docs/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). + +### Agent Environment Variables + +_Available as of v2.5.6_ + +Option to set environment variables for [rancher agents](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. 
If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. + + +# Cluster Config File + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the [options available](https://rancher.com/docs/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. + +- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. +- To read from an existing RKE file, click **Read from a file**. + +![image](/img/cluster-options-yaml.png) + +### Config File Structure in Rancher v2.3.0+ + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,](https://rancher.com/docs/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +
    + Example Cluster Config File + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +
    + +### Default DNS provider + +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider](https://rancher.com/docs/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. + +| Rancher version | Kubernetes version | Default DNS provider | +|-------------|--------------------|----------------------| +| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | +| v2.2.5 and higher | v1.13.x and lower | kube-dns | +| v2.2.4 and lower | any | kube-dns | + +# Rancher specific parameters + +Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): + +### docker_root_dir + +See [Docker Root Directory](#docker-root-directory). + +### enable_cluster_monitoring + +Option to enable or disable [Cluster Monitoring](../../../pages-for-subheaders/monitoring-and-alerting.md). + +### enable_network_policy + +Option to enable or disable Project Network Isolation. + +Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. + +In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### local_cluster_auth_endpoint + +See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). + +Example: + +```yaml +local_cluster_auth_endpoint: + enabled: true + fqdn: "FQDN" + ca_certs: "BASE64_CACERT" +``` + +### Custom Network Plug-in + +You can add a custom network plug-in by using the [user-defined add-on functionality](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/versioned_docs/version-2.5/contribute-to-rancher.md b/versioned_docs/version-2.5/contribute-to-rancher.md new file mode 100644 index 0000000000..97c9313beb --- /dev/null +++ b/versioned_docs/version-2.5/contribute-to-rancher.md @@ -0,0 +1,123 @@ +--- +title: Contributing to Rancher +weight: 27 +aliases: + - /rancher/v2.5/en/faq/contributing/ + - /rancher/v2.x/en/contributing/ +--- + +This section explains the repositories used for Rancher, how to build the repositories, and what information to include when you file an issue. + +For more detailed information on how to contribute to the development of Rancher projects, refer to the [Rancher Developer Wiki](https://github.com/rancher/rancher/wiki). 
The wiki has resources on many topics, including the following: + +- How to set up the Rancher development environment and run tests +- The typical flow of an issue through the development lifecycle +- Coding guidelines and development best practices +- Debugging and troubleshooting +- Developing the Rancher API + +On the Rancher Users Slack, the channel for developers is **#developer**. + +# Repositories + +All of repositories are located within our main GitHub organization. There are many repositories used for Rancher, but we'll provide descriptions of some of the main ones used in Rancher. + +Repository | URL | Description +-----------|-----|------------- +Rancher | https://github.com/rancher/rancher | This repository is the main source code for Rancher 2.x. +Types | https://github.com/rancher/types | This repository is the repository that has all the API types for Rancher 2.x. +API Framework | https://github.com/rancher/norman | This repository is an API framework for building Rancher style APIs backed by Kubernetes Custom Resources. +User Interface | https://github.com/rancher/ui | This repository is the source of the UI. +(Rancher) Docker Machine | https://github.com/rancher/machine | This repository is the source of the Docker Machine binary used when using Node Drivers. This is a fork of the `docker/machine` repository. +machine-package | https://github.com/rancher/machine-package | This repository is used to build the Rancher Docker Machine binary. +kontainer-engine | https://github.com/rancher/kontainer-engine | This repository is the source of kontainer-engine, the tool to provision hosted Kubernetes clusters. +RKE repository | https://github.com/rancher/rke | This repository is the source of Rancher Kubernetes Engine, the tool to provision Kubernetes clusters on any machine. +CLI | https://github.com/rancher/cli | This repository is the source code for the Rancher CLI used in Rancher 2.x. +(Rancher) Helm repository | https://github.com/rancher/helm | This repository is the source of the packaged Helm binary. This is a fork of the `helm/helm` repository. +Telemetry repository | https://github.com/rancher/telemetry | This repository is the source for the Telemetry binary. +loglevel repository | https://github.com/rancher/loglevel | This repository is the source of the loglevel binary, used to dynamically change log levels. + +To see all libraries/projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. + +![Rancher diagram](/img/ranchercomponentsdiagram.svg)
    +Rancher components used for provisioning/managing Kubernetes clusters. + +# Building + +Every repository should have a Makefile and can be built using the `make` command. The `make` targets are based on the scripts in the `/scripts` directory in the repository, and each target will use [Dapper](https://github.com/rancher/dapper) to run the target in an isolated environment. The `Dockerfile.dapper` will be used for this process, and includes all the necessary build tooling needed. + +The default target is `ci`, and will run `./scripts/validate`, `./scripts/build`, `./scripts/test` and `./scripts/package`. The resulting binaries of the build will be in `./build/bin` and are usually also packaged in a Docker image. + +# Bugs, Issues or Questions + +If you find any bugs or are having any trouble, please search the [reported issue](https://github.com/rancher/rancher/issues) as someone may have experienced the same issue or we are actively working on a solution. + +If you can't find anything related to your issue, contact us by [filing an issue](https://github.com/rancher/rancher/issues/new). Though we have many repositories related to Rancher, we want the bugs filed in the Rancher repository so we won't miss them! If you want to ask a question or ask fellow users about an use case, we suggest creating a post on the [Rancher Forums](https://forums.rancher.com). + +### Checklist for Filing Issues + +Please follow this checklist when filing an issue which will helps us investigate and fix the issue. More info means more data we can use to determine what is causing the issue or what might be related to the issue. + +>**Note:** For large amounts of data, please use [GitHub Gist](https://gist.github.com/) or similar and link the created resource in the issue. +>**Important:** Please remove any sensitive data as it will be publicly viewable. + +- **Resources:** Provide as much as detail as possible on the used resources. As the source of the issue can be many things, including as much of detail as possible helps to determine the root cause. See some examples below: + - **Hosts:** What specifications does the host have, like CPU/memory/disk, what cloud does it happen on, what Amazon Machine Image are you using, what DigitalOcean droplet are you using, what image are you provisioning that we can rebuild or use when we try to reproduce + - **Operating System:** What operating system are you using? Providing specifics helps here like the output of `cat /etc/os-release` for exact OS release and `uname -r` for exact kernel used + - **Docker:** What Docker version are you using, how did you install it? Most of the details of Docker can be found by supplying output of `docker version` and `docker info` + - **Environment:** Are you in a proxy environment, are you using recognized CA/self signed certificates, are you using an external loadbalancer + - **Rancher:** What version of Rancher are you using, this can be found on the bottom left of the UI or be retrieved from the image tag you are running on the host + - **Clusters:** What kind of cluster did you create, how did you create it, what did you specify when you were creating it +- **Steps to reproduce the issue:** Provide as much detail on how you got into the reported situation. This helps the person to reproduce the situation you are in. 
+ - Provide manual steps or automation scripts used to get from a newly created setup to the situation you reported. +- **Logs:** Provide data/logs from the used resources. + - Rancher + - Docker install + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }') + ``` + - Kubernetes install using `kubectl` + + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if Rancher is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ``` + kubectl -n cattle-system \ + logs \ + -l app=rancher \ + --timestamps=true + ``` + - Docker install using `docker` on each of the nodes in the RKE cluster + + ``` + docker logs \ + --timestamps \ + $(docker ps | grep -E "rancher/rancher@|rancher_rancher" | awk '{ print $1 }') + ``` + - Kubernetes Install with RKE Add-On + + > **Note:** Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` if the Rancher server is installed on a Kubernetes cluster) or are using the embedded kubectl via the UI. + + ``` + kubectl -n cattle-system \ + logs \ + --timestamps=true \ + -f $(kubectl --kubeconfig $KUBECONFIG get pods -n cattle-system -o json | jq -r '.items[] | select(.spec.containers[].name="cattle-server") | .metadata.name') + ``` + - System logging (these might not all exist, depending on operating system) + - `/var/log/messages` + - `/var/log/syslog` + - `/var/log/kern.log` + - Docker daemon logging (these might not all exist, depending on operating system) + - `/var/log/docker.log` +- **Metrics:** If you are experiencing performance issues, please provide as much of data (files or screenshots) of metrics which can help determining what is going on. If you have an issue related to a machine, it helps to supply output of `top`, `free -m`, `df` which shows processes/memory/disk usage. + +# Docs + +If you have any updates to our documentation, please make any pull request to our docs repo. + +- [Rancher 2.x Docs repository](https://github.com/rancher/docs): This repo is where all the docs for Rancher 2.x are located. They are located in the `content` folder in the repo. + +- [Rancher 1.x Docs repository](https://github.com/rancher/rancher.github.io): This repo is where all the docs for Rancher 1.x are located. They are located in the `rancher` folder in the repo. 
diff --git a/versioned_docs/version-2.5/explanations.md b/versioned_docs/version-2.5/explanations.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/explanations.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/rancher/v2.5/en/cis-scans/configuration/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md similarity index 100% rename from content/rancher/v2.5/en/cis-scans/configuration/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/configuration-reference.md diff --git a/content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md similarity index 100% rename from content/rancher/v2.5/en/cis-scans/custom-benchmark/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/custom-benchmark.md diff --git a/content/rancher/v2.5/en/cis-scans/rbac/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md similarity index 100% rename from content/rancher/v2.5/en/cis-scans/rbac/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md diff --git a/content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md similarity index 100% rename from content/rancher/v2.5/en/cis-scans/skipped-tests/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md new file mode 100644 index 0000000000..f7725ddddd --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md @@ -0,0 +1,9 @@ +--- +title: Architecture +weight: 1 +--- + +Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three. Regardless of the source, all resources are dynamically turned into Helm charts, and Helm is used as the engine to deploy everything in the cluster. This gives you a high degree of control, consistency, and auditability. Fleet focuses not only on the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster. 
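For example, a git repository is registered with Fleet through a `GitRepo` custom resource. The sketch below assumes the `fleet-default` workspace and points at the public `fleet-examples` repository:

```yaml
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: sample
  namespace: fleet-default
spec:
  repo: https://github.com/rancher/fleet-examples
  paths:
    - simple
```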
+ +![Architecture](/img/fleet-architecture.svg) + diff --git a/content/rancher/v2.5/en/deploy-across-clusters/fleet/proxy/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md similarity index 100% rename from content/rancher/v2.5/en/deploy-across-clusters/fleet/proxy/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md diff --git a/content/rancher/v2.6/en/deploy-across-clusters/fleet/windows/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md similarity index 100% rename from content/rancher/v2.6/en/deploy-across-clusters/fleet/windows/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md diff --git a/content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md similarity index 100% rename from content/rancher/v2.5/en/istio/configuration-reference/rke2/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md new file mode 100644 index 0000000000..eaaab6dceb --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md @@ -0,0 +1,114 @@ +--- +title: Enable Istio with Pod Security Policies +weight: 1 +aliases: + - /rancher/v2.5/en/istio/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.5/en/istio/legacy/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster/enable-istio-with-psp + - /rancher/v2.5/en/istio/v2.5/configuration-reference/enable-istio-with-psp + - /rancher/v2.x/en/istio/v2.5/configuration-reference/enable-istio-with-psp/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If you have restrictive Pod Security Policies enabled, then Istio may not be able to function correctly, because it needs certain permissions in order to install itself and manage pod infrastructure. In this section, we will configure a cluster with PSPs enabled for an Istio install, and also set up the Istio CNI plugin. + +The Istio CNI plugin removes the need for each application pod to have a privileged `NET_ADMIN` container. For further information, see the [Istio CNI Plugin docs](https://istio.io/docs/setup/additional-setup/cni). Please note that the [Istio CNI Plugin is in alpha](https://istio.io/about/feature-stages/). + +The steps differ based on the Rancher version. + + + + +> **Prerequisites:** +> +> - The cluster must be an RKE Kubernetes cluster. +> - The cluster must have been created with a default PodSecurityPolicy. +> +> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. + +1. 
[Set the PodSecurityPolicy to unrestricted](#1-set-the-podsecuritypolicy-to-unrestricted) +2. [Enable the CNI](#2-enable-the-cni) +3. [Verify that the CNI is working.](#3-verify-that-the-cni-is-working) + +### 1. Set the PodSecurityPolicy to unrestricted + +An unrestricted PSP allows Istio to be installed. + +Set the PSP to `unrestricted` in the project where is Istio is installed, or the project where you plan to install Istio. + +1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** +1. Find the **Project: System** and select the **⋮ > Edit**. +1. Change the Pod Security Policy option to be unrestricted, then click **Save.** + +### 2. Enable the CNI + +When installing or upgrading Istio through **Apps & Marketplace,** + +1. Click **Components.** +2. Check the box next to **Enabled CNI.** +3. Finish installing or upgrading Istio. + +The CNI can also be enabled by editing the `values.yaml`: + +``` +istio_cni.enabled: true +``` + +Istio should install successfully with the CNI enabled in the cluster. + +### 3. Verify that the CNI is working + +Verify that the CNI is working by deploying a [sample application](https://istio.io/latest/docs/examples/bookinfo/) or deploying one of your own applications. + + + + +> **Prerequisites:** +> +> - The cluster must be an RKE Kubernetes cluster. +> - The cluster must have been created with a default PodSecurityPolicy. +> +> To enable pod security policy support when creating a Kubernetes cluster in the Rancher UI, go to Advanced Options. In the Pod Security Policy Support section, click Enabled. Then select a default pod security policy. + +1. [Configure the System Project Policy to allow Istio install.](#1-configure-the-system-project-policy-to-allow-istio-install) +2. [Install the CNI plugin in the System project.](#2-install-the-cni-plugin-in-the-system-project) +3. [Install Istio.](#3-install-istio) + +### 1. Configure the System Project Policy to allow Istio install + +1. From the cluster view of the **Cluster Manager,** select **Projects/Namespaces.** +1. Find the **Project: System** and select the **⋮ > Edit**. +1. Change the Pod Security Policy option to be unrestricted, then click Save. + +### 2. Install the CNI Plugin in the System Project + +1. From the main menu of the **Dashboard**, select **Projects/Namespaces**. +1. Select the **Project: System** project. +1. Choose **Tools > Catalogs** in the navigation bar. +1. Add a catalog with the following: + 1. Name: istio-cni + 1. Catalog URL: https://github.com/istio/cni + 1. Branch: The branch that matches your current release, for example: `release-1.4`. +1. From the main menu select **Apps** +1. Click Launch and select istio-cni +1. Update the namespace to be "kube-system" +1. In the answers section, click "Edit as YAML" and paste in the following, then click launch: + +``` +--- + logLevel: "info" + excludeNamespaces: + - "istio-system" + - "kube-system" +``` + +### 3. Install Istio + +Follow the [primary instructions](../../../../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md), adding a custom answer: `istio_cni.enabled: true`. + +After Istio has finished installing, the Apps page in System Projects should show both istio and `istio-cni` applications deployed successfully. Sidecar injection will now be functional. 
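To double-check from the command line, you can verify that the CNI DaemonSet is running on the cluster nodes. This is a sketch that assumes the default DaemonSet name used by the `istio-cni` chart; adjust it if you customized the install:

```
kubectl -n kube-system get daemonset istio-cni-node
```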
+ + + \ No newline at end of file diff --git a/content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md similarity index 100% rename from content/rancher/v2.5/en/istio/configuration-reference/canal-and-project-network/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md diff --git a/content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md similarity index 100% rename from content/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md new file mode 100644 index 0000000000..54c391f5ac --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md @@ -0,0 +1,82 @@ +--- +title: CPU and Memory Allocations +weight: 1 +aliases: + - /rancher/v2.5/en/project-admin/istio/configuring-resource-allocations/ + - /rancher/v2.5/en/project-admin/istio/config/ + - /rancher/v2.5/en/istio/resources + - /rancher/v2.5/en/istio/v2.5/resources + - /rancher/v2.x/en/istio/v2.5/resources/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes the minimum recommended computing resources for the Istio components in a cluster. + +The CPU and memory allocations for each component are [configurable.](#configuring-resource-allocations) + +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough CPU and memory to run all of the components of Istio. + +> **Tip:** In larger deployments, it is strongly advised that the infrastructure be placed on dedicated nodes in the cluster by adding a node selector for each Istio component. + +The table below shows a summary of the minimum recommended resource requests and limits for the CPU and memory of each core Istio component. + +In Kubernetes, the resource request indicates that the workload will not be deployed on a node unless the node has at least the specified amount of memory and CPU available. If the workload surpasses the limit for CPU or memory, it can be terminated or evicted from the node. 
For more information on managing resource limits for containers, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) + + + + +| Workload | CPU - Request | Memory - Request | CPU - Limit | Memory - Limit | +|----------------------|---------------|------------|-----------------|-------------------| +| ingress gateway | 100m | 128mi | 2000m | 1024mi | +| egress gateway | 100m | 128mi | 2000m | 1024mi | +| istiod | 500m | 2048mi | No limit | No limit | +| proxy | 10m | 10mi | 2000m | 1024mi | +| **Totals:** | **710m** | **2314Mi** | **6000m** | **3072Mi** | + + + + +Workload | CPU - Request | Memory - Request | CPU - Limit | Mem - Limit | Configurable +---------:|---------------:|---------------:|-------------:|-------------:|-------------: +Istiod | 500m | 2048Mi | No limit | No limit | Y | +Istio-Mixer | 1000m | 1000Mi | 4800m | 4000Mi | Y | +Istio-ingressgateway | 100m | 128Mi | 2000m | 1024Mi | Y | +Others | 10m | - | - | - | Y | +Totals: | 1710m | 3304Mi | >8800m | >6048Mi | - + + + + +# Configuring Resource Allocations + +You can individually configure the resource allocation for each type of Istio component. This section includes the default resource allocations for each component. + +To make it easier to schedule the workloads to a node, a cluster-admin can reduce the CPU and memory resource requests for the component. However, the default CPU and memory allocations are the minimum that we recommend. + +You can find more information about Istio configuration in the [official Istio documentation](https://istio.io/). + +To configure the resources allocated to an Istio component, + +1. In the Rancher **Cluster Explorer**, navigate to your Istio installation in **Apps & Marketplace** +1. Click **Upgrade** to edit the base components via changes to the values.yaml or add an [overlay file](istio/v2.5/configuration-reference/#overlay-file). For more information about editing the overlay file, see [this section.](cpu-and-memory-allocations.md#editing-the-overlay-file) +1. Change the CPU or memory allocations, the nodes where each component will be scheduled to, or the node tolerations. +1. Click **Upgrade.** to rollout changes + +**Result:** The resource allocations for the Istio components are updated. + +### Editing the Overlay File + +The overlay file can contain any of the values in the [Istio Operator spec.](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/#IstioOperatorSpec) The overlay file included with the Istio application is just one example of a potential configuration of the overlay file. + +As long as the file contains `kind: IstioOperator` and the YAML options are valid, the file can be used as an overlay. 
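For reference, a complete minimal overlay might look like the sketch below, which raises the resource requests of the ingress gateway; the component name and values are illustrative, and the authoritative schema is the Istio Operator spec linked above:

```
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    ingressGateways:
      - name: istio-ingressgateway
        enabled: true
        k8s:
          resources:
            requests:
              cpu: 200m
              memory: 256Mi
```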
+ +In the example overlay file provided with the Istio application, the following section allows you to change Kubernetes resources: + +``` +# k8s: +# resources: +# requests: +# cpu: 200m +``` diff --git a/content/rancher/v2.5/en/istio/disabling-istio/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/disable-istio.md similarity index 100% rename from content/rancher/v2.5/en/istio/disabling-istio/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/disable-istio.md diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/rbac-for-istio.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/rbac-for-istio.md new file mode 100644 index 0000000000..2ef832f277 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/istio/rbac-for-istio.md @@ -0,0 +1,48 @@ +--- +title: Role-based Access Control +weight: 3 +aliases: + - /rancher/v2.5/en/istio/rbac + - /rancher/v2.5/en/istio/v2.5/rbac + - /rancher/v2.x/en/istio/v2.5/rbac/ +--- + +This section describes the permissions required to access Istio features. + +The rancher istio chart installs three `ClusterRoles` + +## Cluster-Admin Access + +By default, only those with the `cluster-admin` `ClusterRole` can: + +- Install istio app in a cluster +- Configure resource allocations for Istio + + +## Admin and Edit access + +By default, only Admin and Edit roles can: + +- Enable and disable Istio sidecar auto-injection for namespaces +- Add the Istio sidecar to workloads +- View the traffic metrics and traffic graph for the cluster +- Configure Istio's resources (such as the gateway, destination rules, or virtual services) + +## Summary of Default Permissions for Kubernetes Default roles + +Istio creates three `ClusterRoles` and adds Istio CRD access to the following default K8s `ClusterRole`: + +ClusterRole create by chart | Default K8s ClusterRole | Rancher Role | + ------------------------------:| ---------------------------:|---------:| + `istio-admin` | admin| Project Owner | + `istio-edit`| edit | Project Member | + `istio-view` | view | Read-only | + +Rancher will continue to use cluster-owner, cluster-member, project-owner, project-member, etc as role names, but will utilize default roles to determine access. For each default K8s `ClusterRole` there are different Istio CRD permissions and K8s actions (Create ( C ), Get ( G ), List ( L ), Watch ( W ), Update ( U ), Patch ( P ), Delete( D ), All ( * )) that can be performed. + + +|CRDs | Admin | Edit | View +|----------------------------| ------| -----| ----- +|
`config.istio.io`<br/>• `adapters`<br/>• `attributemanifests`<br/>• `handlers`<br/>• `httpapispecbindings`<br/>• `httpapispecs`<br/>• `instances`<br/>• `quotaspecbindings`<br/>• `quotaspecs`<br/>• `rules`<br/>• `templates` | GLW | GLW | GLW
+| `networking.istio.io`<br/>• `destinationrules`<br/>• `envoyfilters`<br/>• `gateways`<br/>• `serviceentries`<br/>• `sidecars`<br/>• `virtualservices`<br/>• `workloadentries` | * | * | GLW
+| `security.istio.io`<br/>• `authorizationpolicies`<br/>• `peerauthentications`<br/>• `requestauthentications`
    | * | * | GLW \ No newline at end of file diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md new file mode 100644 index 0000000000..e5ae7af09f --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md @@ -0,0 +1,165 @@ +--- +title: Flows and ClusterFlows +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +For the full details on configuring `Flows` and `ClusterFlows`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/output/) + +- [Configuration](#configuration) +- [YAML Example](#yaml-example) + +# Configuration + + + + +- [Flows](#flows-2-5-8) + - [Matches](#matches-2-5-8) + - [Filters](#filters-2-5-8) + - [Outputs](#outputs-2-5-8) +- [ClusterFlows](#clusterflows-2-5-8) + +# Changes in v2.5.8 + +The `Flows` and `ClusterFlows` can now be configured by filling out forms in the Rancher UI. + + + + +# Flows + +A `Flow` defines which logs to collect and filter and which output to send the logs to. + +The `Flow` is a namespaced resource, which means logs will only be collected from the namespace that the `Flow` is deployed in. + +For more details about the `Flow` custom resource, see [FlowSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/flow_types/) + + + + +### Matches + +Match statements are used to select which containers to pull logs from. + +You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies. + +Matches can be configured by filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. + +For detailed examples on using the match statement, see the [official documentation on log routing.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/log-routing/) + + + +### Filters + +You can define one or more filters within a `Flow`. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the `Flow` are applied in the order in the definition. + +For a list of filters supported by the Banzai Cloud Logging operator, see [this page.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/filters/) + +Filters need to be configured in YAML. + + + +### Outputs + +This `Output` will receive logs from the `Flow`. Because the `Flow` is a namespaced resource, the `Output` must reside in same namespace as the `Flow`. + +`Outputs` can be referenced when filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. + + + +# ClusterFlows + +Matches, filters and `Outputs` are configured for `ClusterFlows` in the same way that they are configured for `Flows`. The key difference is that the `ClusterFlow` is scoped at the cluster level and can configure log collection across all namespaces. 
+ +After `ClusterFlow` selects logs from all namespaces in the cluster, logs from the cluster will be collected and logged to the selected `ClusterOutput`. + + + + + +- [Flows](#flows-2-5-0) + - [Matches](#matches-2-5-0) + - [Filters](#filters-2-5-0) + - [Outputs](#outputs-2-5-0) +- [ClusterFlows](#clusterflows-2-5-0) + + + + +# Flows + +A `Flow` defines which logs to collect and filter and which `Output` to send the logs to. The `Flow` is a namespaced resource, which means logs will only be collected from the namespace that the `Flow` is deployed in. + +`Flows` need to be defined in YAML. + +For more details about the `Flow` custom resource, see [FlowSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/flow_types/) + + + + +### Matches + +Match statements are used to select which containers to pull logs from. + +You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies. + +For detailed examples on using the match statement, see the [official documentation on log routing.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/log-routing/) + + + +### Filters + +You can define one or more filters within a `Flow`. Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the `Flow` are applied in the order in the definition. + +For a list of filters supported by the Banzai Cloud Logging operator, see [this page.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/filters/) + + + +### Outputs + +This `Output` will receive logs from the `Flow`. + +Because the `Flow` is a namespaced resource, the `Output` must reside in same namespace as the `Flow`. + + + +# ClusterFlows + +Matches, filters and `Outputs` are also configured for `ClusterFlows`. The only difference is that the `ClusterFlow` is scoped at the cluster level and can configure log collection across all namespaces. + +`ClusterFlow` selects logs from all namespaces in the cluster. Logs from the cluster will be collected and logged to the selected `ClusterOutput`. + +`ClusterFlows` need to be defined in YAML. 
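+
+For reference, a minimal `ClusterFlow` written in YAML might look like the following sketch (the label selector and the referenced `ClusterOutput` name are illustrative):
+
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: example-clusterflow
+  namespace: cattle-logging-system
+spec:
+  match:
+    - select:
+        labels:
+          app: nginx        # collect logs from pods with this label, in any namespace
+  globalOutputRefs:
+    - example-clusteroutput
+```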
+ + + + + +# YAML Example + +The following example `Flow` transforms the log messages from the default namespace and sends them to an S3 `Output`: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx +``` diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md new file mode 100644 index 0000000000..7ccf7d0677 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md @@ -0,0 +1,348 @@ +--- +title: Outputs and ClusterOutputs +weight: 2 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +For the full details on configuring `Outputs` and `ClusterOutputs`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/output/) + +- [Configuration](#configuration) +- [YAML Examples](#yaml-examples) + - [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) + - [Output to Splunk](#output-to-splunk) + - [Output to Syslog](#output-to-syslog) + - [Unsupported Outputs](#unsupported-outputs) + +# Configuration + + + + +- [Outputs](#outputs-2-5-8) +- [ClusterOutputs](#clusteroutputs-2-5-8) + +# Changes in v2.5.8 + +The `Outputs` and `ClusterOutputs` can now be configured by filling out forms in the Rancher UI. + + + +# Outputs + +The `Output` resource defines where your `Flows` can send the log messages. `Outputs` are the final stage for a logging `Flow`. + +The `Output` is a namespaced resource, which means only a `Flow` within the same namespace can access it. + +You can use secrets in these definitions, but they must also be in the same namespace. + +For the details of `Output` custom resource, see [OutputSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/output_types/) + +The Rancher UI provides forms for configuring the following `Output` types: + +- Amazon ElasticSearch +- Azure Storage +- Cloudwatch +- Datadog +- Elasticsearch +- File +- Fluentd +- GCS +- Kafka +- Kinesis Stream +- LogDNA +- LogZ +- Loki +- New Relic +- Splunk +- SumoLogic +- Syslog + +The Rancher UI provides forms for configuring the `Output` type, target, and access credentials if applicable. + +For example configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) + + + +# ClusterOutputs + +`ClusterOutput` defines an `Output` without namespace restrictions. It is only effective when deployed in the same namespace as the logging operator. 
+ +For the details of the `ClusterOutput` custom resource, see [ClusterOutput.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/clusteroutput_types/) + + + + +- [Outputs](#outputs-2-5-0) +- [ClusterOutputs](#clusteroutputs-2-5-0) + + + +# Outputs + +The `Output` resource defines where your `Flows` can send the log messages. `Outputs` are the final stage for a logging `Flow`. + +The `Output` is a namespaced resource, which means only a `Flow` within the same namespace can access it. + +You can use secrets in these definitions, but they must also be in the same namespace. + +`Outputs` are configured in YAML. For the details of `Output` custom resource, see [OutputSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/output_types/) + +For examples of configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) + + + +# ClusterOutputs + +`ClusterOutput` defines an `Output` without namespace restrictions. It is only effective when deployed in the same namespace as the logging operator. + +The Rancher UI provides forms for configuring the `ClusterOutput` type, target, and access credentials if applicable. + +`ClusterOutputs` are configured in YAML. For the details of `ClusterOutput` custom resource, see [ClusterOutput.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/clusteroutput_types/) + +For example configuration for each logging plugin supported by the logging operator, see the [logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/outputs/) + + + + + +# YAML Examples + +Once logging is installed, you can use these examples to help craft your own logging pipeline. + +- [Cluster Output to ElasticSearch](#cluster-output-to-elasticsearch) +- [Output to Splunk](#output-to-splunk) +- [Output to Syslog](#output-to-syslog) +- [Unsupported Outputs](#unsupported-outputs) + +### Cluster Output to ElasticSearch + +Let's say you wanted to send all logs in your cluster to an `elasticsearch` cluster. First, we create a cluster `Output`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: "example-es" + namespace: "cattle-logging-system" +spec: + elasticsearch: + host: elasticsearch.example.com + port: 9200 + scheme: http +``` + +We have created this `ClusterOutput`, without elasticsearch configuration, in the same namespace as our operator: `cattle-logging-system.`. Any time we create a `ClusterFlow` or `ClusterOutput`, we have to put it in the `cattle-logging-system` namespace. + +Now that we have configured where we want the logs to go, let's configure all logs to go to that `ClusterOutput`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: "all-logs" + namespace: "cattle-logging-system" +spec: + globalOutputRefs: + - "example-es" +``` + +We should now see our configured index with logs in it. + + +### Output to Splunk + +What if we have an application team who only wants logs from a specific namespaces sent to a `splunk` server? For this case, we can use namespaced `Outputs` and `Flows`. 
+ +Before we start, let's set up that team's application: `coolapp`. + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: devteam +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coolapp + namespace: devteam + labels: + app: coolapp +spec: + replicas: 2 + selector: + matchLabels: + app: coolapp + template: + metadata: + labels: + app: coolapp + spec: + containers: + - name: generator + image: paynejacob/loggenerator:latest +``` + +With `coolapp` running, we will follow a similar path as when we created a `ClusterOutput`. However, unlike `ClusterOutputs`, we create our `Output` in our application's namespace. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Output +metadata: + name: "devteam-splunk" + namespace: "devteam" +spec: + splunkHec: + hec_host: splunk.example.com + hec_port: 8088 + protocol: http +``` + +Once again, let's feed our `Output` some logs: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: "devteam-logs" + namespace: "devteam" +spec: + localOutputRefs: + - "devteam-splunk" +``` + + +### Output to Syslog + +Let's say you wanted to send all logs in your cluster to an `syslog` server. First, we create a `ClusterOutput`: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: "example-syslog" + namespace: "cattle-logging-system" +spec: + syslog: + buffer: + timekey: 30s + timekey_use_utc: true + timekey_wait: 10s + flush_interval: 5s + format: + type: json + app_name_field: test + host: syslog.example.com + insecure: true + port: 514 + transport: tcp +``` + +Now that we have configured where we want the logs to go, let's configure all logs to go to that `Output`. + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: "all-logs" + namespace: cattle-logging-system +spec: + globalOutputRefs: + - "example-syslog" +``` + +### Unsupported Outputs + +For the final example, we create an `Output` to write logs to a destination that is not supported out of the box: + +> **Note on syslog** As of Rancher v2.5.4, `syslog` is a supported `Output`. However, this example still provides an overview on using unsupported plugins. 
+ +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: syslog-config + namespace: cattle-logging-system +type: Opaque +stringData: + fluent-bit.conf: | + [INPUT] + Name forward + Port 24224 + + [OUTPUT] + Name syslog + InstanceName syslog-output + Match * + Addr syslog.example.com + Port 514 + Cluster ranchers + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fluentbit-syslog-forwarder + namespace: cattle-logging-system + labels: + output: syslog +spec: + selector: + matchLabels: + output: syslog + template: + metadata: + labels: + output: syslog + spec: + containers: + - name: fluentbit + image: paynejacob/fluent-bit-out-syslog:latest + ports: + - containerPort: 24224 + volumeMounts: + - mountPath: "/fluent-bit/etc/" + name: configuration + volumes: + - name: configuration + secret: + secretName: syslog-config +--- +apiVersion: v1 +kind: Service +metadata: + name: syslog-forwarder + namespace: cattle-logging-system +spec: + selector: + output: syslog + ports: + - protocol: TCP + port: 24224 + targetPort: 24224 +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterFlow +metadata: + name: all-logs + namespace: cattle-logging-system +spec: + globalOutputRefs: + - syslog +--- +apiVersion: logging.banzaicloud.io/v1beta1 +kind: ClusterOutput +metadata: + name: syslog + namespace: cattle-logging-system +spec: + forward: + servers: + - host: "syslog-forwarder.cattle-logging-system" + require_ack_response: false + ignore_network_errors_at_startup: false +``` + +Let's break down what is happening here. First, we create a deployment of a container that has the additional `syslog` plugin and accepts logs forwarded from another `fluentd`. Next we create an `Output` configured as a forwarder to our deployment. The deployment `fluentd` will then forward all logs to the configured `syslog` destination. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md new file mode 100644 index 0000000000..657cedd67e --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-architecture.md @@ -0,0 +1,40 @@ +--- +title: Architecture +weight: 1 +--- + +This section summarizes the architecture of the Rancher logging application. + +For more details about how the Banzai Cloud Logging operator works, see the [official documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) + +### Changes in Rancher v2.5 + +The following changes were introduced to logging in Rancher v2.5: + +- The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. +- [Fluent Bit](https://fluentbit.io/) is now used to aggregate the logs, and [Fluentd](https://www.fluentd.org/) is used for filtering the messages and routing them to the `Outputs`. Previously, only Fluentd was used. +- Logging can be configured with a Kubernetes manifest, because logging now uses a Kubernetes operator with Custom Resource Definitions. +- We now support filtering logs. +- We now support writing logs to multiple `Outputs`. +- We now always collect Control Plane and etcd logs. 
+ +### How the Banzai Cloud Logging Operator Works + +The Logging operator automates the deployment and configuration of a Kubernetes logging pipeline. It deploys and configures a Fluent Bit DaemonSet on every node to collect container and application logs from the node file system. + +Fluent Bit queries the Kubernetes API and enriches the logs with metadata about the pods, and transfers both the logs and the metadata to Fluentd. Fluentd receives, filters, and transfers logs to multiple `Outputs`. + +The following custom resources are used to define how logs are filtered and sent to their `Outputs`: + +- A `Flow` is a namespaced custom resource that uses filters and selectors to route log messages to the appropriate `Outputs`. +- A `ClusterFlow` is used to route cluster-level log messages. +- An `Output` is a namespaced resource that defines where the log messages are sent. +- A `ClusterOutput` defines an `Output` that is available from all `Flows` and `ClusterFlows`. + +Each `Flow` must reference an `Output`, and each `ClusterFlow` must reference a `ClusterOutput`. + +The following figure from the [Banzai documentation](https://banzaicloud.com/docs/one-eye/logging-operator/#architecture) shows the new logging architecture: + +
    How the Banzai Cloud Logging Operator Works with Fluentd and Fluent Bit
    + +![How the Banzai Cloud Logging Operator Works with Fluentd](/img/banzai-cloud-logging-operator.png) \ No newline at end of file diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md new file mode 100644 index 0000000000..6433010ae5 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/logging-helm-chart-options.md @@ -0,0 +1,76 @@ +--- +title: rancher-logging Helm Chart Options +shortTitle: Helm Chart Options +weight: 4 +--- + +- [Enable/Disable Windows Node Logging](#enable-disable-windows-node-logging) +- [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) +- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) +- [Enabling the Logging Application to Work with SELinux](#enabling-the-logging-application-to-work-with-selinux) +- [Additional Logging Sources](#additional-logging-sources) + + +### Enable/Disable Windows Node Logging + +_Available as of v2.5.8_ + +You can enable or disable Windows node logging by setting `global.cattle.windows.enabled` to either `true` or `false` in the `values.yaml`. + +By default, Windows node logging will be enabled if the Cluster Explorer UI is used to install the logging application on a Windows cluster. + +In this scenario, setting `global.cattle.windows.enabled` to `false` will disable Windows node logging on the cluster. +When disabled, logs will still be collected from Linux nodes within the Windows cluster. + +> Note: Currently an [issue](https://github.com/rancher/rancher/issues/32325) exists where Windows nodeAgents are not deleted when performing a `helm upgrade` after disabling Windows logging in a Windows cluster. In this scenario, users may need to manually remove the Windows nodeAgents if they are already installed. + +### Working with a Custom Docker Root Directory + +_Applies to v2.5.6+_ + +If using a custom Docker root directory, you can set `global.dockerRootDirectory` in `values.yaml`. + +This will ensure that the Logging CRs created will use your specified path rather than the default Docker `data-root` location. + +Note that this only affects Linux nodes. + +If there are any Windows nodes in the cluster, the change will not be applicable to those nodes. + +### Adding NodeSelector Settings and Tolerations for Custom Taints + +You can add your own `nodeSelector` settings and add `tolerations` for additional taints by editing the logging Helm chart values. For details, see [this page.](taints-and-tolerations.md) + +### Enabling the Logging Application to Work with SELinux + +_Available as of v2.5.8_ + +> **Requirements:** Logging v2 was tested with SELinux on RHEL/CentOS 7 and 8. + +[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. 
+ +To use Logging v2 with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.](../../../pages-for-subheaders/selinux-rpm.md#installing-the-rancher-selinux-rpm) + +Then, when installing the logging application, configure the chart to be SELinux aware by changing `global.seLinux.enabled` to `true` in the `values.yaml`. + +### Additional Logging Sources + +By default, Rancher collects logs for [control plane components](https://kubernetes.io/docs/concepts/overview/components/#control-plane-components) and [node components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for all cluster types. + +In some cases, Rancher may be able to collect additional logs. + +The following table summarizes the sources where additional logs may be collected for each node types: + +| Logging Source | Linux Nodes (including in Windows cluster) | Windows Nodes | +| --- | --- | ---| +| RKE | ✓ | ✓ | +| RKE2 | ✓ | | +| K3s | ✓ | | +| AKS | ✓ | | +| EKS | ✓ | | +| GKE | ✓ | | + +To enable hosted Kubernetes providers as additional logging sources, go to **Cluster Explorer > Logging > Chart Options** and select the **Enable enhanced cloud provider logging** option. + +When enabled, Rancher collects all additional node and control plane logs the provider has made available, which may vary between providers + +If you're already using a cloud provider's own logging solution such as AWS CloudWatch or Google Cloud operations suite (formerly Stackdriver), it is not necessary to enable this option as the native solution will have unrestricted access to all logs. diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md new file mode 100644 index 0000000000..a5e6ff00d4 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md @@ -0,0 +1,195 @@ +--- +title: Migrating to Rancher v2.5 Logging +weight: 2 +aliases: + - /rancher/v2.5/en/logging/v2.5/migrating + - /rancher/v2.x/en/logging/v2.5/migrating/ +--- +Starting in v2.5, the logging feature available within Rancher has been completely overhauled. The [logging operator](https://github.com/banzaicloud/logging-operator) from Banzai Cloud has been adopted; Rancher configures this tooling for use when deploying logging. + +Among the many features and changes in the new logging functionality is the removal of project-specific logging configurations. Instead, one now configures logging at the namespace level. Cluster-level logging remains available, but configuration options differ. + +> Note: The pre-v2.5 user interface is now referred to as the _Cluster Manager_. The v2.5+ dashboard is referred to as the _Cluster Explorer_. 
+ +- [Installation](#installation) + - [Terminology](#terminology) +- [Cluster Logging](#cluster-logging) +- [Project Logging](#project-logging) +- [Output Configuration](#output-configuration) + - [Elasticsearch](#elasticsearch) + - [Splunk](#splunk) + - [Kafka](#kafka) + - [Fluentd](#fluentd) + - [Syslog](#syslog) +- [Custom Log Fields](#custom-log-fields) +- [System Logging](#system-logging) + +# Installation + +To install logging in Rancher v2.5+, refer to the [installation instructions](../../../pages-for-subheaders/logging.md#enabling-logging). + +### Terminology + +In v2.5, logging configuration is centralized under a _Logging_ menu option available in the _Cluster Explorer_. It is from this menu option that logging for both cluster and namespace is configured. + +> Note: Logging is installed on a per-cluster basis. You will need to navigate between clusters to configure logging for each cluster. + +There are four key concepts to understand for v2.5+ logging: + +1. Outputs + + `Outputs` are a configuration resource that determine a destination for collected logs. This is where settings for aggregators such as ElasticSearch, Kafka, etc. are stored. `Outputs` are namespaced resources. + +2. Flows + + `Flows` are a configuration resource that determine collection, filtering, and destination rules for logs. It is within a flow that one will configure what logs to collect, how to mutate or filter them, and which `Outputs` to send the logs to. `Flows` are namespaced resources, and can connect either to an `Output` in the same namespace, or a `ClusterOutput`. + +3. ClusterOutputs + + `ClusterOutputs` serve the same functionality as `Outputs`, except they are a cluster-scoped resource. `ClusterOutputs` are necessary when collecting logs cluster-wide, or if you wish to provide an `Output` to all namespaces in your cluster. + +4. ClusterFlows + + `ClusterFlows` serve the same function as `Flows`, but at the cluster level. They are used to configure log collection for an entire cluster, instead of on a per-namespace level. `ClusterFlows` are also where mutations and filters are defined, same as `Flows` (in functionality). + +# Cluster Logging + +To configure cluster-wide logging for v2.5+ logging, one needs to set up a `ClusterFlow`. This object defines the source of logs, any transformations or filters to be applied, and finally the `Output` (or `Outputs`) for the logs. + +> Important: `ClusterFlows` must be defined within the `cattle-logging-system` namespace. `ClusterFlows` will not work if defined in any other namespace. + +In legacy logging, in order to collect logs from across the entire cluster, one only needed to enable cluster-level logging and define the desired `Output`. This basic approach remains in v2.5+ logging. To replicate legacy cluster-level logging, follow these steps: + +1. Define a `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) +2. Create a `ClusterFlow`, ensuring that it is set to be created in the `cattle-logging-system` namespace + 1. Remove all _Include_ and _Exclude_ rules from the `Flow` definition. This ensures that all logs are gathered. + 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation + 3. Define your cluster `Output` or `Outputs` + +This will result in logs from all sources in the cluster (all pods, and all system components) being collected and sent to the `Output` or `Outputs` you defined in the `ClusterFlow`. 
+ +# Project Logging + +Logging in v2.5+ is not project-aware. This means that in order to collect logs from pods running in project namespaces, you will need to define `Flows` for those namespaces. + +To collect logs from a specific namespace, follow these steps: + +1. Define an `Output` or `ClusterOutput` according to the instructions found under [Output Configuration](#output-configuration) +2. Create a `Flow`, ensuring that it is set to be created in the namespace in which you want to gather logs. + 1. If you wish to define _Include_ or _Exclude_ rules, you may do so. Otherwise, removal of all rules will result in all pods in the target namespace having their logs collected. + 2. You do not need to configure any filters if you do not wish - default behavior does not require their creation + 3. Define your outputs - these can be either `ClusterOutput` or `Output` objects. + +This will result in logs from all sources in the namespace (pods) being collected and sent to the `Output` (or `Outputs`) you defined in your `Flow`. + +> To collect logs from a project, repeat the above steps for every namespace within the project. Alternatively, you can label your project workloads with a common label (e.g. `project=my-project`) and use a `ClusterFlow` to collect logs from all pods matching this label. + +# Output Configuration +In legacy logging, there are five logging destinations to choose from: Elasticsearch, Splunk, Kafka, Fluentd, and Syslog. With the exception of Syslog, all of these destinations are available in logging v2.5+. + + +### Elasticsearch + +| Legacy Logging | v2.5+ Logging | Notes | +|-----------------------------------------------|-----------------------------------|-----------------------------------------------------------| +| Endpoint | Target -> Host | Make sure to specify Scheme (https/http), as well as Port | +| X-Pack Security -> Username | Access -> User | | +| X-Pack Security -> Password | Access -> Password | Password must now be stored in a secret | +| SSL Configuration -> Client Private Key | SSL -> Client Key | Key must now be stored in a secret | +| SSL Configuration -> Client Certificate | SSL -> Client Cert | Certificate must now be stored in a secret | +| SSL Configuration -> Client Key Password | SSL -> Client Key Pass | Password must now be stored in a secret | +| SSL Configuration -> Enabled SSL Verification | SSL -> Certificate Authority File | Certificate must now be stored in a secret | + + +In legacy logging, indices were automatically created according to the format in the "Index Patterns" section. In v2.5 logging, default behavior has been changed to logging to a single index. You can still configure index pattern functionality on the `Output` object by editing as YAML and inputting the following values: + +``` +... +spec: + elasticsearch: + ... + logstash_format: true + logstash_prefix: + logstash_dateformat: "%Y-%m-%d" +``` + +Replace `` with the prefix for the indices that will be created. In legacy logging, this defaulted to the name of the cluster. 
+ +### Splunk + +| Legacy Logging | v2.5+ Logging | Notes | +|------------------------------------------|----------------------------------------|----------------------------------------------------------------------------------------| +| HEC Configuration -> Endpoint | Target -> Host | Protocol (https/http) and port must be defined separately from the host | +| HEC Configuration -> Token | Access -> Token | Token must now be stored as a secret | +| HEC Configuration -> Index | Edit as YAML -> `index` | `index` field must be added as YAML key under `spec.splunkHec` | +| HEC Configuration -> Source | Edit as YAML -> `source` | `source` field must be added as YAML key under `spec.splunkHec` | +| SSL Configuration -> Client Private Key | Edit as YAML -> `client_key` | `client_key` field must be added as YAML key under `spec.splunkHec`. See (1) | +| SSL Configuration -> Client Certificate | Edit as YAML -> `client_cert` | `client_cert` field must be added as YAML key under `spec.splunkHec`. See (1) | +| SSL Configuration -> Client Key Password | _Not Supported_ | Specifying a password for the client private key is not currently supported. | +| SSL Configuration -> SSL Verify | Edit as YAML -> `ca_file` or `ca_path` | `ca_file` or `ca_path` field must be added as YAML key under `spec.splunkHec`. See (2) | + +_(1) `client_key` and `client_cert` values must be paths to the key and cert files, respectively. These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +_(2) Users can configure either `ca_file` (a path to a PEM-encoded CA certificate) or `ca_path` (a path to a directory containing CA certificates in PEM format). These files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +### Kafka + +| Legacy Logging | v2.5+ Logging | Notes | +|-----------------------------------------|----------------------------|------------------------------------------------------| +| Kafka Configuration -> Endpoint Type | - | Zookeeper is no longer supported as an endpoint type | +| Kafka Configuration -> Endpoint | Target -> Brokers | Comma-separated list of brokers (host:port) | +| Kafka Configuration -> Topic | Target -> Default Topic | | +| SSL Configuration -> Client Private Key | SSL -> SSL Client Cert | Certificate must be stored as a secret | +| SSL Configuration -> Client Certificate | SSL -> SSL Client Cert Key | Key must be stored as a secret | +| SSL Configuration -> CA Certificate PEM | SSL -> SSL CA Cert | Certificate must be stored as a secret | +| SASL Configuration -> Username | Access -> Username | Username must be stored in a secret | +| SASL Configuration -> Password | Access -> Password | Password must be stored in a secret | +| SASL Configuration -> Scram Mechanism | Access -> Scram Mechanism | Input mechanism as string, e.g. "sha256" or "sha512" | + +### Fluentd + +As of v2.5.2, it is only possible to add a single Fluentd server using the "Edit as Form" option. To add multiple servers, edit the `Output` as YAML and input multiple servers. 
+ +| Legacy Logging | v2.5+ Logging | Notes | +|------------------------------------------|-----------------------------------------------------|----------------------------------------------------------------------| +| Fluentd Configuration -> Endpoint | Target -> Host, Port | Input the host and port separately | +| Fluentd Configuration -> Shared Key | Access -> Shared Key | Shared key must be stored as a secret | +| Fluentd Configuration -> Username | Access -> Username | Username must be stored as a secret | +| Fluentd Configuration -> Password | Access -> Password | Password must be stored as a secret | +| Fluentd Configuration -> Hostname | Edit as YAML -> `host` | `host` field set as YAML key under `spec.forward.servers[n]` | +| Fluentd Configuration -> Weight | Edit as YAML -> `weight` | `weight` field set as YAML key under `spec.forward.servers[n]` | +| SSL Configuration -> Use TLS | - | Do not need to explicitly enable. Define client cert fields instead. | +| SSL Configuration -> Client Private Key | Edit as YAML -> `tls_private_key_path` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> Client Certificate | Edit as YAML -> `tls_client_cert_path` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> Client Key Password | Edit as YAML -> `tls_client_private_key_passphrase` | Field set as YAML key under `spec.forward`. See (1) | +| SSL Configuration -> SSL Verify | Edit as YAML -> `tls_insecure_mode` | Field set as YAML key under `spec.forward`. Default: `false` | +| SSL Configuration -> CA Certificate PEM | Edit as YAML -> `tls_cert_path` | Field set as YAML key under `spec.forward`. See (1) | +| Enable Gzip Compression | - | No longer supported in v2.5+ logging | + +_(1) These values are to be specified as paths to files. Those files must be mounted into the `rancher-logging-fluentd` pod in order to be used._ + +### Syslog + +As of v2.5.2, syslog is not currently supported for `Outputs` using v2.5+ logging. + +# Custom Log Fields + +In order to add custom log fields, you will need to add the following YAML to your `Flow` configuration: + +``` +... +spec: + filters: + - record_modifier: + records: + - foo: "bar" +``` + +(replace `foo: "bar"` with custom log fields you wish to add) + +# System Logging + +In legacy logging, collecting logs from system components was accomplished by checking a box labeled "Include System Log" when setting up cluster logging. In v2.5+ logging, system logs are gathered in one of two ways: + +1. Gather all cluster logs, not specifying any match or exclusion rules. This results in all container logs from the cluster being collected, which includes system logs. +2. Specifically target system logs by adding match rules for system components. Specific match rules depend on the component being collected. 
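+
+For the second approach, a match rule can select system containers by name. As a sketch (the container name depends on how the cluster was provisioned, and the `ClusterOutput` name is illustrative):
+
+```yaml
+apiVersion: logging.banzaicloud.io/v1beta1
+kind: ClusterFlow
+metadata:
+  name: etcd-logs
+  namespace: cattle-logging-system
+spec:
+  match:
+    - select:
+        container_names:
+          - etcd        # match only the etcd system containers
+  globalOutputRefs:
+    - example-clusteroutput
+```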
\ No newline at end of file diff --git a/content/rancher/v2.5/en/logging/rbac/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/rbac-for-logging.md similarity index 100% rename from content/rancher/v2.5/en/logging/rbac/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/rbac-for-logging.md diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md new file mode 100644 index 0000000000..fa8a057657 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/logging/taints-and-tolerations.md @@ -0,0 +1,83 @@ +--- +title: Working with Taints and Tolerations +weight: 6 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +"Tainting" a Kubernetes node causes pods to repel running on that node. + +Unless the pods have a `toleration` for that node's taint, they will run on other nodes in the cluster. + +[Taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) can work in conjunction with the `nodeSelector` [field](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) within the `PodSpec`, which enables the *opposite* effect of a taint. + +Using `nodeSelector` gives pods an affinity towards certain nodes. + +Both provide choice for the what node(s) the pod will run on. + +- [Default Implementation in Rancher's Logging Stack](#default-implementation-in-rancher-s-logging-stack) +- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) + + +### Default Implementation in Rancher's Logging Stack + + + + +By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. +The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. +Moreover, most logging stack pods run on Linux only and have a `nodeSelector` added to ensure they run on Linux nodes. + + + + + +By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. +The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. +Moreover, we can populate the `nodeSelector` to ensure that our pods *only* run on Linux nodes. + + + + +This example Pod YAML file shows a nodeSelector being used with a toleration: + +```yaml +apiVersion: v1 +kind: Pod +# metadata... +spec: + # containers... + tolerations: + - key: cattle.io/os + operator: "Equal" + value: "linux" + effect: NoSchedule + nodeSelector: + kubernetes.io/os: linux +``` + +In the above example, we ensure that our pod only runs on Linux nodes, and we add a `toleration` for the taint we have on all of our Linux nodes. + +You can do the same with Rancher's existing taints, or with your own custom ones. + +### Adding NodeSelector Settings and Tolerations for Custom Taints + +If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. + +```yaml +tolerations: + # insert tolerations... +nodeSelector: + # insert nodeSelector... +``` + +These values will add both settings to the `fluentd`, `fluentbit`, and `logging-operator` containers. 
+Essentially, these are global settings for all pods in the logging stack. + +However, if you would like to add tolerations for *only* the `fluentbit` container, you can add the following to the chart's values. + +```yaml +fluentbit_tolerations: + # insert tolerations list for fluentbit containers only... +``` diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/longhorn.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/longhorn.md new file mode 100644 index 0000000000..df97fa46e7 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/longhorn.md @@ -0,0 +1,79 @@ +--- +title: Longhorn - Cloud native distributed block storage for Kubernetes +shortTitle: Longhorn Storage +weight: 19 +aliases: + - /rancher/v2.x/en/longhorn/ +--- + +[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. + +Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) + +With Longhorn, you can: + +- Use Longhorn volumes as persistent storage for the distributed stateful applications in your Kubernetes cluster +- Partition your block storage into Longhorn volumes so that you can use Kubernetes volumes with or without a cloud provider +- Replicate block storage across multiple nodes and data centers to increase availability +- Store backup data in external storage such as NFS or AWS S3 +- Create cross-cluster disaster recovery volumes so that data from a primary Kubernetes cluster can be quickly recovered from backup in a second Kubernetes cluster +- Schedule recurring snapshots of a volume, and schedule recurring backups to NFS or S3-compatible secondary storage +- Restore volumes from backup +- Upgrade Longhorn without disrupting persistent volumes + +
    Longhorn Dashboard
    +![Longhorn Dashboard](/img/longhorn-screenshot.png) + +### New in Rancher v2.5 + +Before Rancher v2.5, Longhorn could be installed as a Rancher catalog app. In Rancher v2.5, the catalog system was replaced by the **Apps & Marketplace,** and it became possible to install Longhorn as an app from that page. + +The **Cluster Explorer** now allows you to manipulate Longhorn's Kubernetes resources from the Rancher UI. So now you can control the Longhorn functionality with the Longhorn UI, or with kubectl, or by manipulating Longhorn's Kubernetes custom resources in the Rancher UI. + +These instructions assume you are using Rancher v2.5, but Longhorn can be installed with earlier Rancher versions. For documentation about installing Longhorn as a catalog app using the legacy Rancher UI, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/deploy/install/install-with-rancher/) + +### Installing Longhorn with Rancher + +1. Fulfill all [Installation Requirements.](https://longhorn.io/docs/1.1.0/deploy/install/#installation-requirements) +1. Go to the **Cluster Explorer** in the Rancher UI. +1. Click **Apps.** +1. Click `longhorn`. +1. Optional: To customize the initial settings, click **Longhorn Default Settings** and edit the configuration. For help customizing the settings, refer to the [Longhorn documentation.](https://longhorn.io/docs/1.0.2/references/settings/) +1. Click **Install.** + +**Result:** Longhorn is deployed in the Kubernetes cluster. + +### Accessing Longhorn from the Rancher UI + +1. From the **Cluster Explorer," go to the top left dropdown menu and click **Cluster Explorer > Longhorn.** +1. On this page, you can edit Kubernetes resources managed by Longhorn. To view the Longhorn UI, click the **Longhorn** button in the **Overview** section. + +**Result:** You will be taken to the Longhorn UI, where you can manage your Longhorn volumes and their replicas in the Kubernetes cluster, as well as secondary backups of your Longhorn storage that may exist in another Kubernetes cluster or in S3. + +### Uninstalling Longhorn from the Rancher UI + +1. Click **Cluster Explorer > Apps & Marketplace.** +1. Click **Installed Apps.** +1. Go to the `longhorn-system` namespace and check the boxes next to the `longhorn` and `longhorn-crd` apps. +1. Click **Delete,** and confirm **Delete.** + +**Result:** Longhorn is uninstalled. + +### GitHub Repository + +The Longhorn project is available [here.](https://github.com/longhorn/longhorn) + +### Documentation + +The Longhorn documentation is [here.](https://longhorn.io/docs/) + +### Architecture + +Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. + +The storage controller and replicas are themselves orchestrated using Kubernetes. + +You can learn more about its architecture [here.](https://longhorn.io/docs/1.0.2/concepts/) + +
    Longhorn Architecture
    +![Longhorn Architecture](/img/longhorn-architecture.svg) diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md new file mode 100644 index 0000000000..cdaac5fb0c --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md @@ -0,0 +1,82 @@ +--- +title: Built-in Dashboards +weight: 3 +--- + +- [Grafana UI](#grafana-ui) +- [Alertmanager UI](#alertmanager-ui) +- [Prometheus UI](#prometheus-ui) + +# Grafana UI + +[Grafana](https://grafana.com/grafana/) allows you to query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. + +To see the default dashboards for time series data visualization, go to the Grafana UI. + +### Customizing Grafana + +To view and customize the PromQL queries powering the Grafana dashboard, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) + +### Persistent Grafana Dashboards + +To create a persistent Grafana dashboard, see [this page.](../../../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) + +### Access to Grafana + +For information about role-based access control for Grafana, see [this section.](rbac-for-monitoring.md#role-based-access-control-for-grafana) + + +# Alertmanager UI + +When `rancher-monitoring` is installed, the Prometheus Alertmanager UI is deployed, allowing you to view your alerts and the current Alertmanager configuration. + +> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](how-monitoring-works.md#how-alertmanager-works) + + +### Accessing the Alertmanager UI + +The Alertmanager UI lets you see the most recently fired alerts. + +> **Prerequisite:** The `rancher-monitoring` application must be installed. + +To see the Alertmanager UI, go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Alertmanager.** + +**Result:** The Alertmanager UI opens in a new tab. For help with configuration, refer to the [official Alertmanager documentation.](https://prometheus.io/docs/alerting/latest/alertmanager/) + +
    The Alertmanager UI
    +![Alertmanager UI](/img/alertmanager-ui.png) + + +### Viewing Default Alerts + +To see alerts that are fired by default, go to the Alertmanager UI and click **Expand all groups.** + + +# Prometheus UI + +By default, the [kube-state-metrics service](https://github.com/kubernetes/kube-state-metrics) provides a wealth of information about CPU and memory utilization to the monitoring application. These metrics cover Kubernetes resources across namespaces. This means that in order to see resource metrics for a service, you don't need to create a new ServiceMonitor for it. Because the data is already in the time series database, you can go to the Prometheus UI and run a PromQL query to get the information. The same query can be used to configure a Grafana dashboard to show a graph of those metrics over time. + +To see the Prometheus UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Graph.** + +
    Prometheus Graph UI
    +![Prometheus Graph UI](/img/prometheus-graph-ui.png) + +### Viewing the Prometheus Targets + +To see what services you are monitoring, you will need to see your targets. Targets are set up by ServiceMonitors and PodMonitors as sources to scrape metrics from. You won't need to directly edit targets, but the Prometheus UI can be useful for giving you an overview of all of the sources of metrics that are being scraped. + +To see the Prometheus Targets, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Targets.** + +
    Targets in the Prometheus UI
    +![Prometheus Targets UI](/img/prometheus-targets-ui.png) + +### Viewing the PrometheusRules + +When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive a certain Alert. + +To see the PrometheusRules, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Prometheus Rules.** + +You can also see the rules in the Prometheus UI: + +
    Rules in the Prometheus UI
    +![PrometheusRules UI](/img/prometheus-rules-ui.png) \ No newline at end of file diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md new file mode 100644 index 0000000000..d2937abac7 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md @@ -0,0 +1,256 @@ +--- +title: How Monitoring Works +weight: 1 +--- + +1. [Architecture Overview](#1-architecture-overview) +2. [How Prometheus Works](#2-how-prometheus-works) +3. [How Alertmanager Works](#3-how-alertmanager-works) +4. [Monitoring V2 Specific Components](#4-monitoring-v2-specific-components) +5. [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics) + +# 1. Architecture Overview + +_**The following sections describe how data flows through the Monitoring V2 application:**_ + +### Prometheus Operator + +Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created. When the Prometheus configuration resources are created, Prometheus Operator calls the Prometheus API to sync the new configuration. As the diagram at the end of this section shows, the Prometheus Operator acts as the intermediary between Prometheus and Kubernetes, calling the Prometheus API to synchronize Prometheus with the monitoring-related resources in Kubernetes. + +### ServiceMonitors and PodMonitors + +ServiceMonitors and PodMonitors declaratively specify targets, such as Services and Pods, that need to be monitored. + +- Targets are scraped on a recurring schedule based on the configured Prometheus scrape interval, and the metrics that are scraped are stored into the Prometheus Time Series Database (TSDB). + +- In order to perform the scrape, ServiceMonitors and PodMonitors are defined with label selectors that determine which Services or Pods should be scraped and endpoints that determine how the scrape should happen on the given target, e.g., scrape/metrics in TCP 10252, proxying through IP addr x.x.x.x. + +- Out of the box, Monitoring V2 comes with certain pre-configured exporters that are deployed based on the type of Kubernetes cluster that it is deployed on. For more information, see [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics). + +### How PushProx Works + +- Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called **PushProx**. The Kubernetes components that expose metrics to Prometheus through PushProx are the following: +`kube-controller-manager`, `kube-scheduler`, `etcd`, and `kube-proxy`. + +- For each PushProx exporter, we deploy one PushProx client onto all target nodes. For example, a PushProx client is deployed onto all controlplane nodes for kube-controller-manager, all etcd nodes for kube-etcd, and all nodes for kubelet. + +- We deploy exactly one PushProx proxy per exporter. The process for exporting metrics is as follows: + +1. The PushProx Client establishes an outbound connection with the PushProx Proxy. +1. The client then polls the proxy for scrape requests that have come into the proxy. +1. When the proxy receives a scrape request from Prometheus, the client sees it as a result of the poll. +1. The client scrapes the internal component. +1. The internal component responds by pushing metrics back to the proxy. + + +

    Process for Exporting Metrics with PushProx:
    + +![Process for Exporting Metrics with PushProx](/img/pushprox-process.svg) + +### PrometheusRules + +PrometheusRules allow users to define rules for what metrics or time series database queries should result in alerts being fired. Rules are evaluated on an interval. + +- **Recording rules** create a new time series based on existing series that have been collected. They are frequently used to precompute complex queries. +- **Alerting rules** run a particular query and fire an alert from Prometheus if the query evaluates to a non-zero value. + +### Alert Routing + +Once Prometheus determines that an alert needs to be fired, alerts are forwarded to **Alertmanager**. + +- Alerts contain labels that come from the PromQL query itself and additional labels and annotations that can be provided as part of specifying the initial PrometheusRule. + +- Before receiving any alerts, Alertmanager will use the **routes** and **receivers** specified in its configuration to form a routing tree on which all incoming alerts are evaluated. Each node of the routing tree can specify additional grouping, labeling, and filtering that needs to happen based on the labels attached to the Prometheus alert. A node on the routing tree (usually a leaf node) can also specify that an alert that reaches it needs to be sent out to a configured Receiver, e.g., Slack, PagerDuty, SMS, etc. Note that Alertmanager will send an alert first to **alertingDriver**, then alertingDriver will send or forward alert to the proper destination. + +- Routes and receivers are also stored in the Kubernetes API via the Alertmanager Secret. When the Secret is updated, Alertmanager is also updated automatically. Note that routing occurs via labels only (not via annotations, etc.). + +
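For reference, the routing tree and receivers stored in the Alertmanager Secret follow the standard Alertmanager configuration format. The following is a minimal sketch; the receiver names, the `team: front-end` matcher, and the Slack webhook URL are placeholders rather than values shipped with `rancher-monitoring`:

```
# Sketch of an Alertmanager routing tree with one nested route.
route:
  receiver: default-receiver          # the top-level route accepts all alerts
  group_by: ['alertname', 'cluster']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 4h
  routes:
    - receiver: slack-frontend        # leaf route for a specific team
      match:
        team: front-end               # matched against the alert's labels
receivers:
  - name: default-receiver
  - name: slack-frontend
    slack_configs:
      - api_url: https://hooks.slack.com/services/REPLACE/ME
        channel: '#alerts'
```

In this sketch, an alert carrying the label `team: front-end` is grouped and delivered through the `slack-frontend` receiver, while all other alerts fall through to `default-receiver`.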
    How data flows through the monitoring application:
    + + +# 2. How Prometheus Works + +### Storing Time Series Data + +After collecting metrics from exporters, Prometheus stores the time series in a local on-disk time series database. Prometheus optionally integrates with remote systems, but `rancher-monitoring` uses local storage for the time series database. + +Once stored, users can query this TSDB using PromQL, the query language for Prometheus. + +PromQL queries can be visualized in one of two ways: + +1. By supplying the query in Prometheus's Graph UI, which will show a simple graphical view of the data. +1. By creating a Grafana Dashboard that contains the PromQL query and additional formatting directives that label axes, add units, change colors, use alternative visualizations, etc. + +### Defining Rules for Prometheus + +Rules define queries that Prometheus needs to execute on a regular `evaluationInterval` to perform certain actions, such as firing an alert (alerting rules) or precomputing a query based on others existing in its TSDB (recording rules). These rules are encoded in PrometheusRules custom resources. When PrometheusRule custom resources are created or updated, the Prometheus Operator observes the change and calls the Prometheus API to synchronize the set of rules that Prometheus is currently evaluating on a regular interval. + +A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: + +- The name of the new alert or record +- A PromQL expression for the new alert or record +- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) +- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. + +On evaluating a [rule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#rule), Prometheus will execute the provided PromQL query, add additional provided labels (or annotations - only for alerting rules), and execute the appropriate action for the rule. For example, an Alerting Rule that adds `team: front-end` as a label to the provided PromQL query will append that label to the fired alert, which will allow Alertmanager to forward the alert to the correct Receiver. + +### Alerting and Recording Rules + +Prometheus doesn't maintain the state of whether alerts are active. It fires alerts repetitively at every evaluation interval, relying on Alertmanager to group and filter the alerts into meaningful notifications. + +The `evaluation_interval` constant defines how often Prometheus evaluates its alerting rules against the time series database. Similar to the `scrape_interval`, the `evaluation_interval` also defaults to one minute. + +The rules are contained in a set of rule files. Rule files include both alerting rules and recording rules, but only alerting rules result in alerts being fired after their evaluation. + +For recording rules, Prometheus runs a query, then stores it as a time series. This synthetic time series is useful for storing the results of an expensive or time-consuming query so that it can be queried more quickly in the future. + +Alerting rules are more commonly used. Whenever an alerting rule evaluates to a positive number, Prometheus fires an alert. 
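The sketch below shows how an alerting rule and a recording rule are encoded in a PrometheusRule custom resource. The resource name, namespace, threshold, and label values are illustrative placeholders, not rules shipped with `rancher-monitoring`:

```
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules                  # hypothetical name
  namespace: cattle-monitoring-system  # namespace assumed for this sketch
spec:
  groups:
    - name: example.rules
      rules:
        # Alerting rule: fires while the expression returns a result
        - alert: NodeLowMemory
          expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < 0.10
          for: 5m
          labels:
            severity: warning
            team: front-end            # used by Alertmanager to pick a Route
          annotations:
            summary: "Less than 10% memory available on {{ $labels.instance }}"
        # Recording rule: precomputes the ratio as a new time series
        - record: node:memory_available:ratio
          expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes
```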
+ +The Rule file adds labels and annotations to alerts before firing them, depending on the use case: + +- Labels indicate information that identifies the alert and could affect the routing of the alert. For example, if when sending an alert about a certain container, the container ID could be used as a label. + +- Annotations denote information that doesn't affect where an alert is routed, for example, a runbook or an error message. + +# 3. How Alertmanager Works + +The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of the following tasks: + +- Deduplicating, grouping, and routing alerts to the correct receiver integration such as email, PagerDuty, or OpsGenie + +- Silencing and inhibition of alerts + +- Tracking alerts that fire over time + +- Sending out the status of whether an alert is currently firing, or if it is resolved + +### Alerts Forwarded by alertingDrivers + +When alertingDrivers are installed, this creates a `Service` that can be used as the receiver's URL for Teams or SMS, based on the alertingDriver's configuration. The URL in the Receiver points to the alertingDrivers; so the Alertmanager sends alert first to alertingDriver, then alertingDriver forwards or sends alert to the proper destination. + +### Routing Alerts to Receivers + +Alertmanager coordinates where alerts are sent. It allows you to group alerts based on labels and fire them based on whether certain labels are matched. One top-level route accepts all alerts. From there, Alertmanager continues routing alerts to receivers based on whether they match the conditions of the next route. + +While the Rancher UI forms only allow editing a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager Secret. + +### Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +By editing custom YAML in the Alertmanager or Receiver configuration, you can also send alerts to multiple notification systems. For more information, see the section on configuring [Receivers.](../../../reference-guides/monitoring-v2-configuration/receivers.md#configuring-multiple-receivers) + +# 4. Monitoring V2 Specific Components + +Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. + +Prometheus Operator will automatically update your Prometheus configuration based on the live state of the resources and configuration options that are edited in the Rancher UI. + +### Resources Deployed by Default + +By default, a set of resources curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring/Alerting stack. 
+ +The resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). + +### Default Exporters + +Monitoring V2 deploys three default exporters that provide additional metrics for Prometheus to store: + +1. `node-exporter`: exposes hardware and OS metrics for Linux hosts. For more information on `node-exporter`, refer to the [upstream documentation](https://prometheus.io/docs/guides/node-exporter/). + +1. `windows-exporter`: exposes hardware and OS metrics for Windows hosts (only deployed on Windows clusters). For more information on `windows-exporter`, refer to the [upstream documentation](https://github.com/prometheus-community/windows_exporter). + +1. `kube-state-metrics`: expose additional metrics that track the state of resources contained in the Kubernetes API (e.g., pods, workloads, etc.). For more information on `kube-state-metrics`, refer to the [upstream documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs). + +ServiceMonitors and PodMonitors will scrape these exporters, as defined [here](#defining-what-metrics-are-scraped). Prometheus stores these metrics, and you can query the results via either Prometheus's UI or Grafana. + +See the [architecture](#1-architecture-overview) section for more information on recording rules, alerting rules, and Alertmanager. + +### Components Exposed in the Rancher UI + +When the monitoring application is installed, you will be able to edit the following components in the Rancher UI: + +| Component | Type of Component | Purpose and Common Use Cases for Editing | +|--------------|------------------------|---------------------------| +| ServiceMonitor | Custom resource | Sets up Kubernetes Services to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| PodMonitor | Custom resource | Sets up Kubernetes Pods to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| Receiver | Configuration block (part of Alertmanager) | Modifies information on where to send an alert (e.g., Slack, PagerDuty, etc.) and any necessary information to send the alert (e.g., TLS certs, proxy URLs, etc.). Automatically updates the Alertmanager custom resource. | +| Route | Configuration block (part of Alertmanager) | Modifies the routing tree that is used to filter, label, and group alerts based on labels and send them to the appropriate Receiver. Automatically updates the Alertmanager custom resource. | +| PrometheusRule | Custom resource | Defines additional queries that need to trigger alerts or define materialized views of existing series that are within Prometheus's TSDB. Automatically updates the Prometheus custom resource. 
| + +### PushProx + +PushProx allows Prometheus to scrape metrics across a network boundary, which prevents users from having to expose metrics ports for internal Kubernetes components on each node in a Kubernetes cluster. + +Since the metrics for Kubernetes components are generally exposed on the host network of nodes in the cluster, PushProx deploys a DaemonSet of clients that sit on the hostNetwork of each node and make an outbound connection to a single proxy that is sitting on the Kubernetes API. Prometheus can then be configured to proxy scrape requests through the proxy to each client, which allows it to scrape metrics from the internal Kubernetes components without requiring any inbound node ports to be open. + +Refer to [Scraping Metrics with PushProx](#scraping-metrics-with-pushprox) for more. + +# 5. Scraping and Exposing Metrics + +### Defining what Metrics are Scraped + +ServiceMonitors and PodMonitors define targets that are intended for Prometheus to scrape. The [Prometheus custom resource](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md#prometheus) tells Prometheus which ServiceMonitors or PodMonitors it should use to find out where to scrape metrics from. + +The Prometheus Operator observes the ServiceMonitors and PodMonitors. When it observes that they are created or updated, it calls the Prometheus API to update the scrape configuration in the Prometheus custom resource and keep it in sync with the scrape configuration in the ServiceMonitors or PodMonitors. This scrape configuration tells Prometheus which endpoints to scrape metrics from and how it will label the metrics from those endpoints. + +Prometheus scrapes all of the metrics defined in its scrape configuration at every `scrape_interval`, which is one minute by default. + +The scrape configuration can be viewed as part of the Prometheus custom resource that is exposed in the Rancher UI. + +### How the Prometheus Operator Sets up Metrics Scraping + +The Prometheus Deployment or StatefulSet scrapes metrics, and the configuration of Prometheus is controlled by the Prometheus custom resources. The Prometheus Operator watches for Prometheus and Alertmanager resources, and when they are created, the Prometheus Operator creates a Deployment or StatefulSet for Prometheus or Alertmanager with the user-defined configuration. + +When the Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created, it knows that the scrape configuration needs to be updated in Prometheus. It updates Prometheus by first updating the configuration and rules files in the volumes of Prometheus's Deployment or StatefulSet. Then it calls the Prometheus API to sync the new configuration, resulting in the Prometheus Deployment or StatefulSet to be modified in place. + +### How Kubernetes Component Metrics are Exposed + +Prometheus scrapes metrics from deployments known as [exporters,](https://prometheus.io/docs/instrumenting/exporters/) which export the time series data in a format that Prometheus can ingest. In Prometheus, time series consist of streams of timestamped values belonging to the same metric and the same set of labeled dimensions. + +### Scraping Metrics with PushProx + +Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called PushProx. 
For detailed information on PushProx, refer to [How PushProx Works](#how-pushprox-works) above and to the [architecture overview](#1-architecture-overview).

### Scraping Metrics

The following Kubernetes components are scraped directly by Prometheus:

- kubelet*
- ingress-nginx**
- coreDns/kubeDns
- kube-api-server

\* You can optionally use `hardenedKubelet.enabled` to scrape the kubelet through PushProx, but that is not the default.

\*\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component.

### Scraping Metrics Based on Kubernetes Distribution

Metrics are scraped differently based on the Kubernetes distribution. For help with terminology, refer to the [Terminology](#terminology) section. For details, see the table below:
    How Metrics are Exposed to Prometheus
| Kubernetes Component | RKE | RKE2 | KubeADM | K3s |
|-----|-----|-----|-----|-----|
| kube-controller-manager | rkeControllerManager.enabled | rke2ControllerManager.enabled | kubeAdmControllerManager.enabled | k3sServer.enabled |
| kube-scheduler | rkeScheduler.enabled | rke2Scheduler.enabled | kubeAdmScheduler.enabled | k3sServer.enabled |
| etcd | rkeEtcd.enabled | rke2Etcd.enabled | kubeAdmEtcd.enabled | Not available |
| kube-proxy | rkeProxy.enabled | rke2Proxy.enabled | kubeAdmProxy.enabled | k3sServer.enabled |
| kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet |
| ingress-nginx* | Collects metrics directly exposed by kubelet, exposed by rkeIngressNginx.enabled | Collects metrics directly exposed by kubelet, exposed by rke2IngressNginx.enabled | Not available | Not available |
| coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns |
| kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server |

\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component.

A hypothetical values sketch based on these selectors appears after the Terminology section below.

### Terminology

- **kube-scheduler:** The internal Kubernetes component that uses information in the pod spec to decide on which node to run a pod.
- **kube-controller-manager:** The internal Kubernetes component that is responsible for node management (detecting if a node fails), pod replication, and endpoint creation.
- **etcd:** The internal Kubernetes component that is the distributed key/value store which Kubernetes uses for persistent storage of all cluster information.
- **kube-proxy:** The internal Kubernetes component that watches the API server for changes to Pods and Services in order to keep the cluster's network rules up to date.
- **kubelet:** The internal Kubernetes component that watches the API server for pods on a node and makes sure they are running.
- **ingress-nginx:** An Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer.
- **coreDns/kubeDns:** The internal Kubernetes component responsible for DNS.
- **kube-api-server:** The main internal Kubernetes component that is responsible for exposing APIs for the other master components.
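For reference, the per-distribution selectors in the table above correspond to values of the `rancher-monitoring` chart. The following is a hypothetical values sketch for an RKE cluster; the nesting of the dotted keys is assumed, and the values shown are illustrative rather than chart defaults:

```
# Hypothetical Helm values for rancher-monitoring on an RKE cluster.
rkeControllerManager:
  enabled: true
rkeScheduler:
  enabled: true
rkeEtcd:
  enabled: true
rkeProxy:
  enabled: true
rkeIngressNginx:
  enabled: true
# Optional: scrape the kubelet through PushProx instead of directly
hardenedKubelet:
  enabled: false
```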
diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md new file mode 100644 index 0000000000..40a128118d --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md @@ -0,0 +1,435 @@ +--- +title: PromQL Expression Reference +weight: 6 +aliases: + - /rancher/v2.5/en/project-admin/tools/monitoring/expression + - /rancher/v2.5/en/cluster-admin/tools/monitoring/expression + - /rancher/v2.5/en/monitoring-alerting/expression + - /rancher/v2.5/en/monitoring-alerting/configuration/expression + - /rancher/v2.5/en/monitoring/alerting/configuration/expression + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/expression/ +--- + +The PromQL expressions in this doc can be used to configure alerts. + +For more information about querying the Prometheus time series database, refer to the official [Prometheus documentation.](https://prometheus.io/docs/prometheus/latest/querying/basics/) + + + +- [Cluster Metrics](#cluster-metrics) + - [Cluster CPU Utilization](#cluster-cpu-utilization) + - [Cluster Load Average](#cluster-load-average) + - [Cluster Memory Utilization](#cluster-memory-utilization) + - [Cluster Disk Utilization](#cluster-disk-utilization) + - [Cluster Disk I/O](#cluster-disk-i-o) + - [Cluster Network Packets](#cluster-network-packets) + - [Cluster Network I/O](#cluster-network-i-o) +- [Node Metrics](#node-metrics) + - [Node CPU Utilization](#node-cpu-utilization) + - [Node Load Average](#node-load-average) + - [Node Memory Utilization](#node-memory-utilization) + - [Node Disk Utilization](#node-disk-utilization) + - [Node Disk I/O](#node-disk-i-o) + - [Node Network Packets](#node-network-packets) + - [Node Network I/O](#node-network-i-o) +- [Etcd Metrics](#etcd-metrics) + - [Etcd Has a Leader](#etcd-has-a-leader) + - [Number of Times the Leader Changes](#number-of-times-the-leader-changes) + - [Number of Failed Proposals](#number-of-failed-proposals) + - [GRPC Client Traffic](#grpc-client-traffic) + - [Peer Traffic](#peer-traffic) + - [DB Size](#db-size) + - [Active Streams](#active-streams) + - [Raft Proposals](#raft-proposals) + - [RPC Rate](#rpc-rate) + - [Disk Operations](#disk-operations) + - [Disk Sync Duration](#disk-sync-duration) +- [Kubernetes Components Metrics](#kubernetes-components-metrics) + - [API Server Request Latency](#api-server-request-latency) + - [API Server Request Rate](#api-server-request-rate) + - [Scheduling Failed Pods](#scheduling-failed-pods) + - [Controller Manager Queue Depth](#controller-manager-queue-depth) + - [Scheduler E2E Scheduling Latency](#scheduler-e2e-scheduling-latency) + - [Scheduler Preemption Attempts](#scheduler-preemption-attempts) + - [Ingress Controller Connections](#ingress-controller-connections) + - [Ingress Controller Request Process Time](#ingress-controller-request-process-time) +- [Rancher Logging Metrics](#rancher-logging-metrics) + - [Fluentd Buffer Queue Rate](#fluentd-buffer-queue-rate) + - [Fluentd Input Rate](#fluentd-input-rate) + - [Fluentd Output Errors Rate](#fluentd-output-errors-rate) + - [Fluentd Output Rate](#fluentd-output-rate) +- [Workload Metrics](#workload-metrics) + - [Workload CPU Utilization](#workload-cpu-utilization) + - [Workload Memory Utilization](#workload-memory-utilization) + - [Workload Network 
Packets](#workload-network-packets) + - [Workload Network I/O](#workload-network-i-o) + - [Workload Disk I/O](#workload-disk-i-o) +- [Pod Metrics](#pod-metrics) + - [Pod CPU Utilization](#pod-cpu-utilization) + - [Pod Memory Utilization](#pod-memory-utilization) + - [Pod Network Packets](#pod-network-packets) + - [Pod Network I/O](#pod-network-i-o) + - [Pod Disk I/O](#pod-disk-i-o) +- [Container Metrics](#container-metrics) + - [Container CPU Utilization](#container-cpu-utilization) + - [Container Memory Utilization](#container-memory-utilization) + - [Container Disk I/O](#container-disk-i-o) + + + +# Cluster Metrics + +### Cluster CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))` | +| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle"}[5m])))` | + +### Cluster Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
    load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"}) by (instance)`
    | +| Summary |
    load1`sum(node_load1) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    load5`sum(node_load5) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    load15`sum(node_load15) by (instance) / count(node_cpu_seconds_total{mode="system"})`
    | + +### Cluster Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes) by (instance) / sum(node_memory_MemTotal_bytes) by (instance)` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes) / sum(node_memory_MemTotal_bytes)` | + +### Cluster Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance) - sum(node_filesystem_free_bytes{device!="rootfs"}) by (instance)) / sum(node_filesystem_size_bytes{device!="rootfs"}) by (instance)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs"}) - sum(node_filesystem_free_bytes{device!="rootfs"})) / sum(node_filesystem_size_bytes{device!="rootfs"})` | + +### Cluster Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(node_disk_read_bytes_total[5m])) by (instance)`
    written`sum(rate(node_disk_written_bytes_total[5m])) by (instance)`
    | +| Summary |
    read`sum(rate(node_disk_read_bytes_total[5m]))`
    written`sum(rate(node_disk_written_bytes_total[5m]))`
    | + +### Cluster Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
    | +| Summary |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
    | + +### Cluster Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m])) by (instance)`
    | +| Summary |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*"}[5m]))`
    | + +# Node Metrics + +### Node CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(irate(node_cpu_seconds_total{mode!="idle", instance=~"$instance"}[5m])) by (mode)` | +| Summary | `1 - (avg(irate(node_cpu_seconds_total{mode="idle", instance=~"$instance"}[5m])))` | + +### Node Load Average + +| Catalog | Expression | +| --- | --- | +| Detail |
    load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    | +| Summary |
    load1`sum(node_load1{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load5`sum(node_load5{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    load15`sum(node_load15{instance=~"$instance"}) / count(node_cpu_seconds_total{mode="system",instance=~"$instance"})`
    | + +### Node Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"})` | +| Summary | `1 - sum(node_memory_MemAvailable_bytes{instance=~"$instance"}) / sum(node_memory_MemTotal_bytes{instance=~"$instance"}) ` | + +### Node Disk Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"}) by (device)) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) by (device)` | +| Summary | `(sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"}) - sum(node_filesystem_free_bytes{device!="rootfs",instance=~"$instance"})) / sum(node_filesystem_size_bytes{device!="rootfs",instance=~"$instance"})` | + +### Node Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
    written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
    | +| Summary |
    read`sum(rate(node_disk_read_bytes_total{instance=~"$instance"}[5m]))`
    written`sum(rate(node_disk_written_bytes_total{instance=~"$instance"}[5m]))`
    | + +### Node Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
    | +| Summary |
receive-dropped`sum(rate(node_network_receive_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
receive-errs`sum(rate(node_network_receive_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
receive-packets`sum(rate(node_network_receive_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
transmit-dropped`sum(rate(node_network_transmit_drop_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
transmit-errs`sum(rate(node_network_transmit_errs_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
transmit-packets`sum(rate(node_network_transmit_packets_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
    | + +### Node Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m])) by (device)`
    | +| Summary |
receive`sum(rate(node_network_receive_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
transmit`sum(rate(node_network_transmit_bytes_total{device!~"lo | veth.* | docker.* | flannel.* | cali.* | cbr.*",instance=~"$instance"}[5m]))`
    | + +# Etcd Metrics + +### Etcd Has a Leader + +`max(etcd_server_has_leader)` + +### Number of Times the Leader Changes + +`max(etcd_server_leader_changes_seen_total)` + +### Number of Failed Proposals + +`sum(etcd_server_proposals_failed_total)` + +### GRPC Client Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
    in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m])) by (instance)`
    out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m])) by (instance)`
    | +| Summary |
    in`sum(rate(etcd_network_client_grpc_received_bytes_total[5m]))`
    out`sum(rate(etcd_network_client_grpc_sent_bytes_total[5m]))`
    | + +### Peer Traffic + +| Catalog | Expression | +| --- | --- | +| Detail |
    in`sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)`
    out`sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)`
    | +| Summary |
    in`sum(rate(etcd_network_peer_received_bytes_total[5m]))`
    out`sum(rate(etcd_network_peer_sent_bytes_total[5m]))`
    | + +### DB Size + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)` | +| Summary | `sum(etcd_debugging_mvcc_db_total_size_in_bytes)` | + +### Active Streams + +| Catalog | Expression | +| --- | --- | +| Detail |
    lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) by (instance)`
    watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) by (instance)`
    | +| Summary |
    lease-watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})`
    watch`sum(grpc_server_started_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})`
    | + +### Raft Proposals + +| Catalog | Expression | +| --- | --- | +| Detail |
    applied`sum(increase(etcd_server_proposals_applied_total[5m])) by (instance)`
    committed`sum(increase(etcd_server_proposals_committed_total[5m])) by (instance)`
    pending`sum(increase(etcd_server_proposals_pending[5m])) by (instance)`
    failed`sum(increase(etcd_server_proposals_failed_total[5m])) by (instance)`
    | +| Summary |
    applied`sum(increase(etcd_server_proposals_applied_total[5m]))`
    committed`sum(increase(etcd_server_proposals_committed_total[5m]))`
    pending`sum(increase(etcd_server_proposals_pending[5m]))`
    failed`sum(increase(etcd_server_proposals_failed_total[5m]))`
    | + +### RPC Rate + +| Catalog | Expression | +| --- | --- | +| Detail |
    total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m])) by (instance)`
    fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m])) by (instance)`
    | +| Summary |
    total`sum(rate(grpc_server_started_total{grpc_type="unary"}[5m]))`
    fail`sum(rate(grpc_server_handled_total{grpc_type="unary",grpc_code!="OK"}[5m]))`
    | + +### Disk Operations + +| Catalog | Expression | +| --- | --- | +| Detail |
    commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m])) by (instance)`
    fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m])) by (instance)`
    | +| Summary |
    commit-called-by-backend`sum(rate(etcd_disk_backend_commit_duration_seconds_sum[1m]))`
    fsync-called-by-wal`sum(rate(etcd_disk_wal_fsync_duration_seconds_sum[1m]))`
    | + +### Disk Sync Duration + +| Catalog | Expression | +| --- | --- | +| Detail |
    wal`histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))`
    db`histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))`
    | +| Summary |
    wal`sum(histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le)))`
    db`sum(histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le)))`
    | + +# Kubernetes Components Metrics + +### API Server Request Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance, verb) /1e+06` | +| Summary | `avg(apiserver_request_latencies_sum / apiserver_request_latencies_count) by (instance) /1e+06` | + +### API Server Request Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(apiserver_request_count[5m])) by (instance, code)` | +| Summary | `sum(rate(apiserver_request_count[5m])) by (instance)` | + +### Scheduling Failed Pods + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(kube_pod_status_scheduled{condition="false"})` | +| Summary | `sum(kube_pod_status_scheduled{condition="false"})` | + +### Controller Manager Queue Depth + +| Catalog | Expression | +| --- | --- | +| Detail |
volumes`sum(volumes_depth) by (instance)`
deployment`sum(deployment_depth) by (instance)`
replicaset`sum(replicaset_depth) by (instance)`
service`sum(service_depth) by (instance)`
serviceaccount`sum(serviceaccount_depth) by (instance)`
endpoint`sum(endpoint_depth) by (instance)`
daemonset`sum(daemonset_depth) by (instance)`
statefulset`sum(statefulset_depth) by (instance)`
replicationmanager`sum(replicationmanager_depth) by (instance)`
    | +| Summary |
    volumes`sum(volumes_depth)`
    deployment`sum(deployment_depth)`
    replicaset`sum(replicaset_depth)`
    service`sum(service_depth)`
    serviceaccount`sum(serviceaccount_depth)`
    endpoint`sum(endpoint_depth)`
    daemonset`sum(daemonset_depth)`
    statefulset`sum(statefulset_depth)`
    replicationmanager`sum(replicationmanager_depth)`
    | + +### Scheduler E2E Scheduling Latency + +| Catalog | Expression | +| --- | --- | +| Detail | `histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06` | +| Summary | `sum(histogram_quantile(0.99, sum(scheduler_e2e_scheduling_latency_microseconds_bucket) by (le, instance)) / 1e+06)` | + +### Scheduler Preemption Attempts + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(scheduler_total_preemption_attempts[5m])) by (instance)` | +| Summary | `sum(rate(scheduler_total_preemption_attempts[5m]))` | + +### Ingress Controller Connections + +| Catalog | Expression | +| --- | --- | +| Detail |
    reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"}) by (instance)`
    waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"}) by (instance)`
    writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"}) by (instance)`
    accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m]))) by (instance)`
    active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m]))) by (instance)`
    handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m]))) by (instance)`
    | +| Summary |
    reading`sum(nginx_ingress_controller_nginx_process_connections{state="reading"})`
    waiting`sum(nginx_ingress_controller_nginx_process_connections{state="waiting"})`
    writing`sum(nginx_ingress_controller_nginx_process_connections{state="writing"})`
    accepted`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="accepted"}[5m])))`
    active`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="active"}[5m])))`
    handled`sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state="handled"}[5m])))`
    | + +### Ingress Controller Request Process Time + +| Catalog | Expression | +| --- | --- | +| Detail | `topk(10, histogram_quantile(0.95,sum by (le, host, path)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | +| Summary | `topk(10, histogram_quantile(0.95,sum by (le, host)(rate(nginx_ingress_controller_request_duration_seconds_bucket{host!="_"}[5m]))))` | + +# Rancher Logging Metrics + + +### Fluentd Buffer Queue Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_buffer_queue_length[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_buffer_queue_length[5m]))` | + +### Fluentd Input Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_input_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_input_status_num_records_total[5m]))` | + +### Fluentd Output Errors Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_errors[5m])) by (type)` | +| Summary | `sum(rate(fluentd_output_status_num_errors[5m]))` | + +### Fluentd Output Rate + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(rate(fluentd_output_status_num_records_total[5m])) by (instance)` | +| Summary | `sum(rate(fluentd_output_status_num_records_total[5m]))` | + +# Workload Metrics + +### Workload CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    user seconds`sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    system seconds`sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""}) by (pod_name)` | +| Summary | `sum(container_memory_working_set_bytes{namespace="$namespace",pod_name=~"$podName", container_name!=""})` | + +### Workload Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +### Workload Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m])) by (pod_name)`
    | +| Summary |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name=~"$podName",container_name!=""}[5m]))`
    | + +# Pod Metrics + +### Pod CPU Utilization + +| Catalog | Expression | +| --- | --- | +| Detail |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m])) by (container_name)`
    | +| Summary |
    cfs throttled seconds`sum(rate(container_cpu_cfs_throttled_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    usage seconds`sum(rate(container_cpu_usage_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    system seconds`sum(rate(container_cpu_system_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    user seconds`sum(rate(container_cpu_user_seconds_total{container_name!="POD",namespace="$namespace",pod_name="$podName", container_name!=""}[5m]))`
    | + +### Pod Memory Utilization + +| Catalog | Expression | +| --- | --- | +| Detail | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""}) by (container_name)` | +| Summary | `sum(container_memory_working_set_bytes{container_name!="POD",namespace="$namespace",pod_name="$podName",container_name!=""})` | + +### Pod Network Packets + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | +| Summary |
    receive-packets`sum(rate(container_network_receive_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-dropped`sum(rate(container_network_receive_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    receive-errors`sum(rate(container_network_receive_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-packets`sum(rate(container_network_transmit_packets_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-dropped`sum(rate(container_network_transmit_packets_dropped_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit-errors`sum(rate(container_network_transmit_errors_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +### Pod Network I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | +| Summary |
    receive`sum(rate(container_network_receive_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    transmit`sum(rate(container_network_transmit_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +### Pod Disk I/O + +| Catalog | Expression | +| --- | --- | +| Detail |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m])) by (container_name)`
    | +| Summary |
    read`sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    write`sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name!=""}[5m]))`
    | + +# Container Metrics + +### Container CPU Utilization + +| Catalog | Expression | +| --- | --- | +| cfs throttled seconds | `sum(rate(container_cpu_cfs_throttled_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| usage seconds | `sum(rate(container_cpu_usage_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| system seconds | `sum(rate(container_cpu_system_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| user seconds | `sum(rate(container_cpu_user_seconds_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | + +### Container Memory Utilization + +`sum(container_memory_working_set_bytes{namespace="$namespace",pod_name="$podName",container_name="$containerName"})` + +### Container Disk I/O + +| Catalog | Expression | +| --- | --- | +| read | `sum(rate(container_fs_reads_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | +| write | `sum(rate(container_fs_writes_bytes_total{namespace="$namespace",pod_name="$podName",container_name="$containerName"}[5m]))` | diff --git a/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md new file mode 100644 index 0000000000..03b10ec375 --- /dev/null +++ b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md @@ -0,0 +1,180 @@ +--- +title: Role-based Access Control +shortTitle: RBAC +weight: 2 +aliases: + - /rancher/v2.5/en/cluster-admin/tools/monitoring/rbac + - /rancher/v2.5/en/monitoring-alerting/rbac + - /rancher/v2.5/en/monitoring-alerting/grafana + - /rancher/v2.x/en/monitoring-alerting/v2.5/rbac/ +--- +This section describes the expectations for RBAC for Rancher Monitoring. + +- [Cluster Admins](#cluster-admins) +- [Users with Kubernetes ClusterRole-based Permissions](#users-with-kubernetes-clusterrole-based-permissions) + - [Users with Kubernetes Admin/Edit Permissions](#users-with-kubernetes-admin-edit-permissions) + - [Users with Kubernetes View Permissions](#users-with-kubernetes-view-permissions) + - [Additional Monitoring Roles](#additional-monitoring-roles) + - [Additional Monitoring ClusterRoles](#additional-monitoring-clusterroles) +- [Users with Rancher Cluster Manager Based Permissions](#users-with-rancher-cluster-manager-based-permissions) + - [Differences in 2.5.x](#differences-in-2-5-x) + - [Assigning Additional Access](#assigning-additional-access) +- [Role-based Access Control for Grafana](#role-based-access-control-for-grafana) + +# Cluster Admins + +By default, only those with the cluster-admin `ClusterRole` should be able to: + +- Install the `rancher-monitoring` App onto a cluster and all other relevant configuration performed on the chart deploy + - e.g. whether default dashboards are created, what exporters are deployed onto the cluster to collect metrics, etc. 
+- Create / modify / delete Prometheus deployments in the cluster via Prometheus CRs +- Create / modify / delete Alertmanager deployments in the cluster via Alertmanager CRs +- Persist new Grafana dashboards or datasources via creating ConfigMaps in the appropriate namespace +- Expose certain Prometheus metrics to the k8s Custom Metrics API for HPA via a Secret in the `cattle-monitoring-system` namespace + +# Users with Kubernetes ClusterRole-based Permissions + +The `rancher-monitoring` chart installs the following three `ClusterRoles`. By default, they aggregate into the corresponding k8s `ClusterRoles`: + +| ClusterRole | Aggregates To Default K8s ClusterRole | +| ------------------------------| ---------------------------| +| `monitoring-admin` | `admin`| +| `monitoring-edit` | `edit` | +| `monitoring-view` | `view ` | + +These `ClusterRoles` provide different levels of access to the Monitoring CRDs based on the actions that can be performed: + +| CRDs (monitoring.coreos.com) | Admin | Edit | View | +| ------------------------------| ---------------------------| ---------------------------| ---------------------------| +|
    • `prometheuses`
    • `alertmanagers`
    | Get, List, Watch | Get, List, Watch | Get, List, Watch | +|
    • `servicemonitors`
    • `podmonitors`
    • `prometheusrules`
    | * | * | Get, List, Watch | + +On a high level, the following permissions are assigned by default as a result. + +### Users with Kubernetes Admin/Edit Permissions + +Only those with the the cluster-admin, admin or edit `ClusterRole` should be able to: + +- Modify the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs +- Modify the alerting / recording rules of a Prometheus deployment via PrometheusRules CRs + +### Users with Kubernetes View Permissions + +Only those with who have some Kubernetes `ClusterRole` should be able to: + +- View the configuration of Prometheuses that are deployed within the cluster +- View the configuration of Alertmanagers that are deployed within the cluster +- View the scrape configuration of Prometheus deployments via ServiceMonitor and PodMonitor CRs +- View the alerting/recording rules of a Prometheus deployment via PrometheusRules CRs + +### Additional Monitoring Roles + +Monitoring also creates additional `Roles` that are not assigned to users by default but are created within the cluster. They can be bound to a namespace by deploying a `RoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). + +Admins should use these roles to provide more fine-grained access to users: + +| Role | Purpose | +| ------------------------------| ---------------------------| +| monitoring-config-admin | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-config-edit | Allow admins to assign roles to users to be able to view / modify Secrets and ConfigMaps within the cattle-monitoring-system namespace. Modifying Secrets / ConfigMaps in this namespace could allow users to alter the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-config-view | Allow admins to assign roles to users to be able to view Secrets and ConfigMaps within the cattle-monitoring-system namespace. Viewing Secrets / ConfigMaps in this namespace could allow users to observe the cluster's Alertmanager configuration, Prometheus Adapter configuration, additional Grafana datasources, TLS secrets, etc. | +| monitoring-dashboard-admin | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | +| monitoring-dashboard-edit | Allow admins to assign roles to users to be able to edit / view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | +| monitoring-dashboard-view | Allow admins to assign roles to users to be able to view ConfigMaps within the cattle-dashboards namespace. ConfigMaps in this namespace will correspond to Grafana Dashboards that are persisted onto the cluster. | + +### Additional Monitoring ClusterRoles + +Monitoring also creates additional `ClusterRoles` that are not assigned to users by default but are created within the cluster. 
They are not aggregated by default but can be bound to a namespace by deploying a `RoleBinding` or `ClusterRoleBinding` that references it. To define a `RoleBinding` with `kubectl` instead of through Rancher, click [here](#assigning-roles-and-clusterroles-with-kubectl). + +| Role | Purpose | +| ------------------------------| ---------------------------| +| monitoring-ui-view | _Available as of Monitoring v2 14.5.100+_ Provides read-only access to external Monitoring UIs by giving a user permission to list the Prometheus, Alertmanager, and Grafana endpoints and make GET requests to Prometheus, Grafana, and Alertmanager UIs through the Rancher proxy. | + +### Assigning Roles and ClusterRoles with kubectl + +An alternative method to using Rancher to attach a `Role` or `ClusterRole` to a user or group is by defining bindings in YAML files that you create. You must first configure the `RoleBinding` with the YAML file, then you apply the config changes by running the `kubectl apply` command. + + +* **Roles**: Below is an example of a YAML file to help you configure `RoleBindings` in Kubernetes to attach to a user. You will need to fill in the name below, and name is case-sensitive. + +``` +# monitoring-config-view-role-binding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: monitoring-config-view + namespace: cattle-monitoring-system +roleRef: + kind: Role + name: monitoring-config-view + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: User + name: u-b4qkhsnliz # this can be found via `kubectl get users -A` + apiGroup: rbac.authorization.k8s.io +``` + +* **kubectl**: Below is an example of a `kubectl` command used to apply the binding you've created in the YAML file. As noted, you will need to fill in your YAML filename accordingly. + + * **`kubectl apply -f monitoring-config-view-role-binding.yaml` + +# Users with Rancher Cluster Manager Based Permissions + +The relationship between the default roles deployed by Rancher Cluster Manager (i.e. cluster-owner, cluster-member, project-owner, project-member), the default k8s roles, and the roles deployed by the rancher-monitoring chart are detailed in the table below: + +
**Default Rancher Permissions and Corresponding Kubernetes ClusterRoles**
    + +| Cluster Manager Role | k8s Role | Monitoring ClusterRole / Role | ClusterRoleBinding or RoleBinding? | +| --------- | --------- | --------- | --------- | +| cluster-owner | cluster-admin | N/A | ClusterRoleBinding | +| cluster-member | admin | monitoring-admin | ClusterRoleBinding | +| project-owner | admin | monitoring-admin | RoleBinding within Project namespace | +| project-member | edit | monitoring-edit | RoleBinding within Project namespace | + +In addition to these default Roles, the following additional Rancher project roles can be applied to members of your Cluster to provide additional access to Monitoring. These Rancher Roles will be tied to ClusterRoles deployed by the Monitoring chart: + +
**Non-default Rancher Permissions and Corresponding Kubernetes ClusterRoles**
    + +| Cluster Manager Role | Kubernetes ClusterRole | Available In Rancher From | Available in Monitoring v2 From | +|--------------------------|-------------------------------|-------|------| +| View Monitoring* | [monitoring-ui-view](#monitoring-ui-view) | 2.4.8+ | 9.4.204+ | + +\* A User bound to the **View Monitoring** Rancher Role only has permissions to access external Monitoring UIs if provided links to those UIs. In order to access the Monitoring Pane on Cluster Explorer to get those links, the User must be a Project Member of at least one Project. + +### Differences in 2.5.x + +Users with the project-member or project-owners roles assigned will not be given access to either Prometheus or Grafana in Rancher 2.5.x since we only create Grafana or Prometheus on a cluster-level. + +In addition, while project owners will still be only able to add ServiceMonitors / PodMonitors that scrape resources within their project's namespace by default, PrometheusRules are not scoped to a single namespace / project. Therefore, any alert rules or recording rules created by project-owners within their project namespace will be applied across the entire cluster, although they will be unable to view / edit / delete any rules that were created outside the project's namespace. + +### Assigning Additional Access + +If cluster-admins would like to provide additional admin/edit access to users outside of the roles offered by the rancher-monitoring chart, the following table identifies the potential impact: + +|CRDs (monitoring.coreos.com) | Can it cause impact outside of a namespace / project? | Impact | +|----------------------------| ------| ----------------------------| +| `prometheuses`| Yes, this resource can scrape metrics from any targets across the entire cluster (unless the Operator itself is otherwise configured). | User will be able to define the configuration of new cluster-level Prometheus deployments that should be created in the cluster. | +| `alertmanagers`| No | User will be able to define the configuration of new cluster-level Alertmanager deployments that should be created in the cluster. Note: if you just want to allow users to configure settings like Routes and Receivers, you should just provide access to the Alertmanager Config Secret instead. | +|
`servicemonitors`<br/>`podmonitors`
    | No, not by default; this is configurable via `ignoreNamespaceSelectors` on the Prometheus CR. | User will be able to set up scrapes by Prometheus on endpoints exposed by Services / Pods within the namespace they are given this permission in. | +| `prometheusrules`| Yes, PrometheusRules are cluster-scoped. | User will be able to define alert or recording rules on Prometheus based on any series collected across the entire cluster. | + +| k8s Resources | Namespace | Can it cause impact outside of a namespace / project? | Impact | +|----------------------------| ------| ------| ----------------------------| +|
`secrets`<br/>`configmaps`
    | `cattle-monitoring-system` | Yes, Configs and Secrets in this namespace can impact the entire monitoring / alerting pipeline. | User will be able to create or edit Secrets / ConfigMaps such as the Alertmanager Config, Prometheus Adapter Config, TLS secrets, additional Grafana datasources, etc. This can have broad impact on all cluster monitoring / alerting. | +|
`secrets`<br/>`configmaps`
| `cattle-dashboards` | Yes, Configs and Secrets in this namespace can create dashboards that make queries on all metrics collected at the cluster level. | User will be able to create Secrets / ConfigMaps that persist new Grafana Dashboards only. | + + + +# Role-based Access Control for Grafana + +Rancher allows any users who are authenticated by Kubernetes and have access to the Grafana service deployed by the Rancher Monitoring chart to access Grafana via the Rancher Dashboard UI. By default, all users who are able to access Grafana are given the [Viewer](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#viewer-role) role, which allows them to view any of the default dashboards deployed by Rancher. + +However, users can choose to log in to Grafana as an [Admin](https://grafana.com/docs/grafana/latest/permissions/organization_roles/#admin-role) if necessary. The default Admin username and password for the Grafana instance will be `admin`/`prom-operator`, but alternative credentials can also be supplied on deploying or upgrading the chart. + +To see the Grafana UI, install `rancher-monitoring`. Then go to the **Cluster Explorer.** In the top left corner, click **Cluster Explorer > Monitoring.** Then click **Grafana.** + +
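If you need to override those default credentials, the usual route is to set the Grafana subchart values when deploying or upgrading `rancher-monitoring`. The snippet below is a minimal sketch only; it assumes the chart exposes the standard Grafana subchart keys `adminUser` and `adminPassword`, so confirm the exact key names for the chart version you are running.

```yaml
# Example chart values for rancher-monitoring (illustrative values file).
# Key names assume the standard Grafana subchart; verify for your chart version.
grafana:
  adminUser: admin
  adminPassword: "use-a-strong-password-here"
```

These values can be supplied through the chart's values editor in the Rancher UI, or with `helm upgrade --values` if you manage the chart directly.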
    +![Cluster Compute Resources Dashboard in Grafana](/img/cluster-compute-resources-dashboard.png) + +
    +![Default Dashboards in Grafana](/img/grafana-default-dashboard.png) \ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/windows-clusters/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/windows-clusters/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md diff --git a/content/rancher/v2.5/en/opa-gatekeper/_index.md b/versioned_docs/version-2.5/explanations/integrations-in-rancher/opa-gatekeeper.md similarity index 100% rename from content/rancher/v2.5/en/opa-gatekeper/_index.md rename to versioned_docs/version-2.5/explanations/integrations-in-rancher/opa-gatekeeper.md diff --git a/versioned_docs/version-2.5/faq.md b/versioned_docs/version-2.5/faq.md new file mode 100644 index 0000000000..ef1603c180 --- /dev/null +++ b/versioned_docs/version-2.5/faq.md @@ -0,0 +1,73 @@ +--- +title: FAQ +weight: 25 +aliases: + - /rancher/v2.5/en/about/ + - /rancher/v2.x/en/faq/ +--- + +This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. + +See [Technical FAQ](faq/technical-items.md), for frequently asked technical questions. + +
    + +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** + +When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. + +
    + +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + +Yes. + +
    + +**Does Rancher support Windows?** + +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.](pages-for-subheaders/use-windows-clusters.md) + +
+ +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.](pages-for-subheaders/istio.md) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). +
    + +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** + +There is no built-in integration of Rancher and Hashicorp's Vault. Rancher manages Kubernetes and integrates with secrets via the Kubernetes API. Thus in any downstream (managed) cluster, you can use a secret vault of your choice provided it integrates with Kubernetes, including [Vault](https://www.vaultproject.io/docs/platform/k8s). + +
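As a simple illustration of that model, any tool that can write standard Kubernetes Secrets (Vault integrations included) ends up producing objects that workloads consume in the usual way. The example below is generic and uses placeholder names only:

```bash
# Create a Secret directly through the Kubernetes API (placeholder names).
kubectl create secret generic db-credentials \
  --from-literal=username=app \
  --from-literal=password='S3cretValue'

# A pod can then consume it like any other Secret, for example:
#   env:
#     - name: DB_PASSWORD
#       valueFrom:
#         secretKeyRef:
#           name: db-credentials
#           key: password
```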
    + +**Does Rancher v2.x support RKT containers as well?** + +At this time, we only support Docker. + +
    + +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and registered Kubernetes?** + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
    + +**Are you planning on supporting Traefik for existing setups?** + +We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. + +
    + +**Can I import OpenShift Kubernetes clusters into v2.x?** + +Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. + +
    + +**Are you going to integrate Longhorn?** + +Yes. Longhorn was integrated into Rancher v2.5+. diff --git a/versioned_docs/version-2.5/faq/container-network-interface-providers.md b/versioned_docs/version-2.5/faq/container-network-interface-providers.md new file mode 100644 index 0000000000..e81302b44c --- /dev/null +++ b/versioned_docs/version-2.5/faq/container-network-interface-providers.md @@ -0,0 +1,155 @@ +--- +title: Container Network Interface (CNI) Providers +description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you +weight: 2300 +aliases: + - /rancher/v2.x/en/faq/networking/cni-providers/ +--- + +## What is CNI? + +CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. + +Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. + +![CNI Logo](/img/cni-logo.png) + +For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). + +### What Network Models are Used in CNI? + +CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). + +#### What is an Encapsulated Network? + +This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. With this model you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP package size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulation used in this kind of network model is VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. + +In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. + +This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. + +CNI network providers using this network model include Flannel, Canal, and Weave. + +![Encapsulated Network](/img/encapsulated-network.png) + +#### What is an Unencapsulated Network? + +This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated l2 network, nor generates overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. 
Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). + +In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. + +This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. + +CNI network providers using this network model include Calico and Romana. + +![Unencapsulated Network](/img/unencapsulated-network.png) + +### What CNI Providers are Provided by Rancher? + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Canal + +![Canal Logo](/img/canal-logo.png) + +Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. + +In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). For details, refer to [the port requirements for user clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) + +![](/img/canal-diagram.png) + +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) + +#### Flannel + +![Flannel Logo](/img/flannel-logo.png) + +Flannel is a simple and easy way to configure L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). + +Encapsulated traffic is unencrypted by default. Therefore, flannel provides an experimental backend for encryption, [IPSec](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (healthcheck). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. 
+ +![Flannel Diagram](/img/flannel-diagram.png) + +For more information, see the [Flannel GitHub Page](https://github.com/coreos/flannel). + +#### Calico + +![Calico Logo](/img/calico-logo.png) + +Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. + +Calico also provides a stateless IP-in-IP encapsulation mode that can be used, if necessary. Calico also offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies. + +Kubernetes workers should open TCP port `179` (BGP). See [the port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +![Calico Diagram](/img/calico-diagram.svg) + +For more information, see the following pages: + +- [Project Calico Official Site](https://www.projectcalico.org/) +- [Project Calico GitHub Page](https://github.com/projectcalico/calico) + + +#### Weave + +![Weave Logo](/img/weave-logo.png) + +Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it support encrypting traffic between the peers. + +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783` and UDP port `6784` (data ports). See the [port requirements for user clusters](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for more details. + +For more information, see the following pages: + +- [Weave Net Official Site](https://www.weave.works/) + +### CNI Features by Provider + +The following table summarizes the different features available for each CNI network provider provided by Rancher. + +| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | +| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | +| Canal | Encapsulated (VXLAN) | No | Yes | No | K8S API | No | Yes | +| Flannel | Encapsulated (VXLAN) | No | No | No | K8S API | No | No | +| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8S API | No | Yes | +| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | + +- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) + +- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. + +- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. 
+ +- Mesh: This feature allows service-to-service network communication between distinct Kubernetes clusters. + +- External Datastore: CNI network providers with this feature need an external datastore for their data. + +- Encryption: This feature allows the network control and data planes to be encrypted and secured. + +- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. + +#### CNI Community Popularity + +The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2020. + +| Provider | Project | Stars | Forks | Contributors | +| ---- | ---- | ---- | ---- | ---- | +| Canal | https://github.com/projectcalico/canal | 614 | 89 | 19 | +| flannel | https://github.com/coreos/flannel | 4977 | 1.4k | 140 | +| Calico | https://github.com/projectcalico/calico | 1534 | 429 | 135 | +| Weave | https://github.com/weaveworks/weave/ | 5737 | 559 | 73 | +
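To make the Network Policies and Ingress/Egress Policies features described above concrete, here is a minimal Kubernetes `NetworkPolicy` that providers with policy support (Canal, Calico, Weave) can enforce. The namespace, names, and labels are placeholders:

```yaml
# Only pods labeled app=frontend may reach app=api pods on TCP 8080;
# once this policy selects the app=api pods, all other ingress to them is denied.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: api-allow-frontend
  namespace: demo
spec:
  podSelector:
    matchLabels:
      app: api
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend
      ports:
        - protocol: TCP
          port: 8080
```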
    + +### Which CNI Provider Should I Use? + +It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. + +Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. + +### How can I configure a CNI network provider? + +Please see [Cluster Options](cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File](cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md b/versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md similarity index 100% rename from content/rancher/v2.5/en/faq/deprecated-features-25x/_index.md rename to versioned_docs/version-2.5/faq/deprecated-features-in-v2.5.md diff --git a/content/rancher/v2.5/en/faq/kubectl/_index.md b/versioned_docs/version-2.5/faq/install-and-configure-kubectl.md similarity index 100% rename from content/rancher/v2.5/en/faq/kubectl/_index.md rename to versioned_docs/version-2.5/faq/install-and-configure-kubectl.md diff --git a/versioned_docs/version-2.5/faq/networking/networking.md b/versioned_docs/version-2.5/faq/networking/networking.md new file mode 100644 index 0000000000..4bd3345e50 --- /dev/null +++ b/versioned_docs/version-2.5/faq/networking/networking.md @@ -0,0 +1,11 @@ +--- +title: Networking +weight: 8005 +aliases: + - /rancher/v2.x/en/faq/networking/ +--- + +Networking FAQ's + +- [CNI Providers](../container-network-interface-providers.md) + diff --git a/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md new file mode 100644 index 0000000000..d546c81d68 --- /dev/null +++ b/versioned_docs/version-2.5/faq/rancher-is-no-longer-needed.md @@ -0,0 +1,67 @@ +--- +title: Rancher is No Longer Needed +weight: 8010 +aliases: + - /rancher/v2.5/en/installation/removing-rancher/cleaning-cluster-nodes/ + - /rancher/v2.5/en/installation/removing-rancher/ + - /rancher/v2.5/en/admin-settings/removing-rancher/ + - /rancher/v2.5/en/admin-settings/removing-rancher/rancher-cluster-nodes/ + - /rancher/v2.x/en/faq/removing-rancher/ +--- + +This page is intended to answer questions about what happens if you don't want Rancher anymore, if you don't want a cluster to be managed by Rancher anymore, or if the Rancher server is deleted. 
+ +- [If the Rancher server is deleted, what happens to the workloads in my downstream clusters?](#if-the-rancher-server-is-deleted-what-happens-to-the-workloads-in-my-downstream-clusters) +- [If the Rancher server is deleted, how do I access my downstream clusters?](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) +- [What if I don't want Rancher anymore?](#what-if-i-don-t-want-rancher-anymore) +- [What if I don't want my registered cluster managed by Rancher?](#what-if-i-don-t-want-my-registered-cluster-managed-by-rancher) +- [What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher?](#what-if-i-don-t-want-my-rke-cluster-or-hosted-kubernetes-cluster-managed-by-rancher) + +### If the Rancher server is deleted, what happens to the workloads in my downstream clusters? + +If Rancher is ever deleted or unrecoverable, all workloads in the downstream Kubernetes clusters managed by Rancher will continue to function as normal. + +### If the Rancher server is deleted, how do I access my downstream clusters? + +The capability to access a downstream cluster without Rancher depends on the type of cluster and the way that the cluster was created. To summarize: + +- **Registered clusters:** The cluster will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. +- **Hosted Kubernetes clusters:** If you created the cluster in a cloud-hosted Kubernetes provider such as EKS, GKE, or AKS, you can continue to manage the cluster using your provider's cloud credentials. +- **RKE clusters:** Please note that you will no longer be able to manage the individual Kubernetes components or perform any upgrades on them after the deletion of the Rancher server. However, you can still access the cluster to manage your workloads. To access an [RKE cluster,](../pages-for-subheaders/launch-kubernetes-with-rancher.md) the cluster must have the [authorized cluster endpoint](../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled, and you must have already downloaded the cluster's kubeconfig file from the Rancher UI. (The authorized cluster endpoint is enabled by default for RKE clusters.) With this endpoint, you can access your cluster with kubectl directly instead of communicating through the Rancher server's [authentication proxy.](../pages-for-subheaders/rancher-manager-architecture.md#1-the-authentication-proxy) For instructions on how to configure kubectl to use the authorized cluster endpoint, refer to the section about directly accessing clusters with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) These clusters will use a snapshot of the authentication as it was configured when Rancher was removed. + +### What if I don't want Rancher anymore? + +If you [installed Rancher on a Kubernetes cluster,](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) remove Rancher by using the [System Tools](../reference-guides/system-tools.md) with the `remove` subcommand. + +As of Rancher v2.5.8, uninstalling Rancher in high-availability (HA) mode will also remove all `helm-operation-*` pods and the following apps: + +- fleet +- fleet-agent +- rancher-operator +- rancher-webhook + +Custom resources (CRDs) and custom namespaces will still need to be manually removed. 
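For reference, the System Tools `remove` subcommand mentioned above is typically run against the kubeconfig of the cluster Rancher is installed in. The sketch below is illustrative only; the binary name, flags, and paths are assumptions, so check `system-tools remove --help` for the exact usage shipped with your version before running it:

```bash
# Remove Rancher components from the cluster it was installed into.
# The kubeconfig path and namespace below are placeholders.
./system-tools remove \
  --kubeconfig ./kube_config_cluster.yml \
  --namespace cattle-system
```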
+ +If you installed Rancher with Docker, you can uninstall Rancher by removing the single Docker container that it runs in. + +Imported clusters will not be affected by Rancher being removed. For other types of clusters, refer to the section on [accessing downstream clusters when Rancher is removed.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) + +### What if I don't want my registered cluster managed by Rancher? + +If a registered cluster is deleted from the Rancher UI, the cluster is detached from Rancher, leaving it intact and accessible by the same methods that were used to access it before it was registered in Rancher. + +To detach the cluster, + +1. From the **Global** view in Rancher, go to the **Clusters** tab. +2. Go to the registered cluster that should be detached from Rancher and click **⋮ > Delete.** +3. Click **Delete.** + +**Result:** The registered cluster is detached from Rancher and functions normally outside of Rancher. + +### What if I don't want my RKE cluster or hosted Kubernetes cluster managed by Rancher? + +At this time, there is no functionality to detach these clusters from Rancher. In this context, "detach" is defined as the ability to remove Rancher components from the cluster and manage access to the cluster independently of Rancher. + +The capability to manage these clusters without Rancher is being tracked in this [issue.](https://github.com/rancher/rancher/issues/25234) + +For information about how to access clusters if the Rancher server is deleted, refer to [this section.](#if-the-rancher-server-is-deleted-how-do-i-access-my-downstream-clusters) diff --git a/versioned_docs/version-2.5/faq/security.md b/versioned_docs/version-2.5/faq/security.md new file mode 100644 index 0000000000..bcba5f0742 --- /dev/null +++ b/versioned_docs/version-2.5/faq/security.md @@ -0,0 +1,16 @@ +--- +title: Security +weight: 8007 +aliases: + - /rancher/v2.x/en/faq/security/ +--- + +**Is there a Hardening Guide?** + +The Hardening Guide is now located in the main [Security](../pages-for-subheaders/rancher-security.md) section. + +
    + +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** + +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security](../pages-for-subheaders/rancher-security.md) section. diff --git a/versioned_docs/version-2.5/faq/technical-items.md b/versioned_docs/version-2.5/faq/technical-items.md new file mode 100644 index 0000000000..a7ab2594f7 --- /dev/null +++ b/versioned_docs/version-2.5/faq/technical-items.md @@ -0,0 +1,180 @@ +--- +title: Technical +weight: 8006 +aliases: + - /rancher/v2.x/en/faq/technical/ +--- + +### How can I reset the administrator password? + +Docker Install: +``` +$ docker exec -ti reset-password +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- reset-password +New password for default administrator (user-xxxxx): + +``` + + + +### I deleted/deactivated the last admin, how can I fix it? +Docker Install: +``` +$ docker exec -ti ensure-default-admin +New default administrator (user-xxxxx) +New password for default administrator (user-xxxxx): + +``` + +Kubernetes install (Helm): +``` +$ KUBECONFIG=./kube_config_cluster.yml +$ kubectl --kubeconfig $KUBECONFIG -n cattle-system exec $(kubectl --kubeconfig $KUBECONFIG -n cattle-system get pods -l app=rancher | grep '1/1' | head -1 | awk '{ print $1 }') -- ensure-default-admin +New password for default administrator (user-xxxxx): + +``` +### How can I enable debug logging? + +See [Troubleshooting: Logging](../troubleshooting/other-troubleshooting-tips/logging.md) + +### My ClusterIP does not respond to ping + +ClusterIP is a virtual IP, which will not respond to ping. Best way to test if the ClusterIP is configured correctly, is by using `curl` to access the IP and port to see if it responds. + +### Where can I manage Node Templates? + +Node Templates can be accessed by opening your account menu (top right) and selecting `Node Templates`. + +### Why is my Layer-4 Load Balancer in `Pending` state? + +The Layer-4 Load Balancer is created as `type: LoadBalancer`. In Kubernetes, this needs a cloud provider or controller that can satisfy these requests, otherwise these will be in `Pending` state forever. More information can be found on [Cloud Providers](cluster-provisioning/rke-clusters/options/cloud-providers/) or [Create External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) + +### Where is the state of Rancher stored? + +- Docker Install: in the embedded etcd of the `rancher/rancher` container, located at `/var/lib/rancher`. +- Kubernetes install: in the etcd of the RKE cluster created to run Rancher. + +### How are the supported Docker versions determined? + +We follow the validated Docker versions for upstream Kubernetes releases. The validated versions can be found under [External Dependencies](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md#external-dependencies) in the Kubernetes release CHANGELOG.md. + +### How can I access nodes created by Rancher? + +SSH keys to access the nodes created by Rancher can be downloaded via the **Nodes** view. 
Choose the node which you want to access and click on the vertical ⋮ button at the end of the row, and choose **Download Keys** as shown in the picture below. + +![Download Keys](/img/downloadsshkeys.png) + +Unzip the downloaded zip file, and use the file `id_rsa` to connect to you host. Be sure to use the correct username (`rancher` or `docker` for RancherOS, `ubuntu` for Ubuntu, `ec2-user` for Amazon Linux) + +``` +$ ssh -i id_rsa user@ip_of_node +``` + +### How can I automate task X in Rancher? + +The UI consists of static files, and works based on responses of the API. That means every action/task that you can execute in the UI, can be automated via the API. There are 2 ways to do this: + +* Visit `https://your_rancher_ip/v3` and browse the API options. +* Capture the API calls when using the UI (Most commonly used for this is [Chrome Developer Tools](https://developers.google.com/web/tools/chrome-devtools/#network) but you can use anything you like) + +### The IP address of a node changed, how can I recover? + +A node is required to have a static IP configured (or a reserved IP via DHCP). If the IP of a node has changed, you will have to remove it from the cluster and readd it. After it is removed, Rancher will update the cluster to the correct state. If the cluster is no longer in `Provisioning` state, the node is removed from the cluster. + +When the IP address of the node changed, Rancher lost connection to the node, so it will be unable to clean the node properly. See [Cleaning cluster nodes](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) to clean the node. + +When the node is removed from the cluster, and the node is cleaned, you can readd the node to the cluster. + +### How can I add additional arguments/binds/environment variables to Kubernetes components in a Rancher Launched Kubernetes cluster? + +You can add additional arguments/binds/environment variables via the [Config File](cluster-provisioning/rke-clusters/options/#cluster-config-file) option in Cluster Options. For more information, see the [Extra Args, Extra Binds, and Extra Environment Variables](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/) in the RKE documentation or browse the [Example Cluster.ymls](https://rancher.com/docs/rke/latest/en/example-yamls/). + +### How do I check if my certificate chain is valid? + +Use the `openssl verify` command to validate your certificate chain: + +>**Note:** Configure `SSL_CERT_DIR` and `SSL_CERT_FILE` to a dummy location to make sure the OS installed certificates are not used when verifying manually. + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you receive the error `unable to get local issuer certificate`, the chain is incomplete. This usually means that there is an intermediate CA certificate that issued your server certificate. 
If you already have this certificate, you can use it in the verification of the certificate like shown below: + +``` +SSL_CERT_DIR=/dummy SSL_CERT_FILE=/dummy openssl verify -CAfile ca.pem -untrusted intermediate.pem rancher.yourdomain.com.pem +rancher.yourdomain.com.pem: OK +``` + +If you have successfully verified your certificate chain, you should include needed intermediate CA certificates in the server certificate to complete the certificate chain for any connection made to Rancher (for example, by the Rancher agent). The order of the certificates in the server certificate file should be first the server certificate itself (contents of `rancher.yourdomain.com.pem`), followed by intermediate CA certificate(s) (contents of `intermediate.pem`). + +``` +-----BEGIN CERTIFICATE----- +%YOUR_CERTIFICATE% +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +%YOUR_INTERMEDIATE_CERTIFICATE% +-----END CERTIFICATE----- +``` + +If you still get errors during verification, you can retrieve the subject and the issuer of the server certificate using the following command: + +``` +openssl x509 -noout -subject -issuer -in rancher.yourdomain.com.pem +subject= /C=GB/ST=England/O=Alice Ltd/CN=rancher.yourdomain.com +issuer= /C=GB/ST=England/O=Alice Ltd/CN=Alice Intermediate CA +``` + +### How do I check `Common Name` and `Subject Alternative Names` in my server certificate? + +Although technically an entry in `Subject Alternative Names` is required, having the hostname in both `Common Name` and as entry in `Subject Alternative Names` gives you maximum compatibility with older browser/applications. + +Check `Common Name`: + +``` +openssl x509 -noout -subject -in cert.pem +subject= /CN=rancher.my.org +``` + +Check `Subject Alternative Names`: + +``` +openssl x509 -noout -in cert.pem -text | grep DNS + DNS:rancher.my.org +``` + +### Why does it take 5+ minutes for a pod to be rescheduled when a node has failed? + +This is due to a combination of the following default Kubernetes settings: + +* kubelet + * `node-status-update-frequency`: Specifies how often kubelet posts node status to master (default 10s) +* kube-controller-manager + * `node-monitor-period`: The period for syncing NodeStatus in NodeController (default 5s) + * `node-monitor-grace-period`: Amount of time which we allow running Node to be unresponsive before marking it unhealthy (default 40s) + * `pod-eviction-timeout`: The grace period for deleting pods on failed nodes (default 5m0s) + +See [Kubernetes: kubelet](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) and [Kubernetes: kube-controller-manager](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) for more information on these settings. + +In Kubernetes v1.13, the `TaintBasedEvictions` feature is enabled by default. See [Kubernetes: Taint based Evictions](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#taint-based-evictions) for more information. + +* kube-apiserver (Kubernetes v1.13 and up) + * `default-not-ready-toleration-seconds`: Indicates the tolerationSeconds of the toleration for notReady:NoExecute that is added by default to every pod that does not already have such a toleration. 
+ * `default-unreachable-toleration-seconds`: Indicates the tolerationSeconds of the toleration for unreachable:NoExecute that is added by default to every pod that does not already have such a toleration. + +### Can I use keyboard shortcuts in the UI? + +Yes, most parts of the UI can be reached using keyboard shortcuts. For an overview of the available shortcuts, press `?` anywhere in the UI. + + +### What does `Unknown schema for type:` errors followed by something like `catalog.cattle.io.operation` mean when trying to modify an App? + +This error occurs when Kubernetes can not find the CRD mentioned. The vast majority of the time these are a result of missing RBAC permissions. Try with an admin user and if this works, add permissions for the resource mentioned by the error (ie. `Get`, `List`, `Patch` as needed). diff --git a/content/rancher/v2.5/en/faq/telemetry/_index.md b/versioned_docs/version-2.5/faq/telemetry.md similarity index 100% rename from content/rancher/v2.5/en/faq/telemetry/_index.md rename to versioned_docs/version-2.5/faq/telemetry.md diff --git a/versioned_docs/version-2.5/getting-started.md b/versioned_docs/version-2.5/getting-started.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md new file mode 100644 index 0000000000..4cb72c3f07 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md @@ -0,0 +1,262 @@ +--- +title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer +weight: 252 +aliases: + - /rancher/v2.5/en/installation/single-node/single-node-install-external-lb/ + - /rancher/v2.5/en/installation/other-installation-methods/single-node-docker/single-node-install-external-lb + - /rancher/v2.5/en/installation/options/single-node-install-external-lb + - /rancher/v2.5/en/installation/single-node-install-external-lb + - /rancher/v2.x/en/installation/resources/advanced/single-node-install-external-lb/ +--- + +For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work with it conjunction. + +A layer-7 load balancer can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. + +This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer. + +> **Want to skip the external load balancer?** +> See [Docker Installation](installation/single-node) instead. + +## Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.](../../../../pages-for-subheaders/installation-requirements.md) + +## Installation Outline + + + +- [1. Provision Linux Host](#1-provision-linux-host) +- [2. 
Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) +- [3. Configure Load Balancer](#3-configure-load-balancer) + + + +## 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements](../../../../pages-for-subheaders/installation-requirements.md) to launch your Rancher Server. + +## 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> **Do you want to...** +> +> - Complete an Air Gap Installation? +> - Record all transactions with the Rancher API? +> +> See [Advanced Options](#advanced-options) below before continuing. + +Choose from the following options: + +
    + Option A-Bring Your Own Certificate: Self-Signed + +If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. + +> **Prerequisites:** +> Create a self-signed certificate. +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Self-Signed Cert:** + +1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest + ``` + +
    +
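If you still need to generate the self-signed certificate that Option A expects, a minimal sketch with OpenSSL (1.1.1 or newer, for `-addext`) is shown below; the hostname is a placeholder for the name you will use to reach Rancher:

```bash
# Produces a PEM-encoded private key and a self-signed certificate valid for one year.
openssl req -x509 -newkey rsa:4096 -sha256 -days 365 -nodes \
  -keyout key.pem -out cacerts.pem \
  -subj "/CN=rancher.example.com" \
  -addext "subjectAltName=DNS:rancher.example.com"
```

In this sketch, `cacerts.pem` is the file mounted into the Rancher container in the command above, while the certificate and key are also installed on the load balancer that terminates TLS.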
    + Option B-Bring Your Own Certificate: Signed by Recognized CA + +If your cluster is public facing, it's best to use a certificate signed by a recognized CA. + +> **Prerequisites:** +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Cert Signed by a Recognized CA:** + +If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. We do have to make sure there is no default CA certificate generated and stored, you can do this by passing the `--no-cacerts` parameter to the container. + +1. Enter the following command. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest --no-cacerts + ``` + +
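Whichever option you chose, it is worth confirming that the container is up and answering locally before you configure the load balancer in front of it. A quick sketch, assuming the container was started as shown above and that your Rancher version serves the `/ping` healthcheck endpoint:

```bash
# Confirm the container is running, then hit the healthcheck endpoint locally.
docker ps --filter "ancestor=rancher/rancher:latest"
curl -k https://localhost/ping   # expected response: pong
```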
+ +## 3. Configure Load Balancer + +When using a load balancer in front of your Rancher container, there's no need for the container to redirect port communication from port 80 or port 443. Passing the `X-Forwarded-Proto: https` header disables this redirect. + +The load balancer or proxy has to be configured to support the following: + +- **WebSocket** connections +- **SPDY** / **HTTP/2** protocols +- Passing / setting the following headers: + + | Header | Value | Description | + |--------|-------|-------------| + | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. + | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

    **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. + | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. + | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. +### Example NGINX configuration + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server rancher-server:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` + +
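After adapting the example configuration above and placing it where your NGINX build expects it, it is a good idea to syntax-check it and reload rather than restart the service; a minimal sketch:

```bash
# Validate the configuration, then reload the running NGINX without dropping connections.
nginx -t
nginx -s reload
```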
    + +## What's Next? + +- **Recommended:** Review [Single Node Backup and Restore](installation/backups-and-restoration/single-node-backup-and-restoration/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
    + +## FAQ and Troubleshooting + +For help troubleshooting certificates, see [this section.](../../other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +## Advanced Options + +### API Auditing + +If you want to record all transactions with the Rancher API, enable the [API Auditing](installation/api-auditing) feature by adding the flags below into your install command. + + -e AUDIT_LEVEL=1 \ + -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ + -e AUDIT_LOG_MAXAGE=20 \ + -e AUDIT_LOG_MAXBACKUP=20 \ + -e AUDIT_LOG_MAXSIZE=100 \ + +### Air Gap + +If you are visiting this page to complete an [Air Gap Installation](installation/air-gap-installation/), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +``` +upstream rancher { + server rancher-server:80; +} + +map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; +} + +server { + listen 443 ssl http2; + server_name rancher.yourdomain.com; + ssl_certificate /etc/your_certificate_directory/fullchain.pem; + ssl_certificate_key /etc/your_certificate_directory/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } +} + +server { + listen 80; + server_name rancher.yourdomain.com; + return 301 https://$server_name$request_uri; +} +``` + +
    + diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md new file mode 100644 index 0000000000..75ec9ecf15 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md @@ -0,0 +1,570 @@ +--- +title: Enabling the API Audit Log to Record System Events +weight: 4 +aliases: + - /rancher/v2.5/en/installation/options/api-audit-log/ + - /rancher/v2.5/en/installation/api-auditing + - /rancher/v2.x/en/installation/resources/advanced/api-audit-log/ +--- + +You can enable the API audit log to record the sequence of system events initiated by individual users. You can know what happened, when it happened, who initiated it, and what cluster it affected. When you enable this feature, all requests to the Rancher API and all responses from it are written to a log. + +You can enable API Auditing during Rancher installation or upgrade. + +## Enabling API Audit Log + +The Audit Log is enabled and configured by passing environment variables to the Rancher server container. See the following to enable on your installation. + +- [Docker Install](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +- [Kubernetes Install](../../../../reference-guides/installation-references/helm-chart-options.md#api-audit-log) + +## API Audit Log Options + +The usage below defines rules about what the audit log should record and what data it should include: + +| Parameter | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `AUDIT_LEVEL` | `0` - Disable audit log (default setting).
`1` - Log event metadata.<br/>`2` - Log event metadata and request body.<br/>`3` - Log event metadata, request body, and response body. Each log transaction for a request/response pair uses the same `auditID` value.

    See [Audit Level Logging](#audit-log-levels) for a table that displays what each setting logs. | +| `AUDIT_LOG_PATH` | Log path for Rancher Server API. Default path is `/var/log/auditlog/rancher-api-audit.log`. You can mount the log directory to host.

    Usage Example: `AUDIT_LOG_PATH=/my/custom/path/`
| +| `AUDIT_LOG_MAXAGE` | Defines the maximum number of days to retain old audit log files. Default is 10 days. | +| `AUDIT_LOG_MAXBACKUP` | Defines the maximum number of audit log files to retain. Default is 10. | +| `AUDIT_LOG_MAXSIZE` | Defines the maximum size in megabytes of the audit log file before it is rotated. Default size is 100M. | +

<br/>
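+As a point of reference for a Docker install, these options are passed as environment variables on the Rancher server container. The command below is only a sketch: it assumes a single-node install and a hypothetical host directory `/opt/rancher-auditlog` for persisting the log; adjust the level, rotation values, and paths for your environment.
+
+```
+# /opt/rancher-auditlog is a placeholder host path for persisting the audit log
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v /opt/rancher-auditlog:/var/log/auditlog \
+  -e AUDIT_LEVEL=1 \
+  -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \
+  -e AUDIT_LOG_MAXAGE=20 \
+  -e AUDIT_LOG_MAXBACKUP=20 \
+  -e AUDIT_LOG_MAXSIZE=100 \
+  --privileged \
+  rancher/rancher:latest
+```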
    + +### Audit Log Levels + +The following table displays what parts of API transactions are logged for each [`AUDIT_LEVEL`](#audit-level) setting. + +| `AUDIT_LEVEL` Setting | Request Metadata | Request Body | Response Metadata | Response Body | +| --------------------- | ---------------- | ------------ | ----------------- | ------------- | +| `0` | | | | | +| `1` | ✓ | | | | +| `2` | ✓ | ✓ | | | +| `3` | ✓ | ✓ | ✓ | ✓ | + +## Viewing API Audit Logs + +### Docker Install + +Share the `AUDIT_LOG_PATH` directory (Default: `/var/log/auditlog`) with the host system. The log can be parsed by standard CLI tools or forwarded on to a log collection tool like Fluentd, Filebeat, Logstash, etc. + +### Kubernetes Install + +Enabling the API Audit Log with the Helm chart install will create a `rancher-audit-log` sidecar container in the Rancher pod. This container will stream the log to standard output (stdout). You can view the log as you would any container log. + +The `rancher-audit-log` container is part of the `rancher` pod in the `cattle-system` namespace. + +#### CLI + +```bash +kubectl -n cattle-system logs -f rancher-84d886bdbb-s4s69 rancher-audit-log +``` + +#### Rancher Web GUI + +1. From the context menu, select **Cluster: local > System**. +1. From the main navigation bar, choose **Resources > Workloads.** Find the `cattle-system` namespace. Open the `rancher` workload by clicking its link. +1. Pick one of the `rancher` pods and select **⋮ > View Logs**. +1. From the **Logs** drop-down, select `rancher-audit-log`. + +#### Shipping the Audit Log + +You can enable Rancher's built in log collection and shipping for the cluster to ship the audit and other services logs to a supported collection endpoint. See [Logging](../../../../pages-for-subheaders/logging.md) for details. + +## Audit Log Samples + +After you enable auditing, each API request or response is logged by Rancher in the form of JSON. Each of the following code samples provide examples of how to identify each API transaction. + +### Metadata Level + +If you set your `AUDIT_LEVEL` to `1`, Rancher logs the metadata header for every API request, but not the body. The header provides basic information about the API transaction, such as the transaction's ID, who initiated the transaction, the time it occurred, etc. + +```json +{ + "auditID": "30022177-9e2e-43d1-b0d0-06ef9d3db183", + "requestURI": "/v3/schemas", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "GET", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:22:43 +0800" +} +``` + +### Metadata and Request Body Level + +If you set your `AUDIT_LEVEL` to `2`, Rancher logs the metadata header and body for every API request. + +The code sample below depicts an API request, with both its metadata header and body. 
+ +```json +{ + "auditID": "ef1d249e-bfac-4fd0-a61f-cbdcad53b9bb", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:28:08 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + 
"field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my description", + "volumes": [] + } +} +``` + +### Metadata, Request Body, and Response Body Level + +If you set your `AUDIT_LEVEL` to `3`, Rancher logs: + +- The metadata header and body for every API request. +- The metadata header and body for every API response. + +#### Request + +The code sample below depicts an API request, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "requestURI": "/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "sourceIPs": ["::1"], + "user": { + "name": "user-f4tt2", + "group": ["system:authenticated"] + }, + "verb": "PUT", + "stage": "RequestReceived", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "requestBody": { + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "paused": false, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements", + "requests": {}, + "limits": {} + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container", + "environmentFrom": [], + "capAdd": [], + "capDrop": [], + "livenessProbe": null, + "volumeMounts": [] + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": "ClusterFirst", + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "name": "nginx", + "namespaceId": "default", + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": 
"default:nginx-nodeport", + "type": "publicEndpoint" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "scheduling": { + "node": {} + }, + "description": "my decript", + "volumes": [] + } +} +``` + +#### Response + +The code sample below depicts an API response, with both its metadata header and body. + +```json +{ + "auditID": "a886fd9f-5d6b-4ae3-9a10-5bff8f3d68af", + "responseStatus": "200", + "stage": "ResponseComplete", + "stageTimestamp": "2018-07-20 10:33:06 +0800", + "responseBody": { + "actionLinks": { + "pause": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=pause", + "resume": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=resume", + "rollback": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx?action=rollback" + }, + "annotations": {}, + "baseType": "workload", + "containers": [ + { + "allowPrivilegeEscalation": false, + "image": "nginx", + "imagePullPolicy": "Always", + "initContainer": false, + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "dnsName": "nginx-nodeport", + "kind": "NodePort", + "name": "80tcp01", + "protocol": "TCP", + "sourcePort": 0, + "type": "/v3/project/schemas/containerPort" + } + ], + "privileged": false, + "readOnly": false, + "resources": { + "type": "/v3/project/schemas/resourceRequirements" + }, + "restartCount": 0, + "runAsNonRoot": false, + "stdin": true, + "stdinOnce": false, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "type": "/v3/project/schemas/container" + } + ], + "created": "2018-07-18T07:34:16Z", + "createdTS": 1531899256000, + "creatorId": null, + "deploymentConfig": { + "maxSurge": 1, + "maxUnavailable": 0, + "minReadySeconds": 0, + "progressDeadlineSeconds": 600, + "revisionHistoryLimit": 10, + "strategy": "RollingUpdate" + }, + "deploymentStatus": { + "availableReplicas": 1, + "conditions": [ + { + "lastTransitionTime": "2018-07-18T07:34:38Z", + "lastTransitionTimeTS": 1531899278000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "Deployment has minimum availability.", + "reason": "MinimumReplicasAvailable", + "status": "True", + "type": "Available" + }, + { + "lastTransitionTime": "2018-07-18T07:34:16Z", + "lastTransitionTimeTS": 1531899256000, + "lastUpdateTime": "2018-07-18T07:34:38Z", + "lastUpdateTimeTS": 1531899278000, + "message": "ReplicaSet \"nginx-64d85666f9\" has successfully progressed.", + "reason": "NewReplicaSetAvailable", + "status": "True", + "type": "Progressing" + } + ], + "observedGeneration": 2, + "readyReplicas": 1, + "replicas": 1, + "type": "/v3/project/schemas/deploymentStatus", + "unavailableReplicas": 0, + "updatedReplicas": 1 + }, + "dnsPolicy": 
"ClusterFirst", + "hostIPC": false, + "hostNetwork": false, + "hostPID": false, + "id": "deployment:default:nginx", + "labels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "links": { + "remove": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "revisions": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/revisions", + "self": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "update": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx", + "yaml": "https://localhost:8443/v3/project/c-bcz5t:p-fdr4s/workloads/deployment:default:nginx/yaml" + }, + "name": "nginx", + "namespaceId": "default", + "paused": false, + "projectId": "c-bcz5t:p-fdr4s", + "publicEndpoints": [ + { + "addresses": ["10.64.3.58"], + "allNodes": true, + "ingressId": null, + "nodeId": null, + "podId": null, + "port": 30917, + "protocol": "TCP", + "serviceId": "default:nginx-nodeport" + } + ], + "restartPolicy": "Always", + "scale": 1, + "schedulerName": "default-scheduler", + "selector": { + "matchLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + }, + "type": "/v3/project/schemas/labelSelector" + }, + "state": "active", + "terminationGracePeriodSeconds": 30, + "transitioning": "no", + "transitioningMessage": "", + "type": "deployment", + "uuid": "f998037d-8a5c-11e8-a4cf-0245a7ebb0fd", + "workloadAnnotations": { + "deployment.kubernetes.io/revision": "1", + "field.cattle.io/creatorId": "user-f4tt2" + }, + "workloadLabels": { + "workload.user.cattle.io/workloadselector": "deployment-default-nginx" + } + } +} +``` diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md new file mode 100644 index 0000000000..341e9cb37b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld.md @@ -0,0 +1,110 @@ +--- +title: Opening Ports with firewalld +weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/advanced/firewall/ +--- + +> We recommend disabling firewalld. For Kubernetes 1.19.x and higher, firewalld must be turned off. + +Some distributions of Linux [derived from RHEL,](https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Rebuilds) including Oracle Linux, may have default firewall rules that block communication with Helm. 
+ +For example, one Oracle Linux image in AWS has REJECT rules that stop Helm from communicating with Tiller: + +``` +Chain INPUT (policy ACCEPT) +target prot opt source destination +ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED +ACCEPT icmp -- anywhere anywhere +ACCEPT all -- anywhere anywhere +ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain FORWARD (policy ACCEPT) +target prot opt source destination +REJECT all -- anywhere anywhere reject-with icmp-host-prohibited + +Chain OUTPUT (policy ACCEPT) +target prot opt source destination +``` + +You can check the default firewall rules with this command: + +``` +sudo iptables --list +``` + +This section describes how to use `firewalld` to apply the [firewall port rules](../../installation-requirements/port-requirements.md) for nodes in a high-availability Rancher server cluster. + +# Prerequisite + +Install v7.x or later ofv`firewalld`: + +``` +yum install firewalld +systemctl start firewalld +systemctl enable firewalld +``` + +# Applying Firewall Port Rules + +In the Rancher high-availability installation instructions, the Rancher server is set up on three nodes that have all three Kubernetes roles: etcd, controlplane, and worker. If your Rancher server nodes have all three roles, run the following commands on each node: + +``` +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` +If your Rancher server nodes have separate roles, use the following commands based on the role of the node: + +``` +# For etcd nodes, run the following commands: +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=2379/tcp +firewall-cmd --permanent --add-port=2380/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp + +# For control plane nodes, run the following commands: +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=6443/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp + +# For worker nodes, run the following commands: +firewall-cmd --permanent --add-port=22/tcp +firewall-cmd --permanent --add-port=80/tcp +firewall-cmd --permanent --add-port=443/tcp +firewall-cmd --permanent --add-port=2376/tcp +firewall-cmd --permanent --add-port=8472/udp +firewall-cmd --permanent --add-port=9099/tcp +firewall-cmd --permanent --add-port=10250/tcp +firewall-cmd --permanent --add-port=10254/tcp +firewall-cmd --permanent --add-port=30000-32767/tcp +firewall-cmd --permanent --add-port=30000-32767/udp +``` + +After the `firewall-cmd` commands have been run on 
a node, use the following command to enable the firewall rules: + +``` +firewall-cmd --reload +``` + +**Result:** The firewall is updated so that Helm can communicate with the Rancher server nodes. diff --git a/content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/advanced/etcd/_index.md rename to versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md new file mode 100644 index 0000000000..be6f7a6c45 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md @@ -0,0 +1,34 @@ +--- +title: UI for Istio Virtual Services and Destination Rules +weight: 2 +aliases: + - /rancher/v2.5/en/installation/options/feature-flags/istio-virtual-service-ui + - /rancher/v2.x/en/installation/resources/feature-flags/istio-virtual-service-ui/ +--- + +This feature enables a UI that lets you create, read, update and delete virtual services and destination rules, which are traffic management features of Istio. + +> **Prerequisite:** Turning on this feature does not enable Istio. A cluster administrator needs to [enable Istio for the cluster](../../../../pages-for-subheaders/istio-setup-guide.md) in order to use the feature. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](installation/options/feature-flags/) + +Environment Variable Key | Default Value | Status | Available as of +---|---|---|--- +`istio-virtual-service-ui` |`false` | Experimental | v2.3.0 +`istio-virtual-service-ui` | `true` | GA | v2.3.2 + +# About this Feature + +A central advantage of Istio's traffic management features is that they allow dynamic request routing, which is useful for canary deployments, blue/green deployments, or A/B testing. + +When enabled, this feature turns on a page that lets you configure some traffic management features of Istio using the Rancher UI. Without this feature, you need to use `kubectl` to manage traffic with Istio. + +The feature enables two UI tabs: one tab for **Virtual Services** and another for **Destination Rules.** + +- **Virtual services** intercept and direct traffic to your Kubernetes services, allowing you to direct percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) +- **Destination rules** serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. 
For details, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule) + +To see these tabs, + +1. Go to the project view in Rancher and click **Resources > Istio.** +1. You will see tabs for **Traffic Graph,** which has the Kiali network visualization integrated into the UI, and **Traffic Metrics,** which shows metrics for the success rate and request volume of traffic to your services, among other metrics. Next to these tabs, you should see the tabs for **Virtual Services** and **Destination Rules.** \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md new file mode 100644 index 0000000000..2121242ffb --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md @@ -0,0 +1,43 @@ +--- +title: "Running on ARM64 (Experimental)" +weight: 3 +aliases: + - /rancher/v2.5/en/installation/options/arm64-platform + - /rancher/v2.x/en/installation/resources/advanced/arm64-platform/ +--- + +> **Important:** +> +> Running on an ARM64 platform is currently an experimental feature and is not yet officially supported in Rancher. Therefore, we do not recommend using ARM64 based nodes in a production environment. + +The following options are available when using an ARM64 platform: + +- Running Rancher on ARM64 based node(s) + - Only for Docker Install. Please note that the following installation command replaces the examples found in the [Docker Install]({{}}/rancher/v2.0-v2.4/en/installation/other-installation-methods/single-node-docker) link: + + ``` + # In the last line `rancher/rancher:vX.Y.Z`, be certain to replace "X.Y.Z" with a released version in which ARM64 builds exist. For example, if your matching version is v2.5.8, you would fill in this line with `rancher/rancher:v2.5.8`. + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:vX.Y.Z + ``` +> **Note:** To check if your specific released version is compatible with the ARM64 architecture, you may navigate to your +> version's release notes in the following two ways: +> +> - Manually find your version using https://github.com/rancher/rancher/releases. +> - Go directly to your version using the tag and the specific version number. If you plan to use v2.5.8, for example, you may +> navigate to https://github.com/rancher/rancher/releases/tag/v2.5.8. + +- Create custom cluster and adding ARM64 based node(s) + - Kubernetes cluster version must be 1.12 or higher + - CNI Network Provider must be [Flannel](../../../../faq/container-network-interface-providers.md#flannel) +- Importing clusters that contain ARM64 based nodes + - Kubernetes cluster version must be 1.12 or higher + +Please see [Cluster Options](cluster-provisioning/rke-clusters/options/) how to configure the cluster options. 
+ +The following features are not tested: + +- Monitoring, alerts, notifiers, pipelines and logging +- Launching apps from the catalog diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md new file mode 100644 index 0000000000..25ee97f9e2 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md @@ -0,0 +1,43 @@ +--- +title: Allow Unsupported Storage Drivers +weight: 1 +aliases: + - /rancher/v2.5/en/installation/options/feature-flags/enable-not-default-storage-drivers/ + - /rancher/v2.x/en/installation/resources/feature-flags/enable-not-default-storage-drivers/ +--- + +This feature allows you to use types for storage providers and provisioners that are not enabled by default. + +To enable or disable this feature, refer to the instructions on [the main page about enabling experimental features.](installation/options/feature-flags/) + +Environment Variable Key | Default Value | Description +---|---|--- + `unsupported-storage-drivers` | `false` | This feature enables types for storage providers and provisioners that are not enabled by default. + +### Types for Persistent Volume Plugins that are Enabled by Default +Below is a list of storage types for persistent volume plugins that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +### Types for StorageClass that are Enabled by Default +Below is a list of storage types for a StorageClass that are enabled by default. When enabling this feature flag, any persistent volume plugins that are not on this list are considered experimental and unsupported: + +Name | Plugin +--------|-------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md new file mode 100644 index 0000000000..0b4079b0ea --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades.md @@ -0,0 +1,138 @@ +--- +title: Rendering the Helm Template in an Air Gapped Environment +shortTitle: Air Gap Upgrade +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> These instructions assume you have already followed the instructions for a Kubernetes upgrade on [this page,](upgrades.md) including the prerequisites, up until step 3. Upgrade Rancher. 
+ +### Rancher Helm Template Options + +Render the Rancher template using the same chosen options that were used when installing Rancher. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +Based on the choice you made during installation, complete one of the procedures below. + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + + +### Option A: Default Self-signed Certificate + + + + +``` +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + + + + + ```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + + + + +### Option B: Certificates from Files using Kubernetes Secrets + + + + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + + + + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain +helm template rancher ./rancher-.tgz --output-dir . 
\ +--namespace cattle-system \ +--set hostname= \ +--set rancherImage=/rancher/rancher \ +--set ingress.tls.source=secret \ +--set privateCA=true \ +--set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher +--set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + + + + + +### Apply the Rendered Templates + +Copy the rendered manifest directories to a system with access to the Rancher server cluster and apply the rendered templates. + +Use `kubectl` to apply the rendered manifests. + +```plain +kubectl -n cattle-system apply -R -f ./rancher +``` + +# Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). + +# Known Upgrade Issues + +A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md new file mode 100644 index 0000000000..938f3931a3 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md @@ -0,0 +1,119 @@ +--- +title: Installing Rancher on Azure Kubernetes Service +shortTitle: AKS +weight: 4 +--- + +This page covers how to install Rancher on Microsoft's Azure Kubernetes Service (AKS). + +The guide uses command line tools to provision an AKS cluster with an ingress. If you prefer to provision your cluster using the Azure portal, refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal). + +If you already have an AKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +# Prerequisites + +>**Note** +>Deploying to Microsoft Azure will incur charges. + +- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. +- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. +- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. +- Your subscription has sufficient quota for at least 2 vCPUs. 
For details on Rancher server resource requirements, refer to [this section](../../../pages-for-subheaders/installation-requirements.md#rke-and-hosted-kubernetes) +- When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +# 1. Prepare your Workstation + +Install the following command line tools on your workstation: + +- The Azure CLI, **az:** For help, refer to these [installation steps.](https://docs.microsoft.com/en-us/cli/azure/) +- **kubectl:** For help, refer to these [installation steps.](https://kubernetes.io/docs/tasks/tools/#kubectl) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +# 2. Create a Resource Group + +After installing the CLI, you will need to log in with your Azure account. + +``` +az login +``` + +Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) to hold all relevant resources for your cluster. Use a location that applies to your use case. + +``` +az group create --name rancher-rg --location eastus +``` + +# 3. Create the AKS Cluster + +To create an AKS cluster, run the following command. Use a VM size that applies to your use case. Refer to [this article](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for available sizes and options. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +az aks create \ + --resource-group rancher-rg \ + --name rancher-server \ + --kubernetes-version 1.20.5 \ + --node-count 3 \ + --node-vm-size Standard_D2_v3 +``` + +The cluster will take some time to be deployed. + +# 4. Get Access Credentials + +After the cluster is deployed, get the access credentials. + +``` +az aks get-credentials --resource-group rancher-rg --name rancher-server +``` + +This command merges your cluster's credentials into the existing kubeconfig and allows `kubectl` to interact with the cluster. + +# 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. Installing an Ingress requires allocating a public IP address. Ensure you have sufficient quota, otherwise it will fail to assign the IP address. Limits for public IP addresses are applicable at a regional level per subscription. + +The following command installs an `nginx-ingress-controller` with a Kubernetes load balancer service. + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 6. 
Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP + 67s +``` + +Save the `EXTERNAL-IP`. + +# 7. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the `EXTERNAL-IP` that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the [Azure DNS documentation](https://docs.microsoft.com/en-us/azure/dns/) + +# 8. Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md new file mode 100644 index 0000000000..08e925796a --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md @@ -0,0 +1,166 @@ +--- +title: Installing Rancher on Amazon EKS +shortTitle: Amazon EKS +weight: 4 +aliases: + - /rancher/v2.x/en/installation/install-rancher-on-k8s/amazon-eks/ +--- + +This page covers two ways to install Rancher on EKS. + +The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. + +The second is a guide for installing an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) +- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) + +# Automated Quickstart using AWS Best Practices + +Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. 
The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) + +The quick start guide provides three options for deploying Rancher on EKS: + +- **Deploy Rancher into a new VPC and new Amazon EKS cluster.** This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. +- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. +- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster.** This option provisions Rancher in your existing AWS infrastructure. + +Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: + +- A highly available architecture that spans three Availability Zones.* +- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* +- In the public subnets: + - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* + - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* +- In the private subnets: + - Kubernetes nodes in an Auto Scaling group.* + - A Network Load Balancer (not shown) for accessing the Rancher console. +- Rancher deployment using AWS Systems Manager automation. +- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* +- An Amazon Route 53 DNS record for accessing the Rancher deployment. + +\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. + +# Creating an EKS Cluster for the Rancher Server + +In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +> **Prerequisites:** +> +> - You should already have an AWS account. +> - It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. +> - The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) + +### 1. Prepare your Workstation + +Install the following command line tools on your workstation: + +- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) +- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +### 2. 
Configure the AWS CLI + +To configure the AWS CLI, run the following command: + +``` +aws configure +``` + +Then enter the following values: + +| Value | Description | +|-------|-------------| +| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | +| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | +| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | +| Default output format | Enter `json`. | + +### 3. Create the EKS Cluster + +To create an EKS cluster, run the following command. Use the AWS region that applies to your use case. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +eksctl create cluster \ + --name rancher-server \ + --version 1.20 \ + --region us-west-2 \ + --nodegroup-name ranchernodes \ + --nodes 3 \ + --nodes-min 1 \ + --nodes-max 4 \ + --managed +``` + +The cluster will take some time to be deployed with CloudFormation. + +### 4. Test the Cluster + +To test the cluster, run: + +``` +eksctl get cluster +``` + +The result should look like the following: + +``` +eksctl get cluster +2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 +2021-03-18 15:09:35 [ℹ] using region us-west-2 +NAME REGION EKSCTL CREATED +rancher-server-cluster us-west-2 True +``` + +### 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. + +The following command installs an `nginx-ingress-controller` with a LoadBalancer service. This will result in an ELB (Elastic Load Balancer) in front of NGINX: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +### 6. Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP + 27m +``` + +Save the `EXTERNAL-IP`. + +### 7. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) + +### 8. 
Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md new file mode 100644 index 0000000000..baef9e9bca --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md @@ -0,0 +1,186 @@ +--- +title: Installing Rancher on a Google Kubernetes Engine Cluster +shortTitle: GKE +weight: 5 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install Rancher using Google Kubernetes Engine. + +If you already have a GKE Kubernetes cluster, skip to the step about [installing an ingress.](#7-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) + +# Prerequisites + +- You will need a Google account. +- You will need a Google Cloud billing account. You can manage your Cloud Billing accounts using the Google Cloud Console. For more information about the Cloud Console, visit [General guide to the console.](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599) +- You will need a cloud quota for at least one in-use IP address and at least 2 CPUs. For more details about hardware requirements for the Rancher server, refer to [this section.](../../../pages-for-subheaders/installation-requirements.md#rke-and-hosted-kubernetes) + +# 1. Enable the Kubernetes Engine API + +Take the following steps to enable the Kubernetes Engine API: + +1. Visit the [Kubernetes Engine page](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886) in the Google Cloud Console. +1. Create or select a project. +1. Open the project and enable the Kubernetes Engine API for the project. Wait for the API and related services to be enabled. This can take several minutes. +1. Make sure that billing is enabled for your Cloud project. For information on how to enable billing for your project, refer to the [Google Cloud documentation.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project) + +# 2. Open the Cloud Shell + +Cloud Shell is a shell environment for managing resources hosted on Google Cloud. Cloud Shell comes preinstalled with the `gcloud` command-line tool and kubectl command-line tool. The `gcloud` tool provides the primary command-line interface for Google Cloud, and `kubectl` provides the primary command-line interface for running commands against Kubernetes clusters. 
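+After you launch a shell using either of the options described below, you can optionally confirm that both tools are available; this is just a quick sanity check, not a required step:
+
+```
+gcloud version
+kubectl version --client
+```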
+ +The following sections describe how to launch the cloud shell from the Google Cloud Console or from your local workstation. + +### Cloud Shell + +To launch the shell from the [Google Cloud Console,](https://console.cloud.google.com) go to the upper-right corner of the console and click the terminal button. When hovering over the button, it is labeled **Activate Cloud Shell.** + +### Local Shell + +To install `gcloud` and `kubectl`, perform the following steps: + +1. Install the Cloud SDK by following [these steps.](https://cloud.google.com/sdk/docs/install) The Cloud SDK includes the `gcloud` command-line tool. The steps vary based on your OS. +1. After installing Cloud SDK, install the `kubectl` command-line tool by running the following command: + + ``` + gcloud components install kubectl + ``` + In a later step, `kubectl` will be configured to use the new GKE cluster. +1. [Install Helm 3](https://helm.sh/docs/intro/install/) if it is not already installed. +1. Enable Helm experimental [support for OCI images](https://github.com/helm/community/blob/master/hips/hip-0006.md) with the `HELM_EXPERIMENTAL_OCI` variable. Add the following line to `~/.bashrc` (or `~/.bash_profile` in macOS, or wherever your shell stores environment variables): + + ``` + export HELM_EXPERIMENTAL_OCI=1 + ``` +1. Run the following command to load your updated `.bashrc` file: + + ``` + source ~/.bashrc + ``` + If you are running macOS, use this command: + ``` + source ~/.bash_profile + ``` + + + +# 3. Configure the gcloud CLI + + Set up default gcloud settings using one of the following methods: + +- Using gcloud init, if you want to be walked through setting defaults. +- Using gcloud config, to individually set your project ID, zone, and region. + + + + +1. Run gcloud init and follow the directions: + + ``` + gcloud init + ``` + If you are using SSH on a remote server, use the --console-only flag to prevent the command from launching a browser: + + ``` + gcloud init --console-only + ``` +2. Follow the instructions to authorize gcloud to use your Google Cloud account and select the new project that you created. + + + + + + + +# 4. Confirm that gcloud is configured correctly + +Run: + +``` +gcloud config list +``` + +The output should resemble the following: + +``` +[compute] +region = us-west1 # Your chosen region +zone = us-west1-b # Your chosen zone +[core] +account = +disable_usage_reporting = True +project = + +Your active configuration is: [default] +``` + +# 5. Create a GKE Cluster + +The following command creates a three-node cluster. + +Replace `cluster-name` with the name of your new cluster. + +When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=1.20.10-gke.301 +``` + +# 6. Get Authentication Credentials + +After creating your cluster, you need to get authentication credentials to interact with the cluster: + +``` +gcloud container clusters get-credentials cluster-name +``` + +This command configures `kubectl` to use the cluster you created. + +# 7. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. 
+ +The following command installs an `nginx-ingress-controller` with a LoadBalancer service: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 8. Get the Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s +``` + +Save the `EXTERNAL-IP`. + +# 9. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the Google Cloud documentation about [managing DNS records.](https://cloud.google.com/dns/docs/records) + +# 10. Install the Rancher Helm chart + +Next, install the Rancher Helm chart by following the instructions on [this page.](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use the DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md new file mode 100644 index 0000000000..578e3d14c3 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md @@ -0,0 +1,112 @@ +--- +title: Rollbacks +weight: 3 +aliases: + - /rancher/v2.x/en/upgrades/rollbacks + - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks + - /rancher/v2.x/en/upgrades/ha-server-rollbacks + - /rancher/v2.x/en/upgrades/rollbacks/ha-server-rollbacks + - /rancher/v2.x/en/installation/upgrades-rollbacks/rollbacks/ha-server-rollbacks + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades-rollbacks/rollbacks + - /rancher/v2.x/en/installation/install-rancher-on-k8s/rollbacks/ +--- + +- [Rolling Back to Rancher v2.5.0+](#rolling-back-to-rancher-v2-5-0) +- [Rolling Back to Rancher v2.2-v2.4+](#rolling-back-to-rancher-v2-2-v2-4) +- [Rolling Back to Rancher v2.0-v2.1](#rolling-back-to-rancher-v2-0-v2-1) + +# Rolling Back to Rancher v2.5.0+ + +To roll back to Rancher v2.5.0+, use the **Rancher Backups** application and restore Rancher from backup. + +Rancher has to be started with the lower/previous version after a rollback. + +A restore is performed by creating a Restore custom resource. + +> **Important** +> +> * Follow the instructions from this page for restoring rancher on the same cluster where it was backed up from. 
In order to migrate rancher to a new cluster, follow the steps to [migrate rancher.](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) +> * While restoring Rancher on the same setup, the Rancher deployment is manually scaled down before the restore starts, then the operator will scale it back up once the restore completes. As a result, Rancher and its UI will be unavailable until the restore is complete. While the UI is unavailable, use the original cluster kubeconfig with the restore YAML file: `kubectl create -f restore.yaml`. + +### Scale the Rancher Deployment to 0 + +1. From the **Global** view, hover over the **local** cluster. +1. Under **Projects in local**, click on **System**. +1. From the **cattle-system** namespace section, find the `rancher` deployment. +1. Select **⋮ > Edit**. +1. Change **Scalable deployment of _ pods** to `0`. +1. Scroll to the bottom and click **Save**. + +### Create the Restore Custom Resource + +1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** + * **Note:** If the Rancher Backups app is not visible in the dropdown, you will need to install it from the Charts page in **Apps & Marketplace**. Refer [here](../../../pages-for-subheaders/helm-charts-in-rancher.md#charts) for more information. +1. Click **Restore.** +1. Create the Restore with the form or with YAML. For help creating the Restore resource using the online form, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) +1. To use the YAML editor, you can click **Create > Create from YAML.** Enter the Restore YAML. The following is an example Restore custom resource: + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Restore + metadata: + name: restore-migration + spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + ``` + For help configuring the Restore, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) + +1. Click **Create.** + +**Result:** The backup file is created and updated to the target storage location. The resources are restored in this order: + +1. Custom Resource Definitions (CRDs) +2. Cluster-scoped resources +3. Namespaced resources + +To check how the restore is progressing, you can check the logs of the operator. Follow these steps to get the logs: + +```yaml +kubectl get pods -n cattle-resources-system +kubectl logs -n cattle-resources-system -f +``` + +### Roll back to a previous Rancher version + +Rancher can be rolled back using the Helm CLI. To roll back to the previous version: + +```yaml +helm rollback rancher -n cattle-system +``` + +If the previous revision is not the intended target, you can specify a revision to roll back to. To see the deployment history: + +```yaml +helm history rancher -n cattle-system +``` + +When the target revision is determined, perform the rollback. 
This example will roll back to revision `3`: + +```yaml +helm rollback rancher 3 -n cattle-system +``` + +# Rolling Back to Rancher v2.2-v2.4+ + +To roll back to Rancher before v2.5, follow the procedure detailed here: [Restoring Backups — Kubernetes installs]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/) Restoring a snapshot of the Rancher server cluster will revert Rancher to the version and state at the time of the snapshot. + +For information on how to roll back Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md) + +> Managed clusters are authoritative for their state. This means restoring the rancher server will not revert workload deployments or changes made on managed clusters after the snapshot was taken. + +# Rolling Back to Rancher v2.0-v2.1 + +Rolling back to Rancher v2.0-v2.1 is no longer supported. The instructions for rolling back to these versions are preserved [here]({{}}/rancher/v2.0-v2.4/en/backups/restore/rke-restore/v2.0-v2.1) and are intended to be used only in cases where upgrading to Rancher v2.2+ is not feasible. diff --git a/content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/troubleshooting/_index.md rename to versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md new file mode 100644 index 0000000000..771c0e467b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md @@ -0,0 +1,196 @@ +--- +title: Upgrades +weight: 2 +aliases: + - /rancher/v2.5/en/upgrades/upgrades + - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades + - /rancher/v2.5/en/upgrades/upgrades/ha-server-upgrade-helm-airgap + - /rancher/v2.5/en/upgradeinstallation/install-rancher-on-k8s/upgrades/air-gap-upgrade/ + - /rancher/v2.5/en/upgrades/upgrades/ha + - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/upgrades/ha + - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ + - /rancher/v2.5/en/upgrades/upgrades/ha-server-upgrade-helm/ + - /rancher/v2.5/en/installation/upgrades-rollbacks/upgrades/ha + - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades + - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades-rollbacks/upgrades/ha + - /rancher/v2.5/en/installation/upgrades-rollbacks/ + - /rancher/v2.5/en/upgrades/ + - /rancher/v2.x/en/installation/install-rancher-on-k8s/upgrades/ +--- +The following instructions will guide you through upgrading a Rancher server that was installed on a Kubernetes cluster with Helm. These steps also apply to air gap installs with Helm. 
+ +For the instructions to upgrade Rancher installed on Kubernetes with RancherD, refer to [this page.](installation/install-rancher-on-linux/upgrades) + +For the instructions to upgrade Rancher installed with Docker, refer to [this page.](../other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md) + +To upgrade the components in your Kubernetes cluster, or the definition of the [Kubernetes services](https://rancher.com/docs/rke/latest/en/config-options/services/) or [add-ons](https://rancher.com/docs/rke/latest/en/config-options/add-ons/), refer to the [upgrade documentation for RKE](https://rancher.com/docs/rke/latest/en/upgrades/), the Rancher Kubernetes Engine. + +- [Prerequisites](#prerequisites) +- [Upgrade Outline](#upgrade-outline) +- [Known Upgrade Issues](#known-upgrade-issues) +- [RKE Add-on Installs](#rke-add-on-installs) + +# Prerequisites + +### Access to kubeconfig + +Helm should be run from the same location as your kubeconfig file, or the same location where you run your kubectl commands from. + +If you installed Kubernetes with RKE, the config will have been created in the directory you ran `rke up` in. + +The kubeconfig can also be manually targeted for the intended cluster with the `--kubeconfig` tag (see: https://helm.sh/docs/helm/helm/) + +### Review Known Issues + +Review the list of known issues for each Rancher version, which can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +Note that upgrades _to_ or _from_ any chart in the [rancher-alpha repository](../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories/) aren't supported. + +### Helm Version + +The upgrade instructions assume you are using Helm 3. + +For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) The [Helm 2 upgrade page here]({{}}/rancher/v2.0-v2.4/en/installation/upgrades-rollbacks/upgrades/ha/helm2)provides a copy of the older upgrade instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +### For air gap installs: Populate private registry + +For [air gap installs only,](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version. Follow the guide to [populate your private registry](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to. + +### For upgrades from a Rancher server with a hidden local cluster + +If you are upgrading to Rancher v2.5 from a Rancher server that was started with the Helm chart option `--add-local=false`, you will need to drop that flag when upgrading. Otherwise, the Rancher server will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. 
For more information, see [this section.](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#upgrading-from-rancher-with-a-hidden-local-cluster) + +### For upgrades with cert-manager older than 0.8.0 + +[Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) Upgrade cert-manager to the latest version by following [these instructions.](installation/options/upgrading-cert-manager) + +# Upgrade Outline + +Follow the steps to upgrade Rancher server: + +- [1. Back up your Kubernetes cluster that is running Rancher server](#1-back-up-your-kubernetes-cluster-that-is-running-rancher-server) +- [2. Update the Helm chart repository](#2-update-the-helm-chart-repository) +- [3. Upgrade Rancher](#3-upgrade-rancher) +- [4. Verify the Upgrade](#4-verify-the-upgrade) + +# 1. Back up Your Kubernetes Cluster that is Running Rancher Server + +Use the [backup application](../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) to back up Rancher. + +You'll use the backup as a restoration point if something goes wrong during upgrade. + +# 2. Update the Helm chart repository + +1. Update your local helm repo cache. + + ``` + helm repo update + ``` + +1. Get the repository name that you used to install Rancher. + + For information about the repos and their differences, see [Helm Chart Repositories](../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + + {{< release-channel >}} + + ``` + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + + > **Note:** If you want to switch to a different Helm chart repository, please follow the [steps on how to switch repositories](../resources/choose-a-rancher-version.md#switching-to-a-different-helm-chart-repository). If you switch repositories, make sure to list the repositories again before continuing onto Step 3 to ensure you have the correct one added. + + +1. Fetch the latest chart to install Rancher from the Helm chart repository. + + This command will pull down the latest charts and save it in the current directory as a `.tgz` file. + + ```plain + helm fetch rancher-/rancher + ``` + You can fetch the chart for the specific version you are upgrading to by adding in the `--version=` tag. For example: + + ```plain + helm fetch rancher-/rancher --version=v2.4.11 + ``` + +# 3. Upgrade Rancher + +This section describes how to upgrade normal (Internet-connected) or air gap installations of Rancher with Helm. + +> **Air Gap Instructions:** If you are installing Rancher in an air gapped environment, skip the rest of this page and render the Helm template by following the instructions on [this page.](air-gapped-upgrades.md) + + +Get the values, which were passed with `--set`, from the current Rancher Helm chart that is installed. + +``` +helm get values rancher -n cattle-system + +hostname: rancher.my.org +``` + +> **Note:** There will be more values that are listed with this command. This is just an example of one of the values. 
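You can also confirm which chart version is currently installed, so you know what you are upgrading from (a quick check, assuming Rancher was installed into the default `cattle-system` namespace with the release name `rancher`):

```
helm ls -n cattle-system
```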
+ +If you are also upgrading cert-manager to the latest version from a version older than 0.11.0, follow [Option B: Reinstalling Rancher and cert-manager.](#option-b-reinstalling-rancher-and-cert-manager) + +Otherwise, follow [Option A: Upgrading Rancher.](#option-a-upgrading-rancher) + +### Option A: Upgrading Rancher + +Upgrade Rancher to the latest version with all your settings. + +Take all the values from the previous step and append them to the command using `--set key=value`: + +``` +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org +``` + +> **Note:** The above is an example, there may be more values from the previous step that need to be appended. + +Alternatively, it's possible to export the current values to a file and reference that file during upgrade. For example, to only change the Rancher version: + +``` +helm get values rancher -n cattle-system -o yaml > values.yaml + +helm upgrade rancher rancher-/rancher \ + --namespace cattle-system \ + -f values.yaml \ + --version=2.4.5 +``` + +### Option B: Reinstalling Rancher and cert-manager + +If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, then you need to reinstall both Rancher and cert-manager due to the API change in cert-manager v0.11. + +1. Uninstall Rancher + + ``` + helm delete rancher -n cattle-system + ``` + +2. Uninstall and reinstall `cert-manager` according to the instructions on the [Upgrading Cert-Manager](installation/options/upgrading-cert-manager) page. + +3. Reinstall Rancher to the latest version with all your settings. Take all the values from the step 1 and append them to the command using `--set key=value`. Note: There will be many more options from the step 1 that need to be appended. + + ``` + helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org + ``` + +# 4. Verify the Upgrade + +Log into Rancher to confirm that the upgrade succeeded. + +>**Having network issues following upgrade?** +> +> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). 
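In addition to logging in through the UI, you can confirm from the command line that the new version rolled out cleanly (a minimal check, assuming the default release name and `cattle-system` namespace):

```
kubectl -n cattle-system rollout status deploy/rancher
helm ls -n cattle-system
```

The rollout should complete successfully, and `helm ls` should show the new chart version.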
+ +# Known Upgrade Issues + +A list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) diff --git a/content/rancher/v2.6/en/installation/requirements/installing-docker/_index.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/installation-requirements/install-docker.md similarity index 100% rename from content/rancher/v2.6/en/installation/requirements/installing-docker/_index.md rename to versioned_docs/version-2.5/getting-started/installation-and-upgrade/installation-requirements/install-docker.md diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md new file mode 100644 index 0000000000..3fae624ff5 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/installation-requirements/port-requirements.md @@ -0,0 +1,335 @@ +--- +title: Port Requirements +description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes +weight: 300 +aliases: + - /rancher/v2.x/en/installation/requirements/ports/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. + +- [Rancher Nodes](#rancher-nodes) + - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) + - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) + - [Ports for Rancher Server Nodes on RancherD or RKE2](#ports-for-rancher-server-nodes-on-rancherd-or-rke2) + - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) +- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) + - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) + - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) + - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) + - [Ports for Registered Clusters](#ports-for-registered-clusters) +- [Other Port Considerations](#other-port-considerations) + - [Commonly Used Ports](#commonly-used-ports) + - [Local Node Traffic](#local-node-traffic) + - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) + - [Opening SUSE Linux Ports](#opening-suse-linux-ports) + +# Rancher Nodes + +The following table lists the ports that need to be open to and from nodes that are running the Rancher server. + +The port requirements differ based on the Rancher server architecture. + +As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. + +> **Notes:** +> +> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). 
+> - Kubernetes recommends TCP 30000-32767 for node port services. +> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. + +### Ports for Rancher Server Nodes on K3s + +
    + Click to expand + +The K3s server needs port 6443 to be accessible by the nodes. + +The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Server Nodes
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| TCP | 443 |
    • server nodes
    • agent nodes
    • hosted/registered Kubernetes
    • any source that needs to be able to use the Rancher UI or API
    | Rancher agent, Rancher UI/API, kubectl | +| TCP | 6443 | K3s server nodes | Kubernetes API +| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. +| TCP | 10250 | K3s server and agent nodes | kubelet + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
    + +### Ports for Rancher Server Nodes on RKE + +
    + Click to expand + +Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. + +The following tables break down the port requirements for traffic between the Rancher nodes: + +
    Rules for traffic between Rancher nodes
    + +| Protocol | Port | Description | +|-----|-----|----------------| +| TCP | 443 | Rancher agents | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| TCP | 6443 | Kubernetes apiserver | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 10250 | Metrics server communication with all nodes | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Nodes
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | +| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | +| TCP | 443 |
    • Load Balancer/Reverse Proxy
    • IPs of all cluster nodes and other API/UI clients
    | HTTPS traffic to Rancher UI/API | +| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +|-----|-----|----------------|---| +| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | +| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | +| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | +| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | + +
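To spot-check that one of these ports is reachable between Rancher nodes, a simple TCP probe can help (a hypothetical example; replace the address with one of your node IPs and `6443` with the port you want to test):

```
nc -zv 172.31.0.10 6443
```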
    + +### Ports for Rancher Server Nodes on RancherD or RKE2 + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +
    + Click to expand + +The RancherD (or RKE2) server needs port 6443 and 9345 to be accessible by other nodes in the cluster. + +All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +**Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +
    Inbound Rules for RancherD or RKE2 Server Nodes
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 9345 | RancherD/RKE2 agent nodes | Kubernetes API +| TCP | 6443 | RancherD/RKE2 agent nodes | Kubernetes API +| UDP | 8472 | RancherD/RKE2 server and agent nodes | Required only for Flannel VXLAN +| TCP | 10250 | RancherD/RKE2 server and agent nodes | kubelet +| TCP | 2379 | RancherD/RKE2 server nodes | etcd client port +| TCP | 2380 | RancherD/RKE2 server nodes | etcd peer port +| TCP | 30000-32767 | RancherD/RKE2 server and agent nodes | NodePort port range +| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| HTTPS | 8443 |
    • hosted/registered Kubernetes
    • any source that needs to be able to use the Rancher UI or API
    | Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. | + +Typically all outbound traffic is allowed. +
    + +### Ports for Rancher Server in Docker + +
    + Click to expand + +The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: + +
    Inbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used +| TCP | 443 |
    • hosted/registered Kubernetes
    • any source that needs to be able to use the Rancher UI or API
    | Rancher agent, Rancher UI/API, kubectl + +
    Outbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
    + +# Downstream Kubernetes Cluster Nodes + +Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. + +The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +The following diagram depicts the ports that are opened for each [cluster type](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +
    Port Requirements for the Rancher Management Plane
    + +![Basic Port Requirements](/img/port-communications.svg) + +>**Tip:** +> +>If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. + +### Ports for Rancher Launched Kubernetes Clusters using Node Pools + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with nodes created in an [Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +>**Note:** +>The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +{{< ports-iaas-nodes >}} + +
    + +### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) with [Custom Nodes](../../../pages-for-subheaders/use-existing-nodes.md). + +{{< ports-custom-nodes >}} + +
    + +### Ports for Hosted Kubernetes Clusters + +
    + Click to expand + +The following table depicts the port requirements for [hosted clusters](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md). + +{{< ports-imported-hosted >}} + +
    + +### Ports for Registered Clusters + +Note: Registered clusters were called imported clusters before Rancher v2.5. + +
    + Click to expand + +The following table depicts the port requirements for [registered clusters](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md). + +{{< ports-imported-hosted >}} + +
    + + +# Other Port Considerations + +### Commonly Used Ports + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +import CommonPortsTable from '../../../shared-files/_common-ports-table.md'; + + + +---- + +### Local Node Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. + +### Rancher AWS EC2 Security Group + +When using the [AWS EC2 node driver](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Opening SUSE Linux Ports + +SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, + + + + +1. SSH into the instance. +1. Start YaST in text mode: +``` +sudo yast2 +``` + +1. Navigate to **Security and Users** > **Firewall** > **Zones:public** > **Ports**. To navigate within the interface, follow the instructions [here](https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha-yast-text.html#sec-yast-cli-navigate). +1. To open the required ports, enter them into the **TCP Ports** and **UDP Ports** fields. In this example, ports 9796 and 10250 are also opened for monitoring. The resulting fields should look similar to the following: +```yaml +TCP Ports +22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767 +UDP Ports +8472, 30000-32767 +``` + +1. When all required ports are enter, select **Accept**. + + + + + +1. SSH into the instance. +1. 
Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. In this example, ports 9796 and 10250 are also opened for monitoring: + ``` + FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" + FW_SERVICES_EXT_UDP="8472 30000:32767" + FW_ROUTE=yes + ``` +1. Restart the firewall with the new ports: + ``` + SuSEfirewall2 + ``` + + + + +**Result:** The node has the open ports required to be added to a custom cluster. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md new file mode 100644 index 0000000000..602f6e56a6 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands.md @@ -0,0 +1,133 @@ +--- +title: Docker Install Commands +weight: 1 +--- + +The Docker installation is for Rancher users who want to test out Rancher. + +Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +For Rancher v2.5+, the backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +> **Do you want to...** +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](../../resources/custom-ca-root-certificates.md/). +> - Record all transactions with the Rancher API? See [API Auditing](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log). + +Choose from the following options: + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to install. | + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
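After the container starts, you can confirm that it is running and follow the logs while Rancher initializes (a quick sanity check; the container ID reported by `docker ps` will differ in your environment):

```
docker ps
docker logs -f <container-id>
```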
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](../rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to install. | + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisite:** The certificate files must be in PEM format. + +After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to install. | + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged + /rancher/rancher: +``` + +
    + + + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. + diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md new file mode 100644 index 0000000000..0cf723c91b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md @@ -0,0 +1,182 @@ +--- +title: '1. Set up Infrastructure and Private Registry' +weight: 100 +aliases: + - /rancher/v2.5/en/installation/air-gap-single-node/provision-host + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/prepare-nodes/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). + +An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. + +The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.](../../../../pages-for-subheaders/installation-and-upgrade.md) + +As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. + + + + +We recommend setting up the following infrastructure for a high-availability installation: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. 
+ +For a high-availability K3s installation, you will need to set up one of the following external databases: + +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) + +When you install Kubernetes, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the database, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md) for setting up a MySQL database on Amazon's RDS service. + +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. 
For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 5. Set up a Private Docker Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. 
If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but they must be able to reach your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replicas on all three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes is set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize TLS termination in your infrastructure. Layer-7 load balancing also allows the load balancer to make decisions based on HTTP attributes, such as cookies, that a layer-4 load balancer cannot inspect. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step.
For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 4. Set up a Private Docker Registry + +Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file](https://rancher.com/docs/rke/latest/en/config-options/private-registries/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + + +> The Docker installation is for Rancher users that are wanting to test out Rancher. Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. +> +> As of Rancher v2.5, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +### 1. Set up a Linux Node + +This host will be disconnected from the Internet, but needs to be able to connect to your private registry. 
+ +Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up a Private Docker Registry + +Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) + + + + +### [Next: Collect and Publish Images to your Private Registry](publish-images.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md new file mode 100644 index 0000000000..7eb961d769 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md @@ -0,0 +1,229 @@ +--- +title: '3. Install Kubernetes (Skip for Docker Installs)' +weight: 300 +aliases: + - /rancher/v2.5/en/installation/air-gap-high-availability/install-kube + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/launch-kubernetes/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> Skip this section if you are installing Rancher on a single node with Docker. + +This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. + +As of Rancher v2.5, Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. + +The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. + + + + +In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. + +### Installation Outline + +1. [Prepare Images Directory](#1-prepare-images-directory) +2. [Create Registry YAML](#2-create-registry-yaml) +3. [Install K3s](#3-install-k3s) +4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) + +### 1. Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### 2. Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. 
+ +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note, at this time only secure registries are supported with K3s (SSL with custom CA). + +For more information on private registries configuration file for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) + +### 3. Install K3s + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, and name it `install.sh`. + +Install K3s on each server: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +Install K3s on each agent: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` + +>**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. + +### 4. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. 
If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### Note on Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if not restarted automatically by installer). + + + + +We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. + +### 1. Install RKE + +Install RKE by following the instructions in the [RKE documentation.](https://rancher.com/docs/rke/latest/en/installation/) + +### 2. Create an RKE Config File + +From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. + +This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. + +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the three nodes you created. + +> **Tip:** For more details on the options available, see the RKE [Config Options](https://rancher.com/docs/rke/latest/en/config-options/). + +
**RKE Options**
    + +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | +| `user` | ✓ | A user that can run Docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### 3. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### 4. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +
    +
    + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + +### [Next: Install Rancher](install-rancher-ha.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md new file mode 100644 index 0000000000..c6044e2b08 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md @@ -0,0 +1,316 @@ +--- +title: 4. Install Rancher +weight: 400 +aliases: + - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-system-charts/ + - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.5/en/installation/air-gap-single-node/install-rancher + - /rancher/v2.5/en/installation/air-gap/install-rancher + - /rancher/v2.5/en/installation/air-gap-installation/install-rancher/ + - /rancher/v2.5/en/installation/air-gap-high-availability/install-rancher/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/install-rancher/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Privileged Access for Rancher v2.5+ + +When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. + +# Docker Instructions + +If you want to continue the air gapped installation using Docker commands, skip the rest of this page and follow the instructions on [this page.](docker-install-commands.md) + +# Kubernetes Instructions + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher: + +- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) +- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) +- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) +- [4. Install Rancher](#4-install-rancher) + +# 1. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements](../../resources/helm-version-requirements.md) to choose a version of Helm to install Rancher. + +2. 
Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. + ```plain + helm fetch rancher-/rancher + ``` + + If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: + ```plain + helm fetch rancher-stable/rancher --version=v2.4.8 + ``` + +# 2. Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
    This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
    This option must be passed when rendering the Rancher Helm template. | no | + +# Helm Chart Options for Air Gap Installations + +When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. + +| Chart Option | Chart Value | Description | +| ----------------------- | -------------------------------- | ---- | +| `certmanager.version` | `` | Configure proper Rancher TLS issuer depending of running cert-manager version. | +| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +# 3. Render the Rancher Helm Template + +Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. + +# Option A: Default Self-Signed Certificate + + +By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. + +> **Note:** +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation](../../resources/upgrade-cert-manager.md/). + +### 1. Add the cert-manager repo + +From a system connected to the internet, add the cert-manager repo to Helm: + +```plain +helm repo add jetstack https://charts.jetstack.io +helm repo update +``` + +### 2. Fetch the cert-manager chart + +Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + +```plain +helm fetch jetstack/cert-manager --version v1.5.1 +``` + +### 3. Render the cert-manager template + +Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + +```plain +helm template cert-manager ./cert-manager-v1.5.1.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller \ + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ + --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl +``` + +### 4. Download the cert-manager CRD + +Download the required CRD file for cert-manager: + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml + ``` + +### 5. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. 
Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + + + + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` + + + + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.6` + + + + + + +# Option B: Certificates From Files using Kubernetes Secrets + + +### 1. Create secrets + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +### 2. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | + + + + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template rancher ./rancher-.tgz --output-dir . 
\ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` + +Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md/) to publish the certificate files so Rancher and the ingress controller can use them. + + + + + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.3.6` + +Then refer to [Adding TLS Secrets](../../resources/add-tls-secrets.md/) to publish the certificate files so Rancher and the ingress controller can use them. + + + + + + +# 4. Install Rancher + +Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. + +Use `kubectl` to create namespaces and apply the rendered manifests. + +If you choose to use self-signed certificates in [B. Choose your SSL Configuration](#b-choose-your-ssl-configuration), install cert-manager. + +### For Self-Signed Certificate Installs, Install Cert-manager + +
If you are using self-signed certificates, install cert-manager:

1. Create the namespace for cert-manager.
```plain
kubectl create namespace cert-manager
```

1. Create the cert-manager CustomResourceDefinitions (CRDs).
```plain
kubectl apply -f cert-manager/cert-manager-crd.yaml
```

    > **Note:**
    > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation.

1. Launch cert-manager.
```plain
kubectl apply -R -f ./cert-manager
```
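Before applying the Rancher manifests in the next step, it can help to confirm that cert-manager has finished rolling out. A quick check could look like the following (the deployment names assume the standard cert-manager chart rendered earlier):

```plain
kubectl -n cert-manager rollout status deploy/cert-manager
kubectl -n cert-manager rollout status deploy/cert-manager-webhook
kubectl -n cert-manager get pods
```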
    + +### Install Rancher with kubectl + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` +The installation is complete. + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +# Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options](../../../../reference-guides/installation-references/helm-chart-options.md/) +- [Adding TLS secrets](../../resources/add-tls-secrets.md) +- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md new file mode 100644 index 0000000000..3ce77fe99b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md @@ -0,0 +1,299 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +aliases: + - /rancher/v2.5/en/installation/air-gap-high-availability/prepare-private-registry/ + - /rancher/v2.5/en/installation/air-gap-single-node/prepare-private-registry/ + - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-for-private-reg/ + - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-for-private-reg/ + - /rancher/v2.5/en/installation/air-gap-installation/prepare-private-reg/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/populate-private-registry/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +By default, all images used to [provision Kubernetes clusters](../../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) or launch any tools in Rancher, e.g. monitoring and logging, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. + +Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. + +The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes](../../../../pages-for-subheaders/use-windows-clusters.md), there are separate instructions to support the images needed. + +> **Prerequisites:** +> +> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. 
+> +> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. + + + + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) +2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) +3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) +4. [Populate the private registry](#4-populate-the-private-registry) + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +If you will use ARM64 hosts, the registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### 1. Find the required assets for your Rancher version + +1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets.** Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### 2. Collect the cert-manager image + +> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../resources/upgrade-cert-manager.md). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v1.5.1 + helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. 
Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### 4. Populate the private registry + +Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` + + + + +For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +# Windows Steps + +The Windows images need to be collected and pushed from a Windows server workstation. + +1. Find the required assets for your Rancher version +2. Save the images to your Windows Server workstation +3. Prepare the Docker daemon +4. Populate the private registry + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + +| Release File | Description | +|----------------------------|------------------| +| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | +| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | +| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. 
Run `rancher-save-images.ps1` to create a tarball of all the required images: + ```plain + ./rancher-save-images.ps1 + ``` + + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + + + +### 3. Prepare the Docker daemon + +Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ``` + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... + } + ``` + + + +### 4. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. + +1. Using `powershell`, log into your private registry if required: + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.ps1 --registry + ``` + +# Linux Steps + +The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. + +1. Find the required assets for your Rancher version +2. Collect all the required images +3. Save the images to your Linux workstation +4. Populate the private registry + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets.** + +2. From the release's **Assets** section, download the following files: + +| Release File | Description | +|----------------------------| -------------------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. 
| +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Collect all the required images + +**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation](../../resources/upgrade-cert-manager.md). + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.12.0 + helm template ./cert-manager-.tgz | | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + + + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + +**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + + + +### 4. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. + +The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. 
Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + +```plain +./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry +``` + + + + +### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster](install-kubernetes.md) + +### [Next step for Docker Installs - Install Rancher](install-rancher-ha.md) diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/roll-back-rancherd.md similarity index 100% rename from content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rollbacks/_index.md rename to versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/roll-back-rancherd.md diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md new file mode 100644 index 0000000000..6d720b7f35 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md @@ -0,0 +1,73 @@ +--- +title: Upgrades +weight: 2 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux/upgrades + - /rancher/v2.x/en/installation/install-rancher-on-linux/upgrades/ +--- + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +When RancherD is upgraded, the Rancher Helm controller and the Fleet pods are upgraded. + +During a RancherD upgrade, there is very little downtime, but it is possible that RKE2 may be down for a minute, during which you could lose access to Rancher. + +When Rancher is installed with RancherD, the underlying Kubernetes cluster can't be upgraded from the Rancher UI. It needs to be upgraded using the RancherD CLI. + +### Upgrading the Rancher Helm Chart without Upgrading the Underlying Cluster + +To upgrade Rancher without upgrading the underlying Kubernetes cluster, follow these steps. + +> Before upgrading, we recommend that you should: +> +> - Create a backup of the Rancher server using the [backup application.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) +> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +1. Uninstall the chart with Helm: + + ``` + helm uninstall rancher + ``` + +2. Reinstall the Rancher chart with Helm. To install a specific Rancher version, use the `--version` flag. For example: + + ``` + helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --version 2.5.1 + ``` + +**Result:** Rancher is upgraded to the new version. 
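To confirm the upgrade before moving on, you can list the release and wait for the deployment to finish rolling out. For example, assuming the `cattle-system` namespace used above:

```
helm ls -n cattle-system
kubectl -n cattle-system rollout status deploy/rancher
```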
+ +If necessary, restore Rancher from backup by following [these steps.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md) + +### Upgrading Both Rancher and the Underlying Cluster + +Upgrade both RancherD and the underlying Kubernetes cluster by re-running the RancherD installation script. + +> Before upgrading, we recommend that you should: +> +> - Create a backup of the Rancher server using the [backup application.](../../../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) +> - Review the known issues for the Rancher version you are upgrading to. The known issues are listed in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) + +``` +sudo curl -sfL https://get.rancher.io | sudo sh - +``` + +To specify a specific version to upgrade to, use `INSTALL_RANCHERD_VERSION` environment variable: + +``` +curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.1 sh - +``` + +Then launch the server: + +``` +systemctl enable rancherd-server +systemctl start rancherd-server +``` + +The upgrade can also be performed by manually installing the binary of the desired version. + + diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md new file mode 100644 index 0000000000..261daf1d3e --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md @@ -0,0 +1,153 @@ +--- +title: '2. Install Kubernetes' +weight: 200 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/ +--- + +Once the infrastructure is ready, you can continue with setting up an RKE cluster to install Rancher in. + +### Installing Docker + +First, you have to install Docker and setup the HTTP proxy on all three Linux nodes. For this perform the following steps on all three nodes. + +For convenience export the IP address and port of your proxy into an environment variable and set up the HTTP_PROXY variables for your current shell: + +``` +export proxy_host="10.0.0.5:8888" +export HTTP_PROXY=http://${proxy_host} +export HTTPS_PROXY=http://${proxy_host} +export NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16 +``` + +Next configure apt to use this proxy when installing packages. 
If you are not using Ubuntu, you have to adapt this step accordingly:

```
cat << EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf > /dev/null
Acquire::http::Proxy "http://${proxy_host}/";
Acquire::https::Proxy "http://${proxy_host}/";
EOF
```

Now you can install Docker:

```
curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh
```

Then ensure that your current user is able to access the Docker daemon without sudo:

```
sudo usermod -aG docker YOUR_USERNAME
```

And configure the Docker daemon to use the proxy to pull images:

```
sudo mkdir -p /etc/systemd/system/docker.service.d
cat << EOF | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf > /dev/null
[Service]
Environment="HTTP_PROXY=http://${proxy_host}"
Environment="HTTPS_PROXY=http://${proxy_host}"
Environment="NO_PROXY=127.0.0.0/8,10.0.0.0/8,cattle-system.svc,172.16.0.0/12,192.168.0.0/16"
EOF
```

To apply the configuration, restart the Docker daemon:

```
sudo systemctl daemon-reload
sudo systemctl restart docker
```

### Creating the RKE Cluster

You need several command line tools on the host where you have SSH access to the Linux nodes to create and interact with the cluster:

* [RKE CLI binary](https://rancher.com/docs/rke/latest/en/installation/#download-the-rke-binary)

```
sudo curl -fsSL -o /usr/local/bin/rke https://github.com/rancher/rke/releases/download/v1.1.4/rke_linux-amd64
sudo chmod +x /usr/local/bin/rke
```

* [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)

```
curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
```

* [helm](https://helm.sh/docs/intro/install/)

```
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod +x get_helm.sh
sudo ./get_helm.sh
```

Next, create a YAML file that describes the RKE cluster. Ensure that the IP addresses of the nodes and the SSH username are correct. For more information on the cluster YAML, have a look at the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/).

```
nodes:
  - address: 10.0.1.200
    user: ubuntu
    role: [controlplane,worker,etcd]
  - address: 10.0.1.201
    user: ubuntu
    role: [controlplane,worker,etcd]
  - address: 10.0.1.202
    user: ubuntu
    role: [controlplane,worker,etcd]

services:
  etcd:
    backup_config:
      interval_hours: 12
      retention: 6
```

After that, you can create the Kubernetes cluster by running:

```
rke up --config rancher-cluster.yaml
```

RKE creates a state file called `rancher-cluster.rkestate`, which is needed if you want to perform updates, modify your cluster configuration, or restore it from a backup. It also creates a `kube_config_cluster.yaml` file that you can use to connect to the remote Kubernetes cluster locally with tools like kubectl or Helm. Make sure to save all of these files in a secure location, for example by putting them into a version control system.
+ +To have a look at your cluster run: + +``` +export KUBECONFIG=kube_config_cluster.yaml +kubectl cluster-info +kubectl get pods --all-namespaces +``` + +You can also verify that your external load balancer works, and the DNS entry is set up correctly. If you send a request to either, you should receive HTTP 404 response from the ingress controller: + +``` +$ curl 10.0.1.100 +default backend - 404 +$ curl rancher.example.com +default backend - 404 +``` + +### Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates. + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + +### [Next: Install Rancher](install-rancher.md) diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md new file mode 100644 index 0000000000..a0497f7f81 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md @@ -0,0 +1,91 @@ +--- +title: 3. Install Rancher +weight: 300 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/install-rancher/ +--- + +Now that you have a running RKE cluster, you can install Rancher in it. For security reasons all traffic to Rancher must be encrypted with TLS. For this tutorial you are going to automatically issue a self-signed certificate through [cert-manager](https://cert-manager.io/). In a real-world use-case you will likely use Let's Encrypt or provide your own certificate. + +> **Note:** These installation instructions assume you are using Helm 3. + +### Install cert-manager + +Add the cert-manager helm repository: + +``` +helm repo add jetstack https://charts.jetstack.io +``` + +Create a namespace for cert-manager: + +``` +kubectl create namespace cert-manager +``` + +Install the CustomResourceDefinitions of cert-manager: + +``` +kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml +``` + +And install it with Helm. 
Note that cert-manager also needs your proxy configured in case it needs to communicate with Let's Encrypt or other external certificate issuers:

```
helm upgrade --install cert-manager jetstack/cert-manager \
  --namespace cert-manager --version v1.5.1 \
  --set http_proxy=http://${proxy_host} \
  --set https_proxy=http://${proxy_host} \
  --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local
```

Now wait until cert-manager has finished starting up:

```
kubectl rollout status deployment -n cert-manager cert-manager
kubectl rollout status deployment -n cert-manager cert-manager-webhook
```

### Install Rancher

Next you can install Rancher itself. First add the helm repository:

```
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
```

Create a namespace:

```
kubectl create namespace cattle-system
```

And install Rancher with Helm. Rancher also needs a proxy configuration so that it can communicate with external application catalogs or retrieve Kubernetes version update metadata.

Note that `rancher.cattle-system` must be added to the `noProxy` list (as shown below) so that Fleet can communicate directly with Rancher over Kubernetes service DNS using service discovery.

```
helm upgrade --install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set proxy=http://${proxy_host} \
  --set noProxy=127.0.0.0/8\\,10.0.0.0/8\\,cattle-system.svc\\,172.16.0.0/12\\,192.168.0.0/16\\,.svc\\,.cluster.local\\,rancher.cattle-system
```

Wait for the deployment to finish:

```
kubectl rollout status deployment -n cattle-system rancher
```

You can now navigate to `https://rancher.example.com` and start using Rancher.

> **Note:** If you don't intend to send telemetry data, opt out of [telemetry](../../../../faq/telemetry.md) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully.

### Additional Resources

These resources could be helpful when installing Rancher:

- [Rancher Helm chart options](../../../../reference-guides/installation-references/helm-chart-options.md)
- [Adding TLS secrets](../../resources/add-tls-secrets.md)
- [Troubleshooting Rancher Kubernetes Installations](../../install-upgrade-on-a-kubernetes-cluster/troubleshooting.md)
diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
new file mode 100644
index 0000000000..61ead2609b
--- /dev/null
+++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md
@@ -0,0 +1,63 @@
---
title: '1. Set up Infrastructure'
weight: 100
aliases:
  - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/prepare-nodes/
---

In this section, you will provision the underlying infrastructure for your Rancher management server with internet access through an HTTP proxy.
To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure:

- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere.
- **A load balancer** to direct front-end traffic to the three nodes.
- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it.

These nodes must be in the same region/data center. You may place these servers in separate availability zones.

### Why three nodes?

In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes.

The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes.

### 1. Set up Linux Nodes

These hosts will connect to the internet through an HTTP proxy.

Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../../pages-for-subheaders/installation-requirements.md)

For an example of one way to set up Linux nodes, refer to this [tutorial](../../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2.

### 2. Set up the Load Balancer

You will also need to set up a load balancer to direct traffic to the Rancher replica on each of the nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server.

When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames.

When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster.

For your implementation, consider whether you want or need to use a layer-4 or layer-7 load balancer:

- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a layer-4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic on port TCP/80 to the Ingress pod in the Rancher deployment.
- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure.
Layer-7 load balancing also allows the load balancer to make routing decisions based on HTTP attributes, such as cookies, that a layer-4 load balancer cannot inspect. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination)

For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md)

For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md)

> **Important:**
> Do not use this load balancer (i.e., the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications.

### 3. Set up the DNS Record

Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer.

Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on.

You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one.
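As a quick sanity check, you can verify that the record resolves to your load balancer before continuing. This is only a sketch; the hostname and address below reuse the illustrative values from this tutorial:

```
# Check what the Rancher hostname resolves to
dig +short rancher.example.com
# For an A record you should see the load balancer IP (e.g. 10.0.1.100);
# for a CNAME you should see the load balancer hostname.
```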
For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer)


### [Next: Set up a Kubernetes cluster](install-kubernetes.md)
diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
similarity index 100%
rename from content/rancher/v2.5/en/installation/other-installation-methods/single-node-docker/troubleshooting/_index.md
rename to versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md
diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md
new file mode 100644
index 0000000000..1b6294300a
--- /dev/null
+++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher.md
@@ -0,0 +1,88 @@
---
title: Rolling Back Rancher Installed with Docker
weight: 1015
aliases:
  - /rancher/v2.5/en/upgrades/single-node-rollbacks
  - /rancher/v2.5/en/upgrades/rollbacks/single-node-rollbacks
  - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/
---

If a Rancher upgrade does not complete successfully, you'll have to roll back to the Rancher setup that you were using before the [Docker upgrade](upgrade-docker-installed-rancher.md). Rolling back restores:

- Your previous version of Rancher.
- Your data backup created before upgrade.

## Before You Start

During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (such as `<PRIOR_RANCHER_VERSION>`). Here's an example of a command with a placeholder:

```
docker pull rancher/rancher:<PRIOR_RANCHER_VERSION>
```

In this command, `<PRIOR_RANCHER_VERSION>` is the version of Rancher you were running before your unsuccessful upgrade, for example `v2.0.5`.

Cross-reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below.

**Terminal `docker ps` Command, Displaying Where to Find `<PRIOR_RANCHER_VERSION>` and `<RANCHER_CONTAINER_NAME>`**

![Placeholder Reference](/img/placeholder-ref-2.png)

| Placeholder | Example | Description |
| -------------------------- | -------------------------- | ------------------------------------------------------- |
| `<PRIOR_RANCHER_VERSION>` | `v2.0.5` | The rancher/rancher image you used before upgrade. |
| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | The name of your Rancher container. |
| `<RANCHER_VERSION>` | `v2.0.5` | The version of Rancher that the backup is for. |
| `<DATE>` | `9-27-18` | The date that the data container or backup was created. |
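If you no longer remember the exact backup name, listing the tarballs created during the upgrade can recover the `<RANCHER_VERSION>` and `<DATE>` parts, assuming you kept the naming convention suggested in the upgrade instructions:

```
# Backup tarballs created with the suggested naming convention encode both values,
# e.g. rancher-data-backup-v2.0.5-9-27-18.tar.gz
ls -l rancher-data-backup-*.tar.gz
```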
You can obtain `<PRIOR_RANCHER_VERSION>` and `<RANCHER_CONTAINER_NAME>` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help at any time while creating backups.

## Rolling Back Rancher

If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade.

>**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur.

1. Using a remote Terminal connection, log into the node running your Rancher Server.

1. Pull the version of Rancher that you were running before upgrade. Replace `<PRIOR_RANCHER_VERSION>` with that version.

    For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5.

    ```
    docker pull rancher/rancher:<PRIOR_RANCHER_VERSION>
    ```

1. Stop the container currently running Rancher Server. Replace `<RANCHER_CONTAINER_NAME>` with the name of your Rancher container.

    ```
    docker stop <RANCHER_CONTAINER_NAME>
    ```
    You can obtain the name for your Rancher container by entering `docker ps`.

1. Move the backup tarball that you created during completion of the [Docker upgrade](upgrade-docker-installed-rancher.md) onto your Rancher Server. Change to the directory that you moved it to. Enter `ls` to confirm that it's there.

    If you followed the naming convention we suggested in the [Docker upgrade](upgrade-docker-installed-rancher.md), it will have a name similar to `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`.

1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholders. Don't forget to close the quotes.

    ```
    docker run --volumes-from rancher-data \
      -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \
      && tar zxvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz"
    ```

1. Start a new Rancher Server container with the `<PRIOR_RANCHER_VERSION>` tag, pointing to the data container.

    ```
    docker run -d --volumes-from rancher-data \
      --restart=unless-stopped \
      -p 80:80 -p 443:443 \
      --privileged \
      rancher/rancher:<PRIOR_RANCHER_VERSION>
    ```
    As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)

    >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades.

1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored.

**Result:** Rancher is rolled back to its version and data state before upgrade.
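Before opening the browser, you can also check from the node that the rolled-back server is answering again. This is only a sketch; `rancher.example.com` stands in for your Rancher hostname, and `-k` is used because the server may present a self-signed certificate:

```
# Rancher responds to /ping with "pong" once the server is up
curl -sk https://rancher.example.com/ping
```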
diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
new file mode 100644
index 0000000000..d690305a85
--- /dev/null
+++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher.md
@@ -0,0 +1,377 @@
---
title: Upgrading Rancher Installed with Docker
weight: 1010
aliases:
  - /rancher/v2.5/en/upgrades/single-node-upgrade/
  - /rancher/v2.5/en/upgrades/upgrades/single-node-air-gap-upgrade
  - /rancher/v2.5/en/upgrades/upgrades/single-node
  - /rancher/v2.5/en/upgrades/upgrades/single-node-upgrade/
  - /rancher/v2.5/en/installation/install-rancher-on-k8s/upgrades/upgrades/single-node/
  - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/single-node-upgrades/
---

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

The following instructions will guide you through upgrading a Rancher server that was installed with Docker.

# Prerequisites

- **Review the [known upgrade issues](../../install-upgrade-on-a-kubernetes-cluster/upgrades.md#known-upgrade-issues) in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher.** A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums.](https://forums.rancher.com/c/announcements/12) Note that upgrades to or from any chart in the [rancher-alpha repository](../../../../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories) aren't supported.
- **For [air gap installs only,](../../../../pages-for-subheaders/air-gapped-helm-cli-install.md) collect and populate images for the new Rancher server version.** Follow the guide to [populate your private registry](../air-gapped-helm-cli-install/publish-images.md) with the images for the Rancher version that you want to upgrade to.

# Placeholder Review

During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (such as `<RANCHER_CONTAINER_NAME>`).

Here's an **example** of a command with a placeholder:

```
docker stop <RANCHER_CONTAINER_NAME>
```

In this command, `<RANCHER_CONTAINER_NAME>` is the name of your Rancher container.

# Get Data for Upgrade Commands

To obtain the data to replace the placeholders, run:

```
docker ps
```

Write down or copy this information before starting the upgrade.

**Terminal `docker ps` Command, Displaying Where to Find `<RANCHER_CONTAINER_TAG>` and `<RANCHER_CONTAINER_NAME>`**

![Placeholder Reference](/img/placeholder-ref.png)

| Placeholder | Example | Description |
| -------------------------- | -------------------------- | --------------------------------------------------------- |
| `<RANCHER_CONTAINER_TAG>` | `v2.1.3` | The rancher/rancher image you pulled for initial install. |
| `<RANCHER_CONTAINER_NAME>` | `festive_mestorf` | The name of your Rancher container. |
| `<RANCHER_VERSION>` | `v2.1.3` | The version of Rancher that you're creating a backup for. |
| `<DATE>` | `2018-12-19` | The date that the data container or backup was created. |
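If you prefer not to read the full `docker ps` table, a condensed listing shows just the two values you need. This is a minimal example using Docker's built-in Go-template formatting:

```
# Print only the container names and image tags of running containers
docker ps --format 'table {{.Names}}\t{{.Image}}'
```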
You can obtain `<RANCHER_CONTAINER_TAG>` and `<RANCHER_CONTAINER_NAME>` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help at any time while creating backups.

# Upgrade Outline

During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server:

- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container)
- [2. Create a backup tarball](#2-create-a-backup-tarball)
- [3. Pull the new Docker image](#3-pull-the-new-docker-image)
- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container)
- [5. Verify the Upgrade](#5-verify-the-upgrade)
- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container)

# 1. Create a copy of the data from your Rancher server container

1. Using a remote Terminal connection, log into the node running your Rancher server.

1. Stop the container currently running Rancher server. Replace `<RANCHER_CONTAINER_NAME>` with the name of your Rancher container.

    ```
    docker stop <RANCHER_CONTAINER_NAME>
    ```

1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped.

    ```
    docker create --volumes-from <RANCHER_CONTAINER_NAME> --name rancher-data rancher/rancher:<RANCHER_CONTAINER_TAG>
    ```

# 2. Create a backup tarball

1. From the data container that you just created (`rancher-data`), create a backup tarball (`rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`).

    This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder.

    ```
    docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz /var/lib/rancher
    ```

    **Step Result:** When you enter this command, a series of commands should run.

1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz`.

    ```
    [rancher@ip-10-0-0-50 ~]$ ls
    rancher-data-backup-v2.1.3-20181219.tar.gz
    ```

1. Move your backup tarball to a safe location external from your Rancher server.

# 3. Pull the New Docker Image

Pull the image of the Rancher version that you want to upgrade to.

Placeholder | Description
------------|-------------
`<RANCHER_VERSION_TAG>` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.

```
docker pull rancher/rancher:<RANCHER_VERSION_TAG>
```

# 4. Start the New Rancher Server Container

Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container.

>**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades.
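If you no longer remember which environment variables the original container was started with, you can list them before starting the new one. This is only a sketch using Docker's template output:

```
# Print the environment variables the old container was created with
docker inspect --format '{{range .Config.Env}}{{println .}}{{end}}' <RANCHER_CONTAINER_NAME>
```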
If you used a proxy, see [HTTP Proxy Configuration.](../../../../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md)

If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate)

If you are recording all transactions with the Rancher API, see [API Auditing.](../../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log)

To see the command to use when starting the new Rancher server container, choose from the following options:

- Docker Upgrade
- Docker Upgrade for Air Gap Installs


Select the option that matches how you originally installed Rancher server:

### Option A: Default Self-Signed Certificate
    + Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. + +Placeholder | Description +------------|------------- + `` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + --privileged \ + rancher/rancher: +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + --privileged \ + rancher/rancher: \ + --no-cacerts +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) +
    + +### Option D: Let's Encrypt Certificate + +
    + Click to expand + +>**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. + +>**Reminder of the Cert Prerequisites:** +> +>- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +>- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to. +`` | The domain address that you had originally started with + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: \ + --acme-domain +``` + +As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +
    + +
    + + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +When starting the new Rancher server container, choose from the following options: + +### Option A: Default Self-Signed Certificate + +
Click to expand

If you have selected to use the Rancher generated self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container.

Placeholder | Description
------------|-------------
`<REGISTRY.YOURDOMAIN.COM:PORT>` | Your private registry URL and port.
`<RANCHER_VERSION_TAG>` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.

```
# CATTLE_SYSTEM_DEFAULT_REGISTRY sets a default private registry to be used in Rancher.
# CATTLE_SYSTEM_CATALOG=bundled uses the packaged Rancher system charts.
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \
  -e CATTLE_SYSTEM_CATALOG=bundled \
  --privileged \
  <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
```

As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
Click to expand

If you have selected to bring your own self-signed certificate, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container, and make sure you have access to the same certificate that you originally installed with.

>**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md)

Placeholder | Description
------------|-------------
`<CERT_DIRECTORY>` | The path to the directory containing your certificate files.
`<FULL_CHAIN.pem>` | The path to your full certificate chain.
`<PRIVATE_KEY.pem>` | The path to the private key for your certificate.
`<CA_CERTS.pem>` | The path to the certificate authority's certificate.
`<REGISTRY.YOURDOMAIN.COM:PORT>` | Your private registry URL and port.
`<RANCHER_VERSION_TAG>` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.

```
# CATTLE_SYSTEM_DEFAULT_REGISTRY sets a default private registry to be used in Rancher.
# CATTLE_SYSTEM_CATALOG=bundled uses the packaged Rancher system charts.
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
  -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
  -v /<CERT_DIRECTORY>/<CA_CERTS.pem>:/etc/rancher/ssl/cacerts.pem \
  -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \
  -e CATTLE_SYSTEM_CATALOG=bundled \
  --privileged \
  <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
```
As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
Click to expand

If you have selected to use a certificate signed by a recognized CA, add `--volumes-from rancher-data` to the command that you used to start your original Rancher server container, and make sure you have access to the same certificates that you originally installed with.

>**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](certificate-troubleshooting.md)

Placeholder | Description
------------|-------------
`<CERT_DIRECTORY>` | The path to the directory containing your certificate files.
`<FULL_CHAIN.pem>` | The path to your full certificate chain.
`<PRIVATE_KEY.pem>` | The path to the private key for your certificate.
`<REGISTRY.YOURDOMAIN.COM:PORT>` | Your private registry URL and port.
`<RANCHER_VERSION_TAG>` | The release tag of the [Rancher version](../../../../reference-guides/installation-references/helm-chart-options.md) that you want to upgrade to.

> **Note:** Pass `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher.

```
# CATTLE_SYSTEM_DEFAULT_REGISTRY sets a default private registry to be used in Rancher.
# CATTLE_SYSTEM_CATALOG=bundled uses the packaged Rancher system charts.
# --no-cacerts is an argument to Rancher itself, so it comes after the image name.
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -v /<CERT_DIRECTORY>/<FULL_CHAIN.pem>:/etc/rancher/ssl/cert.pem \
  -v /<CERT_DIRECTORY>/<PRIVATE_KEY.pem>:/etc/rancher/ssl/key.pem \
  -e CATTLE_SYSTEM_DEFAULT_REGISTRY=<REGISTRY.YOURDOMAIN.COM:PORT> \
  -e CATTLE_SYSTEM_CATALOG=bundled \
  --privileged \
  <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG> \
  --no-cacerts
```
As of Rancher v2.5, privileged access is [required.](../../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
    + +
    +
    + +**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. + +# 5. Verify the Upgrade + +Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. + +>**Having network issues in your user clusters following upgrade?** +> +> See [Restoring Cluster Networking](../../../../../version-2.0-2.4/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration.md). + + +# 6. Clean up Your Old Rancher Server Container + +Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. + +# Rolling Back + +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback](roll-back-docker-installed-rancher.md). diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/add-tls-secrets.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/add-tls-secrets.md new file mode 100644 index 0000000000..6ad8401f01 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/add-tls-secrets.md @@ -0,0 +1,41 @@ +--- +title: Adding TLS Secrets +weight: 2 +aliases: + - /rancher/v2.5/en/installation/resources/encryption/tls-secrets/ + - /rancher/v2.x/en/installation/resources/tls-secrets/ +--- + +Kubernetes will create all the objects and services for Rancher, but it will not become available until we populate the `tls-rancher-ingress` secret in the `cattle-system` namespace with the certificate and key. + +Combine the server certificate followed by any intermediate certificate(s) needed into a file named `tls.crt`. Copy your certificate key into a file named `tls.key`. + +For example, [acme.sh](https://acme.sh) provides server certificate and CA chains in `fullchain.cer` file. +This `fullchain.cer` should be renamed to `tls.crt` & certificate key file as `tls.key`. + +Use `kubectl` with the `tls` secret type to create the secrets. + +``` +kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +> **Note:** If you want to replace the certificate, you can delete the `tls-rancher-ingress` secret using `kubectl -n cattle-system delete secret tls-rancher-ingress` and add a new one using the command shown above. If you are using a private CA signed certificate, replacing the certificate is only possible if the new certificate is signed by the same CA as the certificate currently in use. + +# Using a Private CA Signed Certificate + +If you are using a private CA, Rancher requires a copy of the CA certificate which is used by the Rancher Agent to validate the connection to the server. + +Copy the CA certificate into a file named `cacerts.pem` and use `kubectl` to create the `tls-ca` secret in the `cattle-system` namespace. + +``` +kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem=./cacerts.pem +``` + +> **Note:** The configured `tls-ca` secret is retrieved when Rancher starts. On a running Rancher installation the updated CA will take effect after new Rancher pods are started. 
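For example, one way to have running Rancher pods pick up an updated `tls-ca` secret is to restart the deployment. This sketch assumes the default deployment name `rancher` from the Helm install:

```
# Restart the Rancher pods so they pick up the updated CA secret, then wait for the rollout
kubectl -n cattle-system rollout restart deploy/rancher
kubectl -n cattle-system rollout status deploy/rancher
```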
+ +# Updating a Private CA Certificate + +Follow the steps on [this page](update-rancher-certificate.md) to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md new file mode 100644 index 0000000000..1000939a06 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md @@ -0,0 +1,108 @@ +--- +title: Choosing a Rancher Version +weight: 1 +aliases: + - /rancher/v2.5/en/installation/options/server-tags + - /rancher/v2.x/en/installation/resources/choosing-version/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements](./helm-version-requirements.md) to choose a version of Helm to install Rancher. + +For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image.** + +The Helm chart version also applies to RancherD installs because RancherD installs the Rancher Helm chart on a Kubernetes cluster. + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + + + + +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. + +Refer to the [Helm version requirements](./helm-version-requirements.md) to choose a version of Helm to install Rancher. + +### Helm Chart Repositories + +Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. + +| Type | Command to Add the Repo | Description of the Repo | +| -------------- | ------------ | ----------------- | +| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | +| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. 
We recommend using this repo for production environments. | +| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | + +
    +Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). + +> **Note:** All charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. + +### Helm Chart Versions + +Rancher Helm chart versions match the Rancher version (i.e `appVersion`). Once you've added the repo you can search it to show available versions with the following command:
    +    `helm search repo --versions` + +If you have several repos you can specify the repo name, ie. `helm search repo rancher-stable/rancher --versions`
    +For more information, see https://helm.sh/docs/helm/helm_search_repo/ + +To fetch a specific version of your chosen repo, define the `--version` parameter like in the following example:
    +    `helm fetch rancher-stable/rancher --version=2.4.8` + +### Switching to a Different Helm Chart Repository + +After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. + +> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. + +{{< release-channel >}} + +1. List the current Helm chart repositories. + + ```plain + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + +2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. + + ```plain + helm repo remove rancher- + ``` + +3. Add the Helm chart repository that you want to start installing Rancher from. + + ```plain + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +4. Continue to follow the steps to [upgrade Rancher](../install-upgrade-on-a-kubernetes-cluster/upgrades.md) from the new Helm chart repository. + +
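As a concrete illustration of the steps above, switching from the `rancher-latest` repository to `rancher-stable` might look like the following; the repository names shown are just one possible combination:

```
helm repo list                   # confirm which rancher repo is currently configured
helm repo remove rancher-latest  # remove the repo you originally installed from
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
helm repo update                 # refresh the local chart cache
```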
When performing [Docker installs](../../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher.

### Server Tags

Rancher Server is distributed as a Docker image, and each image has tags attached to it. You can specify a tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used.

| Tag | Description |
| -------------------------- | ------ |
| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. |
| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. |
| `rancher/rancher:<RANCHER_VERSION_TAG>` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. |

> **Notes:**
>
> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported.
> - Want to install an alpha release for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release.
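Because `latest` and `stable` are moving tags, you may want to refresh the cached image explicitly before redeploying. A minimal example:

```
# Pull the newest image behind a moving tag instead of reusing the cached copy
docker pull rancher/rancher:stable
# Confirm which image you now have locally
docker images rancher/rancher:stable
```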
    diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md new file mode 100644 index 0000000000..97dde2142f --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md @@ -0,0 +1,29 @@ +--- +title: About Custom CA Root Certificates +weight: 1 +aliases: + - /rancher/v2.5/en/installation/options/custom-ca-root-certificate/ + - /rancher/v2.5/en/installation/resources/choosing-version/encryption/custom-ca-root-certificate + - /rancher/v2.x/en/installation/resources/custom-ca-root-certificate/ +--- + +If you're using Rancher in an internal production environment where you aren't exposing apps publicly, use a certificate from a private certificate authority (CA). + +Services that Rancher needs to access are sometimes configured with a certificate from a custom/internal CA root, also known as self signed certificate. If the presented certificate from the service cannot be validated by Rancher, the following error displays: `x509: certificate signed by unknown authority`. + +To validate the certificate, the CA root certificates need to be added to Rancher. As Rancher is written in Go, we can use the environment variable `SSL_CERT_DIR` to point to the directory where the CA root certificates are located in the container. The CA root certificates directory can be mounted using the Docker volume option (`-v host-source-directory:container-destination-directory`) when starting the Rancher container. + +Examples of services that Rancher can access: + +- Catalogs +- Authentication providers +- Accessing hosting/cloud API when using Node Drivers + +## Installing with the custom CA Certificate + +For details on starting a Rancher container with your private CA certificates mounted, refer to the installation docs: + +- [Docker install Custom CA certificate options](../../../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) + +- [Kubernetes install options for Additional Trusted CAs](../../../reference-guides/installation-references/helm-chart-options.md#additional-trusted-cas) + diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/helm-version-requirements.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/helm-version-requirements.md new file mode 100644 index 0000000000..2dc112c076 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/helm-version-requirements.md @@ -0,0 +1,19 @@ +--- +title: Helm Version Requirements +weight: 3 +aliases: + - /rancher/v2.5/en/installation/options/helm-version + - /rancher/v2.5/en/installation/options/helm2 + - /rancher/v2.5/en/installation/options/helm2/helm-init + - /rancher/v2.5/en/installation/options/helm2/helm-rancher + - /rancher/v2.x/en/installation/resources/helm-version/ +--- + +This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. + +> The installation instructions have been updated for Helm 3. 
For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section](./helm-version-requirements.md) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +- Helm v3.2.x or higher is required to install or upgrade Rancher v2.5. +- Helm v2.16.0 or higher is required for Kubernetes v1.16. For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. +- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. +- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/local-system-charts.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/local-system-charts.md new file mode 100644 index 0000000000..2d83e11cef --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/local-system-charts.md @@ -0,0 +1,21 @@ +--- +title: Setting up Local System Charts for Air Gapped Installations +weight: 120 +aliases: + - /rancher/v2.5/en/installation/air-gap-single-node/config-rancher-system-charts/_index.md + - /rancher/v2.5/en/installation/air-gap-high-availability/config-rancher-system-charts/_index.md + - /rancher/v2.5/en/installation/options/local-system-charts + - /rancher/v2.x/en/installation/resources/local-system-charts/ + - /rancher/v2.x/en/installation/options/local-system-charts/ +--- + +The [System Charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. + +In an air gapped installation of Rancher, you will need to configure Rancher to use a local copy of the system charts. This section describes how to use local system charts using a CLI flag. + +# Using Local System Charts + +A local copy of `system-charts` has been packaged into the `rancher/rancher` container. To be able to use these features in an air gap install, you will need to run the Rancher install command with an extra environment variable, `CATTLE_SYSTEM_CATALOG=bundled`, which tells Rancher to use the local copy of the charts instead of attempting to fetch them from GitHub. + +Example commands for a Rancher installation with a bundled `system-charts` are included in the [air gap Docker installation](../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) instructions and the [air gap Kubernetes installation](../other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) instructions. 
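For instance, a Docker-based air gap install that uses the bundled charts just adds this environment variable to the usual `docker run` command. This is only a sketch; the registry and version tag placeholders are illustrative:

```
docker run -d --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e CATTLE_SYSTEM_CATALOG=bundled \
  --privileged \
  <REGISTRY.YOURDOMAIN.COM:PORT>/rancher/rancher:<RANCHER_VERSION_TAG>
```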
+ diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md new file mode 100644 index 0000000000..cb90a9a4e0 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/update-rancher-certificate.md @@ -0,0 +1,254 @@ +--- +title: Updating the Rancher Certificate +weight: 10 +aliases: + - /rancher/v2.x/en/installation/resources/update-ca-cert/ +--- + +# Updating a Private CA Certificate + +Follow these steps to update the SSL certificate of the ingress in a Rancher [high availability Kubernetes installation](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) or to switch from the default self-signed certificate to a custom certificate. + +A summary of the steps is as follows: + +1. Create or update the `tls-rancher-ingress` Kubernetes secret resource with the new certificate and private key. +2. Create or update the `tls-ca` Kubernetes secret resource with the root CA certificate (only required when using a private CA). +3. Update the Rancher installation using the Helm CLI. +4. Reconfigure the Rancher agents to trust the new CA certificate. +5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher. + +The details of these instructions are below. + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. Create/update the CA certificate secret resource + +If the new certificate was signed by a private CA, you will need to copy the corresponding root CA certificate into a file named `cacerts.pem` and create or update the `tls-ca secret` in the `cattle-system` namespace. If the certificate was signed by an intermediate CA, then the `cacerts.pem` must contain both the intermediate and root CA certificates (in this order). + +To create the initial secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem +``` + +To update an existing `tls-ca` secret: + +``` +$ kubectl -n cattle-system create secret generic tls-ca \ + --from-file=cacerts.pem \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 3. Reconfigure the Rancher deployment + +> Before proceeding, generate an API token in the Rancher UI (User > API & Keys) and save the Bearer Token which you might need in step 4. + +This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). + +It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. 
+ +To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: + +``` +$ helm get values rancher -n cattle-system +``` + +Also get the version string of the currently deployed Rancher chart: + +``` +$ helm ls -A +``` + +Upgrade the Helm application instance using the original configuration values and making sure to specify `ingress.tls.source=secret` as well as the current chart version to prevent an application upgrade. + +If the certificate was signed by a private CA, add the `set privateCA=true` argument as well. Also make sure to read the documentation describing the initial installation using custom certificates. + +``` +helm upgrade rancher rancher-stable/rancher \ + --namespace cattle-system \ + --version \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret \ + --set ... +``` + +When the upgrade is completed, navigate to `https:///v3/settings/cacerts` to verify that the value matches the CA certificate written in the `tls-ca` secret earlier. + +## 4. Reconfigure Rancher agents to trust the private CA + +This section covers three methods to reconfigure Rancher agents to trust the private CA. This step is required if either of the following is true: + +- Rancher was initially configured to use the Rancher self-signed certificate (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`) +- The root CA certificate for the new custom certificate has changed + +### Why is this step required? + +When Rancher is configured with a certificate signed by a private CA, the CA certificate chain is downloaded into Rancher agent containers. Agents compare the checksum of the downloaded certificate against the `CATTLE_CA_CHECKSUM` environment variable. This means that, when the private CA certificate is changed on Rancher server side, the environvment variable `CATTLE_CA_CHECKSUM` must be updated accordingly. + +### Which method should I choose? + +Method 1 is the easiest one but requires all clusters to be connected to Rancher after the certificates have been rotated. This is usually the case if the process is performed right after updating the Rancher deployment (Step 3). + +If the clusters have lost connection to Rancher but you have [Authorized Cluster Endpoints](https://rancher.com/docs/rancher/v2.5/en/cluster-admin/cluster-access/ace/) enabled, then go with method 2. + +Method 3 can be used as a fallback if method 1 and 2 are unfeasible. + +### Method 1: Kubectl command + +For each cluster under Rancher management (except the `local` Rancher management cluster) run the following command using the Kubeconfig file of the Rancher management cluster (RKE or K3S). + +``` +kubectl patch clusters.management.cattle.io -p '{"status":{"agentImage":"dummy"}}' --type merge +``` + +This command will cause all Agent Kubernetes resources to be reconfigured with the checksum of the new certificate. + + +### Method 2: Manually update checksum + +Manually patch the agent Kubernetes resources by updating the `CATTLE_CA_CHECKSUM` environment variable to the value matching the checksum of the new CA certificate. Generate the new checksum value like so: + +``` +$ curl -k -s -fL /v3/settings/cacerts | jq -r .value > cacert.tmp +$ sha256sum cacert.tmp | awk '{print $1}' +``` + +Using a Kubeconfig for each downstream cluster update the environment variable for the two agent deployments. 
+ +``` +$ kubectl edit -n cattle-system ds/cattle-node-agent +$ kubectl edit -n cattle-system deployment/cattle-cluster-agent +``` + +### Method 3: Recreate Rancher agents + +With this method you are recreating the Rancher agents by running a set of commands on a controlplane node of each downstream cluster. + +First, generate the agent definitions as described here: https://gist.github.com/superseb/076f20146e012f1d4e289f5bd1bd4971 + +Then, connect to a controlplane node of the downstream cluster via SSH, create a Kubeconfig and apply the definitions: +https://gist.github.com/superseb/b14ed3b5535f621ad3d2aa6a4cd6443b + + +## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher + +Select 'Force Update' for the clusters within the [Continuous Delivery](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#accessing-fleet-in-the-rancher-ui) view under Cluster Explorer in the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. + +### Why is this step required? + +Fleet agents in Rancher managed clusters store kubeconfig that is used to connect to the Rancher proxied kube-api in the fleet-agent secret of the fleet-system namespace. The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When changing the Rancher CA, this block needs to be updated for a successful connection of the fleet-agent to Rancher. + +# Updating from a Private CA Certificate to a Common Certificate + +>It is possible to perform the opposite procedure as shown above: you may change from a private certificate to a common, or non-private, certificate. The steps involved are outlined below. + +## 1. Create/update the certificate secret resource + +First, concatenate the server certificate followed by any intermediate certificate(s) to a file named `tls.crt` and provide the corresponding certificate key in a file named `tls.key`. + +If you are switching the install from using the Rancher self-signed certificate or Let’s Encrypt issued certificates, use the following command to create the `tls-rancher-ingress` secret resource in your Rancher HA cluster: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key +``` + +Alternatively, to update an existing certificate secret: + +``` +$ kubectl -n cattle-system create secret tls tls-rancher-ingress \ + --cert=tls.crt \ + --key=tls.key \ + --dry-run --save-config -o yaml | kubectl apply -f - +``` + +## 2. Delete the CA certificate secret resource + +You will delete the `tls-ca secret` in the `cattle-system` namespace as it is no longer needed. You may also optionally save a copy of the `tls-ca secret` if desired. + +To save the existing secret: + +``` +kubectl -n cattle-system get secret tls-ca -o yaml > tls-ca.yaml +``` + +To delete the existing `tls-ca` secret: + +``` +kubectl -n cattle-system delete secret tls-ca +``` + +## 3. Reconfigure the Rancher deployment + +> Before proceeding, [generate an API token in the Rancher UI](../../../reference-guides/user-settings/api-keys.md#creating-an-api-key) (User > API & Keys). + +This step is required if Rancher was initially installed with self-signed certificates (`ingress.tls.source=rancher`) or with a Let's Encrypt issued certificate (`ingress.tls.source=letsEncrypt`). + +It ensures that the Rancher pods and ingress resources are reconfigured to use the new server and optional CA certificate. 
+ +To update the Helm deployment you will need to use the same (`--set`) options that were used during initial installation. Check with: + +``` +$ helm get values rancher -n cattle-system +``` + +Also get the version string of the currently deployed Rancher chart: + +``` +$ helm ls -A +``` + +Upgrade the Helm application instance using the original configuration values and making sure to specify the current chart version to prevent an application upgrade. + +Also make sure to read the documentation describing the initial installation using custom certificates. + +``` +helm upgrade rancher rancher-stable/rancher \ + --namespace cattle-system \ + --version \ + --set hostname=rancher.my.org \ + --set ... +``` + +On upgrade, you can either + +- remove `--set ingress.tls.source=secret \` from the Helm upgrade command, as shown above, or + +- remove the `privateCA` parameter or set it to `false` because the CA is valid: + +``` +set privateCA=false +``` + +## 4. Reconfigure Rancher agents for the non-private/common certificate + +`CATTLE_CA_CHECKSUM` environment variable on the downstream cluster agents should be removed or set to "" (an empty string). + +## 5. Select Force Update of Fleet clusters to connect fleet-agent to Rancher + +Select 'Force Update' for the clusters within the [Continuous Delivery](../../../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#accessing-fleet-in-the-rancher-ui) view under Cluster Explorer in the Rancher UI to allow the fleet-agent in downstream clusters to successfully connect to Rancher. + +### Why is this step required? + +Fleet agents in Rancher managed clusters store kubeconfig that is used to connect to the Rancher proxied kube-api in the fleet-agent secret of the fleet-system namespace. The kubeconfig contains a certificate-authority-data block containing the Rancher CA. When changing the Rancher CA, this block needs to be updated for a successful connection of the fleet-agent to Rancher. \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md new file mode 100644 index 0000000000..68571d736b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2.md @@ -0,0 +1,181 @@ +--- +title: Upgrading Cert-Manager with Helm 2 +weight: 2040 +aliases: + - /rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.5/en/installation/resources/choosing-version/encryption/upgrading-cert-manager/helm-2-instructions +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.7-0.8.html#upgrading-from-v0-7-to-v0-8). This change has no exact deadline. +1. 
[Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) + +To address these changes, this guide will do two things: + +1. Document the procedure for upgrading cert-manager +1. Explain the cert-manager API changes and link to cert-manager's offficial documentation for migrating your data + +> **Important:** +> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: + +> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager + +> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. + +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](../install-upgrade-on-a-kubernetes-cluster/upgrades.md) under the upgrade Rancher section. + +## Upgrade Cert-Manager Only + +> **Note:** +> These instructions are applied if you have no plan to upgrade Rancher. + +The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. + +In order to upgrade cert-manager, follow these instructions: + +
    + Upgrading cert-manager with Internet access + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml + ``` + +1. Delete the existing deployment + + ```plain + helm delete --purge cert-manager + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install --version 0.12.0 --name cert-manager --namespace kube-system jetstack/cert-manager + ``` + +
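+
+The backup taken in the first step is not re-applied automatically. If you need the backed up Issuer, ClusterIssuer, or Certificate resources after the upgrade, they can be restored once any required API changes (see the migration notes at the end of this page) have been applied to the file. A sketch:
+
+```plain
+kubectl apply -f cert-manager-backup.yaml
+```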
    + +
    + Upgrading cert-manager in an airgapped environment + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace kube-system \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces issuer,clusterissuer,certificates > cert-manager-backup.yaml + ``` + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n kube-system delete deployment,sa,clusterrole,clusterrolebinding -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + +1. Install cert-manager + + ```plain + kubectl -n kube-system apply -R -f ./cert-manager + ``` + +
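+
+If the cert-manager pods fail to pull their images after this step, a common cause in air gapped setups is that the rendered manifests still reference quay.io instead of the private registry. A quick way to spot-check which images the rendered chart refers to, assuming the manifests were rendered into `./cert-manager` as above:
+
+```plain
+# List every container image referenced by the rendered manifests
+grep -rh "image:" ./cert-manager | sort -u
+```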
    + + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace kube-system + +NAME READY STATUS RESTARTS AGE +cert-manager-7cbdc48784-rpgnt 1/1 Running 0 3m +cert-manager-webhook-5b5dd6999-kst4x 1/1 Running 0 3m +cert-manager-cainjector-3ba5cd2bcd-de332x 1/1 Running 0 3m +``` + +If the ‘webhook’ pod (2nd line) is in a ContainerCreating state, it may still be waiting for the Secret to be mounted into the pod. Wait a couple of minutes for this to happen but if you experience problems, please check cert-manager's [troubleshooting](https://docs.cert-manager.io/en/latest/getting-started/troubleshooting.html) guide. + +> **Note:** The above instructions ask you to add the disable-validation label to the kube-system namespace. Here are additional resources that explain why this is necessary: +> +> - [Information on the disable-validation label](https://docs.cert-manager.io/en/latest/tasks/upgrading/upgrading-0.4-0.5.html?highlight=certmanager.k8s.io%2Fdisable-validation#disabling-resource-validation-on-the-cert-manager-namespace) +> - [Information on webhook validation for certificates](https://docs.cert-manager.io/en/latest/getting-started/webhook.html) + +## Cert-Manager API change and data migration + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be `cert-manager.io` instead of `certmanager.k8s.io.` + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +For information on upgrading from all other versions of cert-manager, refer to the [official documentation](https://cert-manager.io/docs/installation/upgrading/). 
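+
+As a concrete illustration of the new solver style, an ACME issuer using the HTTP-01 challenge looks roughly like the following after migration. This is only a sketch: the issuer name, email address, and ingress class below are placeholders, not values taken from this guide.
+
+```yaml
+apiVersion: cert-manager.io/v1alpha2
+kind: Issuer
+metadata:
+  name: letsencrypt-example          # placeholder name
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: admin@example.com         # placeholder contact address
+    privateKeySecretRef:
+      name: letsencrypt-account-key
+    solvers:                         # new-style challenge solver configuration
+    - http01:
+        ingress:
+          class: nginx               # placeholder ingress class
+```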
diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md new file mode 100644 index 0000000000..c863c19bcc --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md @@ -0,0 +1,248 @@ +--- +title: Upgrading Cert-Manager +weight: 4 +aliases: + - /rancher/v2.5/en/installation/options/upgrading-cert-manager + - /rancher/v2.5/en/installation/options/upgrading-cert-manager/helm-2-instructions + - /rancher/v2.5/en/installation/resources/encryption/upgrading-cert-manager + - /rancher/v2.x/en/installation/resources/upgrading-cert-manager/ +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. +1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) + +To address these changes, this guide will do two things: + +1. Document the procedure for upgrading cert-manager +1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data + +> **Important:** +> If you are currently running the cert-manager whose version is older than v0.11, and want to upgrade both Rancher and cert-manager to a newer version, you need to reinstall both of them: + +> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager + +> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch on the cert-manager's API version, therefore the upgrade will be rejected. + +> For reinstalling Rancher with Helm, please check [Option B: Reinstalling Rancher Chart](../install-upgrade-on-a-kubernetes-cluster/upgrades.md) under the upgrade Rancher section. + +# Upgrade Cert-Manager + +The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in kube-system use that in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in or this can cause issues. + +> These instructions have been updated for Helm 3. 
If you are still using Helm 2, refer to [these instructions.](./upgrade-cert-manager-helm-2.md) + +In order to upgrade cert-manager, follow these instructions: + +### Option A: Upgrade cert-manager with Internet Access + +
    + Click to expand + +1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) + + ```plain + helm uninstall cert-manager + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed + + ```plain + kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager if needed + + ```plain + kubectl create namespace cert-manager + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
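+
+If the backup was taken from a release older than 0.11.0, the apiVersion rewrite described in the first step has to be done before this restore. One rough way to do the bulk rewrite, assuming no other resources in the file use the old group for unrelated reasons (review the result before applying):
+
+```plain
+# Rewrite the old API group in place, keeping a .bak copy of the original file
+sed -i.bak 's#certmanager.k8s.io/v1alpha1#cert-manager.io/v1alpha2#g' cert-manager-backup.yaml
+```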
    + +### Option B: Upgrade cert-manager in an Air Gap Environment + +
    + Click to expand + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry](../other-installation-methods/air-gapped-helm-cli-install/publish-images.md) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + The Helm 3 command is as follows: + + ```plain + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + + The Helm 2 command is as follows: + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager (old and new) + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n cert-manager \ + delete deployment,sa,clusterrole,clusterrolebinding \ + -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f cert-manager/cert-manager-crd-old.yaml + ``` + +1. 
Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager + + ```plain + kubectl create namespace cert-manager + ``` + +1. Install cert-manager + + ```plain + kubectl -n cert-manager apply -R -f ./cert-manager + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
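+
+After the install, it can be worth confirming that the cluster now serves the new `cert-manager.io` API group, since cert-manager v0.11 and later no longer uses the old `certmanager.k8s.io` group. A quick check:
+
+```plain
+# The cert-manager resource types should now be listed under the cert-manager.io group
+kubectl api-resources | grep cert-manager
+```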
    + +### Verify the Deployment + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the kube-system namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +## Cert-Manager API change and data migration + +Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release. + +Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old format and new are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format. + +Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as our API group changing to be cert-manager.io instead of certmanager.k8s.io. + +We have also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to using the new solvers style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). + +Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/). + +More info about [cert-manager upgrade information](https://cert-manager.io/docs/installation/upgrading/). + diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md new file mode 100644 index 0000000000..7cd1227b69 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md @@ -0,0 +1,138 @@ +--- +title: Upgrading and Rolling Back Kubernetes +weight: 70 +aliases: + - /rancher/v2.x/en/cluster-admin/upgrading-kubernetes/ +--- + +Following an upgrade to the latest version of Rancher, downstream Kubernetes clusters can be upgraded to use the latest supported version of Kubernetes. + +Rancher calls RKE (Rancher Kubernetes Engine) as a library when provisioning and editing RKE clusters. For more information on configuring the upgrade strategy for RKE clusters, refer to the [RKE documentation](https://rancher.com/docs/rke/latest/en/). 
+ +This section covers the following topics: + +- [New Features](#new-features) +- [Tested Kubernetes Versions](#tested-kubernetes-versions) +- [How Upgrades Work](#how-upgrades-work) +- [Recommended Best Practice for Upgrades](#recommended-best-practice-for-upgrades) +- [Upgrading the Kubernetes Version](#upgrading-the-kubernetes-version) +- [Rolling Back](#rolling-back) +- [Configuring the Upgrade Strategy](#configuring-the-upgrade-strategy) + - [Configuring the Maximum Unavailable Worker Nodes in the Rancher UI](#configuring-the-maximum-unavailable-worker-nodes-in-the-rancher-ui) + - [Enabling Draining Nodes During Upgrades from the Rancher UI](#enabling-draining-nodes-during-upgrades-from-the-rancher-ui) + - [Maintaining Availability for Applications During Upgrades](#maintaining-availability-for-applications-during-upgrades) + - [Configuring the Upgrade Strategy in the cluster.yml](#configuring-the-upgrade-strategy-in-the-cluster-yml) +- [Troubleshooting](#troubleshooting) + +# Tested Kubernetes Versions + +Before a new version of Rancher is released, it's tested with the latest minor versions of Kubernetes to ensure compatibility. For details on which versions of Kubernetes were tested on each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.5.9/) + +# How Upgrades Work + +RKE v1.1.0 changed the way that clusters are upgraded. + +In this section of the [RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/how-upgrades-work) you'll learn what happens when you edit or upgrade your RKE Kubernetes cluster. + + +# Recommended Best Practice for Upgrades + +When upgrading the Kubernetes version of a cluster, we recommend that you: + +1. Take a snapshot. +1. Initiate a Kubernetes upgrade. +1. If the upgrade fails, revert the cluster to the pre-upgrade Kubernetes version. This is achieved by selecting the **Restore etcd and Kubernetes version** option. This will return your cluster to the pre-upgrade kubernetes version before restoring the etcd snapshot. + +The restore operation will work on a cluster that is not in a healthy or active state. + +# Upgrading the Kubernetes Version + +> **Prerequisites:** +> +> - The options below are available only for [Rancher-launched RKE Kubernetes clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) and [Registered K3s Kubernetes clusters.](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#additional-features-for-registered-k3s-clusters) +> - Before upgrading Kubernetes, [back up your cluster.](../../pages-for-subheaders/backup-restore-and-disaster-recovery.md) + +1. From the **Global** view, find the cluster for which you want to upgrade Kubernetes. Select **⋮ > Edit**. + +1. Expand **Cluster Options**. + +1. From the **Kubernetes Version** drop-down, choose the version of Kubernetes that you want to use for the cluster. + +1. Click **Save**. + +**Result:** Kubernetes begins upgrading for the cluster. + +# Rolling Back + +A cluster can be restored to a backup in which the previous Kubernetes version was used. 
For more information, refer to the following sections: + +- [Backing up a cluster](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md#how-snapshots-work) +- [Restoring a cluster from backup](../../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md#restoring-a-cluster-from-a-snapshot) + +# Configuring the Upgrade Strategy + +As of RKE v1.1.0, additional upgrade options became available to give you more granular control over the upgrade process. These options can be used to maintain availability of your applications during a cluster upgrade if certain [conditions and requirements](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability) are met. + +The upgrade strategy can be configured in the Rancher UI, or by editing the `cluster.yml`. More advanced options are available by editing the `cluster.yml`. + +### Configuring the Maximum Unavailable Worker Nodes in the Rancher UI + +From the Rancher UI, the maximum number of unavailable worker nodes can be configured. During a cluster upgrade, worker nodes will be upgraded in batches of this size. + +By default, the maximum number of unavailable worker is defined as 10 percent of all worker nodes. This number can be configured as a percentage or as an integer. When defined as a percentage, the batch size is rounded down to the nearest node, with a minimum of one node. + +To change the default number or percentage of worker nodes, + +1. Go to the cluster view in the Rancher UI. +1. Click **⋮ > Edit.** +1. In the **Advanced Options** section, go to the **Maxiumum Worker Nodes Unavailable** field. Enter the percentage of worker nodes that can be upgraded in a batch. Optionally, select **Count** from the drop-down menu and enter the maximum unavailable worker nodes as an integer. +1. Click **Save.** + +**Result:** The cluster is updated to use the new upgrade strategy. + +### Enabling Draining Nodes During Upgrades from the Rancher UI + +By default, RKE [cordons](https://kubernetes.io/docs/concepts/architecture/nodes/#manual-node-administration) each node before upgrading it. [Draining](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) is disabled during upgrades by default. If draining is enabled in the cluster configuration, RKE will both cordon and drain the node before it is upgraded. + +To enable draining each node during a cluster upgrade, + +1. Go to the cluster view in the Rancher UI. +1. Click **⋮ > Edit.** +1. In the **Advanced Options** section, go to the **Drain nodes** field and click **Yes.** +1. Choose a safe or aggressive drain option. For more information about each option, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md#aggressive-and-safe-draining-options) +1. Optionally, configure a grace period. The grace period is the timeout given to each pod for cleaning things up, so they will have chance to exit gracefully. Pods might need to finish any outstanding requests, roll back transactions or save state to some external storage. If this value is negative, the default value specified in the pod will be used. +1. Optionally, configure a timeout, which is the amount of time the drain should continue to wait before giving up. +1. 
Click **Save.** + +**Result:** The cluster is updated to use the new upgrade strategy. + +> **Note:** As of Rancher v2.4.0, there is a [known issue](https://github.com/rancher/rancher/issues/25478) in which the Rancher UI doesn't show state of etcd and controlplane as drained, even though they are being drained. + +### Maintaining Availability for Applications During Upgrades + +_Available as of RKE v1.1.0_ + +In [this section of the RKE documentation,](https://rancher.com/docs/rke/latest/en/upgrades/maintaining-availability/) you'll learn the requirements to prevent downtime for your applications when upgrading the cluster. + +### Configuring the Upgrade Strategy in the cluster.yml + +More advanced upgrade strategy configuration options are available by editing the `cluster.yml`. + +For details, refer to [Configuring the Upgrade Strategy](https://rancher.com/docs/rke/latest/en/upgrades/configuring-strategy) in the RKE documentation. The section also includes an example `cluster.yml` for configuring the upgrade strategy. + +# Troubleshooting + +If a node doesn't come up after an upgrade, the `rke up` command errors out. + +No upgrade will proceed if the number of unavailable nodes exceeds the configured maximum. + +If an upgrade stops, you may need to fix an unavailable node or remove it from the cluster before the upgrade can continue. + +A failed node could be in many different states: + +- Powered off +- Unavailable +- User drains a node while upgrade is in process, so there are no kubelets on the node +- The upgrade itself failed + +If the max unavailable number of nodes is reached during an upgrade, Rancher user clusters will be stuck in updating state and not move forward with upgrading any other control plane nodes. It will continue to evaluate the set of unavailable nodes in case one of the nodes becomes available. If the node cannot be fixed, you must remove the node in order to continue the upgrade. diff --git a/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md new file mode 100644 index 0000000000..d3f8ac144e --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md @@ -0,0 +1,73 @@ +--- +title: Upgrading Kubernetes without Upgrading Rancher +weight: 1120 +aliases: + - /rancher/v2.x/en/admin-settings/k8s-metadata/ +--- + +The RKE metadata feature allows you to provision clusters with new versions of Kubernetes as soon as they are released, without upgrading Rancher. This feature is useful for taking advantage of patch versions of Kubernetes, for example, if you want to upgrade to Kubernetes v1.14.7 when your Rancher server originally supported v1.14.6. + +> **Note:** The Kubernetes API can change between minor versions. Therefore, we don't support introducing minor Kubernetes versions, such as introducing v1.15 when Rancher currently supports v1.14. You would need to upgrade Rancher to add support for minor Kubernetes versions. + +Rancher's Kubernetes metadata contains information specific to the Kubernetes version that Rancher uses to provision [RKE clusters](../../pages-for-subheaders/launch-kubernetes-with-rancher.md). 
Rancher syncs the data periodically and creates custom resource definitions (CRDs) for **system images,** **service options** and **addon templates.** Consequently, when a new Kubernetes version is compatible with the Rancher server version, the Kubernetes metadata makes the new version available to Rancher for provisioning clusters. The metadata gives you an overview of the information that the [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE) uses for deploying various Kubernetes versions. + +This table below describes the CRDs that are affected by the periodic data sync. + +> **Note:** Only administrators can edit metadata CRDs. It is recommended not to update existing objects unless explicitly advised. + +| Resource | Description | Rancher API URL | +|----------|-------------|-----------------| +| System Images | List of system images used to deploy Kubernetes through RKE. | `/v3/rkek8ssystemimages` | +| Service Options | Default options passed to Kubernetes components like `kube-api`, `scheduler`, `kubelet`, `kube-proxy`, and `kube-controller-manager` | `/v3/rkek8sserviceoptions` | +| Addon Templates | YAML definitions used to deploy addon components like Canal, Calico, Flannel, Weave, Kube-dns, CoreDNS, `metrics-server`, `nginx-ingress` | `/v3/rkeaddons` | + +Administrators might configure the RKE metadata settings to do the following: + +- Refresh the Kubernetes metadata, if a new patch version of Kubernetes comes out and they want Rancher to provision clusters with the latest version of Kubernetes without having to upgrade Rancher +- Change the metadata URL that Rancher uses to sync the metadata, which is useful for air gap setups if you need to sync Rancher locally instead of with GitHub +- Prevent Rancher from auto-syncing the metadata, which is one way to prevent new and unsupported Kubernetes versions from being available in Rancher + +### Refresh Kubernetes Metadata + +The option to refresh the Kubernetes metadata is available for administrators by default, or for any user who has the **Manage Cluster Drivers** [global role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) + +To force Rancher to refresh the Kubernetes metadata, a manual refresh action is available under **Tools > Drivers > Refresh Kubernetes Metadata** on the right side corner. + +You can configure Rancher to only refresh metadata when desired by setting `refresh-interval-minutes` to `0` (see below) and using this button to perform the metadata refresh manually when desired. + +### Configuring the Metadata Synchronization + +> Only administrators can change these settings. + +The RKE metadata config controls how often Rancher syncs metadata and where it downloads data from. You can configure the metadata from the settings in the Rancher UI, or through the Rancher API at the endpoint `v3/settings/rke-metadata-config`. + +The way that the metadata is configured depends on the Rancher version. + +To edit the metadata config in Rancher, + +1. Go to the **Global** view and click the **Settings** tab. +1. Go to the **rke-metadata-config** section. Click the **⋮** and click **Edit.** +1. You can optionally fill in the following parameters: + + - `refresh-interval-minutes`: This is the amount of time that Rancher waits to sync the metadata. To disable the periodic refresh, set `refresh-interval-minutes` to 0. 
+ - `url`: This is the HTTP path that Rancher fetches data from. The path must be a direct path to a JSON file. For example, the default URL for Rancher v2.4 is `https://releases.rancher.com/kontainer-driver-metadata/release-v2.4/data.json`. + +If you don't have an air gap setup, you don't need to specify the URL where Rancher gets the metadata, because the default setting is to pull from [Rancher's metadata Git repository.](https://github.com/rancher/kontainer-driver-metadata/blob/dev-v2.5/data/data.json) + +However, if you have an [air gap setup,](#air-gap-setups) you will need to mirror the Kubernetes metadata repository in a location available to Rancher. Then you need to change the URL to point to the new location of the JSON file. +### Air Gap Setups + +Rancher relies on a periodic refresh of the `rke-metadata-config` to download new Kubernetes version metadata if it is supported with the current version of the Rancher server. For a table of compatible Kubernetes and Rancher versions, refer to the [service terms section.](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.2.8/) + +If you have an air gap setup, you might not be able to get the automatic periodic refresh of the Kubernetes metadata from Rancher's Git repository. In that case, you should disable the periodic refresh to prevent your logs from showing errors. Optionally, you can configure your metadata settings so that Rancher can sync with a local copy of the RKE metadata. + +To sync Rancher with a local mirror of the RKE metadata, an administrator would configure the `rke-metadata-config` settings to point to the mirror. For details, refer to [Configuring the Metadata Synchronization.](#configuring-the-metadata-synchronization) + +After new Kubernetes versions are loaded into the Rancher setup, additional steps would be required in order to use them for launching clusters. Rancher needs access to updated system images. While the metadata settings can only be changed by administrators, any user can download the Rancher system images and prepare a private Docker registry for them. + +1. To download the system images for the private registry, click the Rancher server version at the bottom left corner of the Rancher UI. +1. Download the OS specific image lists for Linux or Windows. +1. Download `rancher-images.txt`. +1. Prepare the private registry using the same steps during the [air gap install](other-installation-methods/air-gapped-helm-cli-install/publish-images.md), but instead of using the `rancher-images.txt` from the releases page, use the one obtained from the previous steps. + +**Result:** The air gap installation of Rancher can now sync the Kubernetes metadata. If you update your private registry when new versions of Kubernetes are released, you can provision clusters with the new version without having to upgrade Rancher. diff --git a/versioned_docs/version-2.5/getting-started/introduction/overview.md b/versioned_docs/version-2.5/getting-started/introduction/overview.md new file mode 100644 index 0000000000..8c3b132068 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/introduction/overview.md @@ -0,0 +1,67 @@ +--- +title: Overview +weight: 1 +aliases: + - /rancher/v2.x/en/overview/ +--- +Rancher is a container management platform built for organizations that deploy containers in production. 
Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. + +# Run Kubernetes Everywhere + +Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. + +# Meet IT requirements + +Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: + +- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. +- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. +- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. + +# Empower DevOps Teams + +Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. + +The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. + +![Platform](/img/platform.png) + +# Features of the Rancher API Server + +The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: + +### Authorization and Role-Based Access Control + +- **User management:** The Rancher API server [manages user identities](../../pages-for-subheaders/about-authentication.md) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. +- **Authorization:** The Rancher API server manages [access control](../../pages-for-subheaders/manage-role-based-access-control-rbac.md) and [security](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) policies. + +### Working with Kubernetes + +- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) on existing nodes, or perform [Kubernetes upgrades.](../installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts](catalog/) that make it easy to repeatedly deploy applications. +- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. 
The Rancher UI provides features for [project administration](../../pages-for-subheaders/manage-projects.md) and for [managing applications within projects.](../../pages-for-subheaders/kubernetes-resources-setup.md) +- **Pipelines:** Setting up a [pipeline](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. +- **Istio:** Our [integration with Istio](../../pages-for-subheaders/istio.md) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +### Working with Cloud Infrastructure + +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes](../../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) in all clusters. +- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) and [persistent storage](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) in the cloud. + +### Cluster Visibility + +- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. +- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. +- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. + +# Editing Downstream Clusters with Rancher + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) have **Cluster Options** available for editing. 
+ +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.](../../pages-for-subheaders/cluster-configuration.md) + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../../shared-files/_cluster-capabilities-table.md'; + + diff --git a/versioned_docs/version-2.5/getting-started/introduction/what-are-divio-docs.md b/versioned_docs/version-2.5/getting-started/introduction/what-are-divio-docs.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/introduction/what-are-divio-docs.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/aws.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/aws.md new file mode 100644 index 0000000000..1ceeb43287 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/aws.md @@ -0,0 +1,87 @@ +--- +title: Rancher AWS Quick Start Guide +description: Read this step by step Rancher AWS guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on AWS in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Amazon AWS will incur charges. + +- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. +- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. +- [IAM Policy created](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start): Defines the permissions an account attached with this policy has. +- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. + +### Example IAM Policy + +The AWS module just creates an EC2 KeyPair, an EC2 SecurityGroup and an EC2 instance. A simple policy would be: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + } + ] +} +``` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the AWS folder containing the terraform files by executing `cd quickstart/aws`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. 
Edit `terraform.tfvars` and customize the following variables: + - `aws_access_key` - Amazon AWS Access Key + - `aws_secret_key` - Amazon AWS Secret Key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. +Suggestions include: + - `aws_region` - Amazon AWS region, choose the closest instead of the default (`us-east-1`) + - `prefix` - Prefix for all created resources + - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget + - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher server using the `id_rsa` key generated in `quickstart/aws`. + +#### Result + +Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/aws` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/azure.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/azure.md new file mode 100644 index 0000000000..e839143684 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/azure.md @@ -0,0 +1,76 @@ +--- +title: Rancher Azure Quick Start Guide +description: Read this step by step Rancher Azure guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- + +The following steps will quickly deploy a Rancher server on Azure in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Microsoft Azure will incur charges. 
+
+- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes.
+- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet.
+- [Microsoft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant.
+- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret.
+- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure.
+
+
+## Getting Started
+
+1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`.
+
+2. Go into the Azure folder containing the terraform files by executing `cd quickstart/azure`.
+
+3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`.
+
+4. Edit `terraform.tfvars` and customize the following variables (a note on where to find these values follows this list):
+    - `azure_subscription_id` - Microsoft Azure Subscription ID
+    - `azure_client_id` - Microsoft Azure Client ID
+    - `azure_client_secret` - Microsoft Azure Client Secret
+    - `azure_tenant_id` - Microsoft Azure Tenant ID
+    - `rancher_server_admin_password` - Admin password for created Rancher server
+
+5. **Optional:** Modify optional variables within `terraform.tfvars`.
+See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information.
+Suggestions include:
+    - `azure_location` - Microsoft Azure region, choose the closest instead of the default (`East US`)
+    - `prefix` - Prefix for all created resources
+    - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget
+    - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster
+    - `windows_admin_password` - The admin password of the Windows worker node
+
+6. Run `terraform init`.
+
+7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following:
+
+    ```
+    Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
+
+    Outputs:
+
+    rancher_node_ip = xx.xx.xx.xx
+    rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+    workload_node_ip = yy.yy.yy.yy
+    ```
+
+8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`).
+9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/azure`.
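+
+If you are unsure where to find the values for step 4, one option (a sketch, assuming the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) is installed and you have run `az login`; the service principal name below is only an example) is to look them up from a shell:
+
+```
+# Subscription and tenant IDs for terraform.tfvars
+az account show --query id -o tsv          # azure_subscription_id
+az account show --query tenantId -o tsv    # azure_tenant_id
+
+# Create a service principal for the quickstart; in the output,
+# "appId" maps to azure_client_id and "password" to azure_client_secret.
+az ad sp create-for-rbac --name rancher-quickstart
+```
+
+Depending on your Azure CLI version, `az ad sp create-for-rbac` may also expect `--role` and `--scopes` arguments; adjust to your environment.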
+ +#### Result + +Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/azure` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md new file mode 100644 index 0000000000..cc15a69bbd --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md @@ -0,0 +1,68 @@ +--- +title: Rancher DigitalOcean Quick Start Guide +description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on DigitalOcean in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to DigitalOcean will incur charges. + +- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. +- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/do`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `do_token` - DigitalOcean access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. 
+Suggestions include: + - `do_region` - DigitalOcean region, choose the closest instead of the default (`nyc1`) + - `prefix` - Prefix for all created resources + - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/do`. + +#### Result + +Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/do` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md new file mode 100644 index 0000000000..1fc8fedb5c --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/gcp.md @@ -0,0 +1,70 @@ +--- +title: Rancher GCP Quick Start Guide +description: Read this step by step Rancher GCP guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on GCP in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +>**Note** +>Deploying to Google GCP will incur charges. + +- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. +- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. +- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. 
+- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP.
+
+
+## Getting Started
+
+1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`.
+
+2. Go into the GCP folder containing the terraform files by executing `cd quickstart/gcp`.
+
+3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`.
+
+4. Edit `terraform.tfvars` and customize the following variables:
+    - `gcp_account_json` - GCP service account file path and file name
+    - `rancher_server_admin_password` - Admin password for created Rancher server
+
+5. **Optional:** Modify optional variables within `terraform.tfvars`.
+See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information.
+Suggestions include:
+    - `gcp_region` - Google GCP region, choose the closest instead of the default (`us-east4`)
+    - `gcp_zone` - Google GCP zone, choose the closest instead of the default (`us-east4-a`)
+    - `prefix` - Prefix for all created resources
+    - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget
+
+6. Run `terraform init`.
+
+7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following:
+
+    ```
+    Apply complete! Resources: 16 added, 0 changed, 0 destroyed.
+
+    Outputs:
+
+    rancher_node_ip = xx.xx.xx.xx
+    rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io
+    workload_node_ip = yy.yy.yy.yy
+    ```
+
+8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`).
+9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/gcp`.
+
+#### Result
+
+Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory.
+
+### What's Next?
+
+Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md).
+
+## Destroying the Environment
+
+1. From the `quickstart/gcp` folder, execute `terraform destroy --auto-approve`.
+
+2. Wait for confirmation that all resources have been destroyed.
diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md
new file mode 100644
index 0000000000..1239d449e8
--- /dev/null
+++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md
@@ -0,0 +1,126 @@
+---
+title: Manual Quick Start
+weight: 300
+aliases:
+  - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-manual-setup/
+---
This tutorial walks you through: + +- Installation of Rancher 2.x +- Creation of your first cluster +- Deployment of an application, Nginx + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Quick Start Outline + +This Quick Start Guide is divided into different tasks for easier consumption. + + + + +1. [Provision a Linux Host](#1-provision-a-linux-host) + +1. [Install Rancher](#2-install-rancher) + +1. [Log In](#3-log-in) + +1. [Create the Cluster](#4-create-the-cluster) + + +
+
+### 1. Provision a Linux Host
+
+ Begin creation of a custom cluster by provisioning a Linux host. Your host can be:
+
+- A cloud-hosted virtual machine (VM)
+- An on-prem VM
+- A bare-metal server
+
+  >**Note:**
+  > When using a cloud-hosted virtual machine you need to allow inbound TCP communication to ports 80 and 443. Please see your cloud-host's documentation for information regarding port configuration.
+  >
+  > For a full list of port requirements, refer to [Docker Installation](../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md).
+
+ Provision the host according to our [Requirements](../../../pages-for-subheaders/installation-requirements.md).
+
+### 2. Install Rancher
+
+To install Rancher on your host, connect to it and then use a shell to install.
+
+1. Log in to your Linux host using your preferred shell, such as PuTTY or a remote Terminal connection.
+
+1. From your shell, enter the following command:
+
+    ```
+    sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged rancher/rancher
+    ```
+
+**Result:** Rancher is installed.
+
+### 3. Log In
+
+Log in to Rancher to begin using the application. After you log in, you'll make some one-time configurations.
+
+1. Open a web browser and enter the IP address of your host: `https://<SERVER_IP>`.
+
+    Replace `<SERVER_IP>` with your host IP address.
+
+1. When prompted, create a password for the default `admin` account.
+
+1. Set the **Default View**.
+    - If `I want to create or manage multiple clusters` is selected, the Cluster Manager UI is used as the default view.
+    - If `I'm only going to use the cluster Rancher was installed on` is selected, the Cluster Explorer UI is used as the default view.
+
+1. Set the **Rancher Server URL**. The URL can either be an IP address or a host name. However, each node added to your cluster must be able to connect to this URL.

    If you use a hostname in the URL, this hostname must be resolvable by DNS on the nodes you want to add to your cluster. A quick way to check this from a prospective node is shown below.
+
+
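+    For example (a sketch; `rancher.example.com` stands in for your own Rancher Server URL hostname):
+
+    ```
+    # The hostname should resolve to the Rancher server's address...
+    nslookup rancher.example.com
+    # ...and the server should answer; a healthy Rancher server normally replies with "pong".
+    curl -k https://rancher.example.com/ping
+    ```
+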
    + +### 4. Create the Cluster + +Welcome to Rancher! You are now able to create your first Kubernetes cluster. + +In this task, you can use the versatile **Custom** option. This option lets you add _any_ Linux host (cloud-hosted VM, on-prem VM, or bare-metal) to be used in a cluster. + +1. If you chose `I'm only going to use the cluster Rancher was installed on` when setting the default view, click the **Cluster Manager** button in the upper-right of the UI to access the **Clusters** page. + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Existing Nodes**. + +1. Enter a **Cluster Name**. + +1. Skip **Member Roles** and **Cluster Options**. We'll tell you about them later. + +1. Click **Next**. + +1. From **Node Role**, select _all_ the roles: **etcd**, **Control**, and **Worker**. + +1. **Optional**: Rancher auto-detects the IP addresses used for Rancher communication and cluster communication. You can override these using `Public Address` and `Internal Address` in the **Node Address** section. + +1. Skip the **Labels** stuff. It's not important for now. + +1. Copy the command displayed on screen to your clipboard. + +1. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. + +1. When you finish running the command on your Linux host, click **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +#### Finished + +Congratulations! You have created your first cluster. + +#### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md new file mode 100644 index 0000000000..002a81abdb --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md @@ -0,0 +1,49 @@ +--- +title: Vagrant Quick Start +weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/quickstart-vagrant/ +--- +The following steps quickly deploy a Rancher Server with a single node cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](../../../pages-for-subheaders/installation-and-upgrade.md). + +## Prerequisites + +- [Vagrant](https://www.vagrantup.com): Vagrant is required as this is used to provision the machine based on the Vagrantfile. +- [Virtualbox](https://www.virtualbox.org): The virtual machines that Vagrant provisions need to be provisioned to VirtualBox. +- At least 4GB of free RAM. + +### Note +- Vagrant will require plugins to create VirtualBox VMs. Install them with the following commands: + + `vagrant plugin install vagrant-vboxmanage` + + `vagrant plugin install vagrant-vbguest` + +## Getting Started + +1. 
Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the folder containing the Vagrantfile by executing `cd quickstart/vagrant`. + +3. **Optional:** Edit `config.yaml` to: + + - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) + - Change the password of the `admin` user for logging into Rancher. (`default_password`) + +4. To initiate the creation of the environment run, `vagrant up --provider=virtualbox`. + +5. Once provisioning finishes, go to `https://192.168.56.101` in the browser. The default user/password is `admin/admin`. + +**Result:** Rancher Server and your Kubernetes cluster is installed on VirtualBox. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments](../../../pages-for-subheaders/deploy-rancher-workloads.md). + +## Destroying the Environment + +1. From the `quickstart/vagrant` folder execute `vagrant destroy -f`. + +2. Wait for the confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/nodeports.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/nodeports.md new file mode 100644 index 0000000000..0cdfaa708b --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/nodeports.md @@ -0,0 +1,158 @@ +--- +title: Workload with NodePort Quick Start +weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/ +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. + +3. Open the **Project: Default** project. + +4. Click **Resources > Workloads.** + +5. Click **Deploy**. + + **Step Result:** The **Deploy Workload** page opens. + +6. Enter a **Name** for your workload. + +7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. + +8. From **Port Mapping**, click **Add Port**. + +9. From the **As a** drop-down, make sure that **NodePort (On every node)** is selected. + + ![As a dropdown, NodePort (On every node selected)](/img/nodeport-dropdown.png) + +10. From the **On Listening Port** field, leave the **Random** value in place. + + ![On Listening Port, Random selected](/img/listening-port-field.png) + +11. From the **Publish the container port** field, enter port `80`. + + ![Publish the container port, 80 entered](/img/container-port-field.png) + +12. Leave the remaining options on their default setting. We'll tell you about them later. + +13. Click **Launch**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. 
You can view this status from the project's **Workloads** page. + +
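+
+If you prefer to double-check from a command line, you can also list what was created (a sketch that assumes `kubectl` is configured for this cluster and that you kept the `default` namespace; names depend on the workload **Name** you entered):
+
+```
+kubectl -n default get deployments
+# The NodePort service shows a mapping such as 80:3xxxx/TCP
+kubectl -n default get services
+```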
+
+### 2. Viewing Your Application
+
+From the **Workloads** page, click the link underneath your workload. If your deployment succeeded, your application opens.
+
+### Attention: Cloud-Hosted Sandboxes
+
+When using a cloud-hosted virtual machine, you may not have access to the port running the container. In this event, you can test Nginx in an ssh session on the local machine using `Execute Shell`. Use the port number after the `:` in the link under your workload if available, which is `31568` in this example. The response is the Hello-World HTML page; its markup is omitted below and only the visible text is shown.
+
+```sh
+gettingstarted@rancher:~$ curl http://localhost:31568
+
+    Rancher
+
+    Hello world!
+
+    My hostname is hello-world-66b4b9d88b-78bhx
+
+    k8s services found 2
+
+        INGRESS_D1E1A394F61C108633C4BD37AEDDE757 tcp://10.43.203.31:80
+        KUBERNETES tcp://10.43.0.1:443
+
    + + + +gettingstarted@rancher:~$ + +``` + +### Finished + +Congratulations! You have successfully deployed a workload exposed via a NodePort. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md new file mode 100644 index 0000000000..1ac82e43b9 --- /dev/null +++ b/versioned_docs/version-2.5/getting-started/quick-start-guides/deploy-workloads/workload-ingress.md @@ -0,0 +1,84 @@ +--- +title: Workload with Ingress Quick Start +weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/ +--- + +### Prerequisite + +You have a running cluster with at least 1 node. + +### 1. Deploying a Workload + +You're ready to create your first Kubernetes [workload](https://kubernetes.io/docs/concepts/workloads/). A workload is an object that includes pods along with other files and info needed to deploy your application. + +For this workload, you'll be deploying the application Rancher Hello-World. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects/Namespaces**. + +3. Open the **Project: Default** project. + +4. Click **Resources > Workloads.** + +5. Click **Deploy**. + + **Step Result:** The **Deploy Workload** page opens. + +6. Enter a **Name** for your workload. + +7. From the **Docker Image** field, enter `rancher/hello-world`. This field is case-sensitive. + +8. Leave the remaining options on their default setting. We'll tell you about them later. + +9. Click **Launch**. + +**Result:** + +* Your workload is deployed. This process might take a few minutes to complete. +* When your workload completes deployment, it's assigned a state of **Active**. You can view this status from the project's **Workloads** page. + +
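+
+Before exposing the application, you can optionally confirm from a shell that the rollout finished (a sketch assuming `kubectl` access to the cluster and a workload named `hello-world`; substitute the **Name** you used):
+
+```
+# Waits until the deployment reports all replicas available
+kubectl -n default rollout status deployment/hello-world
+kubectl -n default get pods
+```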
    +### 2. Expose The Application Via An Ingress + +Now that the application is up and running it needs to be exposed so that other services can connect. + +1. From the **Clusters** page, open the cluster that you just created. + +2. From the main menu of the **Dashboard**, select **Projects**. + +3. Open the **Default** project. + +4. Click **Resources > Workloads > Load Balancing.** Click on the **Load Balancing** tab. + +5. Click **Add Ingress**. + +6. Enter a name i.e. **hello**. + +7. In the **Target** field, drop down the list and choose the name that you set for your service. + +8. Enter `80` in the **Port** field. + +9. Leave everything else as default and click **Save**. + +**Result:** The application is assigned a `sslip.io` address and exposed. It may take a minute or two to populate. + +### View Your Application + +From the **Load Balancing** page, click the target link, which will look something like `hello.default.xxx.xxx.xxx.xxx.sslip.io > hello-world`. + +Your application will open in a separate window. + +#### Finished + +Congratulations! You have successfully deployed a workload exposed via an ingress. + +#### What's Next? + +When you're done using your sandbox, destroy the Rancher Server and your cluster. See one of the following: + +- [Amazon AWS: Destroying the Environment](../deploy-rancher-manager/aws.md#destroying-the-environment) +- [DigitalOcean: Destroying the Environment](../deploy-rancher-manager/digitalocean.md#destroying-the-environment) +- [Vagrant: Destroying the Environment](../deploy-rancher-manager/vagrant.md#destroying-the-environment) diff --git a/versioned_docs/version-2.5/how-to-guides.md b/versioned_docs/version-2.5/how-to-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md new file mode 100644 index 0000000000..9891b5a635 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md @@ -0,0 +1,200 @@ +--- +title: Configuring Active Directory (AD) +weight: 1112 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/authentication/active-directory/ + - /rancher/v2.x/en/admin-settings/authentication/ad/ +--- + +If your organization uses Microsoft Active Directory as central user repository, you can configure Rancher to communicate with an Active Directory server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the Active Directory, while allowing end-users to authenticate with their AD credentials when logging in to the Rancher UI. + +Rancher uses LDAP to communicate with the Active Directory server. The authentication flow for Active Directory is therefore the same as for the [OpenLDAP authentication](../../../../../pages-for-subheaders/configure-openldap.md) integration. 
+ +> **Note:** +> +> Before you start, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +## Prerequisites + +You'll need to create or obtain from your AD administrator a new AD user to use as service account for Rancher. This user must have sufficient permissions to perform LDAP searches and read attributes of users and groups under your AD domain. + +Usually a (non-admin) **Domain User** account should be used for this purpose, as by default such user has read-only privileges for most objects in the domain partition. + +Note however, that in some locked-down Active Directory configurations this default behaviour may not apply. In such case you will need to ensure that the service account user has at least **Read** and **List Content** permissions granted either on the Base OU (enclosing users and groups) or globally for the domain. + +> **Using TLS?** +> +> If the certificate used by the AD server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +## Configuration Steps +### Open Active Directory Configuration + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **Active Directory**. The **Configure an AD server** form will be displayed. + +### Configure Active Directory Server Settings + +In the section titled `1. Configure an Active Directory server`, complete the fields with the information specific to your Active Directory server. Please refer to the following table for detailed information on the required values for each parameter. + +> **Note:** +> +> If you are unsure about the correct values to enter in the user/group Search Base field, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch). + +**Table 1: AD Server parameters** + +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the AD server | +| Port | Specify the port at which the Active Directory server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS).| +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the AD server unreachable. | +| Service Account Username | Enter the username of an AD account with read-only access to your domain partition (see [Prerequisites](#prerequisites)). The username can be entered in NetBIOS format (e.g. "DOMAIN\serviceaccount") or UPN format (e.g. "serviceaccount@domain.com"). | +| Service Account Password | The password for the service account. | +| Default Login Domain | When you configure this field with the NetBIOS name of your AD domain, usernames entered without a domain (e.g. "jdoe") will automatically be converted to a slashed, NetBIOS logon (e.g. "LOGIN_DOMAIN\jdoe") when binding to the AD server. If your users authenticate with the UPN (e.g. "jdoe@acme.com") as username then this field **must** be left empty. 
| +| User Search Base | The Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave it empty. For example: "ou=groups,dc=acme,dc=com".| + +--- + +### Configure User/Group Schema + +In the section titled `2. Customize Schema` you must provide Rancher with a correct mapping of user and group attributes corresponding to the schema used in your directory. + +Rancher uses LDAP queries to search for and retrieve information about users and groups within the Active Directory. The attribute mappings configured in this section are used to construct search filters and resolve group membership. It is therefore paramount that the provided settings reflect the reality of your AD domain. + +> **Note:** +> +> If you are unfamiliar with the schema used in your Active Directory domain, please refer to [Identify Search Base and Schema using ldapsearch](#annex-identify-search-base-and-schema-using-ldapsearch) to determine the correct configuration values. + +#### User Schema + +The table below details the parameters for the user schema section configuration. + +**Table 2: User schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. If your users authenticate with their UPN (e.g. "jdoe@acme.com") as username then this field must normally be set to `userPrincipalName`. Otherwise for the old, NetBIOS-style logon names (e.g. "jdoe") it's usually `sAMAccountName`. | +| User Member Attribute | The attribute containing the groups that a user is a member of. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the AD server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. To match UPN usernames (e.g. jdoe@acme.com) you should usually set the value of this field to `userPrincipalName`. | +| Search Filter | This filter gets applied to the list of users that is searched when Rancher attempts to add users to a site access list or tries to add members to clusters or projects. For example, a user search filter could be (|(memberOf=CN=group1,CN=Users,DC=testad,DC=rancher,DC=io)(memberOf=CN=group2,CN=Users,DC=testad,DC=rancher,DC=io)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of users will be empty. | +| User Enabled Attribute | The attribute containing an integer value representing a bitwise enumeration of user account flags. Rancher uses this to determine if a user account is disabled. You should normally leave this set to the AD standard `userAccountControl`. 
| +| Disabled Status Bitmask | This is the value of the `User Enabled Attribute` designating a disabled user account. You should normally leave this set to the default value of "2" as specified in the Microsoft Active Directory schema (see [here](https://docs.microsoft.com/en-us/windows/desktop/adschema/a-useraccountcontrol#remarks)). | + +--- + +#### Group Schema + +The table below details the parameters for the group schema configuration. + +**Table 3: Group schema configuration parameters** + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects. See description of user schema `Search Attribute`. | +| Search Filter | This filter gets applied to the list of groups that is searched when Rancher attempts to add groups to a site access list or tries to add groups to clusters or projects. For example, a group search filter could be (|(cn=group1)(cn=group2)). Note: If the search filter does not use [valid AD search syntax,](https://docs.microsoft.com/en-us/windows/win32/adsi/search-filter-syntax) the list of groups will be empty. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user attribute describing a the user's memberships. See `User Member Attribute`. | +| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (i.e., you have groups that contain other groups as members. We advise avoiding nested groups when possible). | + +--- + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the AD server **using your AD admin account**. If the test is successful, authentication with the configured Active Directory will be enabled implicitly with the account you test with set as admin. + +> **Note:** +> +> The AD user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which AD account you use to perform this step. + +1. Enter the **username** and **password** for the AD account that should be mapped to the local principal account. +2. Click **Authenticate with Active Directory** to finalise the setup. + +**Result:** + +- Active Directory authentication has been enabled. +- You have been signed into Rancher as administrator using the provided AD credentials. + +> **Note:** +> +> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. 
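+
+If the test fails, it can help to verify the service account credentials and connectivity to the AD server independently of Rancher. A minimal check (a sketch using the OpenLDAP client tools; hostname, port, and account are placeholders matching the examples in the annex below) is a simple bind test:
+
+```
+# A successful simple bind prints the identity the server authenticated you as
+ldapwhoami -x -H ldap://ad.acme.com:389 -D "acme\serviceaccount" -W
+```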
+
+## Annex: Identify Search Base and Schema using ldapsearch
+
+In order to successfully configure AD authentication it is crucial that you provide the correct configuration pertaining to the hierarchy and schema of your AD server.
+
+The [`ldapsearch`](http://manpages.ubuntu.com/manpages/artful/man1/ldapsearch.1.html) tool allows you to query your AD server to learn about the schema used for user and group objects.
+
+For the purpose of the example commands provided below we will assume:
+
+- The Active Directory server has a hostname of `ad.acme.com`
+- The server is listening for unencrypted connections on port `389`
+- The Active Directory domain is `acme`
+- You have a valid AD account with the username `jdoe` and password `secret`
+
+### Identify Search Base
+
+First we will use `ldapsearch` to identify the Distinguished Name (DN) of the parent node(s) for users and groups:
+
+```
+$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
+-h ad.acme.com -b "dc=acme,dc=com" -s sub "sAMAccountName=jdoe"
+```
+
+This command performs an LDAP search with the search base set to the domain root (`-b "dc=acme,dc=com"`) and a filter targeting the user account (`sAMAccountName=jdoe`), returning the attributes for said user:
+
+![](/img/ldapsearch-user.png)
+
+Since in this case the user's DN is `CN=John Doe,CN=Users,DC=acme,DC=com` [5], we should configure the **User Search Base** with the parent node DN `CN=Users,DC=acme,DC=com`.
+
+Similarly, based on the DN of the group referenced in the **memberOf** attribute [4], the correct value for the **Group Search Base** would be the parent node of that value, i.e., `OU=Groups,DC=acme,DC=com`.
+
+### Identify User Schema
+
+The output of the above `ldapsearch` query also allows us to determine the correct values to use in the user schema configuration:
+
+- `Object Class`: **person** [1]
+- `Username Attribute`: **name** [2]
+- `Login Attribute`: **sAMAccountName** [3]
+- `User Member Attribute`: **memberOf** [4]
+
+> **Note:**
+>
+> If the AD users in our organization were to authenticate with their UPN (e.g. jdoe@acme.com) instead of the short logon name, then we would have to set the `Login Attribute` to **userPrincipalName** instead.
+
+We'll also set the `Search Attribute` parameter to **sAMAccountName|name**. That way users can be added to clusters/projects in the Rancher UI either by entering their username or full name.
+
+### Identify Group Schema
+
+Next, we'll query one of the groups associated with this user, in this case `CN=examplegroup,OU=Groups,DC=acme,DC=com`:
+
+```
+$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
+-h ad.acme.com -b "ou=groups,dc=acme,dc=com" \
+-s sub "CN=examplegroup"
+```
+
+This command will show us the attributes used for group objects:
+
+![](/img/ldapsearch-group.png)
+
+Again, this allows us to determine the correct values to enter in the group schema configuration:
+
+- `Object Class`: **group** [1]
+- `Name Attribute`: **name** [2]
+- `Group Member Mapping Attribute`: **member** [3]
+- `Search Attribute`: **sAMAccountName** [4]
+
+Looking at the value of the **member** attribute, we can see that it contains the DN of the referenced user. This corresponds to the **distinguishedName** attribute in our user object. Accordingly, we will have to set the value of the `Group Member User Attribute` parameter to this attribute.
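+
+To double-check this correspondence, you can ask the server to return just the relevant attributes for the user (same example connection parameters as above; listing attribute names after the filter limits the output):
+
+```
+$ ldapsearch -x -D "acme\jdoe" -w "secret" -p 389 \
+-h ad.acme.com -b "dc=acme,dc=com" \
+-s sub "sAMAccountName=jdoe" distinguishedName memberOf
+```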
+ +In the same way, we can observe that the value in the **memberOf** attribute in the user object corresponds to the **distinguishedName** [5] of the group. We therefore need to set the value for the `Group DN Attribute` parameter to this attribute. + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Active Directory server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md new file mode 100644 index 0000000000..b010188c4a --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md @@ -0,0 +1,208 @@ +--- +title: Configuring Azure AD +weight: 1115 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/authentication/azure-ad/ + - /rancher/v2.x/en/admin-settings/authentication/azure-ad/ +--- + +If you have an instance of Active Directory (AD) hosted in Azure, you can configure Rancher to allow your users to log in using their AD accounts. Configuration of Azure AD external authentication requires you to make configurations in both Azure and Rancher. + +>**Note:** Azure AD integration only supports Service Provider initiated logins. + +>**Prerequisite:** Have an instance of Azure AD configured. + +>**Note:** Most of this procedure takes place from the [Microsoft Azure Portal](https://portal.azure.com/). + +## Azure Active Directory Configuration Outline + +Configuring Rancher to allow your users to authenticate with their Azure AD accounts involves multiple procedures. Review the outline below before getting started. + + + +>**Tip:** Before you start, we recommend creating an empty text file. You can use this file to copy values from Azure that you'll paste into Rancher later. + + + +- [1. Register Rancher with Azure](#1-register-rancher-with-azure) +- [2. Create a new client secret](#2-create-a-new-client-secret) +- [3. Set Required Permissions for Rancher](#3-set-required-permissions-for-rancher) +- [4. Add a Reply URL](#4-add-a-reply-url) +- [5. Copy Azure Application Data](#5-copy-azure-application-data) +- [6. Configure Azure AD in Rancher](#6-configure-azure-ad-in-rancher) + + + +### 1. Register Rancher with Azure + +Before enabling Azure AD within Rancher, you must register Rancher with Azure. + +1. Log in to [Microsoft Azure](https://portal.azure.com/) as an administrative user. Configuration in future steps requires administrative access rights. + +1. Use search to open the **App registrations** service. + + ![Open App Registrations](/img/search-app-registrations.png) + +1. Click **New registrations** and complete the **Create** form. + + ![New App Registration](/img/new-app-registration.png) + + 1. 
Enter a **Name** (something like `Rancher`). + + 1. From **Supported account types**, select "Accounts in this organizational directory only (AzureADTest only - Single tenant)" This corresponds to the legacy app registration options. + + 1. In the **Redirect URI** section, make sure **Web** is selected from the dropdown and enter the URL of your Rancher Server in the text box next to the dropdown. This Rancher server URL should be appended with the verification path: `/verify-auth-azure`. + + >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + + 1. Click **Register**. + +>**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +### 2. Create a new client secret + +From the Azure portal, create a client secret. Rancher will use this key to authenticate with Azure AD. + +1. Use search to open **App registrations** services. Then open the entry for Rancher that you created in the last procedure. + + ![Open Rancher Registration](/img/open-rancher-app.png) + +1. From the navigation pane on left, click **Certificates and Secrets**. + +1. Click **New client secret**. + + ![Create new client secret](/img/select-client-secret.png) + + 1. Enter a **Description** (something like `Rancher`). + + 1. Select duration for the key from the options under **Expires**. This drop-down sets the expiration date for the key. Shorter durations are more secure, but require you to create a new key after expiration. + + 1. Click **Add** (you don't need to enter a value—it will automatically populate after you save). + + +1. Copy the key value and save it to an [empty text file](#tip). + + You'll enter this key into the Rancher UI later as your **Application Secret**. + + You won't be able to access the key value again within the Azure UI. + +### 3. Set Required Permissions for Rancher + +Next, set API permissions for Rancher within Azure. + +1. From the navigation pane on left, select **API permissions**. + + ![Open Required Permissions](/img/select-required-permissions.png) + +1. Click **Add a permission**. + +1. From the **Azure Active Directory Graph**, select the following **Delegated Permissions**: + + ![Select API Permissions](/img/select-required-permissions-2.png) + +
    +
    + - **Access the directory as the signed-in user** + - **Read directory data** + - **Read all groups** + - **Read all users' full profiles** + - **Read all users' basic profiles** + - **Sign in and read user profile** + +1. Click **Add permissions**. + +1. From **API permissions**, click **Grant admin consent**. Then click **Yes**. + + >**Note:** You must be signed in as an Azure administrator to successfully save your permission settings. + + +### 4. Add a Reply URL + +To use Azure AD with Rancher you must whitelist Rancher with Azure. You can complete this whitelisting by providing Azure with a reply URL for Rancher, which is your Rancher Server URL followed with a verification path. + + +1. From the **Setting** blade, select **Reply URLs**. + + ![Azure: Enter Reply URL](/img/enter-azure-reply-url.png) + +1. From the **Reply URLs** blade, enter the URL of your Rancher Server, appended with the verification path: `/verify-auth-azure`. + + >**Tip:** You can find your personalized Azure reply URL in Rancher on the Azure AD Authentication page (Global View > Security Authentication > Azure AD). + +1. Click **Save**. + +**Result:** Your reply URL is saved. + +>**Note:** It can take up to five minutes for this change to take affect, so don't be alarmed if you can't authenticate immediately after Azure AD configuration. + +### 5. Copy Azure Application Data + +As your final step in Azure, copy the data that you'll use to configure Rancher for Azure AD authentication and paste it into an empty text file. + +1. Obtain your Rancher **Tenant ID**. + + 1. Use search to open the **Azure Active Directory** service. + + ![Open Azure Active Directory](/img/search-azure-ad.png) + + 1. From the left navigation pane, open **Overview**. + + 2. Copy the **Directory ID** and paste it into your [text file](#tip). + + You'll paste this value into Rancher as your **Tenant ID**. + +1. Obtain your Rancher **Application ID**. + + 1. Use search to open **App registrations**. + + ![Open App Registrations](/img/search-app-registrations.png) + + 1. Find the entry you created for Rancher. + + 1. Copy the **Application ID** and paste it to your [text file](#tip). + +1. Obtain your Rancher **Graph Endpoint**, **Token Endpoint**, and **Auth Endpoint**. + + 1. From **App registrations**, click **Endpoints**. + + ![Click Endpoints](/img/click-endpoints.png) + + 2. Copy the following endpoints to your clipboard and paste them into your [text file](#tip) (these values will be your Rancher endpoint values). + + - **Microsoft Graph API endpoint** (Graph Endpoint) + - **OAuth 2.0 token endpoint (v1)** (Token Endpoint) + - **OAuth 2.0 authorization endpoint (v1)** (Auth Endpoint) + +>**Note:** Copy the v1 version of the endpoints + +### 6. Configure Azure AD in Rancher + +From the Rancher UI, enter information about your AD instance hosted in Azure to complete configuration. + +Enter the values that you copied to your [text file](#tip). + +1. Log into Rancher. From the **Global** view, select **Security > Authentication**. + +1. Select **Azure AD**. + +1. Complete the **Configure Azure AD Account** form using the information you copied while completing [Copy Azure Application Data](#5-copy-azure-application-data). + + >**Important:** When entering your Graph Endpoint, remove the tenant ID from the URL, like below. + > + >https://graph.windows.net/abb5adde-bee8-4821-8b03-e63efdc7701c + + The following table maps the values you copied in the Azure portal to the fields in Rancher. 
+ + | Rancher Field | Azure Value | + | ------------------ | ------------------------------------- | + | Tenant ID | Directory ID | + | Application ID | Application ID | + | Application Secret | Key Value | + | Endpoint | https://login.microsoftonline.com/ | + | Graph Endpoint | Microsoft Azure AD Graph API Endpoint | + | Token Endpoint | OAuth 2.0 Token Endpoint | + | Auth Endpoint | OAuth 2.0 Authorization Endpoint | + +1. Click **Authenticate with Azure**. + +**Result:** Azure Active Directory authentication is configured. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md new file mode 100644 index 0000000000..14a971e554 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md @@ -0,0 +1,55 @@ +--- +title: Configuring FreeIPA +weight: 1114 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/authentication/freeipa/ + - /rancher/v2.x/en/admin-settings/authentication/freeipa/ +--- + +If your organization uses FreeIPA for user authentication, you can configure Rancher to allow your users to login using their FreeIPA credentials. + +>**Prerequisites:** +> +>- You must have a [FreeIPA Server](https://www.freeipa.org/) configured. +>- Create a service account in FreeIPA with `read-only` access. Rancher uses this account to verify group membership when a user makes a request using an API key. +>- Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). + +2. From the **Global** view, select **Security > Authentication** from the main menu. + +3. Select **FreeIPA**. + +4. Complete the **Configure an FreeIPA server** form. + + You may need to log in to your domain controller to find the information requested in the form. + + >**Using TLS?** + >If the certificate is self-signed or not from a recognized certificate authority, make sure you provide the complete chain. That chain is needed to verify the server's certificate. +
    +
    + >**User Search Base vs. Group Search Base** + > + >Search base allows Rancher to search for users and groups that are in your FreeIPA. These fields are only for search bases and not for search filters. + > + >* If your users and groups are in the same search base, complete only the User Search Base. + >* If your groups are in a different search base, you can optionally complete the Group Search Base. This field is dedicated to searching groups, but is not required. + +5. If your FreeIPA deviates from the standard AD schema, complete the **Customize Schema** form to match it. Otherwise, skip this step. + + >**Search Attribute** The Search Attribute field defaults with three specific values: `uid|sn|givenName`. After FreeIPA is configured, when a user enters text to add users or groups, Rancher automatically queries the FreeIPA server and attempts to match fields by user id, last name, or first name. Rancher specifically searches for users/groups that begin with the text entered in the search field. + > + >The default field value `uid|sn|givenName`, but you can configure this field to a subset of these fields. The pipe (`|`) between the fields separates these fields. + > + > * `uid`: User ID + > * `sn`: Last Name + > * `givenName`: First Name + > + > With this search attribute, Rancher creates search filters for users and groups, but you *cannot* add your own search filters in this field. + +6. Enter your FreeIPA username and password in **Authenticate with FreeIPA** to confirm that Rancher is configured to use FreeIPA authentication. + +**Result:** + +- FreeIPA authentication is configured. +- You are signed into Rancher with your FreeIPA account (i.e., the _external principal_). diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md new file mode 100644 index 0000000000..6dec8dace5 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md @@ -0,0 +1,54 @@ +--- +title: Configuring GitHub +weight: 1116 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/authentication/github/ + - /rancher/v2.x/en/admin-settings/authentication/github/ +--- + +In environments using GitHub, you can configure Rancher to allow sign on using GitHub credentials. + +>**Prerequisites:** Read [External Authentication Configuration and Principal Users](../../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Sign into Rancher using a local user assigned the `administrator` role (i.e., the _local principal_). + +2. From the **Global** view, select **Security > Authentication** from the main menu. + +3. Select **GitHub**. + +4. Follow the directions displayed to **Setup a GitHub Application**. Rancher redirects you to GitHub to complete registration. + + >**What's an Authorization Callback URL?** + > + >The Authorization Callback URL is the URL where users go to begin using your application (i.e. the splash screen). + + >When you use external authentication, authentication does not actually take place in your application. Instead, authentication takes place externally (in this case, GitHub). 
After this external authentication completes successfully, the Authorization Callback URL is the location where the user re-enters your application. + +5. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. + + >**Where do I find the Client ID and Client Secret?** + > + >From GitHub, select Settings > Developer Settings > OAuth Apps. The Client ID and Client Secret are displayed prominently. + +6. Click **Authenticate with GitHub**. + +7. Use the **Site Access** options to configure the scope of user authorization. + + - **Allow any valid Users** + + _Any_ GitHub user can access Rancher. We generally discourage use of this setting! + + - **Allow members of Clusters, Projects, plus Authorized Users and Organizations** + + Any GitHub user or group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any GitHub user or group you add to the **Authorized Users and Organizations** list may log in to Rancher. + + - **Restrict access to only Authorized Users and Organizations** + + Only GitHub users or groups added to the Authorized Users and Organizations can log in to Rancher. +
    +8. Click **Save**. + +**Result:** + +- GitHub authentication is configured. +- You are signed into Rancher with your GitHub account (i.e., the _external principal_). diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md new file mode 100644 index 0000000000..7e2367cc8e --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md @@ -0,0 +1,108 @@ +--- +title: Configuring Google OAuth +weight: 15 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/google/ +--- + +If your organization uses G Suite for user authentication, you can configure Rancher to allow your users to log in using their G Suite credentials. + +Only admins of the G Suite domain have access to the Admin SDK. Therefore, only G Suite admins can configure Google OAuth for Rancher. + +Within Rancher, only administrators or users with the **Manage Authentication** [global role](../../manage-role-based-access-control-rbac/global-permissions.md) can configure authentication. + +# Prerequisites +- You must have a [G Suite admin account](https://admin.google.com) configured. +- G Suite requires a [top private domain FQDN](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) as an authorized domain. One way to get an FQDN is by creating an A-record in Route53 for your Rancher server. You do not need to update your Rancher Server URL setting with that record, because there could be clusters using that URL. +- You must have the Admin SDK API enabled for your G Suite domain. You can enable it using the steps on [this page.](https://support.google.com/a/answer/60757?hl=en) + +After the Admin SDK API is enabled, your G Suite domain's API screen should look like this: +![Enable Admin APIs](/img/Google-Enable-APIs-Screen.png) + +# Setting up G Suite for OAuth with Rancher +Before you can set up Google OAuth in Rancher, you need to log in to your G Suite account and do the following: + +1. [Add Rancher as an authorized domain in G Suite](#1-adding-rancher-as-an-authorized-domain) +1. [Generate OAuth2 credentials for the Rancher server](#2-creating-oauth2-credentials-for-the-rancher-server) +1. [Create service account credentials for the Rancher server](#3-creating-service-account-credentials) +1. [Register the service account key as an OAuth Client](#4-register-the-service-account-key-as-an-oauth-client) + +### 1. Adding Rancher as an Authorized Domain +1. Click [here](https://console.developers.google.com/apis/credentials) to go to credentials page of your Google domain. +1. Select your project and click **OAuth consent screen.** +![OAuth Consent Screen](/img/Google-OAuth-consent-screen-tab.png) +1. Go to **Authorized Domains** and enter the top private domain of your Rancher server URL in the list. The top private domain is the rightmost superdomain. So for example, www.foo.co.uk a top private domain of foo.co.uk. 
For more information on top-level domains, refer to [this article.](https://github.com/google/guava/wiki/InternetDomainNameExplained#public-suffixes-and-private-domains) +1. Go to **Scopes for Google APIs** and make sure **email,** **profile** and **openid** are enabled. + +**Result:** Rancher has been added as an authorized domain for the Admin SDK API. + +### 2. Creating OAuth2 Credentials for the Rancher Server +1. Go to the Google API console, select your project, and go to the [credentials page.](https://console.developers.google.com/apis/credentials) +![Credentials](/img/Google-Credentials-tab.png) +1. On the **Create Credentials** dropdown, select **OAuth client ID.** +1. Click **Web application.** +1. Provide a name. +1. Fill out the **Authorized JavaScript origins** and **Authorized redirect URIs.** Note: The Rancher UI page for setting up Google OAuth (available from the Global view under **Security > Authentication > Google**) provides you the exact links to enter for this step. + - Under **Authorized JavaScript origins,** enter your Rancher server URL. + - Under **Authorized redirect URIs,** enter your Rancher server URL appended with the path `verify-auth`. For example, if your URI is `https://rancherServer`, you will enter `https://rancherServer/verify-auth`. +1. Click on **Create.** +1. After the credential is created, you will see a screen with a list of your credentials. Choose the credential you just created, and in that row on rightmost side, click **Download JSON.** Save the file so that you can provide these credentials to Rancher. + +**Result:** Your OAuth credentials have been successfully created. + +### 3. Creating Service Account Credentials +Since the Google Admin SDK is available only to admins, regular users cannot use it to retrieve profiles of other users or their groups. Regular users cannot even retrieve their own groups. + +Since Rancher provides group-based membership access, we require the users to be able to get their own groups, and look up other users and groups when needed. + +As a workaround to get this capability, G Suite recommends creating a service account and delegating authority of your G Suite domain to that service account. + +This section describes how to: + +- Create a service account +- Create a key for the service account and download the credentials as JSON + +1. Click [here](https://console.developers.google.com/iam-admin/serviceaccounts) and select your project for which you generated OAuth credentials. +1. Click on **Create Service Account.** +1. Enter a name and click **Create.** +![Service account creation Step 1](/img/Google-svc-acc-step1.png) +1. Don't provide any roles on the **Service account permissions** page and click **Continue** +![Service account creation Step 2](/img/Google-svc-acc-step2.png) +1. Click on **Create Key** and select the JSON option. Download the JSON file and save it so that you can provide it as the service account credentials to Rancher. +![Service account creation Step 3](/img/Google-svc-acc-step3-key-creation.png) + +**Result:** Your service account is created. + +### 4. Register the Service Account Key as an OAuth Client + +You will need to grant some permissions to the service account you created in the last step. Rancher requires you to grant only read-only permissions for users and groups. 
+ +Using the Unique ID of the service account key, register it as an Oauth Client using the following steps: + +1. Get the Unique ID of the key you just created. If it's not displayed in the list of keys right next to the one you created, you will have to enable it. To enable it, click **Unique ID** and click **OK.** This will add a **Unique ID** column to the list of service account keys. Save the one listed for the service account you created. NOTE: This is a numeric key, not to be confused with the alphanumeric field **Key ID.** + + ![Service account Unique ID](/img/Google-Select-UniqueID-column.png) +1. Go to the [**Manage OAuth Client Access** page.](https://admin.google.com/AdminHome?chromeless=1#OGX:ManageOauthClients) +1. Add the Unique ID obtained in the previous step in the **Client Name** field. +1. In the **One or More API Scopes** field, add the following scopes: + ``` + openid,profile,email,https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/admin.directory.group.readonly + ``` +1. Click **Authorize.** + +**Result:** The service account is registered as an OAuth client in your G Suite account. + +# Configuring Google OAuth in Rancher +1. Sign into Rancher using a local user assigned the [administrator](../../manage-role-based-access-control-rbac/global-permissions.md) role. This user is also called the local principal. +1. From the **Global** view, click **Security > Authentication** from the main menu. +1. Click **Google.** The instructions in the UI cover the steps to set up authentication with Google OAuth. + 1. Admin Email: Provide the email of an administrator account from your GSuite setup. In order to perform user and group lookups, google apis require an administrator's email in conjunction with the service account key. + 1. Domain: Provide the domain on which you have configured GSuite. Provide the exact domain and not any aliases. + 1. Nested Group Membership: Check this box to enable nested group memberships. Rancher admins can disable this at any time after configuring auth. + - **Step One** is about adding Rancher as an authorized domain, which we already covered in [this section.](#1-adding-rancher-as-an-authorized-domain) + - For **Step Two,** provide the OAuth credentials JSON that you downloaded after completing [this section.](#2-creating-oauth2-credentials-for-the-rancher-server) You can upload the file or paste the contents into the **OAuth Credentials** field. + - For **Step Three,** provide the service account credentials JSON that downloaded at the end of [this section.](#3-creating-service-account-credentials) The credentials will only work if you successfully [registered the service account key](#4-register-the-service-account-key-as-an-oauth-client) as an OAuth client in your G Suite account. +1. Click **Authenticate with Google**. +1. Click **Save**. + +**Result:** Google authentication is successfully configured. 
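Before pasting the two JSON files into the **OAuth Credentials** and service account fields described above, it can help to confirm that each file is the expected kind of credential. This is only a convenience check, not part of the official setup; the file names are placeholders for whatever you saved in sections 2 and 3, and `jq` is assumed to be installed:

```
# OAuth client credentials from section 2: web-application clients nest their fields under a top-level "web" key.
jq '.web.client_id' oauth-credentials.json

# Service account key from section 3: the key file is flat and should report the type "service_account".
jq '.type, .client_email' service-account.json
```

If the first command prints `null`, the file is probably not a web-application OAuth client; if the second does not print `"service_account"`, the wrong key file was likely downloaded.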
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md new file mode 100644 index 0000000000..965fa84c33 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md @@ -0,0 +1,185 @@ +--- +title: Configuring Keycloak (SAML) +description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins +weight: 1200 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/keycloak/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +## Prerequisites + +- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. +- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. + + Setting | Value + ------------|------------ + `Sign Documents` | `ON` 1 + `Sign Assertions` | `ON` 1 + All other `ON/OFF` Settings | `OFF` + `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 + `Client Name` | (e.g. `rancher`) + `Client Protocol` | `SAML` + `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` + + >1: Optionally, you can enable either one or both of these settings. + >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. + + ![](/img/keycloak/keycloak-saml-client-configuration.png) + +- In the new SAML client, create Mappers to expose the users fields + - Add all "Builtin Protocol Mappers" + ![](/img/keycloak/keycloak-saml-client-builtin-mappers.png) + - Create a new "Group list" mapper to map the member attribute to a user's groups + ![](/img/keycloak/keycloak-saml-client-group-mapper.png) + +## Getting the IDP Metadata + + + + +To get the IDP metadata, export a `metadata.xml` file from your Keycloak client. +From the **Installation** tab, choose the **SAML Metadata IDPSSODescriptor** format option and download your file. + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. 
+ +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + +The XML obtained contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: + +1. Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. +1. Remove the `` tag from the beginning. +1. Remove the `` from the end of the xml. + +You are left with something similar as the example below: + +``` + +.... + +``` + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. + +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + + + + +## Configuring Keycloak in Rancher + + +1. From the **Global** view, select **Security > Authentication** from the main menu. + +1. Select **Keycloak**. + +1. Complete the **Configure Keycloak Account** form. For help with filling the form, see the [configuration reference](#configuration-reference). + +1. After you complete the **Configure Keycloak Account** form, click **Authenticate with Keycloak**, which is at the bottom of the page. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. + + >**Note:** You may have to disable your popup blocker to see the IdP login page. + +**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. + +{{< saml_caveats >}} + +## Configuration Reference + + +| Field | Description | +| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Display Name Field | The attribute that contains the display name of users.
Example: `givenName` |
+| User Name Field | The attribute that contains the user name/given name. Example: `email` |
+| UID Field | An attribute that is unique to every user. Example: `email` |
+| Groups Field | Make entries for managing group memberships. Example: `member` |
+| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.
    Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | +| Rancher API Host | The URL for your Rancher Server. | +| Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | +| IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | + +>**Tip:** You can generate a key/certificate pair using an openssl command. For example: +> +> openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert + + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../../../../../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. + +### You are not redirected to Keycloak + +When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. + + * Verify your Keycloak client configuration. + * Make sure `Force Post Binding` set to `OFF`. + + +### Forbidden message displayed after IdP login + +You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. + + * Check the Rancher debug log. + * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. + +### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata + +This is usually due to the metadata not being created until a SAML provider is configured. +Try configuring and saving keycloak as your SAML provider and then accessing the metadata. + +### Keycloak Error: "We're sorry, failed to process response" + + * Check your Keycloak log. + * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. + +### Keycloak Error: "We're sorry, invalid requester" + + * Check your Keycloak log. + * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. 
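### Quick check of the metadata endpoint

For the HTTP 502 case above, you can confirm from the command line whether Rancher is serving its SAML metadata yet. This is a convenience check rather than an official step; the host name is a placeholder, and `-k` is only needed when Rancher uses a self-signed certificate:

```
# Returns the Rancher SP metadata once a SAML provider has been configured and saved.
# An error or empty response usually means the Keycloak configuration has not been saved yet.
curl -sk https://yourRancherHostURL/v1-saml/keycloak/saml/metadata
```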
diff --git a/content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md similarity index 100% rename from content/rancher/v2.5/en/admin-settings/authentication/okta/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md diff --git a/content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md similarity index 100% rename from content/rancher/v2.5/en/admin-settings/authentication/ping-federate/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md diff --git a/content/rancher/v2.5/en/admin-settings/authentication/local/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md similarity index 100% rename from content/rancher/v2.5/en/admin-settings/authentication/local/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md new file mode 100644 index 0000000000..0cbad69be4 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md @@ -0,0 +1,62 @@ +--- +title: Users and Groups +weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/user-groups/ +--- + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When you configure an external authentication provider, users from that provider will be able to log in to your Rancher server. When a user logs in, the authentication provider will supply your Rancher server with a list of groups to which the user belongs. + +Access to clusters, projects, multi-cluster apps, and global DNS providers and entries can be controlled by adding either individual users or groups to these resources. When you add a group to a resource, all users who are members of that group in the authentication provider, will be able to access the resource with the permissions that you've specified for the group. For more information on roles and permissions, see [Role Based Access Control](../../../../../pages-for-subheaders/manage-role-based-access-control-rbac.md). + +## Managing Members + +When adding a user or group to a resource, you can search for users or groups by beginning to type their name. 
The Rancher server will query the authentication provider to find users and groups that match what you've entered. Searching is limited to the authentication provider that you are currently logged in with. For example, if you've enabled GitHub authentication but are logged in using a [local](create-local-users.md) user account, you will not be able to search for GitHub users or groups. + +All users, whether they are local users or from an authentication provider, can be viewed and managed. From the **Global** view, click on **Users**. + +{{< saml_caveats >}} + +## User Information + +Rancher maintains information about each user that logs in through an authentication provider. This information includes whether the user is allowed to access your Rancher server and the list of groups that the user belongs to. Rancher keeps this user information so that the CLI, API, and kubectl can accurately reflect the access that the user has based on their group membership in the authentication provider. + +Whenever a user logs in to the UI using an authentication provider, Rancher automatically updates this user information. + +### Automatically Refreshing User Information + +Rancher will periodically refresh the user information even before a user logs in through the UI. You can control how often Rancher performs this refresh. From the **Global** view, click on **Settings**. Two settings control this behavior: + +- **`auth-user-info-max-age-seconds`** + + This setting controls how old a user's information can be before Rancher refreshes it. If a user makes an API call (either directly or by using the Rancher CLI or kubectl) and the time since the user's last refresh is greater than this setting, then Rancher will trigger a refresh. This setting defaults to `3600` seconds, i.e. 1 hour. + +- **`auth-user-info-resync-cron`** + + This setting controls a recurring schedule for resyncing authentication provider information for all users. Regardless of whether a user has logged in or used the API recently, this will cause the user to be refreshed at the specified interval. This setting defaults to `0 0 * * *`, i.e. once a day at midnight. See the [Cron documentation](https://en.wikipedia.org/wiki/Cron) for more information on valid values for this setting. + + +> **Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support periodically refreshing user information. User information will only be refreshed when the user logs into the Rancher UI. + +### Manually Refreshing User Information + +If you are not sure the last time Rancher performed an automatic refresh of user information, you can perform a manual refresh of all users. + +1. From the **Global** view, click on **Users** in the navigation bar. + +1. Click on **Refresh Group Memberships**. + +**Results:** Rancher refreshes the user information for all users. Requesting this refresh will update which users can access Rancher as well as all the groups that each user belongs to. + +>**Note:** Since SAML does not support user lookup, SAML-based authentication providers do not support the ability to manually refresh user information. User information will only be refreshed when the user logs into the Rancher UI. + + +## Session Length + +The default length (TTL) of each user session is adjustable. The default session length is 16 hours. + +1. From the **Global** view, click on **Settings**. +1. 
In the **Settings** page, find **`auth-user-session-ttl-minutes`** and click **Edit.** +1. Enter the amount of time in minutes a session length should last and click **Save.** + +**Result:** Users are automatically logged out of Rancher after the set number of minutes. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md new file mode 100644 index 0000000000..1dc44ce1fc --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md @@ -0,0 +1,84 @@ +--- +title: 1. Configuring Microsoft AD FS for Rancher +weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/ +--- + +Before configuring Rancher to support AD FS users, you must add Rancher as a [relying party trust](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/technical-reference/understanding-key-ad-fs-concepts) in AD FS. + +1. Log into your AD server as an administrative user. + +1. Open the **AD FS Management** console. Select **Add Relying Party Trust...** from the **Actions** menu and click **Start**. + + ![](/img/adfs/adfs-overview.png) + +1. Select **Enter data about the relying party manually** as the option for obtaining data about the relying party. + + ![](/img/adfs/adfs-add-rpt-2.png) + +1. Enter your desired **Display name** for your Relying Party Trust. For example, `Rancher`. + + ![](/img/adfs/adfs-add-rpt-3.png) + +1. Select **AD FS profile** as the configuration profile for your relying party trust. + + ![](/img/adfs/adfs-add-rpt-4.png) + +1. Leave the **optional token encryption certificate** empty, as Rancher AD FS will not be using one. + + ![](/img/adfs/adfs-add-rpt-5.png) + +1. Select **Enable support for the SAML 2.0 WebSSO protocol** + and enter `https:///v1-saml/adfs/saml/acs` for the service URL. + + ![](/img/adfs/adfs-add-rpt-6.png) + +1. Add `https:///v1-saml/adfs/saml/metadata` as the **Relying party trust identifier**. + + ![](/img/adfs/adfs-add-rpt-7.png) + +1. This tutorial will not cover multi-factor authentication; please refer to the [Microsoft documentation](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-additional-authentication-methods-for-ad-fs) if you would like to configure multi-factor authentication. + + ![](/img/adfs/adfs-add-rpt-8.png) + +1. From **Choose Issuance Authorization RUles**, you may select either of the options available according to use case. However, for the purposes of this guide, select **Permit all users to access this relying party**. + + ![](/img/adfs/adfs-add-rpt-9.png) + +1. After reviewing your settings, select **Next** to add the relying party trust. + + ![](/img/adfs/adfs-add-rpt-10.png) + + +1. Select **Open the Edit Claim Rules...** and click **Close**. + + ![](/img/adfs/adfs-add-rpt-11.png) + +1. On the **Issuance Transform Rules** tab, click **Add Rule...**. + + ![](/img/adfs/adfs-edit-cr.png) + +1. 
Select **Send LDAP Attributes as Claims** as the **Claim rule template**. + + ![](/img/adfs/adfs-add-tcr-1.png) + +1. Set the **Claim rule name** to your desired name (for example, `Rancher Attributes`) and select **Active Directory** as the **Attribute store**. Create the following mapping to reflect the table below: + + | LDAP Attribute | Outgoing Claim Type | + | -------------------------------------------- | ------------------- | + | Given-Name | Given Name | + | User-Principal-Name | UPN | + | Token-Groups - Qualified by Long Domain Name | Group | + | SAM-Account-Name | Name | +
    + ![](/img/adfs/adfs-add-tcr-2.png) + +1. Download the `federationmetadata.xml` from your AD server at: +``` +https:///federationmetadata/2007-06/federationmetadata.xml +``` + +**Result:** You've added Rancher as a relying trust party. Now you can configure Rancher to leverage AD. + +### [Next: Configuring Rancher for Microsoft AD FS](configure-rancher-for-ms-adfs.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md new file mode 100644 index 0000000000..db7b4b1fa2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md @@ -0,0 +1,57 @@ +--- +title: 2. Configuring Rancher for Microsoft AD FS +weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/ +--- + +After you complete [Configuring Microsoft AD FS for Rancher](configure-ms-adfs-for-rancher.md), enter your AD FS information into Rancher to allow AD FS users to authenticate with Rancher. + +>**Important Notes For Configuring Your AD FS Server:** +> +>- The SAML 2.0 WebSSO Protocol Service URL is: `https:///v1-saml/adfs/saml/acs` +>- The Relying Party Trust identifier URL is: `https:///v1-saml/adfs/saml/metadata` +>- You must export the `federationmetadata.xml` file from your AD FS server. This can be found at: `https:///federationmetadata/2007-06/federationmetadata.xml` + + +1. From the **Global** view, select **Security > Authentication** from the main menu. + +1. Select **Microsoft Active Directory Federation Services**. + +1. Complete the **Configure AD FS Account** form. Microsoft AD FS lets you specify an existing Active Directory (AD) server. The [configuration section below](#configuration) describe how you can map AD attributes to fields within Rancher. + + + + + + + + +1. After you complete the **Configure AD FS Account** form, click **Authenticate with AD FS**, which is at the bottom of the page. + + Rancher redirects you to the AD FS login page. Enter credentials that authenticate with Microsoft AD FS to validate your Rancher AD FS configuration. + + >**Note:** You may have to disable your popup blocker to see the AD FS login page. + +**Result:** Rancher is configured to work with MS FS. Your users can now sign into Rancher using their MS FS logins. + +# Configuration + +| Field | Description | +|---------------------------|-----------------| +| Display Name Field | The AD attribute that contains the display name of users.
Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name` |
+| User Name Field | The AD attribute that contains the user name/given name. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname` |
+| UID Field | An AD attribute that is unique to every user. Example: `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn` |
+| Groups Field | Make entries for managing group memberships. Example: `http://schemas.xmlsoap.org/claims/Group` |
+| Rancher API Host | The URL for your Rancher Server. |
+| Private Key / Certificate | This is a key-certificate pair to create a secure shell between Rancher and your AD FS. Ensure you set the Common Name (CN) to your Rancher Server URL. [Certificate creation command](#cert-command) |
+| Metadata XML | The `federationmetadata.xml` file exported from your AD FS server.
You can find this file at `https:///federationmetadata/2007-06/federationmetadata.xml`. | + + + +**Tip:** You can generate a certificate using an openssl command. For example: + +``` +openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" +``` diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md new file mode 100644 index 0000000000..2c9fa74695 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md @@ -0,0 +1,34 @@ +--- +title: Group Permissions with Shibboleth and OpenLDAP +weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/shibboleth/about/ +--- + +This page provides background information and context for Rancher users who intend to set up the Shibboleth authentication provider in Rancher. + +Because Shibboleth is a SAML provider, it does not support searching for groups. While a Shibboleth integration can validate user credentials, it can't be used to assign permissions to groups in Rancher without additional configuration. + +One solution to this problem is to configure an OpenLDAP identity provider. With an OpenLDAP back end for Shibboleth, you will be able to search for groups in Rancher and assign them to resources such as clusters, projects, or namespaces from the Rancher UI. + +### Terminology + +- **Shibboleth** is a single sign-on log-in system for computer networks and the Internet. It allows people to sign in using just one identity to various systems. It validates user credentials, but does not, on its own, handle group memberships. +- **SAML:** Security Assertion Markup Language, an open standard for exchanging authentication and authorization data between an identity provider and a service provider. +- **OpenLDAP:** a free, open-source implementation of the Lightweight Directory Access Protocol (LDAP). It is used to manage an organization’s computers and users. OpenLDAP is useful for Rancher users because it supports groups. In Rancher, it is possible to assign permissions to groups so that they can access resources such as clusters, projects, or namespaces, as long as the groups already exist in the identity provider. +- **IdP or IDP:** An identity provider. OpenLDAP is an example of an identity provider. + +### Adding OpenLDAP Group Permissions to Rancher Resources + +The diagram below illustrates how members of an OpenLDAP group can access resources in Rancher that the group has permissions for. + +For example, a cluster owner could add an OpenLDAP group to a cluster so that they have permissions to view most cluster-level resources and create new projects. Then the OpenLDAP group members will have access to the cluster as soon as they log in to Rancher. + +In this scenario, OpenLDAP allows the cluster owner to search for groups when assigning permissions. Without OpenLDAP, the functionality to search for groups would not be supported. + +When a member of the OpenLDAP group logs in to Rancher, she is redirected to Shibboleth and enters her username and password.
+ +Shibboleth validates her credentials, and retrieves user attributes from OpenLDAP, including groups. Then Shibboleth sends a SAML assertion to Rancher including the user attributes. Rancher uses the group data so that she can access all of the resources and permissions that her groups have permissions for. + +![Adding OpenLDAP Group Permissions to Rancher Resources](/img/shibboleth-with-openldap-groups.svg) + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md new file mode 100644 index 0000000000..bf35682d12 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md @@ -0,0 +1,44 @@ +--- +title: Cluster Drivers +weight: 1 +aliases: + - /rancher/v2.x/en/admin-settings/drivers/cluster-drivers/ +--- + +Cluster drivers are used to create clusters in a [hosted Kubernetes provider](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md), such as Google GKE. The availability of which cluster driver to display when creating clusters is defined by the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters. By default, Rancher is packaged with several existing cloud provider cluster drivers, but you can also add custom cluster drivers to Rancher. + +If there are specific cluster drivers that you do not want to show your users, you may deactivate those cluster drivers within Rancher and they will not appear as an option for cluster creation. + +### Managing Cluster Drivers + +>**Prerequisites:** To create, edit, or delete cluster drivers, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Cluster Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +## Activating/Deactivating Cluster Drivers + +By default, Rancher only activates drivers for the most popular cloud providers, Google GKE, Amazon EKS and Azure AKS. If you want to show or hide any node driver, you can change its status. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. + +2. From the **Drivers** page, select the **Cluster Drivers** tab. + +3. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. + +## Adding Custom Cluster Drivers + +If you want to use a cluster driver that Rancher doesn't support out-of-the-box, you can add the provider's driver in order to start using them to create _hosted_ kubernetes clusters. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. + +2. From the **Drivers** page select the **Cluster Drivers** tab. + +3. Click **Add Cluster Driver**. + +4. Complete the **Add Cluster Driver** form. Then click **Create**. 
+ + +### Developing your own Cluster Driver + +To develop a cluster driver to add to Rancher, please refer to our [example](https://github.com/rancher-plugins/kontainer-engine-driver-example). diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md new file mode 100644 index 0000000000..0a5e3dc409 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md @@ -0,0 +1,41 @@ +--- +title: Node Drivers +weight: 2 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/node-drivers/ + - /rancher/v2.5/en/tasks/global-configuration/node-drivers/ + - /rancher/v2.x/en/admin-settings/drivers/node-drivers/ +--- + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). A node driver's status determines whether it is displayed when creating node templates; only `active` node drivers appear as options for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, deactivate those node drivers. + +#### Managing Node Drivers + +>**Prerequisites:** To create, edit, or delete drivers, you need _one_ of the following permissions: +> +>- [Administrator Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md) +>- [Custom Global Permissions](../manage-role-based-access-control-rbac/global-permissions.md#custom-global-permissions) with the [Manage Node Drivers](../manage-role-based-access-control-rbac/global-permissions.md) role assigned. + +## Activating/Deactivating Node Drivers + +By default, Rancher only activates drivers for the most popular cloud providers: Amazon EC2, Azure, DigitalOcean, and vSphere. If you want to show or hide any node driver, you can change its status. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. + +2. Select the driver that you wish to **Activate** or **Deactivate** and select the appropriate icon. + +## Adding Custom Node Drivers + +If you want to use a node driver that Rancher doesn't support out-of-the-box, you can add that provider's driver and start using it to create node templates and eventually node pools for your Kubernetes cluster. + +1. From the **Global** view, choose **Tools > Drivers** in the navigation bar. From the **Drivers** page, select the **Node Drivers** tab. + +2. Click **Add Node Driver**. + +3. Complete the **Add Node Driver** form. Then click **Create**. + +### Developing your own node driver + +Node drivers are implemented with [Docker Machine](https://docs.docker.com/machine/).
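Since an active node driver corresponds to a Docker Machine driver, what Rancher does when provisioning a node from a node template is roughly what the standalone `docker-machine` CLI does when creating a host. A minimal illustration using the built-in Amazon EC2 driver; the machine name and region are placeholders, and AWS credentials are assumed to be available in the environment:

```
# Roughly the operation a node driver performs when Rancher provisions a node from a node template.
# Assumes AWS credentials in the environment or in ~/.aws/credentials.
docker-machine create --driver amazonec2 \
  --amazonec2-region us-west-2 \
  example-node
```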
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md new file mode 100644 index 0000000000..915aad540b --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md @@ -0,0 +1,63 @@ +--- +title: Access and Sharing +weight: 31 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/template-access-and-sharing/ +--- + +If you are an RKE template owner, you can share it with users or groups of users, who can then use the template to create clusters. + +Since RKE templates are specifically shared with users and groups, owners can share different RKE templates with different sets of users. + +When you share a template, each user can have one of two access levels: + +- **Owner:** This user can update, delete, and share the templates that they own. The owner can also share the template with other users. +- **User:** These users can create clusters using the template. They can also upgrade those clusters to new revisions of the same template. When you share a template as **Make Public (read-only),** all users in your Rancher setup have the User access level for the template. + +If you create a template, you automatically become an owner of that template. + +If you want to delegate responsibility for updating the template, you can share ownership of the template. For details on how owners can modify templates, refer to the [documentation about revising templates.](manage-rke1-templates.md) + +There are several ways to share templates: + +- Add users to a new RKE template during template creation +- Add users to an existing RKE template +- Make the RKE template public, sharing it with all users in the Rancher setup +- Share template ownership with users who are trusted to modify the template + +### Sharing Templates with Specific Users or Groups + +To allow users or groups to create clusters using your template, you can give them the basic **User** access level for the template. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **⋮ > Edit.** +1. In the **Share Template** section, click on **Add Member**. +1. Search in the **Name** field for the user or group you want to share the template with. +1. Choose the **User** access type. +1. Click **Save.** + +**Result:** The user or group can create clusters using the template. + +### Sharing Templates with All Users + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to share and click the **⋮ > Edit.** +1. Under **Share Template,** click **Make Public (read-only).** Then click **Save.** + +**Result:** All users in the Rancher setup can create clusters using the template. + +### Sharing Ownership of Templates + +If you are the creator of a template, you might want to delegate responsibility for maintaining and updating a template to another user or group. + +In that case, you can give users the Owner access type, which allows another user to update your template, delete it, or share access to it with other users. + +To give Owner access to a user or group, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. 
Go to the RKE template that you want to share and click the **⋮ > Edit.** +1. Under **Share Template**, click on **Add Member** and search in the **Name** field for the user or group you want to share the template with. +1. In the **Access Type** field, click **Owner.** +1. Click **Save.** + +**Result:** The user or group has the Owner access type, and can modify, share, or delete the template. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md new file mode 100644 index 0000000000..66898351d1 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md @@ -0,0 +1,63 @@ +--- +title: Applying Templates +weight: 50 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/applying-templates/ +--- + +You can create a cluster from an RKE template that you created, or from a template that has been [shared with you.](access-or-share-templates.md) + +RKE templates can be applied to new clusters. + +You can [save the configuration of an existing cluster as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +You can't change a cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +This section covers the following topics: + +- [Creating a cluster from an RKE template](#creating-a-cluster-from-an-rke-template) +- [Updating a cluster created with an RKE template](#updating-a-cluster-created-with-an-rke-template) +- [Converting an existing cluster to use an RKE template](#converting-an-existing-cluster-to-use-an-rke-template) + +### Creating a Cluster from an RKE Template + +To add a cluster [hosted by an infrastructure provider](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using an RKE template, use these steps: + +1. From the **Global** view, go to the **Clusters** tab. +1. Click **Add Cluster** and choose the infrastructure provider. +1. Provide the cluster name and node template details as usual. +1. To use an RKE template, under the **Cluster Options**, check the box for **Use an existing RKE template and revision.** +1. Choose an existing template and revision from the dropdown menu. +1. Optional: You can edit any settings that the RKE template owner marked as **Allow User Override** when the template was created. If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. Then you will need to edit the cluster to upgrade it to the new revision. +1. Click **Save** to launch the cluster. + +### Updating a Cluster Created with an RKE Template + +When the template owner creates a template, each setting has a switch in the Rancher UI that indicates if users can override the setting. + +- If the setting allows a user override, you can update these settings in the cluster by [editing the cluster.](../../../../pages-for-subheaders/cluster-configuration.md) +- If the switch is turned off, you cannot change these settings unless the cluster owner creates a template revision that lets you override them. 
If there are settings that you want to change, but don't have the option to, you will need to contact the template owner to get a new revision of the template. + +If a cluster was created from an RKE template, you can edit the cluster to update the cluster to a new revision of the template. + +An existing cluster's settings can be [saved as an RKE template.](#converting-an-existing-cluster-to-use-an-rke-template) In that situation, you can also edit the cluster to update the cluster to a new revision of the template. + +> **Note:** You can't change the cluster to use a different RKE template. You can only update the cluster to a new revision of the same template. + +### Converting an Existing Cluster to Use an RKE Template + +This section describes how to create an RKE template from an existing cluster. + +RKE templates cannot be applied to existing clusters, except if you save an existing cluster's settings as an RKE template. This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.](manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md new file mode 100644 index 0000000000..dceb726645 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md @@ -0,0 +1,52 @@ +--- +title: Template Creator Permissions +weight: 10 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/creator-permissions/ +--- + +Administrators have the permission to create RKE templates, and only administrators can give that permission to other users. + +For more information on administrator permissions, refer to the [documentation on global permissions](../manage-role-based-access-control-rbac/global-permissions.md). + +# Giving Users Permission to Create Templates + +Templates can only be created by users who have the global permission **Create RKE Templates.** + +Administrators have the global permission to create templates, and only administrators can give that permission to other users. 
+ +For information on allowing users to modify existing templates, refer to [Sharing Templates.](access-or-share-templates.md) + +Administrators can give users permission to create RKE templates in two ways: + +- By editing the permissions of an [individual user](#allowing-a-user-to-create-templates) +- By changing the [default permissions of new users](#allowing-new-users-to-create-templates-by-default) + +### Allowing a User to Create Templates + +An administrator can individually grant the role **Create RKE Templates** to any existing user by following these steps: + +1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** +1. In the **Global Permissions** section, choose **Custom** and select the **Create RKE Templates** role along with any other roles the user should have. Click **Save.** + +**Result:** The user has permission to create RKE templates. + +### Allowing New Users to Create Templates by Default + +Alternatively, the administrator can give all new users the default permission to create RKE templates by following the following steps. This will not affect the permissions of existing users. + +1. From the **Global** view, click **Security > Roles.** +1. Under the **Global** roles tab, go to the role **Create RKE Templates** and click the **⋮ > Edit**. +1. Select the option **Yes: Default role for new users** and click **Save.** + +**Result:** Any new user created in this Rancher installation will be able to create RKE templates. Existing users will not get this permission. + +### Revoking Permission to Create Templates + +Administrators can remove a user's permission to create templates with the following steps: + +1. From the global view, click the **Users** tab. Choose the user you want to edit and click the **⋮ > Edit.** +1. In the **Global Permissions** section, un-check the box for **Create RKE Templates**. In this section, you can change the user back to a standard user, or give the user a different set of custom permissions. +1. Click **Save.** + +**Result:** The user cannot create RKE templates. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md new file mode 100644 index 0000000000..d8d9cac1bc --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md @@ -0,0 +1,40 @@ +--- +title: Template Enforcement +weight: 32 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/enforcement/ +--- + +This section describes how template administrators can enforce templates in Rancher, restricting the ability of users to create clusters without a template. + +By default, any standard user in Rancher can create clusters. But when RKE template enforcement is turned on, + +- Only an administrator has the ability to create clusters without a template. +- All standard users must use an RKE template to create a new cluster. +- Standard users cannot create a cluster without using a template. 
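The sections below cover turning enforcement on and off in the Rancher UI. In scripted or automated setups, the same `cluster-template-enforcement` setting can also be toggled through the Rancher API. A minimal sketch, assuming an admin API token and a placeholder server URL:

```
# Turn RKE template enforcement on ("false" turns it back off).
# RANCHER_URL and RANCHER_TOKEN are placeholders for your Rancher server URL and an admin API token.
curl -sk -u "$RANCHER_TOKEN" \
  -X PUT "$RANCHER_URL/v3/settings/cluster-template-enforcement" \
  -H 'Content-Type: application/json' \
  -d '{"value": "true"}'
```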
+ +Users can only create new templates if the administrator [gives them permission.](creator-permissions.md#allowing-a-user-to-create-templates) + +After a cluster is created with an RKE template, the cluster creator cannot edit settings that are defined in the template. The only way to change those settings after the cluster is created is to [upgrade the cluster to a new revision](apply-templates.md#updating-a-cluster-created-with-an-rke-template) of the same template. If cluster creators want to change template-defined settings, they would need to contact the template owner to get a new revision of the template. For details on how template revisions work, refer to the [documentation on revising templates.](manage-rke1-templates.md#updating-a-template) + +# Requiring New Clusters to Use an RKE Template + +You might want to require new clusters to use a template to ensure that any cluster launched by a [standard user](../manage-role-based-access-control-rbac/global-permissions.md) will use the Kubernetes and/or Rancher settings that are vetted by administrators. + +To require new clusters to use an RKE template, administrators can turn on RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** +1. Set the value to **True** and click **Save.** + +**Result:** All clusters provisioned by Rancher must use a template, unless the creator is an administrator. + +# Disabling RKE Template Enforcement + +To allow new clusters to be created without an RKE template, administrators can turn off RKE template enforcement with the following steps: + +1. From the **Global** view, click the **Settings** tab. +1. Go to the `cluster-template-enforcement` setting. Click the vertical **⋮** and click **Edit.** +1. Set the value to **False** and click **Save.** + +**Result:** When clusters are provisioned by Rancher, they don't need to use a template. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md new file mode 100644 index 0000000000..a5a1c1ede9 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md @@ -0,0 +1,73 @@ +--- +title: Example Scenarios +weight: 5 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/example-scenarios/ +--- + +These example scenarios describe how an organization could use templates to standardize cluster creation. + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](#templates-for-basic-and-advanced-users) so that basic users have more restricted options and advanced users have more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. 
If the best practices change, [a template can be updated to a new revision](#updating-templates-and-clusters-created-with-them) and clusters created from the template can upgrade to the new version of the template. +- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to delegate ownership of the template, this scenario describes how [template ownership can be shared.](#allowing-other-users-to-control-and-share-a-template) + + +# Enforcing a Template Setting for Everyone + +Let's say there is an organization in which the administrators decide that all new clusters should be created with Kubernetes version 1.14. + +1. First, an administrator creates a template which specifies the Kubernetes version as 1.14 and marks all other settings as **Allow User Override**. +1. The administrator makes the template public. +1. The administrator turns on template enforcement. + +**Results:** + +- All Rancher users in the organization have access to the template. +- All new clusters created by [standard users](../manage-role-based-access-control-rbac/global-permissions.md) with this template will use Kubernetes 1.14 and they are unable to use a different Kubernetes version. By default, standard users don't have permission to create templates, so this template will be the only template they can use unless more templates are shared with them. +- All standard users must use a cluster template to create a new cluster. They cannot create a cluster without using a template. + +In this way, the administrators enforce the Kubernetes version across the organization, while still allowing end users to configure everything else. + +# Templates for Basic and Advanced Users + +Let's say an organization has both basic and advanced users. Administrators want the basic users to be required to use a template, while the advanced users and administrators create their clusters however they want. + +1. First, an administrator turns on [RKE template enforcement.](enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) This means that every [standard user](../manage-role-based-access-control-rbac/global-permissions.md) in Rancher will need to use an RKE template when they create a cluster. +1. The administrator then creates two templates: + + - One template for basic users, with almost every option specified except for access keys + - One template for advanced users, which has most or all options has **Allow User Override** turned on + +1. The administrator shares the advanced template with only the advanced users. +1. The administrator makes the template for basic users public, so the more restrictive template is an option for everyone who creates a Rancher-provisioned cluster. + +**Result:** All Rancher users, except for administrators, are required to use a template when creating a cluster. Everyone has access to the restrictive template, but only advanced users have permission to use the more permissive template. The basic users are more restricted, while advanced users have more freedom when configuring their Kubernetes clusters. + +# Updating Templates and Clusters Created with Them + +Let's say an organization has a template that requires clusters to use Kubernetes v1.14. However, as time goes on, the administrators change their minds. They decide they want users to be able to upgrade their clusters to use newer versions of Kubernetes. + +In this organization, many clusters were created with a template that requires Kubernetes v1.14. 
Because the template does not allow that setting to be overridden, the users who created the cluster cannot directly edit that setting. + +The template owner has several options for allowing the cluster creators to upgrade Kubernetes on their clusters: + +- **Specify Kubernetes v1.15 on the template:** The template owner can create a new template revision that specifies Kubernetes v1.15. Then the owner of each cluster that uses that template can upgrade their cluster to a new revision of the template. This template upgrade allows the cluster creator to upgrade Kubernetes to v1.15 on their cluster. +- **Allow any Kubernetes version on the template:** When creating a template revision, the template owner can also mark the the Kubernetes version as **Allow User Override** using the switch near that setting on the Rancher UI. This will allow clusters that upgrade to this template revision to use any version of Kubernetes. +- **Allow the latest minor Kubernetes version on the template:** The template owner can also create a template revision in which the Kubernetes version is defined as **Latest v1.14 (Allows patch version upgrades).** This means clusters that use that revision will be able to get patch version upgrades, but major version upgrades will not be allowed. + +# Allowing Other Users to Control and Share a Template + +Let's say Alice is a Rancher administrator. She owns an RKE template that reflects her organization's agreed-upon best practices for creating a cluster. + +Bob is an advanced user who can make informed decisions about cluster configuration. Alice trusts Bob to create new revisions of her template as the best practices get updated over time. Therefore, she decides to make Bob an owner of the template. + +To share ownership of the template with Bob, Alice [adds Bob as an owner of her template.](access-or-share-templates.md#sharing-ownership-of-templates) + +The result is that as a template owner, Bob is in charge of version control for that template. Bob can now do all of the following: + +- [Revise the template](manage-rke1-templates.md#updating-a-template) when the best practices change +- [Disable outdated revisions](manage-rke1-templates.md#disabling-a-template-revision) of the template so that no new clusters can be created with it +- [Delete the whole template](manage-rke1-templates.md#deleting-a-template) if the organization wants to go in a different direction +- [Set a certain revision as default](manage-rke1-templates.md#setting-a-template-revision-as-default) when users create a cluster with it. End users of the template will still be able to choose which revision they want to create the cluster with. +- [Share the template](access-or-share-templates.md) with specific users, make the template available to all Rancher users, or share ownership of the template with another user. 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md new file mode 100644 index 0000000000..bed1648d4d --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md @@ -0,0 +1,72 @@ +--- +title: RKE Templates and Infrastructure +weight: 90 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/rke-templates-and-hardware/ +--- + +In Rancher, RKE templates are used to provision Kubernetes and define Rancher settings, while node templates are used to provision nodes. + +Therefore, even if RKE template enforcement is turned on, the end user still has flexibility when picking the underlying hardware when creating a Rancher cluster. The end users of an RKE template can still choose an infrastructure provider and the nodes they want to use. + +If you want to standardize the hardware in your clusters, use RKE templates conjunction with node templates or with a server provisioning tool such as Terraform. + +### Node Templates + +[Node templates](../../../../reference-guides/user-settings/manage-node-templates.md) are responsible for node configuration and node provisioning in Rancher. From your user profile, you can set up node templates to define which templates are used in each of your node pools. With node pools enabled, you can make sure you have the required number of nodes in each node pool, and ensure that all nodes in the pool are the same. + +### Terraform + +Terraform is a server provisioning tool. It uses infrastructure-as-code that lets you create almost every aspect of your infrastructure with Terraform configuration files. It can automate the process of server provisioning in a way that is self-documenting and easy to track in version control. + +This section focuses on how to use Terraform with the [Rancher 2 Terraform provider](https://www.terraform.io/docs/providers/rancher2/), which is a recommended option to standardize the hardware for your Kubernetes clusters. If you use the Rancher Terraform provider to provision hardware, and then use an RKE template to provision a Kubernetes cluster on that hardware, you can quickly create a comprehensive, production-ready cluster. + +Terraform allows you to: + +- Define almost any kind of infrastructure-as-code, including servers, databases, load balancers, monitoring, firewall settings, and SSL certificates +- Leverage catalog apps and multi-cluster apps +- Codify infrastructure across many platforms, including Rancher and major cloud providers +- Commit infrastructure-as-code to version control +- Easily repeat configuration and setup of infrastructure +- Incorporate infrastructure changes into standard development practices +- Prevent configuration drift, in which some servers become configured differently than others + +# How Does Terraform Work? + +Terraform is written in files with the extension `.tf`. It is written in HashiCorp Configuration Language, which is a declarative language that lets you define the infrastructure you want in your cluster, the cloud provider you are using, and your credentials for the provider. 
Then Terraform makes API calls to the provider in order to efficiently create that infrastructure. + +To create a Rancher-provisioned cluster with Terraform, go to your Terraform configuration file and define the provider as Rancher 2. You can set up your Rancher 2 provider with a Rancher API key. Note: The API key has the same permissions and access level as the user it is associated with. + +Then Terraform calls the Rancher API to provision your infrastructure, and Rancher calls the infrastructure provider. As an example, if you wanted to use Rancher to provision infrastructure on AWS, you would provide both your Rancher API key and your AWS credentials in the Terraform configuration file or in environment variables so that they could be used to provision the infrastructure. + +When you need to make changes to your infrastructure, instead of manually updating the servers, you can make changes in the Terraform configuration files. Then those files can be committed to version control, validated, and reviewed as necessary. Then when you run `terraform apply`, the changes would be deployed. + +# Tips for Working with Terraform + +- There are examples of how to provide most aspects of a cluster in the [documentation for the Rancher 2 provider.](https://www.terraform.io/docs/providers/rancher2/) + +- In the Terraform settings, you can install Docker Machine by using the Docker Machine node driver. + +- You can also modify auth in the Terraform provider. + +- You can reverse engineer how to do define a setting in Terraform by changing the setting in Rancher, then going back and checking your Terraform state file to see how it maps to the current state of your infrastructure. + +- If you want to manage Kubernetes cluster settings, Rancher settings, and hardware settings all in one place, use [Terraform modules](https://github.com/rancher/terraform-modules). You can pass a cluster configuration YAML file or an RKE template configuration file to a Terraform module so that the Terraform module will create it. In that case, you could use your infrastructure-as-code to manage the version control and revision history of both your Kubernetes cluster and its underlying hardware. + +# Tip for Creating CIS Benchmark Compliant Clusters + +This section describes one way that you can make security and compliance-related config files standard in your clusters. + +When you create a [CIS benchmark compliant cluster,](../../../../pages-for-subheaders/rancher-security.md) you have an encryption config file and an audit log config file. + +Your infrastructure provisioning system can write those files to disk. Then in your RKE template, you would specify where those files will be, then add your encryption config file and audit log config file as extra mounts to the `kube-api-server`. + +Then you would make sure that the `kube-api-server` flag in your RKE template uses your CIS-compliant config files. + +In this way, you can create flags that comply with the CIS benchmark. 
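As a rough illustration, the corresponding portion of the cluster configuration in an RKE template could look like the sketch below. The file paths are hypothetical and assume your provisioning system has already written the audit policy and encryption config to every control plane node.

```
services:
  kube-api:
    extra_args:
      # Point the kube-apiserver at the files written by your provisioning tooling
      audit-policy-file: /etc/kubernetes/audit-policy.yaml
      audit-log-path: /var/log/kube-audit/audit-log.json
      encryption-provider-config: /etc/kubernetes/encryption.yaml
    extra_binds:
      # Mount the host paths into the kube-apiserver container
      - "/etc/kubernetes/audit-policy.yaml:/etc/kubernetes/audit-policy.yaml"
      - "/etc/kubernetes/encryption.yaml:/etc/kubernetes/encryption.yaml"
      - "/var/log/kube-audit:/var/log/kube-audit"
```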
+ +# Resources + +- [Terraform documentation](https://www.terraform.io/docs/) +- [Rancher2 Terraform provider documentation](https://www.terraform.io/docs/providers/rancher2/) +- [The RanchCast - Episode 1: Rancher 2 Terraform Provider](https://youtu.be/YNCq-prI8-8): In this demo, Director of Community Jason van Brackel walks through using the Rancher 2 Terraform Provider to provision nodes and create a custom cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md new file mode 100644 index 0000000000..12320d8bab --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md @@ -0,0 +1,164 @@ +--- +title: Creating and Revising Templates +weight: 32 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/creating-and-revising/ +--- + +This section describes how to manage RKE templates and revisions. You an create, share, update, and delete templates from the **Global** view under **Tools > RKE Templates.** + +Template updates are handled through a revision system. When template owners want to change or update a template, they create a new revision of the template. Individual revisions cannot be edited. However, if you want to prevent a revision from being used to create a new cluster, you can disable it. + +Template revisions can be used in two ways: to create a new cluster, or to upgrade a cluster that was created with an earlier version of the template. The template creator can choose a default revision, but when end users create a cluster, they can choose any template and any template revision that is available to them. After the cluster is created from a specific revision, it cannot change to another template, but the cluster can be upgraded to a newer available revision of the same template. + +The template owner has full control over template revisions, and can create new revisions to update the template, delete or disable revisions that should not be used to create clusters, and choose which template revision is the default. 
+ +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a template](#creating-a-template) +- [Updating a template](#updating-a-template) +- [Deleting a template](#deleting-a-template) +- [Creating a revision based on the default revision](#creating-a-revision-based-on-the-default-revision) +- [Creating a revision based on a cloned revision](#creating-a-revision-based-on-a-cloned-revision) +- [Disabling a template revision](#disabling-a-template-revision) +- [Re-enabling a disabled template revision](#re-enabling-a-disabled-template-revision) +- [Setting a template revision as default](#setting-a-template-revision-as-default) +- [Deleting a template revision](#deleting-a-template-revision) +- [Upgrading a cluster to use a new template revision](#upgrading-a-cluster-to-use-a-new-template-revision) +- [Exporting a running cluster to a new RKE template and revision](#exporting-a-running-cluster-to-a-new-rke-template-and-revision) + +### Prerequisites + +You can create RKE templates if you have the **Create RKE Templates** permission, which can be [given by an administrator.](creator-permissions.md) + +You can revise, share, and delete a template if you are an owner of the template. For details on how to become an owner of a template, refer to [the documentation on sharing template ownership.](access-or-share-templates.md#sharing-ownership-of-templates) + +### Creating a Template + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Click **Add Template.** +1. Provide a name for the template. An auto-generated name is already provided for the template' first version, which is created along with this template. +1. Optional: Share the template with other users or groups by [adding them as members.](access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) You can also make the template public to share with everyone in the Rancher setup. +1. Then follow the form on screen to save the cluster configuration parameters as part of the template's revision. The revision can be marked as default for this template. + +**Result:** An RKE template with one revision is configured. You can use this RKE template revision later when you [provision a Rancher-launched cluster](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). After a cluster is managed by an RKE template, it cannot be disconnected and the option to uncheck **Use an existing RKE Template and Revision** will be unavailable. + +### Updating a Template + +When you update an RKE template, you are creating a revision of the existing template. Clusters that were created with an older version of the template can be updated to match the new revision. + +You can't edit individual revisions. Since you can't edit individual revisions of a template, in order to prevent a revision from being used, you can [disable it.](#disabling-a-template-revision) + +When new template revisions are created, clusters using an older revision of the template are unaffected. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template that you want to edit and click the **⋮ > Edit.** +1. Edit the required information and click **Save.** +1. Optional: You can change the default revision of this template and also change who it is shared with. + +**Result:** The template is updated. 
To apply it to a cluster using an older version of the template, refer to the section on [upgrading a cluster to use a new revision of a template.](#upgrading-a-cluster-to-use-a-new-template-revision) + +### Deleting a Template + +When you no longer use an RKE template for any of your clusters, you can delete it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to delete and click the **⋮ > Delete.** +1. Confirm the deletion when prompted. + +**Result:** The template is deleted. + +### Creating a Revision Based on the Default Revision + +You can clone the default template revision and quickly update its settings rather than creating a new revision from scratch. Cloning templates saves you the hassle of re-entering the access keys and other parameters needed for cluster creation. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template that you want to clone and click the **⋮ > New Revision From Default.** +1. Complete the rest of the form to create a new revision. + +**Result:** The RKE template revision is cloned and configured. + +### Creating a Revision Based on a Cloned Revision + +When creating new RKE template revisions from your user settings, you can clone an existing revision and quickly update its settings rather than creating a new one from scratch. Cloning template revisions saves you the hassle of re-entering the cluster parameters. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to clone. Then select **⋮ > Clone Revision.** +1. Complete the rest of the form. + +**Result:** The RKE template revision is cloned and configured. You can use the RKE template revision later when you provision a cluster. Any existing cluster using this RKE template can be upgraded to this new revision. + +### Disabling a Template Revision + +When you no longer want an RKE template revision to be used for creating new clusters, you can disable it. A disabled revision can be re-enabled. + +You can disable the revision if it is not being used by any cluster. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to disable. Then select **⋮ > Disable.** + +**Result:** The RKE template revision cannot be used to create a new cluster. + +### Re-enabling a Disabled Template Revision + +If you decide that a disabled RKE template revision should be used to create new clusters, you can re-enable it. + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the template revision you want to re-enable. Then select **⋮ > Enable.** + +**Result:** The RKE template revision can be used to create a new cluster. + +### Setting a Template Revision as Default + +When end users create a cluster using an RKE template, they can choose which revision to create the cluster with. You can configure which revision is used by default. + +To set an RKE template revision as default, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. Go to the RKE template revision that should be default and click the **⋮ > Set as Default.** + +**Result:** The RKE template revision will be used as the default option when clusters are created with the template. + +### Deleting a Template Revision + +You can delete all revisions of a template except for the default revision. + +To permanently delete a revision, + +1. From the **Global** view, click **Tools > RKE Templates.** +1. 
Go to the RKE template revision that should be deleted and click the **⋮ > Delete.** + +**Result:** The RKE template revision is deleted. + +### Upgrading a Cluster to Use a New Template Revision + +> This section assumes that you already have a cluster that [has an RKE template applied.](apply-templates.md) +> This section also assumes that you have [updated the template that the cluster is using](#updating-a-template) so that a new template revision is available. + +To upgrade a cluster to use a new template revision, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that you want to upgrade and click **⋮ > Edit.** +1. In the **Cluster Options** section, click the dropdown menu for the template revision, then select the new template revision. +1. Click **Save.** + +**Result:** The cluster is upgraded to use the settings defined in the new template revision. + +### Exporting a Running Cluster to a New RKE Template and Revision + +You can save an existing cluster's settings as an RKE template. + +This exports the cluster's settings as a new RKE template, and also binds the cluster to that template. The result is that the cluster can only be changed if the [template is updated,](manage-rke1-templates.md#updating-a-template) and the cluster is upgraded to [use a newer version of the template.] + +To convert an existing cluster to use an RKE template, + +1. From the **Global** view in Rancher, click the **Clusters** tab. +1. Go to the cluster that will be converted to use an RKE template. Click **⋮** > **Save as RKE Template.** +1. Enter a name for the template in the form that appears, and click **Create.** + +**Results:** + +- A new RKE template is created. +- The cluster is converted to use the new template. +- New clusters can be [created from the new template and revision.](apply-templates.md#creating-a-cluster-from-an-rke-template) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md new file mode 100644 index 0000000000..55af9eb53e --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md @@ -0,0 +1,17 @@ +--- +title: Overriding Template Settings +weight: 33 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/overrides/ +--- + +When a user creates an RKE template, each setting in the template has a switch in the Rancher UI that indicates if users can override the setting. This switch marks those settings as **Allow User Override.** + +After a cluster is created with a template, end users can't update any of the settings defined in the template unless the template owner marked them as **Allow User Override.** However, if the template is [updated to a new revision](manage-rke1-templates.md) that changes the settings or allows end users to change them, the cluster can be upgraded to a new revision of the template and the changes in the new revision will be applied to the cluster. + +When any parameter is set as **Allow User Override** on the RKE template, it means that end users have to fill out those fields during cluster creation and they can edit those settings afterward at any time. 
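In the template revision's YAML, settings marked **Allow User Override** surface as entries in a `questions` list that end users fill in at cluster creation time. The snippet below is a sketch only; the variable path and default value are illustrative assumptions rather than values taken from a real template.

```
questions:
  - variable: rancherKubernetesEngineConfig.kubernetesVersion   # hypothetical overridable setting
    default: v1.17.17-rancher1-1                                # placeholder default shown to the user
    required: true
```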
+ +The **Allow User Override** model of the RKE template is useful for situations such as: + +- Administrators know that some settings will need the flexibility to be frequently updated over time +- End users will need to enter their own access keys or secret keys, for example, cloud credentials or credentials for backup snapshots \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md new file mode 100644 index 0000000000..d7b08ae0ea --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md @@ -0,0 +1,88 @@ +--- +title: Pod Security Policies +weight: 1135 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/pod-security-policies/ + - /rancher/v2.5/en/tasks/global-configuration/pod-security-policies/ + - /rancher/v2.5/en/tasks/clusters/adding-a-pod-security-policy/ + - /rancher/v2.x/en/admin-settings/pod-security-policies/ +--- + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification (like root privileges). + +If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message of `Pod is forbidden: unable to validate...`. + +- [How PSPs Work](#how-psps-work) +- [Default PSPs](#default-psps) + - [Restricted](#restricted) + - [Unrestricted](#unrestricted) +- [Creating PSPs](#creating-psps) + - [Requirements](#requirements) + - [Creating PSPs in the Rancher UI](#creating-psps-in-the-rancher-ui) +- [Configuration](#configuration) + +# How PSPs Work + +You can assign PSPs at the cluster or project level. + +PSPs work through inheritance: + +- By default, PSPs assigned to a cluster are inherited by its projects, as well as any namespaces added to those projects. +- **Exception:** Namespaces that are not assigned to projects do not inherit PSPs, regardless of whether the PSP is assigned to a cluster or project. Because these namespaces have no PSPs, workload deployments to these namespaces will fail, which is the default Kubernetes behavior. +- You can override the default PSP by assigning a different PSP directly to the project. + +Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked if it complies with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. + +Read more about Pod Security Policies in the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/). + +# Default PSPs + +Rancher ships with two default Pod Security Policies (PSPs): the `restricted` and `unrestricted` policies. + +### Restricted + +This policy is based on the Kubernetes [example restricted policy](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml). It significantly restricts what types of pods can be deployed to a cluster or project. This policy: + +- Prevents pods from running as a privileged user and prevents escalation of privileges. 
+- Validates that server-required security mechanisms are in place (such as restricting what volumes can be mounted to only the core volume types and preventing root supplemental groups from being added. + +### Unrestricted + +This policy is equivalent to running Kubernetes with the PSP controller disabled. It has no restrictions on what pods can be deployed into a cluster or project. + +# Creating PSPs + +Using Rancher, you can create a Pod Security Policy using our GUI rather than creating a YAML file. + +### Requirements + +Rancher can only assign PSPs for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +You must enable PSPs at the cluster level before you can assign them to a project. This can be configured by [editing the cluster.](../../../pages-for-subheaders/cluster-configuration.md) + +It is a best practice to set PSP at the cluster level. + +We recommend adding PSPs during cluster and project creation instead of adding it to an existing one. + +### Creating PSPs in the Rancher UI + +1. From the **Global** view, select **Security** > **Pod Security Policies** from the main menu. Then click **Add Policy**. + + **Step Result:** The **Add Policy** form opens. + +2. Name the policy. + +3. Complete each section of the form. Refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) for more information on what each policy does. + + +# Configuration + +The Kubernetes documentation on PSPs is [here.](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) + + + + + +[1]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems +[2]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces +[3]: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md new file mode 100644 index 0000000000..3f9babe4a8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry.md @@ -0,0 +1,45 @@ +--- +title: Configuring a Global Default Private Registry +weight: 400 +aliases: + - /rancher/v2.x/en/admin-settings/config-private-registry/ +--- + +You might want to use a private container registry to share your custom base images within your organization. With a private registry, you can keep a private, consistent, and centralized source of truth for the container images that are used in your clusters. + +There are two main ways to set up private registries in Rancher: by setting up the global default registry through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. 
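For clusters provisioned with RKE, the cluster-level option mentioned above ultimately lands in the cluster configuration as a `private_registries` entry. A minimal sketch, with a hypothetical registry host and credentials:

```
private_registries:
  - url: registry.example.com   # hypothetical registry; do not prefix with http:// or https://
    user: pull-user             # placeholder credentials
    password: pull-password
    is_default: true            # treat this registry as the default for images
```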
+ +This section is about configuring the global default private registry, and focuses on how to configure the registry from the Rancher UI after Rancher is installed. + +For instructions on setting up a private registry with command line options during the installation of Rancher, refer to the [air gapped Kubernetes installation](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) instructions. + +If your private registry requires credentials, it cannot be used as the default registry. There is no global way to set up a private registry with authorization for every Rancher-provisioned cluster. Therefore, if you want a Rancher-provisioned cluster to pull images from a private registry with credentials, you will have to [pass in the registry credentials through the advanced cluster options](#setting-a-private-registry-with-credentials-when-deploying-a-cluster) every time you create a new cluster. + +# Setting a Private Registry with No Credentials as the Default Registry + +1. Log into Rancher and configure the default administrator password. + +1. Go into the **Settings** view. + + ![](/img/airgap/settings.png) + +1. Look for the setting called `system-default-registry` and choose **Edit**. + + ![](/img/airgap/edit-system-default-registry.png) + +1. Change the value to your registry (e.g. `registry.yourdomain.com:port`). Do not prefix the registry with `http://` or `https://`. + + ![](/img/airgap/enter-system-default-registry.png) + +**Result:** Rancher will use your private registry to pull system images. + +# Setting a Private Registry with Credentials when Deploying a Cluster + +You can follow these steps to configure a private registry when you provision a cluster with Rancher: + +1. When you create a cluster through the Rancher UI, go to the **Cluster Options** section and click **Show Advanced Options.** +1. In the Enable Private Registries section, click **Enabled.** +1. Enter the registry URL and credentials. +1. Click **Save.** + +**Result:** The new cluster will be able to pull images from the private registry. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md new file mode 100644 index 0000000000..e5d7a6201a --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md @@ -0,0 +1,195 @@ +--- +title: Cluster and Project Roles +weight: 1127 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/cluster-project-roles/ +--- + +Cluster and project roles define user authorization inside a cluster or project. You can manage these roles from the **Global > Security > Roles** page. + +### Membership and Role Assignment + +The projects and clusters accessible to non-administrative users is determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. + +When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. 
Users assigned the `Owner` role can assign other users roles in the cluster or project. + +> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. + +### Cluster Roles + +_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. + +- **Cluster Owner:** + + These users have full control over the cluster and all resources in it. + +- **Cluster Member:** + + These users can view most cluster level resources and create new projects. + +#### Custom Cluster Roles + +Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. + +#### Cluster Role Reference + +The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. + +| Built-in Cluster Role | Owner | Member | +| ---------------------------------- | ------------- | --------------------------------- | +| Create Projects | ✓ | ✓ | +| Manage Cluster Backups             | ✓ | | +| Manage Cluster Catalogs | ✓ | | +| Manage Cluster Members | ✓ | | +| Manage Nodes | ✓ | | +| Manage Storage | ✓ | | +| View All Projects | ✓ | | +| View Cluster Catalogs | ✓ | ✓ | +| View Cluster Members | ✓ | ✓ | +| View Nodes | ✓ | ✓ | + +For details on how each cluster role can access Kubernetes resources, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Clusters** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. + +> **Note:** +>When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Giving a Custom Cluster Role to a Cluster Member + +After an administrator [sets up a custom cluster role,](custom-roles.md) cluster owners and admins can then assign those roles to cluster members. + +To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. + +To assign the role to a new cluster member, + +1. Go to the **Cluster** view, then go to the **Members** tab. +1. Click **Add Member.** Then in the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create.** + +**Result:** The member has the assigned role. + +To assign any custom role to an existing cluster member, + +1. Go to the member you want to give the role to. Click the **⋮ > View in API.** +1. In the **roleTemplateId** field, go to the drop-down menu and choose the role you want to assign to the member. Click **Show Request** and **Send Request.** + +**Result:** The member has the assigned role. 
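Behind both of these flows, the membership is stored in the local (Rancher management) cluster as a `ClusterRoleTemplateBinding`. The sketch below is illustrative only: the cluster ID, user ID, and role template name are placeholders, and the field names are a best-effort mapping of what the API view calls `roleTemplateId`; `kubectl get roletemplates` lists the real IDs on your installation.

```
apiVersion: management.cattle.io/v3
kind: ClusterRoleTemplateBinding
metadata:
  name: custom-role-binding
  namespace: c-abc12               # placeholder: namespace matches the cluster ID
clusterName: c-abc12               # placeholder cluster ID
roleTemplateName: nodes-manage     # placeholder role template (shown as roleTemplateId in the API view)
userName: u-xyz98                  # placeholder user ID
```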
+ +### Project Roles + +_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. + +- **Project Owner:** + + These users have full control over the project and all resources in it. + +- **Project Member:** + + These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. + + >**Note:** + > + >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + +- **Read Only:** + + These users can view everything in the project but cannot create, update, or delete anything. + + >**Caveat:** + > + >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + +#### Custom Project Roles + +Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. + +#### Project Role Reference + +The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. + +| Built-in Project Role | Owner | Member | Read Only | +| ---------------------------------- | ------------- | ----------------------------- | ------------- | +| Manage Project Members | ✓ | | | +| Create Namespaces | ✓ | ✓ | | +| Manage Config Maps | ✓ | ✓ | | +| Manage Ingress | ✓ | ✓ | | +| Manage Project Catalogs | ✓ | | | +| Manage Secrets | ✓ | ✓ | | +| Manage Service Accounts | ✓ | ✓ | | +| Manage Services | ✓ | ✓ | | +| Manage Volumes | ✓ | ✓ | | +| Manage Workloads | ✓ | ✓ | | +| View Secrets | ✓ | ✓ | | +| View Config Maps | ✓ | ✓ | ✓ | +| View Ingress | ✓ | ✓ | ✓ | +| View Project Members | ✓ | ✓ | ✓ | +| View Project Catalogs | ✓ | ✓ | ✓ | +| View Service Accounts | ✓ | ✓ | ✓ | +| View Services | ✓ | ✓ | ✓ | +| View Volumes | ✓ | ✓ | ✓ | +| View Workloads | ✓ | ✓ | ✓ | + +> **Notes:** +> +>- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. +>- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. 
+>- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. + +### Defining Custom Roles +As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. + +When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. + +### Default Cluster and Project Roles + +By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. + +There are two methods for changing default cluster/project roles: + +- **Assign Custom Roles**: Create a [custom role](custom-roles.md) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. + +- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. + + For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). + +>**Note:** +> +>- Although you can [lock](locked-roles.md) a default role, the system still assigns the role to users who create a cluster/project. +>- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. + +### Configuring Default Roles for Cluster and Project Creators + +You can change the cluster or project role(s) that are automatically assigned to the creating user. + +1. From the **Global** view, select **Security > Roles** from the main menu. Select either the **Cluster** or **Project** tab. + +1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit**. + +1. Enable the role as default. + +
    + **For Clusters** + +1. From **Cluster Creator Default**, choose **Yes: Default role for new cluster creation**. +1. Click **Save**. + +
    + **For Projects** + +1. From **Project Creator Default**, choose **Yes: Default role for new project creation**. +1. Click **Save**. + +
    + +1. If you want to remove a default role, edit the permission and select **No** from the default roles option. + +**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. + +### Cluster Membership Revocation Behavior + +When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: + +- Access the projects they hold membership in. +- Exercise any [individual project roles](#project-role-reference) they are assigned. + +If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md new file mode 100644 index 0000000000..3be1c53fc0 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md @@ -0,0 +1,141 @@ +--- +title: Custom Roles +weight: 1128 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/roles/ + - /rancher/v2.x/en/admin-settings/rbac/default-custom-roles/ +--- + +Within Rancher, _roles_ determine what actions a user can make within a cluster or project. + +Note that _roles_ are different from _permissions_, which determine what clusters and projects you can access. + +> It is possible for a custom role to enable privilege escalation. For details, see [this section.](#privilege-escalation) + +This section covers the following topics: + +- [Prerequisites](#prerequisites) +- [Creating a custom role for a cluster or project](#creating-a-custom-role-for-a-cluster-or-project) +- [Creating a custom global role](#creating-a-custom-global-role) +- [Deleting a custom global role](#deleting-a-custom-global-role) +- [Assigning a custom global role to a group](#assigning-a-custom-global-role-to-a-group) +- [Privilege escalation](#privilege-escalation) + +# Prerequisites + +To complete the tasks on this page, one of the following permissions are required: + + - [Administrator Global Permissions](global-permissions.md). + - [Custom Global Permissions](global-permissions.md#custom-global-permissions) with the [Manage Roles](global-permissions.md) role assigned. + +# Creating A Custom Role for a Cluster or Project + +While Rancher comes out-of-the-box with a set of default user roles, you can also create default custom roles to provide users with very specific permissions within Rancher. + +The steps to add custom roles differ depending on the version of Rancher. + +1. From the **Global** view, select **Security > Roles** from the main menu. + +1. Select a tab to determine the scope of the roles you're adding. The tabs are: + + - **Cluster:** The role is valid for assignment when adding/managing members to _only_ clusters. + - **Project:** The role is valid for assignment when adding/managing members to _only_ projects. 
+ +1. Click **Add Cluster/Project Role.** + +1. **Name** the role. + +1. Optional: Choose the **Cluster/Project Creator Default** option to assign this role to a user when they create a new cluster or project. Using this feature, you can expand or restrict the default roles for cluster/project creators. + + > Out of the box, the Cluster Creator Default and the Project Creator Default roles are `Cluster Owner` and `Project Owner` respectively. + +1. Use the **Grant Resources** options to assign individual [Kubernetes API endpoints](https://kubernetes.io/docs/reference/) to the role. + + > When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + + You can also choose the individual cURL methods (`Create`, `Delete`, `Get`, etc.) available for use with each endpoint you assign. + +1. Use the **Inherit from a Role** options to assign individual Rancher roles to your custom roles. Note: When a custom role inherits from a parent role, the parent role cannot be deleted until the child role is deleted. + +1. Click **Create**. + +# Creating a Custom Global Role + +### Creating a Custom Global Role that Copies Rules from an Existing Role + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role in which all of the rules from another role, such as the administrator role, are copied into a new role. This allows you to only configure the variations between the existing role and the new role. + +The custom global role can then be assigned to a user or group so that the custom global role takes effect the first time the user or users sign into Rancher. + +To create a custom global role based on an existing role, + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, go to the role that the custom global role will be based on. Click **⋮ (…) > Clone.** +1. Enter a name for the role. +1. Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + +1. Click **Save.** + +### Creating a Custom Global Role that Does Not Copy Rules from Another Role + +Custom global roles don't have to be based on existing roles. To create a custom global role by choosing the specific Kubernetes resource operations that should be allowed for the role, follow these steps: + +1. Go to the **Global** view and click **Security > Roles.** +1. On the **Global** tab, click **Add Global Role.** +1. Enter a name for the role. +1. 
Optional: To assign the custom role default for new users, go to the **New User Default** section and click **Yes: Default role for new users.** +1. In the **Grant Resources** section, select the Kubernetes resource operations that will be enabled for users with the custom role. + + > The Resource text field provides a method to search for pre-defined Kubernetes API resources, or enter a custom resource name for the grant. The pre-defined or `(Custom)` resource must be selected from the dropdown, after entering a resource name into this field. + +1. Click **Save.** + +# Deleting a Custom Global Role + +When deleting a custom global role, all global role bindings with this custom role are deleted. + +If a user is only assigned one custom global role, and the role is deleted, the user would lose access to Rancher. For the user to regain access, an administrator would need to edit the user and apply new global permissions. + +Custom global roles can be deleted, but built-in roles cannot be deleted. + +To delete a custom global role, + +1. Go to the **Global** view and click **Security > Roles.** +2. On the **Global** tab, go to the custom global role that should be deleted and click **⋮ (…) > Delete.** +3. Click **Delete.** + +# Assigning a Custom Global Role to a Group + +If you have a group of individuals that need the same level of access in Rancher, it can save time to create a custom global role. When the role is assigned to a group, the users in the group have the appropriate level of access the first time they sign into Rancher. + +When a user in the group logs in, they get the built-in Standard User global role by default. They will also get the permissions assigned to their groups. + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have their individual Standard User role. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Custom** section, choose any custom global role that will be assigned to the group. +1. Optional: In the **Global Permissions** or **Built-in** sections, select any additional permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. + +# Privilege Escalation + +The `Configure Catalogs` custom permission is powerful and should be used with caution. When an admin assigns the `Configure Catalogs` permission to a standard user, it could result in privilege escalation in which the user could give themselves admin access to Rancher provisioned clusters. Anyone with this permission should be considered equivalent to an admin. + +The `Manager Users` role grants the ability to create, update, and delete _any_ user. 
This presents the risk of privilege escalation as even non-admin users with this role will be able to create, update, and delete admin users. Admins should take caution when assigning this role. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md new file mode 100644 index 0000000000..3571ebfa11 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md @@ -0,0 +1,236 @@ +--- +title: Global Permissions +weight: 1126 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/global-permissions/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +_Permissions_ are individual access rights that you can assign when selecting a custom permission for a user. + +Global Permissions define user authorization outside the scope of any particular cluster. Out-of-the-box, there are three default global permissions: `Administrator`, `Standard User` and `User-base`. + +- **Administrator:** These users have full control over the entire Rancher system and all clusters within it. + +- **Standard User:** These users can create new clusters and use them. Standard users can also assign other users permissions to their clusters. + +- **User-Base:** User-Base users have login-access only. + +You cannot update or delete the built-in Global Permissions. + +This section covers the following topics: + +- [Restricted Admin](#restricted-admin) +- [Global permission assignment](#global-permission-assignment) + - [Global permissions for new local users](#global-permissions-for-new-local-users) + - [Global permissions for users with external authentication](#global-permissions-for-users-with-external-authentication) +- [Custom global permissions](#custom-global-permissions) + - [Custom global permissions reference](#custom-global-permissions-reference) + - [Configuring default global permissions for new users](#configuring-default-global-permissions) + - [Configuring global permissions for existing individual users](#configuring-global-permissions-for-existing-individual-users) + - [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + - [Refreshing group memberships](#refreshing-group-memberships) + +# Restricted Admin + +A new `restricted-admin` role was created in Rancher v2.5 in order to prevent privilege escalation from the local Rancher server Kubernetes cluster. This role has full administrator access to all downstream clusters managed by Rancher, but it does not have permission to alter the local Kubernetes cluster. + +The `restricted-admin` can create other `restricted-admin` users with an equal level of access. + +A new setting was added to Rancher to set the initial bootstrapped administrator to have the `restricted-admin` role. This applies to the first user created when the Rancher server is started for the first time. If the environment variable is set, then no global administrator would be created, and it would be impossible to create the global administrator through Rancher. 
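As an illustration only, and assuming Rancher is installed with the `rancher` Helm chart and that the chart's `extraEnv` values are used to pass environment variables, the bootstrap setting described below can be supplied through the chart values:

```yaml
# values.yaml sketch for the rancher Helm chart (extraEnv is an assumption about your install method)
extraEnv:
  - name: CATTLE_RESTRICTED_DEFAULT_ADMIN
    value: "true"
```

For a single-node Docker installation, the equivalent is passing `-e CATTLE_RESTRICTED_DEFAULT_ADMIN=true` to `docker run`.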
+ +To bootstrap Rancher with the `restricted-admin` as the initial user, the Rancher server should be started with the following environment variable: + +``` +CATTLE_RESTRICTED_DEFAULT_ADMIN=true +``` +### List of `restricted-admin` Permissions + +The permissions for the `restricted-admin` role differ based on the Rancher version. + + + + +The `restricted-admin` permissions are as follows: + +- Has full admin access to all downstream clusters managed by Rancher. +- Can add other users and assign them to clusters outside of the local cluster. +- Can create other restricted admins. + + + + +The `restricted-admin` permissions are as follows: + +- Has full admin access to all downstream clusters managed by Rancher. +- Has very limited access to the local Kubernetes cluster. Can access Rancher custom resource definitions, but has no access to any Kubernetes native types. +- Can add other users and assign them to clusters outside of the local cluster. +- Can create other restricted admins. +- Cannot grant any permissions in the local cluster they don't currently have. (This is how Kubernetes normally operates) + + + + +### Upgrading from Rancher with a Hidden Local Cluster + +Before Rancher v2.5, it was possible to run the Rancher server using this flag to hide the local cluster: + +``` +--add-local=false +``` + +You will need to drop this flag when upgrading to Rancher v2.5. Otherwise, Rancher will not start. The `restricted-admin` role can be used to continue restricting access to the local cluster. + +### Changing Global Administrators to Restricted Admins + +If Rancher already has a global administrator, they should change all global administrators over to the new `restricted-admin` role. + +This can be done through **Security > Users** and moving any Administrator role over to Restricted Administrator. + +Signed-in users can change themselves over to the `restricted-admin` if they wish, but they should only do that as the last step, otherwise they won't have the permissions to do so. + +# Global Permission Assignment + +Global permissions for local users are assigned differently than users who log in to Rancher using external authentication. + +### Global Permissions for New Local Users + +When you create a new local user, you assign them a global permission as you complete the **Add User** form. + +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column. You can [change the default global permissions to meet your needs.](#configuring-default-global-permissions) + +### Global Permissions for Users with External Authentication + +When a user logs into Rancher using an external authentication provider for the first time, they are automatically assigned the **New User Default** global permissions. By default, Rancher assigns the **Standard User** permission for new users. 
+ +To see the default permissions for new users, go to the **Global** view and click **Security > Roles.** On the **Global** tab, there is a column named **New User Default.** When adding a new local user, the user receives all default global permissions that are marked as checked in this column, and you can [change them to meet your needs.](#configuring-default-global-permissions) + +Permissions can be assigned to an individual user with [these steps.](#configuring-global-permissions-for-existing-individual-users) + +You can [assign a role to everyone in the group at the same time](#configuring-global-permissions-for-groups) if the external authentication provider supports groups. + +# Custom Global Permissions + +Using custom permissions is convenient for providing users with narrow or specialized access to Rancher. + +When a user from an [external authentication source](../../../../pages-for-subheaders/about-authentication.md) signs into Rancher for the first time, they're automatically assigned a set of global permissions (hereafter, permissions). By default, after a user logs in for the first time, they are created as a user and assigned the default `user` permission. The standard `user` permission allows users to login and create clusters. + +However, in some organizations, these permissions may extend too much access. Rather than assigning users the default global permissions of `Administrator` or `Standard User`, you can assign them a more restrictive set of custom global permissions. + +The default roles, Administrator and Standard User, each come with multiple global permissions built into them. The Administrator role includes all global permissions, while the default user role includes three global permissions: Create Clusters, Use Catalog Templates, and User Base, which is equivalent to the minimum permission to log in to Rancher. In other words, the custom global permissions are modularized so that if you want to change the default user role permissions, you can choose which subset of global permissions are included in the new default user role. + +Administrators can enforce custom global permissions in multiple ways: + +- [Changing the default permissions for new users](#configuring-default-global-permissions) +- [Configuring global permissions for individual users](#configuring-global-permissions-for-individual-users) +- [Configuring global permissions for groups](#configuring-global-permissions-for-groups) + +### Custom Global Permissions Reference + +The following table lists each custom global permission available and whether it is included in the default global permissions, `Administrator`, `Standard User` and `User-Base`. + +| Custom Global Permission | Administrator | Standard User | User-Base | +| ---------------------------------- | ------------- | ------------- |-----------| +| Create Clusters | ✓ | ✓ | | +| Create RKE Templates | ✓ | ✓ | | +| Manage Authentication | ✓ | | | +| Manage Catalogs | ✓ | | | +| Manage Cluster Drivers | ✓ | | | +| Manage Node Drivers | ✓ | | | +| Manage PodSecurityPolicy Templates | ✓ | | | +| Manage Roles | ✓ | | | +| Manage Settings | ✓ | | | +| Manage Users | ✓ | | | +| Use Catalog Templates | ✓ | ✓ | | +| User Base\* (Basic log-in access) | ✓ | ✓ | | + +> \*This role has two names: +> +> - When you go to the Users tab and edit a user's global role, this role is called Login Access in the custom global permissions list. +> - When you go to the Security tab and edit the roles from the roles page, this role is called User Base. 
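If you prefer the command line, the same information can also be read from Rancher's local Kubernetes cluster, where each global permission is represented by a `GlobalRole` custom resource. A minimal sketch, assuming kubectl access to the local cluster and that the built-in roles keep their default object names (`admin`, `user`, and `user-base`):

```
# List the global roles that Rancher knows about
kubectl get globalroles.management.cattle.io

# Inspect the rules bundled into the default "user" role
kubectl get globalroles.management.cattle.io user -o yaml
```

The `rules` section of each object lists the API groups, resources, and verbs that the permission grants.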
+ +For details on which Kubernetes resources correspond to each global permission, you can go to the **Global** view in the Rancher UI. Then click **Security > Roles** and go to the **Global** tab. If you click an individual role, you can refer to the **Grant Resources** table to see all of the operations and resources that are permitted by the role. + +> **Notes:** +> +> - Each permission listed above is comprised of multiple individual permissions not listed in the Rancher UI. For a full list of these permissions and the rules they are comprised of, access through the API at `/v3/globalRoles`. +> - When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Configuring Default Global Permissions + +If you want to restrict the default permissions for new users, you can remove the `user` permission as default role and then assign multiple individual permissions as default instead. Conversely, you can also add administrative permissions on top of a set of other standard permissions. + +> **Note:** Default roles are only assigned to users added from an external authentication provider. For local users, you must explicitly assign global permissions when adding a user to Rancher. You can customize these global permissions when adding the user. + +To change the default global permissions that are assigned to external users upon their first log in, follow these steps: + +1. From the **Global** view, select **Security > Roles** from the main menu. Make sure the **Global** tab is selected. + +1. Find the permissions set that you want to add or remove as a default. Then edit the permission by selecting **⋮ > Edit**. + +1. If you want to add the permission as a default, Select **Yes: Default role for new users** and then click **Save**. + +1. If you want to remove a default permission, edit the permission and select **No** from **New User Default**. + +**Result:** The default global permissions are configured based on your changes. Permissions assigned to new users display a check in the **New User Default** column. + +### Configuring Global Permissions for Individual Users + +To configure permission for a user, + +1. Go to the **Users** tab. + +1. On this page, go to the user whose access level you want to change and click **⋮ > Edit.** + +1. In the **Global Permissions** section, click **Custom.** + +1. Check the boxes for each subset of permissions you want the user to have access to. + +1. Click **Save.** + +> **Result:** The user's global permissions have been updated. + +### Configuring Global Permissions for Groups + +If you have a group of individuals that need the same level of access in Rancher, it can save time to assign permissions to the entire group at once, so that the users in the group have the appropriate level of access the first time they sign into Rancher. + +After you assign a custom global role to a group, the custom global role will be assigned to a user in the group when they log in to Rancher. + +For existing users, the new permissions will take effect when the users log out of Rancher and back in again, or when an administrator [refreshes the group memberships.](#refreshing-group-memberships) + +For new users, the new permissions take effect when the users log in to Rancher for the first time. 
New users from this group will receive the permissions from the custom global role in addition to the **New User Default** global permissions. By default, the **New User Default** permissions are equivalent to the **Standard User** global role, but the default permissions can be [configured.](#configuring-default-global-permissions) + +If a user is removed from the external authentication provider group, they would lose their permissions from the custom global role that was assigned to the group. They would continue to have any remaining roles that were assigned to them, which would typically include the roles marked as **New User Default.** Rancher will remove the permissions that are associated with the group when the user logs out, or when an administrator [refreshes group memberships,](#refreshing-group-memberships) whichever comes first. + +> **Prerequisites:** You can only assign a global role to a group if: +> +> * You have set up an [external authentication provider](../../../../pages-for-subheaders/about-authentication.md#external-vs-local-authentication) +> * The external authentication provider supports [user groups](../about-authentication/authentication-config/manage-users-and-groups.md) +> * You have already set up at least one user group with the authentication provider + +To assign a custom global role to a group, follow these steps: + +1. From the **Global** view, go to **Security > Groups.** +1. Click **Assign Global Role.** +1. In the **Select Group To Add** field, choose the existing group that will be assigned the custom global role. +1. In the **Global Permissions,** **Custom,** and/or **Built-in** sections, select the permissions that the group should have. +1. Click **Create.** + +**Result:** The custom global role will take effect when the users in the group log into Rancher. + +### Refreshing Group Memberships + +When an administrator updates the global permissions for a group, the changes take effect for individual group members after they log out of Rancher and log in again. + +To make the changes take effect immediately, an administrator or cluster owner can refresh group memberships. + +An administrator might also want to refresh group memberships if a user is removed from a group in the external authentication service. In that case, the refresh makes Rancher aware that the user was removed from the group. + +To refresh group memberships, + +1. From the **Global** view, click **Security > Users.** +1. Click **Refresh Group Memberships.** + +**Result:** Any changes to the group members' permissions will take effect. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md new file mode 100644 index 0000000000..7cde1f7e44 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md @@ -0,0 +1,39 @@ +--- +title: Locked Roles +weight: 1129 +aliases: + - /rancher/v2.x/en/admin-settings/rbac/locked-roles/ +--- + +You can set roles to a status of `locked`. Locking roles prevent them from being assigned users in the future. + +Locked roles: + +- Cannot be assigned to users that don't already have it assigned. 
+- Are not listed in the **Member Roles** drop-down when you are adding a user to a cluster or project. +- Do not affect users assigned the role before you lock the role. These users retain access that the role provides. + + **Example:** let's say your organization creates an internal policy that users assigned to a cluster are prohibited from creating new projects. It's your job to enforce this policy. + + To enforce it, before you add new users to the cluster, you should lock the following roles: `Cluster Owner`, `Cluster Member`, and `Create Projects`. Then you could create a new custom role that includes the same permissions as a __Cluster Member__, except the ability to create projects. Then, you use this new custom role when adding users to a cluster. + +Roles can be locked by the following users: + +- Any user assigned the `Administrator` global permission. +- Any user assigned the `Custom Users` permission, along with the `Manage Roles` role. + + +## Locking/Unlocking Roles + +If you want to prevent a role from being assigned to users, you can set it to a status of `locked`. + +You can lock roles in two contexts: + +- When you're [adding a custom role](custom-roles.md). +- When you editing an existing role (see below). + +1. From the **Global** view, select **Security** > **Roles**. + +2. From the role that you want to lock (or unlock), select **⋮** > **Edit**. + +3. From the **Locked** option, choose the **Yes** or **No** radio button. Then click **Save**. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/cis-scan-guides/view-reports.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md new file mode 100644 index 0000000000..bd7afa98a0 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md @@ -0,0 +1,29 @@ +--- +title: 1. Enable Istio in the Cluster +weight: 1 +aliases: + - /rancher/v2.5/en/istio/setup/enable-istio-in-cluster + - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-cluster + - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-cluster/ +--- + +>**Prerequisites:** +> +>- Only a user with the `cluster-admin` [Kubernetes default role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) assigned can configure and install Istio in a Kubernetes cluster. +>- If you have pod security policies, you will need to install Istio with the CNI enabled. For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md) +>- To install Istio on an RKE2 cluster, additional steps are required. 
For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md) >- To install Istio in a cluster where project network isolation is enabled, additional steps are required. For details, see [this section.](../../../explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md) + +1. From the **Cluster Explorer**, navigate to available **Charts** in **Apps & Marketplace**. +1. Select the Istio chart from the Rancher-provided charts. +1. If you have not already installed your own monitoring app, you will be prompted to install the rancher-monitoring app. Optional: Set your Selector or Scrape config options when installing the rancher-monitoring app. +1. Optional: Configure member access and [resource limits](../../../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) for the Istio components. Ensure you have enough resources on your worker nodes to enable Istio. +1. Optional: Make additional configuration changes to values.yaml if needed. +1. Optional: Add additional resources or configuration via the [overlay file.](../../../pages-for-subheaders/configuration-options.md#overlay-file) +1. Click **Install**. + +**Result:** Istio is installed at the cluster level. + +# Additional Config Options + +For more information on configuring Istio, refer to the [configuration reference.](../../../pages-for-subheaders/configuration-options.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md new file mode 100644 index 0000000000..4221bb5854 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md @@ -0,0 +1,45 @@ +--- +title: 2. Enable Istio in a Namespace +weight: 2 +aliases: + - /rancher/v2.5/en/istio/setup/enable-istio-in-namespace + - /rancher/v2.5/en/istio/v2.5/setup/enable-istio-in-namespace + - /rancher/v2.x/en/istio/v2.5/setup/enable-istio-in-namespace/ +--- + +You will need to manually enable Istio in each namespace that you want to be tracked or controlled by Istio. When Istio is enabled in a namespace, the Envoy sidecar proxy will be automatically injected into all new workloads that are deployed in the namespace. + +This namespace setting only affects new workloads in the namespace. Any preexisting workloads will need to be re-deployed to leverage automatic sidecar injection. + +> **Prerequisite:** To enable Istio in a namespace, the cluster must have Istio installed. + +1. In the Rancher **Cluster Explorer,** open the kubectl shell. +1. Then run `kubectl label namespace <namespace> istio-injection=enabled`, replacing `<namespace>` with the name of your namespace. + +**Result:** The namespace now has the label `istio-injection=enabled`. All new workloads deployed in this namespace will have the Istio sidecar injected by default. + +### Verifying that Automatic Istio Sidecar Injection is Enabled + +To verify that Istio is enabled, deploy a hello-world workload in the namespace. Go to the workload and click the pod name. In the **Containers** section, you should see the `istio-proxy` container. + +### Excluding Workloads from Being Injected with the Istio Sidecar + +If you need to exclude a workload from being injected with the Istio sidecar, use the following annotation on the workload: + +``` +sidecar.istio.io/inject: "false" +``` + +To add the annotation to a workload, + +1. 
From the **Cluster Explorer** view, use the side-nav to select the **Overview** page for workloads. +1. Go to the workload that should not have the sidecar and edit it as YAML. +1. Add `sidecar.istio.io/inject: "false"` as an annotation on the workload. +1. Click **Save.** + +**Result:** The Istio sidecar will not be injected into the workload. + +> **NOTE:** If a Job you deployed is not completing, you may need to add this annotation to its pod using the steps above. Because Istio sidecars run indefinitely, a Job cannot be considered complete even after its task has finished. + + +### [Next: Select the Nodes ](../../../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md new file mode 100644 index 0000000000..9acbc86d11 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md @@ -0,0 +1,28 @@ +--- +title: 6. Generate and View Traffic +weight: 7 +aliases: + - /rancher/v2.5/en/istio/setup/view-traffic + - /rancher/v2.5/en/istio/setup/view-traffic + - /rancher/v2.5/en/istio/v2.5/setup/view-traffic + - /rancher/v2.x/en/istio/v2.5/setup/view-traffic/ +--- + +This section describes how to view the traffic that is being managed by Istio. + +# The Kiali Traffic Graph + +The Istio overview page provides a link to the Kiali dashboard. From the Kiali dashboard, you are able to view graphs for each namespace. The Kiali graph provides a powerful way to visualize the topology of your Istio service mesh. It shows you which services communicate with each other. + +>**Prerequisite:** To enable traffic to show up in the graph, ensure you have Prometheus installed in the cluster. Rancher-istio installs Kiali configured by default to work with the rancher-monitoring chart. You can use rancher-monitoring or install your own monitoring solution. Optionally, you can change how data scraping occurs by setting the [Selectors & Scrape Configs](../../../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) options. + +To see the traffic graph, + +1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. +1. Click the **Kiali** link on the Istio **Overview** page. +1. Click **Graph** in the side nav. +1. Change the namespace in the **Namespace** dropdown to view the traffic for each namespace. + +If you refresh the URL to the BookInfo app several times, you should be able to see green arrows on the Kiali graph showing traffic to `v1` and `v3` of the `reviews` service. The control panel on the right side of the graph lets you configure details, including how many minutes of the most recent traffic to show on the graph. 
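If you would rather generate load from a terminal than refresh the browser by hand, a small loop against the BookInfo product page works as well. This is only a sketch; `<ingress_gateway_IP>` and `<port>` are placeholders for the `istio-ingressgateway` address and port described on the gateway setup page:

```
# Send one request per second to the product page (Ctrl+C to stop)
while true; do
  curl -s -o /dev/null "http://<ingress_gateway_IP>:<port>/productpage"
  sleep 1
done
```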
+ +For additional tools and visualizations, you can go to Grafana, and Prometheus dashboards from the **Monitoring** **Overview** page diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md new file mode 100644 index 0000000000..f6f1c24651 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md @@ -0,0 +1,144 @@ +--- +title: 4. Set up the Istio Gateway +weight: 5 +aliases: + - /rancher/v2.5/en/istio/setup/gateway + - /rancher/v2.5/en/istio/v2.5/setup/gateway + - /rancher/v2.x/en/istio/v2.5/setup/gateway/ +--- + +The gateway to each cluster can have its own port or load balancer, which is unrelated to a service mesh. By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. + +You can use the Nginx Ingress controller with or without Istio installed. If this is the only gateway to your cluster, Istio will be able to route traffic from service to service, but Istio will not be able to receive traffic from outside the cluster. + +To allow Istio to receive external traffic, you need to enable Istio's gateway, which works as a north-south proxy for external traffic. When you enable the Istio gateway, the result is that your cluster will have two Ingresses. + +You will also need to set up a Kubernetes gateway for your services. This Kubernetes resource points to Istio's implementation of the ingress gateway to the cluster. + +You can route traffic into the service mesh with a load balancer or use Istio's NodePort gateway. This section describes how to set up the NodePort gateway. + +For more information on the Istio gateway, refer to the [Istio documentation.](https://istio.io/docs/reference/config/networking/v1alpha3/gateway/) + +![In an Istio-enabled cluster, you can have two Ingresses: the default Nginx Ingress, and the default Istio controller.](/img/istio-ingress.svg) + +# Enable an Istio Gateway + +The ingress gateway is a Kubernetes service that will be deployed in your cluster. The Istio Gateway allows for more extensive customization and flexibility. + +1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. +1. Click **Gateways** in the side nav bar. +1. Click **Create from Yaml**. +1. Paste your Istio Gateway yaml, or **Read from File**. +1. Click **Create**. + +**Result:** The gateway is deployed, and will now route traffic with applied rules + +# Example Istio Gateway + +We add the BookInfo app deployments in services when going through the Workloads example. Next we add an Istio Gateway so that the app is accessible from outside your cluster. + +1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. +1. Click **Gateways** in the side nav bar. +1. Click **Create from Yaml**. +1. Copy and paste the Gateway yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" +--- +``` + +Then to deploy the VirtualService that provides the traffic routing for the Gateway + +1. Click **VirtualService** in the side nav bar. +1. Click **Create from Yaml**. +1. 
Copy and paste the VirtualService yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage + port: + number: 9080 +``` + +**Result:** You have configured your gateway resource so that Istio can receive traffic from outside the cluster. + +Confirm that the resource exists by running: +``` +kubectl get gateway -A +``` + +The result should be something like this: +``` +NAME AGE +bookinfo-gateway 64m +``` + +### Access the ProductPage Service from a Web Browser + +To test and see if the BookInfo app deployed correctly, the app can be viewed a web browser using the Istio controller IP and port, combined with the request name specified in your Kubernetes gateway resource: + +`http://:/productpage` + +To get the ingress gateway URL and port, + +1. From the **Cluster Explorer**, Click on **Workloads > Overview**. +1. Scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Under the name of this workload, you should see links, such as `80/tcp`. +1. Click one of those links. This should show you the URL of the ingress gateway in your web browser. Append `/productpage` to the URL. + +**Result:** You should see the BookInfo app in the web browser. + +For help inspecting the Istio controller URL and ports, try the commands the [Istio documentation.](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports) + +# Troubleshooting + +The [official Istio documentation](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#troubleshooting) suggests `kubectl` commands to inspect the correct ingress host and ingress port for external requests. + +### Confirming that the Kubernetes Gateway Matches Istio's Ingress Controller + +You can try the steps in this section to make sure the Kubernetes gateway is configured properly. + +In the gateway resource, the selector refers to Istio's default ingress controller by its label, in which the key of the label is `istio` and the value is `ingressgateway`. To make sure the label is appropriate for the gateway, do the following: + +1. From the **Cluster Explorer**, Click on **Workloads > Overview**. +1. Scroll down to the `istio-system` namespace. +1. Within `istio-system`, there is a workload named `istio-ingressgateway`. Click the name of this workload and go to the **Labels and Annotations** section. You should see that it has the key `istio` and the value `ingressgateway`. This confirms that the selector in the Gateway resource matches Istio's default ingress controller. + +### [Next: Set up Istio's Components for Traffic Management](set-up-traffic-management.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md new file mode 100644 index 0000000000..f3d61423f3 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md @@ -0,0 +1,78 @@ +--- +title: 5. 
Set up Istio's Components for Traffic Management +weight: 6 +aliases: + - /rancher/v2.5/en/istio/setup/set-up-traffic-management + - /rancher/v2.5/en/istio/v2.5/setup/set-up-traffic-management + - /rancher/v2.x/en/istio/v2.5/setup/set-up-traffic-management/ +--- + +A central advantage of traffic management in Istio is that it allows dynamic request routing. Some common applications for dynamic request routing include canary deployments and blue/green deployments. The two key resources in Istio traffic management are *virtual services* and *destination rules*. + +- [Virtual services](https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/) intercept and direct traffic to your Kubernetes services, allowing you to divide percentages of traffic from a request to different services. You can use them to define a set of routing rules to apply when a host is addressed. +- [Destination rules](https://istio.io/docs/reference/config/networking/v1alpha3/destination-rule/) serve as the single source of truth about which service versions are available to receive traffic from virtual services. You can use these resources to define policies that apply to traffic that is intended for a service after routing has occurred. + +This section describes how to add an example virtual service that corresponds to the `reviews` microservice in the sample BookInfo app. The purpose of this service is to divide traffic between two versions of the `reviews` service. + +In this example, we take the traffic to the `reviews` service and intercept it so that 50 percent of it goes to `v1` of the service and 50 percent goes to `v2`. + +After this virtual service is deployed, we will generate traffic and see from the Kiali visualization that traffic is being routed evenly between the two versions of the service. + +To deploy the virtual service and destination rules for the `reviews` service, + +1. From the **Cluster Explorer**, select **Istio** from the nav dropdown. +1. Click **DestinationRule** in the side nav bar. +1. Click **Create from Yaml**. +1. Copy and paste the DestinationRule yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: reviews +spec: + host: reviews + subsets: + - name: v1 + labels: + version: v1 + - name: v2 + labels: + version: v2 + - name: v3 + labels: + version: v3 +``` + +Then to deploy the VirtualService that provides the traffic routing that utilizes the DestinationRule + +1. Click **VirtualService** in the side nav bar. +1. Click **Create from Yaml**. +1. Copy and paste the VirtualService yaml provided below. +1. Click **Create**. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: reviews +spec: + hosts: + - reviews + http: + - route: + - destination: + host: reviews + subset: v1 + weight: 50 + - destination: + host: reviews + subset: v3 + weight: 50 +--- +``` + +**Result:** When you generate traffic to this service (for example, by refreshing the ingress gateway URL), the Kiali traffic graph will reflect that traffic to the `reviews` service is divided evenly between `v1` and `v3`. 
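The same mechanism supports canary-style rollouts: adjusting the `weight` values changes the traffic split without touching the deployments themselves. For example, a sketch of a variation on the VirtualService above that keeps 90 percent of requests on `v1` and sends 10 percent to `v3`:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
    - reviews
  http:
    - route:
        - destination:
            host: reviews
            subset: v1
          weight: 90   # most traffic stays on the current version
        - destination:
            host: reviews
            subset: v3
          weight: 10   # a small share goes to the canary
```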
+ +### [Next: Generate and View Traffic](generate-and-view-traffic.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md new file mode 100644 index 0000000000..ce303085ac --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md @@ -0,0 +1,351 @@ +--- +title: 3. Add Deployments and Services with the Istio Sidecar +weight: 4 +aliases: + - /rancher/v2.5/en/istio/setup/deploy-workloads + - /rancher/v2.5/en/istio/v2.5/setup/deploy-workloads + - /rancher/v2.x/en/istio/v2.5/setup/deploy-workloads/ +--- + +> **Prerequisite:** To enable Istio for a workload, the cluster and namespace must have the Istio app installed. + +Enabling Istio in a namespace only enables automatic sidecar injection for new workloads. To enable the Envoy sidecar for existing workloads, you need to enable it manually for each workload. + +To inject the Istio sidecar on an existing workload in the namespace, from the **Cluster Explorer** go to the workload, click the **⋮,** and click **Redeploy.** When the workload is redeployed, it will have the Envoy sidecar automatically injected. + +Wait a few minutes for the workload to upgrade to have the istio sidecar. Click it and go to the Containers section. You should be able to see `istio-proxy` alongside your original workload. This means the Istio sidecar is enabled for the workload. Istio is doing all the wiring for the sidecar envoy. Now Istio can do all the features automatically if you enable them in the yaml. + +### Add Deployments and Services + +There are a few ways to add new **Deployments** in your namespace + +1. From the **Cluster Explorer** click on **Workload > Overview.** +1. Click **Create.** +1. Select **Deployment** from the various workload options. +1. Fill out the form, or **Edit as Yaml.** +1. Click **Create.** + +Alternatively, you can select the specific workload you want to deploy from the **Workload** section of the left navigation bar and create it from there. + +To add a **Service** to your namespace + +1. From the **Cluster Explorer** click on **Service Discovery > Services** +1. Click **Create** +1. Select the type of service you want to create from the various options +1. Fill out the form, or **Edit as Yaml** +1. Click **Create** + +You can also create deployments and services using the kubectl **shell** + +1. Run `kubectl create -f .yaml` if your file is stored locally in the cluster +1. Or run `cat<< EOF | kubectl apply -f -`, paste the file contents into the terminal, then run `EOF` to complete the command. + +### Example Deployments and Services + +Next we add the Kubernetes resources for the sample deployments and services for the BookInfo app in Istio's documentation. + +1. From the **Cluster Explorer**, open the kubectl **shell** +1. Run `cat<< EOF | kubectl apply -f -` +1. Copy the below resources into the the shell +1. 
Run `EOF` + +This will set up the following sample resources from Istio's example BookInfo app: + +Details service and deployment: + +- A `details` Service +- A ServiceAccount for `bookinfo-details` +- A `details-v1` Deployment + +Ratings service and deployment: + +- A `ratings` Service +- A ServiceAccount for `bookinfo-ratings` +- A `ratings-v1` Deployment + +Reviews service and deployments (three versions): + +- A `reviews` Service +- A ServiceAccount for `bookinfo-reviews` +- A `reviews-v1` Deployment +- A `reviews-v2` Deployment +- A `reviews-v3` Deployment + +Productpage service and deployment: + +This is the main page of the app, which will be visible from a web browser. The other services will be called from this page. + +- A `productpage` service +- A ServiceAccount for `bookinfo-productpage` +- A `productpage-v1` Deployment + +### Resource YAML + +```yaml +# Copyright 2017 Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Reviews service 
+################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 +--- +``` + +### [Next: Set up the Istio Gateway](set-up-istio-gateway.md) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md new file mode 100644 index 0000000000..e7a6c2da78 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md @@ -0,0 +1,59 @@ +--- +title: Adding Users to Clusters +weight: 2020 +aliases: + - /rancher/v2.5/en/tasks/clusters/adding-managing-cluster-members/ + - /rancher/v2.5/en/k8s-in-rancher/cluster-members/ + - /rancher/v2.5/en/cluster-admin/cluster-members + - /rancher/v2.5/en/cluster-provisioning/cluster-members/ + - 
/rancher/v2.x/en/cluster-admin/cluster-access/cluster-members/ +--- + +If you want to provide a user with access and permissions to _all_ projects, nodes, and resources within a cluster, assign the user a cluster membership. + +>**Tip:** Want to provide a user with access to a _specific_ project within a cluster? See [Adding Project Members](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) instead. + +There are two contexts where you can add cluster members: + +- Adding Members to a New Cluster + + You can add members to a cluster as you create it (recommended if possible). + +- [Adding Members to an Existing Cluster](#editing-cluster-membership) + + You can always add members to a cluster after a cluster is provisioned. + +## Editing Cluster Membership + +Cluster administrators can edit the membership for a cluster, controlling which Rancher users can access the cluster and what features they can use. + +1. From the **Global** view, open the cluster that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the cluster. + + If external authentication is configured: + + - Rancher returns users from your [external authentication](../../../../pages-for-subheaders/about-authentication.md) source as you type. + + >**Using AD but can't find your users?** + >There may be an issue with your search attribute configuration. See [Configuring Active Directory Authentication: Step 5](../../authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md). + + - A drop-down allows you to add groups instead of individual users. The drop-down only lists groups that you, the logged in user, are part of. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. For more information, see [External Authentication Configuration and Principal Users](../../../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +4. Assign the user or group **Cluster** roles. + + [What are Cluster Roles?](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + >**Tip:** For Custom Roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + > - To remove roles from the list, [Lock/Unlock Roles](../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + +**Result:** The chosen users are added to the cluster. + +- To revoke cluster membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the cluster, delete them from the cluster, and then re-add them with modified roles. 
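Behind the scenes, each cluster membership is stored in Rancher's local Kubernetes cluster as a `ClusterRoleTemplateBinding` object, namespaced by the cluster's ID. As a sketch only, assuming kubectl access to the local cluster and using `<cluster-id>` (for example, `c-xxxxx`) as a placeholder:

```
# List the memberships that have been granted on a cluster
kubectl get clusterroletemplatebindings.management.cattle.io -n <cluster-id>
```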
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md new file mode 100644 index 0000000000..28a62afd51 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint.md @@ -0,0 +1,46 @@ +--- +title: How the Authorized Cluster Endpoint Works +weight: 2015 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-access/ace/ +--- + +This section describes how the kubectl CLI, the kubeconfig file, and the authorized cluster endpoint work together to allow you to access a downstream Kubernetes cluster directly, without authenticating through the Rancher server. It is intended to provide background information and context to the instructions for [how to set up kubectl to directly access a cluster.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +### About the kubeconfig File + +The _kubeconfig file_ is a file used to configure access to Kubernetes when used in conjunction with the kubectl command line tool (or other clients). + +This kubeconfig file and its contents are specific to the cluster you are viewing. It can be downloaded from the cluster view in Rancher. You will need a separate kubeconfig file for each cluster that you have access to in Rancher. + +After you download the kubeconfig file, you will be able to use the kubeconfig file and its Kubernetes [contexts](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-context-and-configuration) to access your downstream cluster. + +If admins have [enforced TTL on kubeconfig tokens](../../../../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires [rancher cli](../../../../pages-for-subheaders/cli-with-rancher.md) to be present in your PATH. + + +### Two Authentication Methods for RKE Clusters + +If the cluster is not an [RKE cluster,](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) the kubeconfig file allows you to access the cluster in only one way: it lets you be authenticated with the Rancher server, then Rancher allows you to run kubectl commands on the cluster. + +For RKE clusters, the kubeconfig file allows you to be authenticated in two ways: + +- **Through the Rancher server authentication proxy:** Rancher's authentication proxy validates your identity, then connects you to the downstream cluster that you want to access. +- **Directly with the downstream cluster's API server:** RKE clusters have an authorized cluster endpoint enabled by default. This endpoint allows you to access your downstream Kubernetes cluster with the kubectl CLI and a kubeconfig file, and it is enabled by default for RKE clusters. In this scenario, the downstream cluster's Kubernetes API server authenticates you by calling a webhook (the `kube-api-auth` microservice) that Rancher set up. + +This second method, the capability to connect directly to the cluster's Kubernetes API server, is important because it lets you access your downstream cluster if you can't connect to Rancher. + +To use the authorized cluster endpoint, you will need to configure kubectl to use the extra kubectl context in the kubeconfig file that Rancher generates for you when the RKE cluster is created. 
This file can be downloaded from the cluster view in the Rancher UI, and the instructions for configuring kubectl are on [this page.](use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) + +These methods of communicating with downstream Kubernetes clusters are also explained in the [architecture page](../../../../pages-for-subheaders/rancher-manager-architecture.md#communicating-with-downstream-user-clusters) in the larger context of explaining how Rancher works and how Rancher communicates with downstream clusters. + +### About the kube-api-auth Authentication Webhook + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the [authorized cluster endpoint,](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) which is only available for [RKE clusters.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +During cluster provisioning, the file `/etc/kubernetes/kube-api-authn-webhook.yaml` is deployed and `kube-apiserver` is configured with `--authentication-token-webhook-config-file=/etc/kubernetes/kube-api-authn-webhook.yaml`. This configures the `kube-apiserver` to query `http://127.0.0.1:6440/v1/authenticate` to determine authentication for bearer tokens. + +The scheduling rules for `kube-api-auth` are listed below: + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| -------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| kube-api-auth | `beta.kubernetes.io/os:NotIn:windows`
    `node-role.kubernetes.io/controlplane:In:"true"` | none | `operator:Exists` | diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md new file mode 100644 index 0000000000..9ccf6c0309 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md @@ -0,0 +1,110 @@ +--- +title: "Access a Cluster with Kubectl and kubeconfig" +description: "Learn how you can access and manage your Kubernetes clusters using kubectl with kubectl Shell or with kubectl CLI and kubeconfig file. A kubeconfig file is used to configure access to Kubernetes. When you create a cluster with Rancher, it automatically creates a kubeconfig for your cluster." +weight: 2010 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/kubectl/ + - /rancher/v2.5/en/cluster-admin/kubectl + - /rancher/v2.5/en/concepts/clusters/kubeconfig-files/ + - /rancher/v2.5/en/k8s-in-rancher/kubeconfig/ + - /rancher/2.x/en/cluster-admin/kubeconfig + - /rancher/v2.x/en/cluster-admin/cluster-access/kubectl/ +--- + +This section describes how to manipulate your downstream Kubernetes cluster with kubectl from the Rancher UI or from your workstation. + +For more information on using kubectl, see [Kubernetes Documentation: Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/). + +- [Accessing clusters with kubectl shell in the Rancher UI](#accessing-clusters-with-kubectl-shell-in-the-rancher-ui) +- [Accessing clusters with kubectl from your workstation](#accessing-clusters-with-kubectl-from-your-workstation) +- [Note on Resources created using kubectl](#note-on-resources-created-using-kubectl) +- [Authenticating Directly with a Downstream Cluster](#authenticating-directly-with-a-downstream-cluster) + - [Connecting Directly to Clusters with FQDN Defined](#connecting-directly-to-clusters-with-fqdn-defined) + - [Connecting Directly to Clusters without FQDN Defined](#connecting-directly-to-clusters-without-fqdn-defined) + + +### Accessing Clusters with kubectl Shell in the Rancher UI + +You can access and manage your clusters by logging into Rancher and opening the kubectl shell in the UI. No further configuration necessary. + +1. From the **Global** view, open the cluster that you want to access with kubectl. + +2. Click **Launch kubectl**. Use the window that opens to interact with your Kubernetes cluster. + +### Accessing Clusters with kubectl from Your Workstation + +This section describes how to download your cluster's kubeconfig file, launch kubectl from your workstation, and access your downstream cluster. + +This alternative method of accessing the cluster allows you to authenticate with Rancher and manage your cluster without using the Rancher UI. + +> **Prerequisites:** These instructions assume that you have already created a Kubernetes cluster, and that kubectl is installed on your workstation. For help installing kubectl, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + +1. Log into Rancher. From the **Global** view, open the cluster that you want to access with kubectl. +1. Click **Kubeconfig File**. +1. Copy the contents displayed to your clipboard. +1. 
Paste the contents into a new file on your local computer. Move the file to `~/.kube/config`. Note: The default location that kubectl uses for the kubeconfig file is `~/.kube/config`, but you can use any directory and specify it using the `--kubeconfig` flag, as in this command: + ``` + kubectl --kubeconfig /custom/path/kube.config get pods + ``` +1. From your workstation, launch kubectl. Use it to interact with your Kubernetes cluster. + + +### Note on Resources Created Using kubectl + +Rancher will discover and show resources created by `kubectl`. However, these resources might not have all the necessary annotations on discovery. If an operation (for instance, scaling the workload) is done to the resource using the Rancher UI/API, this may trigger recreation of the resources due to the missing annotations. This should only happen the first time an operation is done to the discovered resource. + +# Authenticating Directly with a Downstream Cluster + +This section is intended to help you set up an alternative method to access an [RKE cluster.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +This method is only available for RKE clusters that have the [authorized cluster endpoint](../../../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) enabled. When Rancher creates this RKE cluster, it generates a kubeconfig file that includes additional kubectl context(s) for accessing your cluster. This additional context allows you to use kubectl to authenticate with the downstream cluster without authenticating through Rancher. For a longer explanation of how the authorized cluster endpoint works, refer to [this page.](authorized-cluster-endpoint.md) + +As a best practice, we recommend setting up this method to access your RKE cluster so that you can still access the cluster even if you can't connect to Rancher. + +> **Prerequisites:** The following steps assume that you have created a Kubernetes cluster and followed the steps to [connect to your cluster with kubectl from your workstation.](#accessing-clusters-with-kubectl-from-your-workstation) + +To find the name of the context(s) in your downloaded kubeconfig file, run: + +``` +kubectl config get-contexts --kubeconfig /custom/path/kube.config +CURRENT NAME CLUSTER AUTHINFO NAMESPACE +* my-cluster my-cluster user-46tmn + my-cluster-controlplane-1 my-cluster-controlplane-1 user-46tmn +``` + +In this example, when you use `kubectl` with the first context, `my-cluster`, you will be authenticated through the Rancher server. + +With the second context, `my-cluster-controlplane-1`, you authenticate with the authorized cluster endpoint, communicating with the downstream RKE cluster directly. + +We recommend using a load balancer with the authorized cluster endpoint. For details, refer to the [recommended architecture section.](../../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#architecture-for-an-authorized-cluster-endpoint) + +Now that you have the name of the context needed to authenticate directly with the cluster, you can pass the context name as an option when running kubectl commands. The commands will differ depending on whether your cluster has an FQDN defined. Examples are provided in the sections below. + +If `kubectl` works normally with this context, it confirms that you can access your cluster while bypassing Rancher's authentication proxy.
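If you prefer not to pass `--context` on every command, you can switch the kubeconfig's current context once and let subsequent commands use it. A minimal sketch, assuming the example context name `my-cluster-controlplane-1` from the output above:

```
# Make the authorized cluster endpoint context the current one
kubectl config use-context my-cluster-controlplane-1 --kubeconfig /custom/path/kube.config

# Subsequent commands with this kubeconfig now go straight to the downstream API server
kubectl --kubeconfig /custom/path/kube.config get nodes
```

Switching back to the `my-cluster` context routes requests through the Rancher authentication proxy again.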
+ +### Connecting Directly to Clusters with FQDN Defined + +If an FQDN is defined for the cluster, a single context referencing the FQDN will be created. The context will be named `<CLUSTER_NAME>-fqdn`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. + +Assuming the kubeconfig file is located at `~/.kube/config`: + +``` +kubectl --context <CLUSTER_NAME>-fqdn get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context <CLUSTER_NAME>-fqdn get pods +``` + +### Connecting Directly to Clusters without FQDN Defined + +If there is no FQDN defined for the cluster, extra contexts will be created referencing the IP address of each node in the control plane. Each context will be named `<CLUSTER_NAME>-<NODE_NAME>`. When you want to use `kubectl` to access this cluster without Rancher, you will need to use this context. + +Assuming the kubeconfig file is located at `~/.kube/config`: +``` +kubectl --context <CLUSTER_NAME>-<NODE_NAME> get nodes +``` +Directly referencing the location of the kubeconfig file: +``` +kubectl --kubeconfig /custom/path/kube.config --context <CLUSTER_NAME>-<NODE_NAME> get pods +``` diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md new file mode 100644 index 0000000000..633006d2ec --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md @@ -0,0 +1,32 @@ +--- +title: Adding a Pod Security Policy +weight: 80 +aliases: + - /rancher/v2.x/en/cluster-admin/pod-security-policy/ +--- + +> **Prerequisite:** The options below are available only for clusters that are [launched using RKE.](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +When your cluster is running pods with security-sensitive configurations, assign it a [pod security policy](../authentication-permissions-and-global-configuration/create-pod-security-policies.md), which is a set of rules that monitors the conditions and settings in your pods. If a pod doesn't meet the rules specified in your policy, the policy stops it from running. + +You can assign a pod security policy when you provision a cluster. However, if you need to relax or restrict security for your pods later, you can update the policy while editing your cluster. + +1. From the **Global** view, find the cluster to which you want to apply a pod security policy. Select **⋮ > Edit**. + +2. Expand **Cluster Options**. + +3. From **Pod Security Policy Support**, select **Enabled**. + + >**Note:** This option is only available for clusters [provisioned by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +4. From the **Default Pod Security Policy** drop-down, select the policy you want to apply to the cluster. + + Rancher ships with [policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) of `restricted` and `unrestricted`, although you can [create custom policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md#default-pod-security-policies) as well. + +5. Click **Save**. + +**Result:** The pod security policy is applied to the cluster and any projects within the cluster. + +>**Note:** Workloads already running before assignment of a pod security policy are grandfathered in.
Even if they don't meet your pod security policy, workloads running before assignment of the policy continue to run. +> +>To check if a running workload passes your pod security policy, clone or upgrade it. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md new file mode 100644 index 0000000000..5fef5e00c3 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies.md @@ -0,0 +1,21 @@ +--- +title: Assigning Pod Security Policies +weight: 2260 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/options/pod-security-policies/ +--- + +_Pod Security Policies_ are objects that control security-sensitive aspects of pod specification (like root privileges). + +## Adding a Default Pod Security Policy + +When you create a new cluster with RKE, you can configure it to apply a PSP immediately. As you create the cluster, use the **Cluster Options** to enable a PSP. The PSP assigned to the cluster will be the default PSP for projects within the cluster. + +>**Prerequisite:** +>Create a Pod Security Policy within Rancher. Before you can assign a default PSP to a new cluster, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). +>**Note:** +>For security purposes, we recommend assigning a PSP as you create your clusters. + +To enable a default Pod Security Policy, set the **Pod Security Policy Support** option to **Enabled**, and then make a selection from the **Default Pod Security Policy** drop-down. + +When the cluster finishes provisioning, the PSP you selected is applied to all projects within the cluster. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md new file mode 100644 index 0000000000..97bb0db574 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md @@ -0,0 +1,288 @@ +--- +title: Removing Kubernetes Components from Nodes +description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually +weight: 2055 +aliases: + - /rancher/v2.5/en/faq/cleaning-cluster-nodes/ + - /rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. + +When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. + +When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned, and the only action needed is to restart the node. 
When a node has become unreachable and the automatic cleanup process cannot be used, the sections below describe the steps that need to be executed before the node can be added to a cluster again. + +## What Gets Removed? + +When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing. + +| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Registered Nodes][4] | +| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | +| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | +| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | +| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | +| Rancher Deployment | ✓ | ✓ | ✓ | | +| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | +| All resources created under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | +| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | + +[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +[2]: ../../../pages-for-subheaders/use-existing-nodes.md +[3]: ../../../pages-for-subheaders/amazon-eks-permissions.md +[4]: ../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md + +## Removing a Node from a Cluster by Rancher UI + +When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +## Removing Rancher Components from a Cluster Manually + +When a node is unreachable and has been removed from the cluster, the automatic cleanup process can't be triggered. Please follow the steps below to manually remove the Rancher components. + +>**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands, as data will be lost. + +### Removing Rancher Components from Registered Clusters + +For registered clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or you can run a script that removes Rancher components from the nodes. Both options make the same deletions. + +After the registered cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. + + + + +>**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. + +After you initiate the removal of a registered cluster using the Rancher UI (or API), the following events occur. + +1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster.
This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. + +1. Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. + +1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. + +**Result:** All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + + +Rather than cleaning registered cluster nodes using the Rancher UI, you can run a script instead. + +>**Prerequisite:** +> +>Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. + +1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: + + ``` + chmod +x user-cluster.sh + ``` + +1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. + + If you don't have an air gap environment, skip this step. + +1. From the same directory, run the script and provide the `rancher/rancher-agent` image version, which should be equal to the version of Rancher used to manage the cluster (`<RANCHER_VERSION>`): + + >**Tip:** + > + >Add the `-dry-run` flag to preview the script's outcome without making changes. + ``` + ./user-cluster.sh rancher/rancher-agent:<RANCHER_VERSION> + ``` + +**Result:** The script runs. All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +### Windows Nodes + +To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes-generated resources and the execution binary. It also drops the firewall rules and network settings. + +To run the script, use this command in PowerShell: + +``` +pushd c:\etc\rancher +.\cleanup.ps1 +popd +``` + +**Result:** The node is reset and can be re-added to a Kubernetes cluster. + +### Docker Containers, Images, and Volumes + +Depending on the role you assigned to the node, it runs containers for Kubernetes components, overlay networking, DNS, the ingress controller, and the Rancher agent, as well as any pods you created that were scheduled to this node. + +**To clean all Docker containers, images and volumes:** + +``` +docker rm -f $(docker ps -qa) +docker rmi -f $(docker images -q) +docker volume rm $(docker volume ls -q) +``` + +### Mounts + +Kubernetes components and secrets leave behind mounts on the system that need to be unmounted.
+ +Mounts | +--------| +`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | +`/var/lib/kubelet` | +`/var/lib/rancher` | + +**To unmount all mounts:** + +``` +for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done +``` + +### Directories and Files + +The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. + +>**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. + +Directories | +--------| +`/etc/ceph` | +`/etc/cni` | +`/etc/kubernetes` | +`/opt/cni` | +`/opt/rke` | +`/run/secrets/kubernetes.io` | +`/run/calico` | +`/run/flannel` | +`/var/lib/calico` | +`/var/lib/etcd` | +`/var/lib/cni` | +`/var/lib/kubelet` | +`/var/lib/rancher/rke/log` | +`/var/log/containers` | +`/var/log/kube-audit` | +`/var/log/pods` | +`/var/run/calico` | + +**To clean the directories:** + +``` +rm -rf /etc/ceph \ + /etc/cni \ + /etc/kubernetes \ + /opt/cni \ + /opt/rke \ + /run/secrets/kubernetes.io \ + /run/calico \ + /run/flannel \ + /var/lib/calico \ + /var/lib/etcd \ + /var/lib/cni \ + /var/lib/kubelet \ + /var/lib/rancher/rke/log \ + /var/log/containers \ + /var/log/kube-audit \ + /var/log/pods \ + /var/run/calico +``` + +### Network Interfaces and Iptables + +The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. + +### Network Interfaces + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. + +Interfaces | +--------| +`flannel.1` | +`cni0` | +`tunl0` | +`caliXXXXXXXXXXX` (random interface names) | +`vethXXXXXXXX` (random interface names) | + +**To list all interfaces:** + +``` +# Using ip +ip address show + +# Using ifconfig +ifconfig -a +``` + +**To remove an interface:** + +``` +ip link delete interface_name +``` + +### Iptables + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. + +Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
+ +Chains | +--------| +`cali-failsafe-in` | +`cali-failsafe-out` | +`cali-fip-dnat` | +`cali-fip-snat` | +`cali-from-hep-forward` | +`cali-from-host-endpoint` | +`cali-from-wl-dispatch` | +`cali-fw-caliXXXXXXXXXXX` (random chain names) | +`cali-nat-outgoing` | +`cali-pri-kns.NAMESPACE` (chain per namespace) | +`cali-pro-kns.NAMESPACE` (chain per namespace) | +`cali-to-hep-forward` | +`cali-to-host-endpoint` | +`cali-to-wl-dispatch` | +`cali-tw-caliXXXXXXXXXXX` (random chain names) | +`cali-wl-to-host` | +`KUBE-EXTERNAL-SERVICES` | +`KUBE-FIREWALL` | +`KUBE-MARK-DROP` | +`KUBE-MARK-MASQ` | +`KUBE-NODEPORTS` | +`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | +`KUBE-SERVICES` | +`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | + +**To list all iptables rules:** + +``` +iptables -L -t nat +iptables -L -t mangle +iptables -L +``` diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md new file mode 100644 index 0000000000..78a9ccac94 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md @@ -0,0 +1,102 @@ +--- +title: Cloning Clusters +weight: 2035 +aliases: + - /rancher/v2.5/en/cluster-provisioning/cloning-clusters/ + - /rancher/v2.x/en/cluster-admin/cloning-clusters/ +--- + +If you have a cluster in Rancher that you want to use as a template for creating similar clusters, you can use Rancher CLI to clone the cluster's configuration, edit it, and then use it to quickly launch the cloned cluster. + +Duplication of registered clusters is not supported. + +| Cluster Type | Cloneable? | +|----------------------------------|---------------| +| [Nodes Hosted by Infrastructure Provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) | ✓ | +| [Hosted Kubernetes Providers](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) | ✓ | +| [Custom Cluster](../../../pages-for-subheaders/use-existing-nodes.md) | ✓ | +| [Registered Cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) | | + +> **Warning:** During the process of duplicating a cluster, you will edit a config file full of cluster settings. However, we recommend editing only values explicitly listed in this document, as cluster duplication is designed for simple cluster copying, _not_ wide scale configuration changes. Editing other values may invalidate the config file, which will lead to cluster deployment failure. + +## Prerequisites + +Download and install [Rancher CLI](../../../pages-for-subheaders/cli-with-rancher.md). Remember to [create an API bearer token](../../../reference-guides/user-settings/api-keys.md) if necessary. + + +## 1. Export Cluster Config + +Begin by using Rancher CLI to export the configuration for the cluster that you want to clone. + +1. Open Terminal and change your directory to the location of the Rancher CLI binary, `rancher`. + +1. Enter the following command to list the clusters managed by Rancher. + + + ./rancher cluster ls + + +1. Find the cluster that you want to clone, and copy either its resource `ID` or `NAME` to your clipboard. From this point on, we'll refer to the resource `ID` or `NAME` as ``, which is used as a placeholder in the next step. + +1. Enter the following command to export the configuration for your cluster. 
+ + ./rancher clusters export <RESOURCE_ID> + + **Step Result:** The YAML for a cloned cluster prints to Terminal. + +1. Copy the YAML to your clipboard and paste it in a new file. Save the file as `cluster-template.yml` (or any other name, as long as it has a `.yml` extension). + +## 2. Modify Cluster Config + +Use your favorite text editor to modify the cluster configuration in `cluster-template.yml` for your cloned cluster. + +> **Note:** Cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + +1. Open `cluster-template.yml` (or whatever you named your config) in your favorite text editor. + + >**Warning:** Only edit the cluster config values explicitly called out below. Many of the values listed in this file are used to provision your cloned cluster, and editing their values may break the provisioning process. + + +1. As depicted in the example below, at the `<NEW_CLUSTER_NAME>` placeholder, replace your original cluster's name with a unique name. If your cloned cluster has a duplicate name, the cluster will not provision successfully. + + ```yml + Version: v3 + clusters: + <NEW_CLUSTER_NAME>: # ENTER UNIQUE NAME + dockerRootDir: /var/lib/docker + enableNetworkPolicy: false + rancherKubernetesEngineConfig: + addonJobTimeout: 30 + authentication: + strategy: x509 + authorization: {} + bastionHost: {} + cloudProvider: {} + ignoreDockerVersion: true + ``` + +1. For each `nodePools` section, replace the original nodepool name with a unique name at the `<NEW_NODEPOOL_NAME>` placeholder. If your cloned cluster has a duplicate nodepool name, the cluster will not provision successfully. + + ```yml + nodePools: + <NEW_NODEPOOL_NAME>: + clusterId: do + controlPlane: true + etcd: true + hostnamePrefix: mark-do + nodeTemplateId: do + quantity: 1 + worker: true + ``` + +1. When you're done, save and close the configuration. + +## 3. Launch Cloned Cluster + +Move `cluster-template.yml` into the same directory as the Rancher CLI binary. Then run this command: + + ./rancher up --file cluster-template.yml + +**Result:** Your cloned cluster begins provisioning. Enter `./rancher cluster ls` to confirm. You can also log into the Rancher UI and open the **Global** view to watch your provisioning cluster's progress. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md new file mode 100644 index 0000000000..e9dc887601 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md @@ -0,0 +1,34 @@ +--- +title: GlusterFS Volumes +weight: 5000 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/glusterfs-volumes/ +--- + +> This section only applies to [RKE clusters.](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. The `kubelet` log will show: `transport endpoint is not connected`.
To prevent this from happening, you can configure your cluster to mount the `systemd-run` binary in the `kubelet` container. There are two requirements before you can change the cluster configuration: + +- The node needs to have the `systemd-run` binary installed (this can be checked by using the command `which systemd-run` on each cluster node) +- The `systemd-run` binary needs to be compatible with Debian OS on which the hyperkube image is based (this can be checked using the following command on each cluster node, replacing the image tag with the Kubernetes version you want to use) + +``` +docker run -v /usr/bin/systemd-run:/usr/bin/systemd-run --entrypoint /usr/bin/systemd-run rancher/hyperkube:v1.16.2-rancher1 --version +``` + +>**Note:** +> +>Before updating your Kubernetes YAML to mount the `systemd-run` binary, make sure the `systemd` package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed. + +``` +services: + kubelet: + extra_binds: + - "/usr/bin/systemd-run:/usr/bin/systemd-run" +``` + +After the cluster has finished provisioning, you can check the `kubelet` container logging to see if the functionality is activated by looking for the following logline: + +``` +Detected OS with systemd +``` \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md new file mode 100644 index 0000000000..527c9fe4bb --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md @@ -0,0 +1,79 @@ +--- +title: How Persistent Storage Works +weight: 1 +aliases: + - /rancher/v2.5/en/tasks/workloads/add-persistent-volume-claim + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/how-storage-works/ +--- + +A persistent volume (PV) is a piece of storage in the Kubernetes cluster, while a persistent volume claim (PVC) is a request for storage. + +There are two ways to use persistent storage in Kubernetes: + +- Use an existing persistent volume +- Dynamically provision new persistent volumes + +To use an existing PV, your application will need to use a PVC that is bound to a PV, and the PV should include the minimum resources that the PVC requires. + +For dynamic storage provisioning, your application will need to use a PVC that is bound to a storage class. The storage class contains the authorization to provision new persistent volumes. 
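For reference, the two approaches look roughly like this as Kubernetes manifests. This is a minimal sketch with illustrative names (`my-existing-pv`, `my-storage-class`) rather than values taken from Rancher:

```yaml
# Option 1: use existing storage by claiming a pre-created PersistentVolume
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: existing-data
spec:
  accessModes: ["ReadWriteOnce"]
  volumeName: my-existing-pv          # the pre-created PV that maps to existing storage
  resources:
    requests:
      storage: 10Gi
---
# Option 2: dynamically provision new storage through a storage class
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dynamic-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: my-storage-class  # the provisioner behind this class creates a new PV
  resources:
    requests:
      storage: 10Gi
```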
+ +![Setting Up New and Existing Persistent Storage](/img/rancher-storage.svg) + +For more information, refer to the [official Kubernetes documentation on storage](https://kubernetes.io/docs/concepts/storage/volumes/) + +This section covers the following topics: + +- [About persistent volume claims](#about-persistent-volume-claims) + - [PVCs are required for both new and existing persistent storage](#pvcs-are-required-for-both-new-and-existing-persistent-storage) +- [Setting up existing storage with a PVC and PV](#setting-up-existing-storage-with-a-pvc-and-pv) + - [Binding PVs to PVCs](#binding-pvs-to-pvcs) +- [Provisioning new storage with a PVC and storage class](#provisioning-new-storage-with-a-pvc-and-storage-class) + +# About Persistent Volume Claims + +Persistent volume claims (PVCs) are objects that request storage resources from your cluster. They're similar to a voucher that your deployment can redeem for storage access. A PVC is mounted into a workloads as a volume so that the workload can claim its specified share of the persistent storage. + +To access persistent storage, a pod must have a PVC mounted as a volume. This PVC lets your deployment application store its data in an external location, so that if a pod fails, it can be replaced with a new pod and continue accessing its data stored externally, as though an outage never occurred. + +Each Rancher project contains a list of PVCs that you've created, available from **Resources > Workloads > Volumes.** You can reuse these PVCs when creating deployments in the future. + +### PVCs are Required for Both New and Existing Persistent Storage + +A PVC is required for pods to use any persistent storage, regardless of whether the workload is intended to use storage that already exists, or the workload will need to dynamically provision new storage on demand. + +If you are setting up existing storage for a workload, the workload mounts a PVC, which refers to a PV, which corresponds to existing storage infrastructure. + +If a workload should request new storage, the workload mounts PVC, which refers to a storage class, which has the capability to create a new PV along with its underlying storage infrastructure. + +Rancher lets you create as many PVCs within a project as you'd like. + +You can mount PVCs to a deployment as you create it, or later, after the deployment is running. + +# Setting up Existing Storage with a PVC and PV + +Your pods can store data in [volumes,](https://kubernetes.io/docs/concepts/storage/volumes/) but if the pod fails, that data is lost. To solve this issue, Kubernetes offers persistent volumes (PVs), which are Kubernetes resources that correspond to external storage disks or file systems that your pods can access. If a pod crashes, its replacement pod can access the data in persistent storage without any data loss. + +PVs can represent a physical disk or file system that you host on premise, or a vendor-hosted storage resource, such as Amazon EBS or Azure Disk. + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +> **Important:** PVs are created at the cluster level, which means that in a multi-tenant cluster, teams with access to separate namespaces could have access to the same PV. 
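To make the mapping concrete, a PV that corresponds to an existing NFS export might look like the sketch below; the server and path are placeholders for storage that you have already provisioned:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-existing-pv
spec:
  capacity:
    storage: 10Gi
  accessModes: ["ReadWriteOnce"]
  persistentVolumeReclaimPolicy: Retain   # keep the underlying data if the claim is deleted
  nfs:
    server: nfs.example.com               # placeholder: your existing NFS server
    path: /exports/data                   # placeholder: an export that already exists
```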
+ +### Binding PVs to PVCs + +When pods are set up to use persistent storage, they mount a persistent volume claim (PVC) that is mounted the same way as any other Kubernetes volume. When each PVC is created, the Kubernetes master considers it to be a request for storage and binds it to a PV that matches the minimum resource requirements of the PVC. Not every PVC is guaranteed to be bound to a PV. According to the Kubernetes [documentation,](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + +> Claims will remain unbound indefinitely if a matching volume does not exist. Claims will be bound as matching volumes become available. For example, a cluster provisioned with many 50Gi PVs would not match a PVC requesting 100Gi. The PVC can be bound when a 100Gi PV is added to the cluster. + +In other words, you can create unlimited PVCs, but they will only be bound to PVs if the Kubernetes master can find a sufficient PVs that has at least the amount of disk space required by the PVC. + +To dynamically provision new storage, the PVC mounted in the pod would have to correspond to a storage class instead of a persistent volume. + +# Provisioning New Storage with a PVC and Storage Class + +Storage Classes allow you to create PVs dynamically without having to create persistent storage in an infrastructure provider first. + +For example, if a workload is bound to a PVC and the PVC refers to an Amazon EBS Storage Class, the storage class can dynamically create an EBS volume and a corresponding PV. + +The Kubernetes master will then bind the newly created PV to your workload's PVC, allowing your workload to use the persistent storage. + diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md new file mode 100644 index 0000000000..4f6deb1aba --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md @@ -0,0 +1,115 @@ +--- +title: Dynamically Provisioning New Storage in Rancher +weight: 2 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/provisioning-new-storage/ +--- + +This section describes how to provision new persistent storage for workloads in Rancher. + +This section assumes that you understand the Kubernetes concepts of storage classes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +New storage is often provisioned by a cloud provider such as Amazon EBS. However, new storage doesn't have to be in the cloud. + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../explanations/integrations-in-rancher/longhorn.md) + +To provision new storage for your workloads, follow these steps: + +1. [Add a storage class and configure it to use your storage.](#1-add-a-storage-class-and-configure-it-to-use-your-storage) +2. [Add a persistent volume claim that refers to the storage class.](#2-add-a-persistent-volume-claim-that-refers-to-the-storage-class) +3. 
[Mount the persistent volume claim as a volume for your workload.](#3-mount-the-persistent-volume-claim-as-a-volume-for-your-workload) + +### Prerequisites + +- To set up persistent storage, the `Manage Volumes` [role](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. +- The cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](../../../../../pages-for-subheaders/set-up-cloud-providers.md/) +- Make sure your storage provisioner is available to be enabled. + +The following storage provisioners are enabled by default: + +Name | Plugin +--------|---------- +Amazon EBS Disk | `aws-ebs` +AzureFile | `azure-file` +AzureDisk | `azure-disk` +Google Persistent Disk | `gce-pd` +Longhorn | `flex-volume-longhorn` +VMware vSphere Volume | `vsphere-volume` +Local | `local` +Network File System | `nfs` +hostPath | `host-path` + +To use a storage provisioner that is not on the above list, you will need to use a [feature flag to enable unsupported storage drivers.](../../../../../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) + +### 1. Add a storage class and configure it to use your storage + +These steps describe how to set up a storage class at the cluster level. + +1. Go to the **Cluster Explorer** of the cluster for which you want to dynamically provision persistent storage volumes. + +1. From the cluster view, select `Storage > Storage Classes`. Click `Add Class`. + +1. Enter a `Name` for your storage class. + +1. From the `Provisioner` drop-down, select the service that you want to use to dynamically provision storage volumes. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, use the `Amazon EBS Disk` provisioner. + +1. From the `Parameters` section, fill out the information required for the service to dynamically provision storage volumes. Each provisioner requires different information to dynamically provision storage volumes. Consult the service's documentation for help on how to obtain this information. + +1. Click `Save`. + +**Result:** The storage class is available to be consumed by a PVC. + +For full information about the storage class parameters, refer to the official [Kubernetes documentation.](https://kubernetes.io/docs/concepts/storage/storage-classes/#parameters). + +### 2. Add a persistent volume claim that refers to the storage class + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the **Cluster Manager** to the project containing a workload that you want to add a PVC to. + +1. From the main navigation bar, choose **Resources > Workloads.** Then select the **Volumes** tab. Click **Add Volume**. + +1. Enter a **Name** for the volume claim. + +1. Select the namespace of the volume claim. + +1. In the **Source** field, click **Use a Storage Class to provision a new persistent volume.** + +1. Go to the **Storage Class** drop-down and select the storage class that you created. + +1. Enter a volume **Capacity**. + +1. 
Optional: Expand the **Customize** section and select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 3. Mount the persistent volume claim as a volume for your workload + +Mount PVCs to workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation. + +To attach the PVC to a new workload, + +1. Create a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). +1. For **Workload Type**, select **Stateful set of 1 pod**. +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +To attach the PVC to an existing workload, + +1. Go to the project that has the workload that will have the PVC attached. +1. Go to the workload that will have persistent storage and click **⋮ > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Add a New Persistent Volume (Claim).** +1. In the **Persistent Volume Claim** section, select the newly created persistent volume claim that is attached to the storage class. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. If not, Rancher will provision new persistent storage. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md new file mode 100644 index 0000000000..6f456414c2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md @@ -0,0 +1,35 @@ +--- +title: iSCSI Volumes +weight: 6000 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/iscsi-volumes/ +--- + +In [Rancher Launched Kubernetes clusters](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. This failure is likely due to an incompatibility issue involving the iSCSI initiator tool. You can resolve this issue by installing the iSCSI initiator tool on each of your cluster nodes. 
+ +Rancher Launched Kubernetes clusters storing data on iSCSI volumes leverage the [iSCSI initiator tool](http://www.open-iscsi.com/), which is embedded in the kubelet's `rancher/hyperkube` Docker image. From each kubelet (i.e., the _initiator_), the tool discovers and launches sessions with an iSCSI volume (i.e., the _target_). However, in some instances, the versions of the iSCSI initiator tool installed on the initiator and the target may not match, resulting in a connection failure. + +If you encounter this issue, you can work around it by installing the initiator tool on each node in your cluster. You can install the iSCSI initiator tool by logging into your cluster nodes and entering one of the following commands: + +| Platform | Package Name | Install Command | +| ------------- | ----------------------- | -------------------------------------- | +| Ubuntu/Debian | `open-iscsi` | `sudo apt install open-iscsi` | +| RHEL | `iscsi-initiator-utils` | `yum install iscsi-initiator-utils -y` | + + +After installing the initiator tool on your nodes, edit the YAML for your cluster, editing the kubelet configuration to mount the iSCSI binary and configuration, as shown in the sample below. + +>**Notes:** +> +>- Before updating your Kubernetes YAML to mount the iSCSI binary and configuration, make sure either the `open-iscsi` (deb) or `iscsi-initiator-utils` (yum) package is installed on your cluster nodes. If this package isn't installed _before_ the bind mounts are created in your Kubernetes YAML, Docker will automatically create the directories and files on each node and will not allow the package install to succeed.
    +>
    +> +>- The example YAML below does not apply to K3s, but only to RKE clusters. Since the K3s kubelet does not run in a container, adding extra binds is not necessary. However, all iSCSI tools must still be installed on your K3s nodes. + +``` +services: + kubelet: + extra_binds: + - "/etc/iscsi:/etc/iscsi" + - "/sbin/iscsiadm:/sbin/iscsiadm" +``` diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md new file mode 100644 index 0000000000..e7dd896a9e --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md @@ -0,0 +1,107 @@ +--- +title: Setting up Existing Storage +weight: 1 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/persistent-volume-claims/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/attaching-existing-storage/ +--- + +This section describes how to set up existing persistent storage for workloads in Rancher. + +> This section assumes that you understand the Kubernetes concepts of persistent volumes and persistent volume claims. For more information, refer to the section on [how storage works.](about-persistent-storage.md) + +To set up storage, follow these steps: + +1. [Set up persistent storage.](#1-set-up-persistent-storage) +2. [Add a persistent volume that refers to the persistent storage.](#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) +3. [Add a persistent volume claim that refers to the persistent volume.](#3-add-a-persistent-volume-claim-that-refers-to-the-persistent-volume) +4. [Mount the persistent volume claim as a volume in your workload.](#4-mount-the-persistent-volume-claim-as-a-volume-in-your-workload) + +### Prerequisites + +- To create a persistent volume as a Kubernetes resource, you must have the `Manage Volumes` [role.](../../../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) +- If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +### 1. Set up persistent storage + +Creating a persistent volume in Rancher will not create a storage volume. It only creates a Kubernetes resource that maps to an existing volume. Therefore, before you can create a persistent volume as a Kubernetes resource, you must have storage provisioned. + +The steps to set up a persistent storage device will differ based on your infrastructure. We provide examples of how to set up storage using [vSphere,](../provisioning-storage-examples/vsphere-storage.md) [NFS,](../provisioning-storage-examples/nfs-storage.md) or Amazon's [EBS.](../provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +If you have a pool of block storage, and you don't want to use a cloud provider, Longhorn could help you provide persistent storage to your Kubernetes cluster. For more information, see [this page.](../../../../../explanations/integrations-in-rancher/longhorn.md) + +### 2. Add a persistent volume that refers to the persistent storage + +These steps describe how to set up a persistent volume at the cluster level in Kubernetes. + +1. 
From the cluster view, select **Storage > Persistent Volumes**. + +1. Click **Add Volume**. + +1. Enter a **Name** for the persistent volume. + +1. Select the **Volume Plugin** for the disk type or service that you're using. When adding storage to a cluster that's hosted by a cloud provider, use the cloud provider's plug-in for cloud storage. For example, if you have a Amazon EC2 cluster and you want to use cloud storage for it, you must use the `Amazon EBS Disk` volume plugin. + +1. Enter the **Capacity** of your volume in gigabytes. + +1. Complete the **Plugin Configuration** form. Each plugin type requires information specific to the vendor of disk type. For help regarding each plugin's form and the information that's required, refer to the plug-in's vendor documentation. + +1. Optional: In the **Customize** form, configure the [access modes.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) This options sets how many nodes can access the volume, along with the node read/write permissions. The [Kubernetes Documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) includes a table that lists which access modes are supported by the plugins available. + +1. Optional: In the **Customize** form, configure the [mount options.](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options) Each volume plugin allows you to specify additional command line options during the mounting process. Consult each plugin's vendor documentation for the mount options available. + +1. Click **Save**. + +**Result:** Your new persistent volume is created. + +### 3. Add a persistent volume claim that refers to the persistent volume + +These steps describe how to set up a PVC in the namespace where your stateful workload will be deployed. + +1. Go to the project containing a workload that you want to add a persistent volume claim to. + +1. Then click the **Volumes** tab and click **Add Volume**. + +1. Enter a **Name** for the volume claim. + +1. Select the namespace of the workload that you want to add the persistent storage to. + +1. In the section called **Use an existing persistent volume,** go to the **Persistent Volume** drop-down and choose the persistent volume that you created. + +1. **Optional:** From **Customize**, select the [Access Modes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) that you want to use. + +1. Click **Create.** + +**Result:** Your PVC is created. You can now attach it to any workload in the project. + +### 4. Mount the persistent volume claim as a volume in your workload + +Mount PVCs to stateful workloads so that your applications can store their data. + +You can mount PVCs during the deployment of a workload, or following workload creation. + +The following steps describe how to assign existing storage to a new workload that is a stateful set: + +1. From the **Project** view, go to the **Workloads** tab. +1. Click **Deploy.** +1. Enter a name for the workload. +1. Next to the **Workload Type** field, click **More Options.** +1. Click **Stateful set of 1 pod.** Optionally, configure the number of pods. +1. Choose the namespace where the workload will be deployed. +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. 
In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Launch.** + +**Result:** When the workload is deployed, it will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. + +The following steps describe how to assign persistent storage to an existing workload: + +1. From the **Project** view, go to the **Workloads** tab. +1. Go to the workload that you want to add the persistent storage to. The workload type should be a stateful set. Click **⋮ > Edit.** +1. Expand the **Volumes** section and click **Add Volume > Use an existing persistent volume (claim).**. +1. In the **Persistent Volume Claim** field, select the PVC that you created. +1. In the **Mount Point** field, enter the path that the workload will use to access the volume. +1. Click **Save.** + +**Result:** The workload will make a request for the specified amount of disk space to the Kubernetes master. If a PV with the specified resources is available when the workload is deployed, the Kubernetes master will bind the PV to the PVC. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md new file mode 100644 index 0000000000..46dd40bfca --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver.md @@ -0,0 +1,435 @@ +--- +title: Using an External Ceph Driver +weight: 10 +aliases: + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/ +--- + +These instructions are about using the external Ceph driver in an RKE2 cluster. If you are using RKE, additional steps are required. For details, refer to [this section.](#using-the-ceph-driver-with-rke) + +- [Requirements](#requirements) +- [Using the Ceph Driver with RKE](#using-the-ceph-driver-with-rke) +- [Installing the ceph-csi driver on an RKE2 cluster](#installing-the-ceph-csi-driver-on-an-rke2-cluster) +- [Install the ceph-csi driver using Helm](#install-the-ceph-csi-driver-using-helm) +- [Creating RBD Ceph Resources](#creating-rbd-ceph-resources) +- [Configure RBD Ceph Access Secrets](#configure-rbd-ceph-access-secrets) + - [User Account](#user-account) + - [Admin Account](#admin-account) +- [Create RBD Testing Resources](#create-rbd-testing-resources) + - [Using RBD in Pods](#using-rbd-in-pods) + - [Using RBD in Persistent Volumes](#using-rbd-in-persistent-volumes) + - [Using RBD in Storage Classes](#using-rbd-in-storage-classes) + - [RKE2 Server/Master Provisioning](#rke2-server-master-provisioning) + - [RKE2 Agent/Worker provisioning](#rke2-agent-worker-provisioning) +- [Tested Versions](#tested-versions) +- [Troubleshooting](#troubleshooting) + +# Requirements + +Make sure ceph-common and xfsprogs packages are installed on SLE worker nodes. + +# Using the Ceph Driver with RKE + +The resources below are fully compatible with RKE based clusters, but there is a need to do an additional kubelet configuration for RKE. 
On RKE clusters, the kubelet component runs in a Docker container and does not have access to the host's kernel modules, such as rbd and libceph, by default.

To solve this limitation, you can either run `modprobe rbd` on the worker nodes, or configure the kubelet containers to automatically mount the `/lib/modules` directory from the host into the container.

For the kubelet configuration, put the following lines into the `cluster.yml` file prior to RKE cluster provisioning. You can also modify the `cluster.yml` later in the Rancher UI by clicking on **Edit Cluster > Edit as YAML** and restarting the worker nodes.

```yaml
services:
  kubelet:
    extra_binds:
      - '/lib/modules:/lib/modules:ro'
```

For more information about the `extra_binds` directive, refer to [this section.](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds)

# Installing the ceph-csi driver on an RKE2 cluster

> **Note:** These steps are needed for dynamic RBD provisioning only.

For more information about the `ceph-csi-rbd` chart, refer to [this page.](https://github.com/ceph/ceph-csi/blob/devel/charts/ceph-csi-rbd/README.md)

To get details about your SES cluster, run:

```
ceph mon dump
```

Read its output:

```
dumped monmap epoch 3
epoch 3
fsid 79179d9d-98d8-4976-ab2e-58635caa7235
last_changed 2021-02-11T10:56:42.110184+0000
created 2021-02-11T10:56:22.913321+0000
min_mon_release 15 (octopus)
0: [v2:10.85.8.118:3300/0,v1:10.85.8.118:6789/0] mon.a
1: [v2:10.85.8.123:3300/0,v1:10.85.8.123:6789/0] mon.b
2: [v2:10.85.8.124:3300/0,v1:10.85.8.124:6789/0] mon.c
```

You will need the fsid and the monitor addresses later.

# Install the ceph-csi Driver Using Helm

Run these commands:

```
helm repo add ceph-csi https://ceph.github.io/csi-charts
helm repo update
helm search repo ceph-csi -l
helm inspect values ceph-csi/ceph-csi-rbd > ceph-csi-rbd-values.yaml
```

Modify the `ceph-csi-rbd-values.yaml` file, keeping only the required changes in it:

```yaml
# ceph-csi-rbd-values.yaml
csiConfig:
  - clusterID: "79179d9d-98d8-4976-ab2e-58635caa7235"
    monitors:
      - "10.85.8.118:6789"
      - "10.85.8.123:6789"
      - "10.85.8.124:6789"
provisioner:
  name: provisioner
  replicaCount: 2
```

Make sure the Ceph monitors are reachable from the RKE2 cluster, for example, by ping.
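For example, a quick reachability check from one of the RKE2 nodes might look like the sketch below. The addresses are the monitor endpoints reported by `ceph mon dump` above; the `nc` check is optional and assumes netcat is installed on the node.

```
for mon in 10.85.8.118 10.85.8.123 10.85.8.124; do
  ping -c 1 "$mon"       # basic ICMP reachability
  nc -zv "$mon" 6789     # optionally, verify the v1 monitor port is open
done
```

Then create the namespace and install the chart: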
+ +``` +kubectl create namespace ceph-csi-rbd +helm install --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml +kubectl rollout status deployment ceph-csi-rbd-provisioner -n ceph-csi-rbd +helm status ceph-csi-rbd -n ceph-csi-rbd +``` + +in case you'd like to modify the configuration directly via Helm, you may adapt the `ceph-csi-rbd-values.yaml` file and call: + +``` +helm upgrade \ + --namespace ceph-csi-rbd ceph-csi-rbd ceph-csi/ceph-csi-rbd --values ceph-csi-rbd-values.yaml +``` + +# Creating RBD Ceph Resources + +``` +# Create a ceph pool: +ceph osd pool create myPool 64 64 + +# Create a block device pool: +rbd pool init myPool + +# Create a block device image: +rbd create -s 2G myPool/image + +# Create a block device user and record the key: +ceph auth get-or-create-key client.myPoolUser mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=myPool" | tr -d '\n' | base64 +QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== + +# Encode the ceph user myPoolUser into a bash64 hash: +echo "myPoolUser" | tr -d '\n' | base64 +bXlQb29sVXNlcg== + +# Create a block device admin user and record the key: +ceph auth get-or-create-key client.myPoolAdmin mds 'allow *' mgr 'allow *' mon 'allow *' osd 'allow * pool=myPool' | tr -d '\n' | base64 +QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== + +# Encode the ceph user myPoolAdmin into a bash64 hash: +echo "myPoolAdmin" | tr -d '\n' | base64 +bXlQb29sQWRtaW4= +``` +# Configure RBD Ceph Access Secrets + +### User Account + +For static RBD provisioning (the image within the ceph pool must exist), run these commands: + +``` +cat > ceph-user-secret.yaml << EOF +apiVersion: v1 +kind: Secret +metadata: + name: ceph-user + namespace: default +type: kubernetes.io/rbd +data: + userID: bXlQb29sVXNlcg== + userKey: QVFDZ0R5VmdyRk9KREJBQTJ5b2s5R1E2NUdSWExRQndhVVBwWXc9PQ== +EOF + +kubectl apply -f ceph-user-secret.yaml +``` + +### Admin Account + +For dynamic RBD provisioning (used for automatic image creation within a given ceph pool), run these commands: + +``` +cat > ceph-admin-secret.yaml << EOF +apiVersion: v1 +kind: Secret +metadata: + name: ceph-admin + namespace: default +type: kubernetes.io/rbd +data: + userID: bXlQb29sQWRtaW4= + userKey: QVFCK0hDVmdXSjQ1T0JBQXBrc0VtcVhlZFpjc0JwaStIcmU5M3c9PQ== +EOF + +kubectl apply -f ceph-admin-secret.yaml +``` + +# Create RBD Testing Resources + +### Using RBD in Pods + +``` +# pod +cat > ceph-rbd-pod-inline.yaml << EOF +apiVersion: v1 +kind: Pod +metadata: + name: ceph-rbd-pod-inline +spec: + containers: + - name: ceph-rbd-pod-inline + image: busybox + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /mnt/ceph_rbd + name: volume + volumes: + - name: volume + rbd: + monitors: + - 10.85.8.118:6789 + - 10.85.8.123:6789 + - 10.85.8.124:6789 + pool: myPool + image: image + user: myPoolUser + secretRef: + name: ceph-user + fsType: ext4 + readOnly: false +EOF + +kubectl apply -f ceph-rbd-pod-inline.yaml +kubectl get pod +kubectl exec pod/ceph-rbd-pod-inline -- df -k | grep rbd +``` + +### Using RBD in Persistent Volumes + +``` +# pod-pvc-pv +cat > ceph-rbd-pod-pvc-pv-allinone.yaml << EOF +apiVersion: v1 +kind: PersistentVolume +metadata: + name: ceph-rbd-pv +spec: + capacity: + storage: 2Gi + accessModes: + - ReadWriteOnce + rbd: + monitors: + - 10.85.8.118:6789 + - 10.85.8.123:6789 + - 10.85.8.124:6789 + pool: myPool + image: image + user: myPoolUser + secretRef: + name: ceph-user + fsType: ext4 + readOnly: false +--- +kind: 
PersistentVolumeClaim +apiVersion: v1 +metadata: + name: ceph-rbd-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: ceph-rbd-pod-pvc-pv +spec: + containers: + - name: ceph-rbd-pod-pvc-pv + image: busybox + command: ["sleep", "infinity"] + volumeMounts: + - mountPath: /mnt/ceph_rbd + name: volume + volumes: + - name: volume + persistentVolumeClaim: + claimName: ceph-rbd-pvc +EOF + +kubectl apply -f ceph-rbd-pod-pvc-pv-allinone.yaml +kubectl get pv,pvc,pod +kubectl exec pod/ceph-rbd-pod-pvc-pv -- df -k | grep rbd +``` + +### Using RBD in Storage Classes + +This example is for dynamic provisioning. The ceph-csi driver is needed. + +``` +# pod-pvc-sc +cat > ceph-rbd-pod-pvc-sc-allinone.yaml < /root/.bashrc << EOF +export PATH=$PATH:/var/lib/rancher/rke2/bin/ +export KUBECONFIG=/etc/rancher/rke2/rke2.yaml +EOF + +cat /var/lib/rancher/rke2/server/node-token +token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED +``` + +### RKE2 Agent/Worker provisioning + +``` +mkdir -p /etc/rancher/rke2/ + +cat > /etc/rancher/rke2/config.yaml << EOF +server: https://10.100.103.23:9345 +token: K10ca0c38d4ff90d8b80319ab34092e315a8b732622e6adf97bc9eb0536REDACTED::server:ec0308000b8a6b595da000efREDACTED +EOF + +curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +systemctl enable --now rke2-agent.service +``` + +The cluster can be imported into Rancher from the Rancher UI by clicking **Global/Add Cluster > Other Cluster.** Then run the provided kubectl command on the server/master node. + +# Tested Versions + +OS for running RKE2 nodes: JeOS SLE15-SP2 with installed kernel-default-5.3.18-24.49 + +``` +kubectl version +Client Version: version.Info{Major:"1", Minor:"18", GitVersion:"v1.18.4", GitCommit:"c96aede7b5205121079932896c4ad89bb93260af", GitTreeState:"clean", BuildDate:"2020-06-22T12:00:00Z", GoVersion:"go1.13.11", Compiler:"gc", Platform:"linux/amd64"} +Server Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.7+rke2r1", GitCommit:"1dd5338295409edcfff11505e7bb246f0d325d15", GitTreeState:"clean", BuildDate:"2021-01-20T01:50:52Z", GoVersion:"go1.15.5b5", Compiler:"gc", Platform:"linux/amd64"} + +helm version +version.BuildInfo{Version:"3.4.1", GitCommit:"c4e74854886b2efe3321e185578e6db9be0a6e29", GitTreeState:"clean", GoVersion:"go1.14.12"} +``` + +Kubernetes version on RKE2 cluster: v1.19.7+rke2r1 + +# Troubleshooting + +In case you are using SUSE's ceph-rook based on SES7, it might be useful to expose the monitors on hostNetwork by editing `rook-1.4.5/ceph/cluster.yaml` and setting `spec.network.hostNetwork=true`. 
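As a minimal sketch, that change corresponds to an excerpt of the CephCluster spec like the following (the rest of the file stays as shipped with rook 1.4.5):

```yaml
# rook-1.4.5/ceph/cluster.yaml (excerpt)
spec:
  network:
    hostNetwork: true
```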
+ +Also for operating the ceph-rook cluster, it is useful to deploy a toolbox on the Kubernetes cluster where ceph-rook is provisioned by `kubectl apply -f rook-1.4.5/ceph/toolbox.yaml` Then all the ceph related commands can be executed in the toolbox pod, for example, by running `kubectl exec -it -n rook-ceph rook-ceph-tools-686d8b8bfb-2nvqp -- bash` + +Operating with the ceph - basic commands: + +``` +ceph osd pool stats +ceph osd pool delete myPool myPool --yes-i-really-really-mean-it +rbd list -p myPool +> csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 +> image +``` + +Delete the image: `rbd rm csi-vol-f5d3766c-7296-11eb-b32a-c2b045952d38 -p myPool` + +CephFS commands in rook toolbox: + +``` +ceph -s +ceph fs ls +ceph fs fail cephfs +ceph fs rm cephfs --yes-i-really-mean-it +ceph osd pool delete cephfs_data cephfs_data --yes-i-really-really-mean-it +ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it +``` + +To prepare a cephfs filesystem, you can run this command on a rook cluster: + +``` +kubectl apply -f rook-1.4.5/ceph/filesystem.yaml +``` \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md new file mode 100644 index 0000000000..e785b0c62f --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md @@ -0,0 +1,69 @@ +--- +title: NFS Storage +weight: 3054 +aliases: + - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/nfs/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/nfs/ +--- + +Before you can use the NFS storage volume plug-in with Rancher deployments, you need to provision an NFS server. + +>**Note:** +> +>- If you already have an NFS share, you don't need to provision a new NFS server to use the NFS volume plugin within Rancher. Instead, skip the rest of this procedure and complete [adding storage](../../../../../pages-for-subheaders/create-kubernetes-persistent-storage.md). +> +>- This procedure demonstrates how to set up an NFS server using Ubuntu, although you should be able to use these instructions for other Linux distros (e.g. Debian, RHEL, Arch Linux, etc.). For official instruction on how to create an NFS server using another Linux distro, consult the distro's documentation. + +>**Recommended:** To simplify the process of managing firewall rules, use NFSv4. + +1. Using a remote Terminal connection, log into the Ubuntu server that you intend to use for NFS storage. + +1. Enter the following command: + + ``` + sudo apt-get install nfs-kernel-server + ``` + +1. Enter the command below, which sets the directory used for storage, along with user access rights. Modify the command if you'd like to keep storage at a different directory. + + ``` + mkdir -p /nfs && chown nobody:nogroup /nfs + ``` + - The `-p /nfs` parameter creates a directory named `nfs` at root. + - The `chown nobody:nogroup /nfs` parameter allows all access to the storage directory. + +1. Create an NFS exports table. This table sets the directory paths on your NFS server that are exposed to the nodes that will use the server for storage. + + 1. Open `/etc/exports` using your text editor of choice. + 1. 
Add the path of the `/nfs` folder that you created in step 3, along with the IP addresses of your cluster nodes. Add an entry for each IP address in your cluster, separating each entry and its accompanying parameters with a single space.

        ```
        /nfs <IP_OF_NODE_1>(rw,sync,no_subtree_check) <IP_OF_NODE_2>(rw,sync,no_subtree_check) <IP_OF_NODE_3>(rw,sync,no_subtree_check)
        ```

        **Tip:** You can replace the IP addresses with a subnet. For example: `10.212.50.12/24`

    1. Update the NFS table by entering the following command:

        ```
        exportfs -ra
        ```

1. Open the ports used by NFS.

    1. To find out what ports NFS is using, enter the following command:

        ```
        rpcinfo -p | grep nfs
        ```
    2. [Open the ports](https://help.ubuntu.com/lts/serverguide/firewall.html.en) that the previous command outputs. For example, the following command opens port 2049:

        ```
        sudo ufw allow 2049
        ```

**Result:** Your NFS server is configured to be used for storage with your Rancher nodes.

## What's Next?

Within Rancher, add the NFS server as a storage volume and/or storage class. After adding the server, you can use it for storage for your deployments.
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md new file mode 100644 index 0000000000..5ddb1dc703 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md @@ -0,0 +1,18 @@
---
title: Creating Persistent Storage in Amazon's EBS
weight: 3053
aliases:
  - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ebs/
---

This section describes how to set up Amazon's Elastic Block Store in EC2.

1. From the EC2 console, go to the **ELASTIC BLOCK STORE** section in the left panel and click **Volumes.**
1. Click **Create Volume.**
1. Optional: Configure the size of the volume or other options. The volume should be created in the same availability zone as the instance it will be attached to.
1. Click **Create Volume.**
1. Click **Close.**

**Result:** Persistent storage has been created.
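If you prefer the command line over the EC2 console, an equivalent volume can typically be created with the AWS CLI. The sketch below is only an illustration; the availability zone, size, and volume type are assumptions, and the zone must match the instance the volume will attach to.

```
aws ec2 create-volume \
  --availability-zone us-east-1a \
  --size 20 \
  --volume-type gp2
```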
+ +For details on how to set up the newly created storage in Rancher, refer to the section on [setting up existing storage.](../manage-persistent-storage/set-up-existing-storage.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md new file mode 100644 index 0000000000..1d62a6a691 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md @@ -0,0 +1,79 @@ +--- +title: vSphere Storage +weight: 3055 +aliases: + - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/vsphere/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/vsphere/ +--- + +To provide stateful workloads with vSphere storage, we recommend creating a vSphereVolume StorageClass. This practice dynamically provisions vSphere storage when workloads request volumes through a persistent volume claim. + +In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../../pages-for-subheaders/vsphere-cloud-provider.md) + +- [Prerequisites](#prerequisites) +- [Creating a StorageClass](#creating-a-storageclass) +- [Creating a Workload with a vSphere Volume](#creating-a-workload-with-a-vsphere-volume) +- [Verifying Persistence of the Volume](#verifying-persistence-of-the-volume) +- [Why to Use StatefulSets Instead of Deployments](#why-to-use-statefulsets-instead-of-deployments) + +### Prerequisites + +In order to provision vSphere volumes in a cluster created with the [Rancher Kubernetes Engine (RKE)](../../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), the [vSphere cloud provider](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere) must be explicitly enabled in the [cluster options](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md). + +### Creating a StorageClass + +> **Note:** +> +> The following steps can also be performed using the `kubectl` command line tool. See [Kubernetes documentation on persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for details. + +1. From the Global view, open the cluster where you want to provide vSphere storage. +2. From the main menu, select **Storage > Storage Classes**. Then click **Add Class**. +3. Enter a **Name** for the class. +4. Under **Provisioner**, select **VMWare vSphere Volume**. + + ![](/img/vsphere-storage-class.png) + +5. Optionally, specify additional properties for this storage class under **Parameters**. Refer to the [vSphere storage documentation](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/storageclass.html) for details. +5. Click **Save**. + +### Creating a Workload with a vSphere Volume + +1. From the cluster where you configured vSphere storage, begin creating a workload as you would in [Deploying Workloads](../../../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md). +2. For **Workload Type**, select **Stateful set of 1 pod**. +3. 
Expand the **Volumes** section and click **Add Volume**.
4. Choose **Add a new persistent volume (claim)**. This option will implicitly create the claim once you deploy the workload.
5. Assign a **Name** for the claim, e.g. `test-volume`, and select the vSphere storage class created in the previous step.
6. Enter the required **Capacity** for the volume. Then click **Define**.

    ![](/img/workload-add-volume.png)

7. Assign a path in the **Mount Point** field. This is the full path where the volume will be mounted in the container file system, e.g. `/persistent`.
8. Click **Launch** to create the workload.

### Verifying Persistence of the Volume

1. From the context menu of the workload you just created, click **Execute Shell**.
2. Note the directory at root where the volume has been mounted to (in this case `/persistent`).
3. Create a file in the volume by executing the command `touch /<mount-point>/data.txt` (in this example, `touch /persistent/data.txt`).
4. **Close** the shell window.
5. Click on the name of the workload to reveal detail information.
6. Open the context menu next to the Pod in the *Running* state.
7. Delete the Pod by selecting **Delete**.
8. Observe that the pod is deleted. Then a new pod is scheduled to replace it so that the workload maintains its configured scale of a single stateful pod.
9. Once the replacement pod is running, click **Execute Shell**.
10. Inspect the contents of the directory where the volume is mounted by entering `ls -l /<mount-point>`. Note that the file you created earlier is still present.

    ![workload-persistent-data](/img/workload-persistent-data.png)

### Why to Use StatefulSets Instead of Deployments

You should always use [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) for workloads consuming vSphere storage, as this resource type is designed to address a VMDK block storage caveat.

Since vSphere volumes are backed by VMDK block storage, they only support an [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) of `ReadWriteOnce`. This setting restricts the volume so that it can only be mounted to a single pod at a time, unless all pods consuming that volume are co-located on the same node. This behavior makes a Deployment resource unusable for scaling beyond a single replica if it consumes vSphere volumes.

Even using a Deployment resource with just a single replica may result in a deadlock while updating the deployment: if the updated pod is scheduled to a node different from where the existing pod lives, it will fail to start because the VMDK is still attached to the other node.
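For reference, the same pattern can be expressed directly in Kubernetes manifests. The sketch below is only an illustration: it assumes a StorageClass named `vsphere-storage` (the name you chose above) and shows a minimal StatefulSet that requests a volume from it through a volume claim template.

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: vsphere-example
spec:
  serviceName: vsphere-example
  replicas: 1
  selector:
    matchLabels:
      app: vsphere-example
  template:
    metadata:
      labels:
        app: vsphere-example
    spec:
      containers:
        - name: app
          image: nginx
          volumeMounts:
            - name: data
              mountPath: /persistent            # same mount point used in the steps above
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]          # vSphere VMDK-backed volumes only support RWO
        storageClassName: vsphere-storage       # assumed name of the StorageClass created earlier
        resources:
          requests:
            storage: 2Gi
```

Because each replica gets its own claim from the template, scaling the StatefulSet does not run into the single-attachment limitation described above.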
+ +### Related Links + +- [vSphere Storage for Kubernetes](https://vmware.github.io/vsphere-storage-for-kubernetes/documentation/) +- [Kubernetes Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md new file mode 100644 index 0000000000..f12d5d8b06 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md @@ -0,0 +1,582 @@ +--- +title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/amazon/ +--- + +This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. + +- [Prerequisites](#prerequisites) +- [1. Create a Custom Cluster](#1-create-a-custom-cluster) +- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) +- [3. Deploy Nodes](#3-deploy-nodes) +- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) + - [Parameters](#parameters) + - [Deployment](#deployment) +- [Testing](#testing) + - [Generating Load](#generating-load) + - [Checking Scale](#checking-scale) + +# Prerequisites + +These elements are required to follow this guide: + +* The Rancher server is up and running +* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles + +### 1. Create a Custom Cluster + +On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: + +* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag +* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag +* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster + + ```sh + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum + ``` + +### 2. Configure the Cloud Provider + +On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. + +1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. + * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. This profile is called `K8sAutoscalerProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:DescribeTags", + "autoscaling:DescribeLaunchConfigurations", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. + * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + + * IAM role: `K8sMasterRole: [K8sMasterProfile,K8sAutoscalerProfile]` + * Security group: 
`K8sMasterSg` More info at[RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--etcd --controlplane" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. + * IAM profile: Provides cloud_provider worker integration. + This profile is called `K8sWorkerProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } + ] + } + ``` + + * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` + * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes) + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--worker" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +More info is at [RKE clusters on AWS](../../../new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) + +### 3. Deploy Nodes + +Once we've configured AWS, let's create VMs to bootstrap our cluster: + +* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.](../../../../pages-for-subheaders/checklist-for-production-ready-clusters.md) + * IAM role: `K8sMasterRole` + * Security group: `K8sMasterSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` + +* worker: Define an ASG on EC2 with the following settings: + * Name: `K8sWorkerAsg` + * IAM role: `K8sWorkerRole` + * Security group: `K8sWorkerSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` + * Instances: + * minimum: 2 + * desired: 2 + * maximum: 10 + +Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. + +### 4. 
Install Cluster-autoscaler + +At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. + +#### Parameters + +This table shows cluster-autoscaler parameters for fine tuning: + +| Parameter | Default | Description | +|---|---|---| +|cluster-name|-|Autoscaled cluster name, if available| +|address|:8085|The address to expose Prometheus metrics| +|kubernetes|-|Kubernetes master location. Leave blank for default| +|kubeconfig|-|Path to kubeconfig file with authorization and master location information| +|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| +|namespace|"kube-system"|Namespace in which cluster-autoscaler run| +|scale-down-enabled|true|Should CA scale down the cluster| +|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| +|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| +|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| +|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| +|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| +|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| +|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| +|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| +|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| +|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| +|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| +|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +cloud-provider|-|Cloud provider type| +|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. 
Set to 0 to turn off such tainting| +|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| +|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| +|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| +|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. After this is exceeded, CA halts operations| +|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| +|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| +|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| +|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: `::`| +|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`| +|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| +|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| +|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| +|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| +|write-status-configmap|true|Should CA write status information to a configmap| +|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| +|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| +|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| +|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| +|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| +|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| +|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| +|regional|false|Cluster is regional| +|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| +|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| +|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| +|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| +|profiling|false|Is debug/pprof endpoint enabled| + +#### Deployment + +Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: + + +```yml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list", "get", "update"] + - apiGroups: [""] + resources: + - "pods" + - "services" + - "replicationcontrollers" + - "persistentvolumeclaims" + - "persistentvolumes" + verbs: ["watch", "list", "get"] + - apiGroups: ["extensions"] + resources: ["replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch", "list"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resourceNames: ["cluster-autoscaler"] + resources: ["leases"] + verbs: ["get", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create","list","watch"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] + verbs: ["delete", "get", "update", "watch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler 
+subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8085' + spec: + serviceAccountName: cluster-autoscaler + tolerations: + - effect: NoSchedule + operator: "Equal" + value: "true" + key: node-role.kubernetes.io/controlplane + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + containers: + - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + imagePullPolicy: "Always" + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" + +``` + +Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): + +```sh +kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml +``` + +**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) + +# Testing + +At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. + +### Generating Load + +We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: hello-world + name: hello-world +spec: + replicas: 3 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + ports: + - containerPort: 80 + protocol: TCP + resources: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi +``` + +Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): + +``` +kubectl -n default apply -f test-deployment.yaml +``` + +### Checking Scale + +Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. + +Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md new file mode 100644 index 0000000000..13f946bcb5 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md @@ -0,0 +1,194 @@ +--- +title: Nodes and Node Pools +weight: 2030 +aliases: + - /rancher/v2.x/en/cluster-admin/nodes/ +--- + +After you launch a Kubernetes cluster in Rancher, you can manage individual nodes from the cluster's **Node** tab. Depending on the [option used](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) to provision the cluster, there are different node options available. + +> If you want to manage the _cluster_ and not individual nodes, see [Editing Clusters](../../../pages-for-subheaders/cluster-configuration.md). 
+ +This section covers the following topics: + +- [Node options available for each cluster creation option](#node-options-available-for-each-cluster-creation-option) + - [Nodes hosted by an infrastructure provider](#nodes-hosted-by-an-infrastructure-provider) + - [Nodes provisioned by hosted Kubernetes providers](#nodes-provisioned-by-hosted-kubernetes-providers) + - [Registered nodes](#registered-nodes) +- [Managing and editing individual nodes](#managing-and-editing-individual-nodes) +- [Viewing a node in the Rancher API](#viewing-a-node-in-the-rancher-api) +- [Deleting a node](#deleting-a-node) +- [Scaling nodes](#scaling-nodes) +- [SSH into a node hosted by an infrastructure provider](#ssh-into-a-node-hosted-by-an-infrastructure-provider) +- [Cordoning a node](#cordoning-a-node) +- [Draining a node](#draining-a-node) + - [Aggressive and safe draining options](#aggressive-and-safe-draining-options) + - [Grace period](#grace-period) + - [Timeout](#timeout) + - [Drained and cordoned state](#drained-and-cordoned-state) +- [Labeling a node to be ignored by Rancher](#labeling-a-node-to-be-ignored-by-rancher) + +# Node Options Available for Each Cluster Creation Option + +The following table lists which node options are available for each type of cluster in Rancher. Click the links in the **Option** column for more detailed information about each feature. + +| Option | [Nodes Hosted by an Infrastructure Provider][1] | [Custom Node][2] | [Hosted Cluster][3] | [Registered EKS Nodes][4] | [All Other Registered Nodes][5] | Description | +| ------------------------------------------------ | ------------------------------------------------ | ---------------- | ------------------- | ------------------- | -------------------| ------------------------------------------------------------------ | +| [Cordon](#cordoning-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable. | +| [Drain](#draining-a-node) | ✓ | ✓ | ✓ | ✓ | ✓ | Marks the node as unschedulable _and_ evicts all pods. | +| [Edit](#managing-and-editing-individual-nodes) | ✓ | ✓ | ✓ | ✓ | ✓ | Enter a custom name, description, label, or taints for a node. | +| [View API](#viewing-a-node-in-the-rancher-api) | ✓ | ✓ | ✓ | ✓ | ✓ | View API data. | +| [Delete](#deleting-a-node) | ✓ | ✓ | | * | * | Deletes defective nodes from the cluster. | +| [Download Keys](#ssh-into-a-node-hosted-by-an-infrastructure-provider) | ✓ | | | | | Download SSH key in order to SSH into the node. | +| [Node Scaling](#scaling-nodes) | ✓ | | | ✓ | | Scale the number of nodes in the node pool up or down. | + +[1]: ../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md +[2]: ../../../pages-for-subheaders/use-existing-nodes.md +[3]: ../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md +[4]: ../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md +[5]: ../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md + +\* Delete option accessible via View API + + +### Nodes Hosted by an Infrastructure Provider + +Node pools are available when you provision Rancher-launched Kubernetes clusters on nodes that are [hosted in an infrastructure provider.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +Clusters provisioned using [one of the node pool options](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) can be scaled up or down if the node pool is edited. 
+ +A node pool can also automatically maintain the node scale that's set during the initial cluster provisioning if [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) This scale determines the number of active nodes that Rancher maintains for the cluster. + +Rancher uses [node templates](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) to replace nodes in the node pool. Each node template uses cloud provider credentials to allow Rancher to set up the node in the infrastructure provider. + +### Nodes Provisioned by Hosted Kubernetes Providers + +Options for managing nodes [hosted by a Kubernetes provider](../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md) are somewhat limited in Rancher. Rather than using the Rancher UI to make edits such as scaling the number of nodes up or down, edit the cluster directly. + +### Registered Nodes + +Although you can deploy workloads to a [registered cluster](../../new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) using Rancher, you cannot manage individual cluster nodes. All management of imported cluster nodes must take place outside of Rancher. + +# Managing and Editing Individual Nodes + +Editing a node lets you: + +* Change its name +* Change its description +* Add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) +* Add/Remove [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) + +To manage individual nodes, browse to the cluster that you want to manage and then select **Nodes** from the main menu. You can open the options menu for a node by clicking its **⋮** icon (**...**). + +# Viewing a Node in the Rancher API + +Select this option to view the node's [API endpoints](../../../pages-for-subheaders/about-the-api.md). + +# Deleting a Node + +Use **Delete** to remove defective nodes from the cloud provider. + +When you the delete a defective node, Rancher can automatically replace it with an identically provisioned node if the node is in a node pool and [node auto-replace is enabled.](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) + +>**Tip:** If your cluster is hosted by an infrastructure provider, and you want to scale your cluster down instead of deleting a defective node, [scale down](#scaling-nodes) rather than delete. + +# Scaling Nodes + +For nodes hosted by an infrastructure provider, you can scale the number of nodes in each [node pool](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) by using the scale controls. This option isn't available for other cluster types. + +# SSH into a Node Hosted by an Infrastructure Provider + +For [nodes hosted by an infrastructure provider](../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), you have the option of downloading its SSH key so that you can connect to it remotely from your desktop. + +1. From the cluster hosted by an infrastructure provider, select **Nodes** from the main menu. + +1. Find the node that you want to remote into. Select **⋮ > Download Keys**. + + **Step Result:** A ZIP file containing files used for SSH is downloaded. + +1. Extract the ZIP file to any location. + +1. Open Terminal. Change your location to the extracted ZIP file. + +1. 
Enter the following command:
+
+   ```
+   ssh -i id_rsa root@<IP_OF_HOST>
+   ```
+
+# Cordoning a Node
+
+_Cordoning_ a node marks it as unschedulable. This feature is useful for performing short tasks on the node during small maintenance windows, like reboots, upgrades, or decommissions. When you're done, power the node back on and make it schedulable again by uncordoning it.
+
+# Draining a Node
+
+_Draining_ is the process of first cordoning the node and then evicting all of its pods. This feature is useful for performing node maintenance (like kernel upgrades or hardware maintenance). It prevents new pods from deploying to the node while redistributing existing pods so that users don't experience service interruption.
+
+- For pods with a replica set, the pod is replaced by a new pod that will be scheduled to a new node. Additionally, if the pod is part of a service, clients will automatically be redirected to the new pod.
+
+- For pods with no replica set, you need to bring up a new copy of the pod and, assuming it is not part of a service, redirect clients to it.
+
+You can drain nodes that are in either a `cordoned` or `active` state. When you drain a node, the node is cordoned, then evaluated against the conditions it must meet to be drained, and then (if it meets those conditions) its pods are evicted.
+
+However, you can override the drain conditions when you initiate the drain. You're also given an opportunity to set a grace period and timeout value.
+
+### Aggressive and Safe Draining Options
+
+There are two drain modes: aggressive and safe.
+
+- **Aggressive Mode**
+
+  In this mode, pods won't get rescheduled to a new node, even if they do not have a controller. Kubernetes expects you to have your own logic that handles the deletion of these pods.
+
+  Kubernetes also expects the implementation to decide what to do with pods using emptyDir. If a pod uses emptyDir to store local data, you might not be able to safely delete it, since the data in the emptyDir will be deleted once the pod is removed from the node. Choosing aggressive mode will delete these pods.
+
+- **Safe Mode**
+
+  If a node has standalone pods or ephemeral data, it will be cordoned but not drained.
+
+### Grace Period
+
+The time given to each pod to clean up, so that it has a chance to exit gracefully. For example, pods might need to finish outstanding requests, roll back transactions, or save state to some external storage. If negative, the default value specified in the pod will be used.
+
+### Timeout
+
+The amount of time the drain should continue to wait before giving up.
+
+>**Kubernetes Known Issue:** The [timeout setting](https://github.com/kubernetes/kubernetes/pull/64378) was not enforced while draining a node before Kubernetes 1.12.
+
+### Drained and Cordoned State
+
+If there's any error related to user input, the node enters a `cordoned` state because the drain failed. You can either correct the input and attempt to drain the node again, or you can abort by uncordoning the node.
+
+If the drain continues without error, the node enters a `draining` state. You'll have the option to stop the drain while the node is in this state, which stops the drain process and changes the node's state to `cordoned`.
+
+Once the drain successfully completes, the node will be in a `drained` state. You can then power off or delete the node.
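+If you prefer to work outside the Rancher UI, the same cordon and drain semantics are available through `kubectl`. The commands below are a minimal sketch, assuming a node named `worker-1` and a recent `kubectl` release (older releases use `--delete-local-data` instead of `--delete-emptydir-data`); the grace period and timeout flags map to the options described above.
+
+```
+# Mark the node unschedulable
+kubectl cordon worker-1
+
+# Evict the node's pods, with a per-pod grace period and an overall timeout
+kubectl drain worker-1 --ignore-daemonsets --delete-emptydir-data --grace-period=120 --timeout=300s
+
+# When maintenance is complete, make the node schedulable again
+kubectl uncordon worker-1
+```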
+ +>**Want to know more about cordon and drain?** See the [Kubernetes documentation](https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). + +# Labeling a Node to be Ignored by Rancher + +Some solutions, such as F5's BIG-IP integration, may require creating a node that is never registered to a cluster. + +Since the node will never finish registering, it will always be shown as unhealthy in the Rancher UI. + +In that case, you may want to label the node to be ignored by Rancher so that Rancher only shows nodes as unhealthy when they are actually failing. + +You can label nodes to be ignored by using a setting in the Rancher UI, or by using `kubectl`. + +> **Note:** There is an [open issue](https://github.com/rancher/rancher/issues/24172) in which nodes labeled to be ignored can get stuck in an updating state. + +### Labeling Nodes to be Ignored with kubectl + +To add a node that will be ignored by Rancher, use `kubectl` to create a node that has the following label: + +``` +cattle.rancher.io/node-status: ignore +``` + +**Result:** If you add the node to a cluster, Rancher will not attempt to sync with this node. The node can still be part of the cluster and can be listed with `kubectl`. + +If the label is added before the node is added to the cluster, the node will not be shown in the Rancher UI. + +If the label is added after the node is added to a Rancher cluster, the node will not be removed from the UI. + +If you delete the node from the Rancher server using the Rancher UI or API, the node will not be removed from the cluster if the `nodeName` is listed in the Rancher settings in the Rancher API under `v3/settings/ignore-node-name`. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md new file mode 100644 index 0000000000..8b5505cd81 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md @@ -0,0 +1,198 @@ +--- +title: Projects and Kubernetes Namespaces with Rancher +description: Rancher Projects ease the administrative burden of your cluster and support multi-tenancy. Learn to create projects and divide projects into Kubernetes namespaces +weight: 2032 +aliases: + - /rancher/v2.5/en/concepts/projects/ + - /rancher/v2.5/en/tasks/projects/ + - /rancher/v2.5/en/tasks/projects/create-project/ + - /rancher/v2.5/en/tasks/projects/create-project/ + - /rancher/v2.x/en/cluster-admin/projects-and-namespaces/ +--- + +A namespace is a Kubernetes concept that allows a virtual cluster within a cluster, which is useful for dividing the cluster into separate "virtual clusters" that each have their own access control and resource quotas. + +A project is a group of namespaces, and it is a concept introduced by Rancher. Projects allow you to manage multiple namespaces as a group and perform Kubernetes operations in them. You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +This section describes how projects and namespaces work with Rancher. 
It covers the following topics: + +- [About namespaces](#about-namespaces) +- [About projects](#about-projects) + - [The cluster's default project](#the-cluster-s-default-project) + - [The system project](#the-system-project) +- [Project authorization](#project-authorization) +- [Pod security policies](#pod-security-policies) +- [Creating projects](#creating-projects) +- [Switching between clusters and projects](#switching-between-clusters-and-projects) + +# About Namespaces + +A namespace is a concept introduced by Kubernetes. According to the [official Kubernetes documentation on namespaces,](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) + +> Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces. [...] Namespaces are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. + +Namespaces provide the following functionality: + +- **Providing a scope for names:** Names of resources need to be unique within a namespace, but not across namespaces. Namespaces can not be nested inside one another and each Kubernetes resource can only be in one namespace. +- **Resource quotas:** Namespaces provide a way to divide cluster resources between multiple users. + +You can assign resources at the project level so that each namespace in the project can use them. You can also bypass this inheritance by assigning resources explicitly to a namespace. + +You can assign the following resources directly to namespaces: + +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Persistent Volume Claims](../../../pages-for-subheaders/create-kubernetes-persistent-storage.md) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) + +To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. + +For more information on creating and moving namespaces, see [Namespaces](../manage-projects/manage-namespaces.md). + +### Role-based access control issues with namespaces and kubectl + +Because projects are a concept introduced by Rancher, kubectl does not have the capability to restrict the creation of namespaces to a project the creator has access to. + +This means that when standard users with project-scoped permissions create a namespaces with `kubectl`, it may be unusable because `kubectl` doesn't require the new namespace to be scoped within a certain project. + +If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](../manage-projects/manage-namespaces.md) to ensure that you will have permission to access the namespace. 
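+If you do need to create a namespace with `kubectl` and still have it associated with a project, a common approach is to set the project annotation on the namespace yourself. The manifest below is only a sketch: the annotation key is the one Rancher uses to tie namespaces to projects, and `c-xxxxx:p-xxxxx` is a placeholder for your own cluster and project IDs.
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: team-a-apps
+  annotations:
+    # Placeholder cluster ID and project ID - replace with your own values
+    field.cattle.io/projectId: c-xxxxx:p-xxxxx
+```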
+ +If a standard user is a project owner, the user will be able to create namespaces within that project. The Rancher UI will prevent that user from creating namespaces outside the scope of the projects they have access to. + +# About Projects + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +You can use projects to support multi-tenancy, so that a team can access a project within a cluster without having access to other projects in the same cluster. + +In the base version of Kubernetes, features like role-based access rights or cluster resources are assigned to individual namespaces. A project allows you to save time by giving an individual or a team access to multiple namespaces simultaneously. + +You can use projects to perform actions such as: + +- Assign users to a group of namespaces (i.e., [project membership](../manage-projects/add-users-to-projects.md)). +- Assign users specific roles in a project. A role can be owner, member, read-only, or [custom](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). +- Assign resources to the project. +- Assign Pod Security Policies. + +When you create a cluster, two projects are automatically created within it: + +- [Default Project](#the-cluster-s-default-project) +- [System Project](#the-system-project) + +### The Cluster's Default Project + +When you provision a cluster with Rancher, it automatically creates a `default` project for the cluster. This is a project you can use to get started with your cluster, but you can always delete it and replace it with projects that have more descriptive names. + +If you don't have a need for more than the default namespace, you also do not need more than the **Default** project in Rancher. + +If you require another level of organization beyond the **Default** project, you can create more projects in Rancher to isolate namespaces, applications and resources. + +### The System Project + +When troubleshooting, you can view the `system` project to check if important namespaces in the Kubernetes system are working properly. This easily accessible project saves you from troubleshooting individual system namespace containers. + +To open it, open the **Global** menu, and then select the `system` project for your cluster. + +The `system` project: + +- Is automatically created when you provision a cluster. +- Lists all namespaces that exist in `v3/settings/system-namespaces`, if they exist. +- Allows you to add more namespaces or move its namespaces to other projects. +- Cannot be deleted because it's required for cluster operations. + +>**Note:** In RKE clusters where the project network isolation option is enabled, the `system` project overrides the project network isolation option so that it can communicate with other projects, collect logs, and check health. + +# Project Authorization + +Standard users are only authorized for project access in two situations: + +- An administrator, cluster owner or cluster member explicitly adds the standard user to the project's **Members** tab. +- Standard users can access projects that they create themselves. 
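+Behind the scenes, a membership added through the **Members** tab is stored as a Rancher API object. The sketch below shows roughly what such a `ProjectRoleTemplateBinding` looks like; the field names and ID formats are assumptions based on Rancher's `management.cattle.io/v3` API, and all IDs are placeholders, so verify against your own Rancher installation before relying on it.
+
+```yaml
+apiVersion: management.cattle.io/v3
+kind: ProjectRoleTemplateBinding
+metadata:
+  # Bindings live in the namespace named after the project ID on the Rancher management cluster
+  name: example-project-member
+  namespace: p-xxxxx
+projectName: c-xxxxx:p-xxxxx       # placeholder cluster:project ID pair
+roleTemplateName: project-member   # or project-owner, read-only, a custom role, etc.
+userPrincipalName: local://u-xxxxx # placeholder user principal
+```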
+ +# Pod Security Policies + +Rancher extends Kubernetes to allow the application of [Pod Security Policies](https://kubernetes.io/docs/concepts/policluster-admin/pod-security-policy/) at the [project level](../manage-projects/manage-pod-security-policies.md) in addition to the [cluster level.](./add-a-pod-security-policy.md) However, as a best practice, we recommend applying Pod Security Policies at the cluster level. + +# Creating Projects + +This section describes how to create a new project with a name and with optional pod security policy, members, and resource quotas. + +1. [Name a new project.](#1-name-a-new-project) +2. [Optional: Select a pod security policy.](#2-optional-select-a-pod-security-policy) +3. [Recommended: Add project members.](#3-recommended-add-project-members) +4. [Optional: Add resource quotas.](#4-optional-add-resource-quotas) + +### 1. Name a New Project + +1. From the **Global** view, choose **Clusters** from the main menu. From the **Clusters** page, open the cluster from which you want to create a project. + +1. From the main menu, choose **Projects/Namespaces**. Then click **Add Project**. + +1. Enter a **Project Name**. + +### 2. Optional: Select a Pod Security Policy + +This option is only available if you've already created a Pod Security Policy. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +Assigning a PSP to a project will: + +- Override the cluster's default PSP. +- Apply the PSP to the project. +- Apply the PSP to any namespaces you add to the project later. + +### 3. Recommended: Add Project Members + +Use the **Members** section to provide other users with project access and roles. + +By default, your user is added as the project `Owner`. + +>**Notes on Permissions:** +> +>- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. +> +>- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. +> +>- Choose `Custom` to create a custom role on the fly: [Custom Project Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#custom-project-roles). + +To add members: + +1. Click **Add Member**. +1. From the **Name** combo box, search for a user or group that you want to assign project access. Note: You can only search for groups if external authentication is enabled. +1. From the **Role** drop-down, choose a role. For more information, refer to the [documentation on project roles.](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + +### 4. Optional: Add Resource Quotas + +Resource quotas limit the resources that a project (and its namespaces) can consume. 
For more information, see [Resource Quotas](../../../pages-for-subheaders/manage-project-resource-quotas.md). + +To add a resource quota, + +1. Click **Add Quota**. +1. Select a Resource Type. For more information, see [Resource Quotas.](../../../pages-for-subheaders/manage-project-resource-quotas.md). +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. +1. **Optional:** Specify **Container Default Resource Limit**, which will be applied to every container started in the project. The parameter is recommended if you have CPU or Memory limits set by the Resource Quota. It can be overridden on per an individual namespace or a container level. For more information, see [Container Default Resource Limit](../../../pages-for-subheaders/manage-project-resource-quotas.md) +1. Click **Create**. + +**Result:** Your project is created. You can view it from the cluster's **Projects/Namespaces** view. + +| Field | Description | +| ----------------------- | -------------------------------------------------------------------------------------------------------- | +| Project Limit | The overall resource limit for the project. | +| Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project when created. The combined limit of all project namespaces shouldn't exceed the project limit. | + +# Switching between Clusters and Projects + +To switch between clusters and projects, use the **Global** drop-down available in the main menu. + +![Global Menu](/img/global-menu.png) + +Alternatively, you can switch between projects and clusters using the main menu. + +- To switch between clusters, open the **Global** view and select **Clusters** from the main menu. Then open a cluster. +- To switch between projects, open a cluster, and then select **Projects/Namespaces** from the main menu. Select the link for the project that you want to open. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md new file mode 100644 index 0000000000..be83aba86d --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md @@ -0,0 +1,40 @@ +--- +title: Certificate Rotation +weight: 2040 +aliases: + - /rancher/v2.x/en/cluster-admin/certificate-rotation/ +--- + +> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. + +By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. 
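+To judge whether rotation is due, you may want to check when the current certificates expire. A minimal sketch, assuming an RKE-provisioned node where the generated certificates are stored under `/etc/kubernetes/ssl` (the path and file names can differ depending on your setup):
+
+```
+# On a cluster node: print the expiry date of the kube-apiserver certificate
+openssl x509 -noout -enddate -in /etc/kubernetes/ssl/kube-apiserver.pem
+```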
+ +Certificates can be rotated for the following services: + +- etcd +- kubelet (node certificate) +- kubelet (serving certificate, if [enabled](https://rancher.com/docs/rke/latest/en/config-options/services/#kubelet-options)) +- kube-apiserver +- kube-proxy +- kube-scheduler +- kube-controller-manager + + +### Certificate Rotation + +Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. + +1. In the **Global** view, navigate to the cluster that you want to rotate certificates. + +2. Select **⋮ > Rotate Certificates**. + +3. Select which certificates that you want to rotate. + + * Rotate all Service certificates (keep the same CA) + * Rotate an individual service and choose one of the services from the drop-down menu + +4. Click **Save**. + +**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. + +> **Note:** Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't allow the ability to upload these in Rancher launched Kubernetes clusters. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md new file mode 100644 index 0000000000..9d30761cbd --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md @@ -0,0 +1,56 @@ +--- +title: Adding Users to Projects +weight: 2505 +aliases: + - /rancher/v2.5/en/tasks/projects/add-project-members/ + - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/project-members + - /rancher/v2.x/en/project-admin/project-members/ +--- + +If you want to provide a user with access and permissions to _specific_ projects and resources within a cluster, assign the user a project membership. + +You can add members to a project as it is created, or add them to an existing project. + +>**Tip:** Want to provide a user with access to _all_ projects within a cluster? See [Adding Cluster Members](../manage-clusters/access-clusters/add-users-to-clusters.md) instead. + +### Adding Members to a New Project + +You can add members to a project as you create it (recommended if possible). For details on creating a new project, refer to the [cluster administration section.](../manage-clusters/projects-and-namespaces.md) + +### Adding Members to an Existing Project + +Following project creation, you can add users as project members so that they can access its resources. + +1. From the **Global** view, open the project that you want to add members to. + +2. From the main menu, select **Members**. Then click **Add Member**. + +3. Search for the user or group that you want to add to the project. + + If external authentication is configured: + + - Rancher returns users from your external authentication source as you type. + + - A drop-down allows you to add groups instead of individual users. The dropdown only lists groups that you, the logged in user, are included in. + + >**Note:** If you are logged in as a local user, external users do not display in your search results. + +1. Assign the user or group **Project** roles. 
+ + [What are Project Roles?](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) + + >**Notes:** + > + >- Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster. Therefore, users explicitly assigned the `Owner` or `Member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + > + >- By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + > + >- For `Custom` roles, you can modify the list of individual roles available for assignment. + > + > - To add roles to the list, [Add a Custom Role](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md). + > - To remove roles from the list, [Lock/Unlock Roles](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles.md). + +**Result:** The chosen users are added to the project. + +- To revoke project membership, select the user and click **Delete**. This action deletes membership, not the user. +- To modify a user's roles in the project, delete them from the project, and then re-add them with modified roles. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md new file mode 100644 index 0000000000..7c98a04071 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md @@ -0,0 +1,21 @@ +--- +title: Rancher's CI/CD Pipelines +description: Use Rancher’s CI/CD pipeline to automatically checkout code, run builds or scripts, publish Docker images, and deploy software to users +weight: 4000 +aliases: + - /rancher/v2.5/en/concepts/ci-cd-pipelines/ + - /rancher/v2.5/en/tasks/pipelines/ + - /rancher/v2.5/en/tools/pipelines/configurations/ + - /rancher/v2.x/en/project-admin/pipelines/ +--- +Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +For details, refer to the [pipelines](../../../pages-for-subheaders/pipelines.md) section. 
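+As a rough illustration, a pipeline is typically defined in a `.rancher-pipeline.yml` file in the repository. The sketch below is an assumption-based example of a two-stage pipeline (build, then publish an image); the image names, registry tag, and scripts are placeholders, so check the pipelines documentation linked above for the exact configuration schema.
+
+```yaml
+# .rancher-pipeline.yml (illustrative sketch with placeholder values)
+stages:
+  - name: Build and test
+    steps:
+      - runScriptConfig:
+          image: golang:1.17
+          shellScript: go build ./... && go test ./...
+  - name: Publish image
+    steps:
+      - publishImageConfig:
+          dockerfilePath: ./Dockerfile
+          buildContext: .
+          tag: example/app:${CICD_GIT_COMMIT}
+```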
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md new file mode 100644 index 0000000000..c81c5a72f8 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md @@ -0,0 +1,69 @@ +--- +title: Namespaces +weight: 2520 +aliases: + - /rancher/v2.x/en/project-admin/namespaces/ +--- + +Within Rancher, you can further divide projects into different [namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), which are virtual clusters within a project backed by a physical cluster. Should you require another level of organization beyond projects and the `default` namespace, you can use multiple namespaces to isolate applications and resources. + +Although you assign resources at the project level so that each namespace in the project can use them, you can override this inheritance by assigning resources explicitly to a namespace. + +Resources that you can assign directly to namespaces include: + +- [Workloads](../../../pages-for-subheaders/workloads-and-pods.md) +- [Load Balancers/Ingress](../../../pages-for-subheaders/load-balancer-and-ingress-controller.md) +- [Service Discovery Records](../../new-user-guides/kubernetes-resources-setup/create-services.md) +- [Certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md) +- [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md) +- [Registries](../../new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md) +- [Secrets](../../new-user-guides/kubernetes-resources-setup/secrets.md) + +To manage permissions in a vanilla Kubernetes cluster, cluster admins configure role-based access policies for each namespace. With Rancher, user permissions are assigned on the project level instead, and permissions are automatically inherited by any namespace owned by the particular project. + +> **Note:** If you create a namespace with `kubectl`, it may be unusable because `kubectl` doesn't require your new namespace to be scoped within a project that you have access to. If your permissions are restricted to the project level, it is better to [create a namespace through Rancher](manage-namespaces.md) to ensure that you will have permission to access the namespace. + + +### Creating Namespaces + +Create a new namespace to isolate apps and resources in a project. + +>**Tip:** When working with project resources that you can assign to a namespace (i.e., [workloads](../../new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md), [certificates](../../new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md), [ConfigMaps](../../new-user-guides/kubernetes-resources-setup/configmaps.md), etc.) you can create a namespace on the fly. + +1. From the **Global** view, open the project where you want to create a namespace. + + >**Tip:** As a best practice, we recommend creating namespaces from the project level. However, cluster owners and members can create them from the cluster level as well. + +1. From the main menu, select **Namespace**. The click **Add Namespace**. + +1. 
**Optional:** If your project has [Resource Quotas](../../../pages-for-subheaders/manage-project-resource-quotas.md) in effect, you can override the default resource **Limits** (which places a cap on the resources that the namespace can consume). + +1. Enter a **Name** and then click **Create**. + +**Result:** Your namespace is added to the project. You can begin assigning cluster resources to the namespace. + +### Moving Namespaces to Another Project + +Cluster admins and members may occasionally need to move a namespace to another project, such as when you want a different team to start using the application. + +1. From the **Global** view, open the cluster that contains the namespace you want to move. + +1. From the main menu, select **Projects/Namespaces**. + +1. Select the namespace(s) that you want to move to a different project. Then click **Move**. You can move multiple namespaces at one. + + >**Notes:** + > + >- Don't move the namespaces in the `System` project. Moving these namespaces can adversely affect cluster networking. + >- You cannot move a namespace into a project that already has a [resource quota](../../../pages-for-subheaders/manage-project-resource-quotas.md) configured. + >- If you move a namespace from a project that has a quota set to a project with no quota set, the quota is removed from the namespace. + +1. Choose a new project for the new namespace and then click **Move**. Alternatively, you can remove the namespace from all projects by selecting **None**. + +**Result:** Your namespace is moved to a different project (or is unattached from all projects). If any project resources are attached to the namespace, the namespace releases them and then attached resources from the new project. + +### Editing Namespace Resource Quotas + +You can always override the namespace default limit to provide a specific namespace with access to more (or less) project resources. + +For more information, see how to [edit namespace resource quotas](manage-project-resource-quotas/override-default-limit-in-namespaces.md). \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md new file mode 100644 index 0000000000..f1214fb1b3 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md @@ -0,0 +1,33 @@ +--- +title: Pod Security Policies +weight: 5600 +aliases: + - /rancher/v2.x/en/project-admin/pod-security-policies/ +--- + +> These cluster options are only available for [clusters in which Rancher has launched Kubernetes](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +You can always assign a pod security policy (PSP) to an existing project if you didn't assign one during creation. + +### Prerequisites + +- Create a Pod Security Policy within Rancher. Before you can assign a default PSP to an existing project, you must have a PSP available for assignment. For instruction, see [Creating Pod Security Policies](../authentication-permissions-and-global-configuration/create-pod-security-policies.md). +- Assign a default Pod Security Policy to the project's cluster. You can't assign a PSP to a project until one is already applied to the cluster. For more information, see [the documentation about adding a pod security policy to a cluster](../manage-clusters/add-a-pod-security-policy.md). 
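+For reference, the Pod Security Policy that the first prerequisite refers to is an ordinary cluster-scoped Kubernetes resource. The manifest below is a deliberately permissive sketch for illustration only, not a recommended policy; in practice, create the policy through Rancher as described in the linked instructions.
+
+```yaml
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: example-psp
+spec:
+  privileged: false          # disallow privileged containers
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  volumes:
+    - '*'
+```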
+ +### Applying a Pod Security Policy + +1. From the **Global** view, find the cluster containing the project you want to apply a PSP to. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to add a PSP to. From that project, select **⋮ > Edit**. +1. From the **Pod Security Policy** drop-down, select the PSP you want to apply to the project. + Assigning a PSP to a project will: + + - Override the cluster's default PSP. + - Apply the PSP to the project. + - Apply the PSP to any namespaces you add to the project later. + +1. Click **Save**. + +**Result:** The PSP is applied to the project and any namespaces added to the project. + +>**Note:** Any workloads that are already running in a cluster or project before a PSP is assigned will not be checked to determine if they comply with the PSP. Workloads would need to be cloned or upgraded to see if they pass the PSP. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md new file mode 100644 index 0000000000..783f8d4fe3 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md @@ -0,0 +1,43 @@ +--- +title: How Resource Quotas Work in Rancher Projects +weight: 1 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/quotas-for-projects/ +--- + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). However, in Rancher, resource quotas have been extended so that you can apply them to projects. + +In a standard Kubernetes deployment, resource quotas are applied to individual namespaces. However, you cannot apply the quota to your namespaces simultaneously with a single action. Instead, the resource quota must be applied multiple times. + +In the following diagram, a Kubernetes administrator is trying to enforce a resource quota without Rancher. The administrator wants to apply a resource quota that sets the same CPU and memory limit to every namespace in his cluster (`Namespace 1-4`) . However, in the base version of Kubernetes, each namespace requires a unique resource quota. The administrator has to create four different resource quotas that have the same specs configured (`Resource Quota 1-4`) and apply them individually. + +Base Kubernetes: Unique Resource Quotas Being Applied to Each Namespace +![Native Kubernetes Resource Quota Implementation](/img/kubernetes-resource-quota.svg) + +Resource quotas are a little different in Rancher. In Rancher, you apply a resource quota to the project, and then the quota propagates to each namespace, whereafter Kubernetes enforces your limits using the native version of resource quotas. If you want to change the quota for a specific namespace, you can override it. + +The resource quota includes two limits, which you set while creating or editing a project: + + +- **Project Limits:** + + This set of values configures an overall resource limit for the project. If you try to add a new namespace to the project, Rancher uses the limits you've set to validate that the project has enough resources to accommodate the namespace. 
In other words, if you try to move a namespace into a project near its resource quota, Rancher blocks you from moving the namespace. + +- **Namespace Default Limits:** + + This value is the default resource limit available for each namespace. When the resource quota is created at the project level, this limit is automatically propagated to each namespace in the project. Each namespace is bound to this default limit unless you override it. + +In the following diagram, a Rancher administrator wants to apply a resource quota that sets the same CPU and memory limit for every namespace in their project (`Namespace 1-4`). However, in Rancher, the administrator can set a resource quota for the project (`Project Resource Quota`) rather than individual namespaces. This quota includes resource limits for both the entire project (`Project Limit`) and individual namespaces (`Namespace Default Limit`). Rancher then propagates the `Namespace Default Limit` quotas to each namespace (`Namespace Resource Quota`) when created. + +Rancher: Resource Quotas Propagating to Each Namespace +![Rancher Resource Quota Implementation](/img/rancher-resource-quota.png) + +Let's highlight some more nuanced functionality. If a quota is deleted at the project level, it will also be removed from all namespaces contained within that project, despite any overrides that may exist. Further, updating an existing namespace default limit for a quota at the project level will not result in that value being propagated to existing namespaces in the project; the updated value will only be applied to newly created namespaces in that project. To update a namespace default limit for existing namespaces you can delete and subsequently recreate the quota at the project level with the new default value. This will result in the new default value being applied to all existing namespaces in the project. + +The following table explains the key differences between the two quota types. + +| Rancher Resource Quotas | Kubernetes Resource Quotas | +| ---------------------------------------------------------- | -------------------------------------------------------- | +| Applies to projects and namespace. | Applies to namespaces only. | +| Creates resource pool for all namespaces in project. | Applies static resource limits to individual namespaces. | +| Applies resource quotas to namespaces through propagation. | Applies only to the assigned namespace. diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md new file mode 100644 index 0000000000..1a314da733 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces.md @@ -0,0 +1,36 @@ +--- +title: Overriding the Default Limit for a Namespace +weight: 2 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/override-namespace-default/ +--- + +Although the **Namespace Default Limit** propagates from the project to each namespace when created, in some cases, you may need to increase (or decrease) the quotas for a specific namespace. In this situation, you can override the default limits by editing the namespace. + +In the diagram below, the Rancher administrator has a resource quota in effect for their project. 
However, the administrator wants to override the namespace limits for `Namespace 3` so that it has more resources available. Therefore, the administrator [raises the namespace limits](../../manage-clusters/projects-and-namespaces.md) for `Namespace 3` so that the namespace can access more resources. + +Namespace Default Limit Override +![Namespace Default Limit Override](/img/rancher-resource-quota-override.svg) + +How to: [Editing Namespace Resource Quotas](../../manage-clusters/projects-and-namespaces.md) + +### Editing Namespace Resource Quotas + +If there is a [resource quota](../../../../pages-for-subheaders/manage-project-resource-quotas.md) configured for a project, you can override the namespace default limit to provide a specific namespace with access to more (or less) project resources. + +1. From the **Global** view, open the cluster that contains the namespace for which you want to edit the resource quota. + +1. From the main menu, select **Projects/Namespaces**. + +1. Find the namespace for which you want to edit the resource quota. Select **⋮ > Edit**. + +1. Edit the Resource Quota **Limits**. These limits determine the resources available to the namespace. The limits must be set within the configured project limits. + + For more information about each **Resource Type**, see [Resource Quotas](../../../../pages-for-subheaders/manage-project-resource-quotas.md). + + >**Note:** + > + >- If a resource quota is not configured for the project, these options will not be available. + >- If you enter limits that exceed the configured project limits, Rancher will not let you save your edits. + +**Result:** Your override is applied to the namespace's resource quota. diff --git a/content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md similarity index 100% rename from content/rancher/v2.5/en/project-admin/resource-quotas/quota-type-reference/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md new file mode 100644 index 0000000000..38740198bc --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits.md @@ -0,0 +1,41 @@ +--- +title: Setting Container Default Resource Limits +weight: 3 +aliases: + - /rancher/v2.x/en/project-admin/resource-quotas/override-container-default/ +--- + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required. + +To avoid setting these limits on each and every container during workload creation, a default container resource limit can be specified on the namespace. 
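+In Kubernetes terms, a namespace-level default like this corresponds to a `LimitRange` object in the namespace. The manifest below is only a sketch of what an equivalent resource looks like (the namespace and resource values are placeholders, and the exact object Rancher manages may differ):
+
+```yaml
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: container-default-limits
+  namespace: team-a-apps      # placeholder namespace
+spec:
+  limits:
+    - type: Container
+      default:                # corresponds to the CPU/Memory Limit fields
+        cpu: 500m
+        memory: 256Mi
+      defaultRequest:         # corresponds to the CPU/Memory Reservation fields
+        cpu: 100m
+        memory: 128Mi
+```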
+ +### Editing the Container Default Resource Limit + +Edit [container default resource limit](../../../../pages-for-subheaders/manage-project-resource-quotas.md) when: + +- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container. +- You want to edit the default container resource limit. + +1. From the **Global** view, open the cluster containing the project to which you want to edit the container default resource limit. +1. From the main menu, select **Projects/Namespaces**. +1. Find the project that you want to edit the container default resource limit. From that project, select **⋮ > Edit**. +1. Expand **Container Default Resource Limit** and edit the values. + +### Resource Limit Propagation + +When the default container resource limit is set at a project level, the parameter will be propagated to any namespace created in the project after the limit has been set. For any existing namespace in a project, this limit will not be automatically propagated. You will need to manually set the default container resource limit for any existing namespaces in the project in order for it to be used when creating any containers. + +You can set a default container resource limit on a project and launch any catalog applications. + +Once a container default resource limit is configured on a namespace, the default will be pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation. + +### Container Resource Quota Types + +The following resource limits can be configured: + +| Resource Type | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container.| +| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. | +| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. | +| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md new file mode 100644 index 0000000000..0e3034880b --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md @@ -0,0 +1,137 @@ +--- +title: Persistent Grafana Dashboards +weight: 6 +aliases: + - /rancher/v2.5/en/monitoring-alerting/persist-grafana + - /rancher/v2.x/en/monitoring-alerting/v2.5/persist-grafana/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. 
+ +- [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) +- [Known Issues](#known-issues) + +# Creating a Persistent Grafana Dashboard + + + + +> **Prerequisites:** +> +> - The monitoring application needs to be installed. +> - To create the persistent dashboard, you must have at least the **Manage Config Maps** Rancher RBAC permissions assigned to you in the project or namespace that contains the Grafana Dashboards. This correlates to the `monitoring-dashboard-edit` or `monitoring-dashboard-admin` Kubernetes native RBAC Roles exposed by the Monitoring chart. +> - To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#users-with-rancher-cluster-manager-based-permissions) + +### 1. Get the JSON model of the dashboard that you want to persist + +To create a persistent dashboard, you will need to get the JSON model of the dashboard you want to persist. You can use a premade dashboard or build your own. + +To use a premade dashboard, go to [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards), open up its detail page, and click on the **Download JSON** button to get the JSON model for the next step. + +To use your own dashboard: + +1. Click on the link to open Grafana. From the **Cluster Explorer,** click **Cluster Explorer > Monitoring.** +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. + + > **Note:** Regardless of who has the password, in order to access the Grafana instance, you still need at least the Manage Services or View Monitoring permissions in the project that Rancher Monitoring is deployed into. Alternative credentials can also be supplied on deploying or upgrading the chart. +1. Create a dashboard using Grafana's UI. Once complete, go to the dashboard's settings by clicking on the gear icon in the top navigation menu. In the left navigation menu, click **JSON Model.** +1. Copy the JSON data structure that appears. + +### 2. Create a ConfigMap using the Grafana JSON model + +Create a ConfigMap in the namespace that contains your Grafana Dashboards (e.g. cattle-dashboards by default). + +The ConfigMap should look like this: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards # Change if using a non-default namespace +data: + .json: |- + +``` + +By default, Grafana is configured to watch all ConfigMaps with the `grafana_dashboard` label within the `cattle-dashboards` namespace. + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, refer to [this section.](#configuring-namespaces-for-the-grafana-dashboard-configmap) + +To create the ConfigMap in the Rancher UI, + +1. Go to the Cluster Explorer. +1. Click **Core > ConfigMaps**. +1. Click **Create**. +1. Set up the key-value pairs similar to the example above. When entering the value for `.json`, click **Read from File** to upload the JSON data model as the value. +1. Click **Create**. + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. 
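+If you keep the dashboard JSON under version control instead, the same ConfigMap can be applied from a file with `kubectl` (the file name below is a placeholder):
+
+```
+kubectl apply -f my-dashboard-configmap.yaml
+```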
+ +Dashboards that are persisted using ConfigMaps cannot be deleted or edited from the Grafana UI. + +If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +### Configuring Namespaces for the Grafana Dashboard ConfigMap + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: + +``` +grafana.sidecar.dashboards.searchNamespace=ALL +``` + +Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. + + + + +> **Prerequisites:** +> +> - The monitoring application needs to be installed. +> - You must have the cluster-admin ClusterRole permission. + +1. Open the Grafana dashboard. From the **Cluster Explorer,** click **Cluster Explorer > Monitoring.** +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. + + > **Note:** Regardless of who has the password, cluster administrator permission in Rancher is still required to access the Grafana instance. +1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. +1. In the left navigation menu, click **JSON Model.** +1. Copy the JSON data structure that appears. +1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. Paste the JSON into the ConfigMap in the format shown in the example below: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards + data: + .json: |- + + ``` + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. + +Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +To prevent the persistent dashboard from being deleted when Monitoring v2 is uninstalled, add the following annotation to the `cattle-dashboards` namespace: + +``` +helm.sh/resource-policy: "keep" +``` + + + + +# Known Issues + +For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the `cattle-dashboards` namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. + +This annotation will be added by default in the new monitoring chart released by Rancher v2.5.8, but it still needs to be manually applied for users of earlier Rancher versions. 
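+For those earlier versions, one way to apply the annotation manually is with `kubectl`:
+
+```
+kubectl annotate namespace cattle-dashboards helm.sh/resource-policy=keep
+```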
diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md new file mode 100644 index 0000000000..ad266ba8c1 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md @@ -0,0 +1,41 @@ +--- +title: Customizing Grafana Dashboards +weight: 5 +--- + +In this section, you'll learn how to customize the Grafana dashboard to show metrics that apply to a certain container. + +### Prerequisites + +Before you can customize a Grafana dashboard, the `rancher-monitoring` application must be installed. + +To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md#users-with-rancher-cluster-manager-based-permissions) + +### Signing in to Grafana + +1. In the Rancher UI, go to the cluster that has the dashboard you want to customize. +1. In the left navigation menu, click **Monitoring.** +1. Click **Grafana.** The Grafana dashboard should open in a new tab. +1. Go to the log in icon in the lower left corner and click **Sign In.** +1. Log in to Grafana. The default Admin username and password for the Grafana instance is `admin/prom-operator`. (Regardless of who has the password, cluster administrator permission in Rancher is still required access the Grafana instance.) Alternative credentials can also be supplied on deploying or upgrading the chart. + + +### Getting the PromQL Query Powering a Grafana Panel + +For any panel, you can click the title and click **Explore** to get the PromQL queries powering the graphic. + +For this example, we would like to get the CPU usage for the Alertmanager container, so we click **CPU Utilization > Inspect.** + +The **Data** tab shows the underlying data as a time series, with the time in first column and the PromQL query result in the second column. Copy the PromQL query. + + ``` + (1 - (avg(irate({__name__=~"node_cpu_seconds_total|windows_cpu_time_total",mode="idle"}[5m])))) * 100 + + ``` + +You can then modify the query in the Grafana panel or create a new Grafana panel using the query. 
+ +See also: + +- [Grafana docs on editing a panel](https://grafana.com/docs/grafana/latest/panels/panel-editor/) +- [Grafana docs on adding a panel to a dashboard](https://grafana.com/docs/grafana/latest/panels/add-a-panel/) \ No newline at end of file diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/memory-usage/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/memory-usage/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md new file mode 100644 index 0000000000..998a53a1cd --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md @@ -0,0 +1,88 @@ +--- +title: Enable Monitoring +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +As an [administrator](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) or [cluster owner](../authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), you can configure Rancher to deploy Prometheus to monitor your Kubernetes cluster. + +This page describes how to enable monitoring and alerting within a cluster using the new monitoring application. + +You can enable monitoring with or without SSL. + +# Requirements + +- Make sure that you are allowing traffic on port 9796 for each of your nodes because Prometheus will scrape metrics from here. +- Make sure your cluster fulfills the resource requirements. The cluster should have at least 1950Mi memory available, 2700m CPU, and 50Gi storage. A breakdown of the resource limits and requests is [here.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) +- When installing monitoring on an RKE cluster using RancherOS or Flatcar Linux nodes, change the etcd node certificate directory to `/opt/rke/etc/kubernetes/ssl`. +- For clusters provisioned with the RKE CLI and the address is set to a hostname instead of an IP address, set `rkeEtcd.clients.useLocalhost` to `true` during the Values configuration step of the installation. The YAML snippet will look like the following: + +```yaml +rkeEtcd: + clients: + useLocalhost: true +``` + +> **Note:** If you want to set up Alertmanager, Grafana or Ingress, it has to be done with the settings on the Helm chart deployment. It's problematic to create Ingress outside the deployment. + +# Setting Resource Limits and Requests + +The resource requests and limits can be configured when installing `rancher-monitoring`. To configure Prometheus resources from the Rancher UI, click **Apps & Marketplace > Monitoring** in the upper left corner. 
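+If you prefer to set these in the chart values instead of the UI forms, the Prometheus requests and limits can be expressed as YAML. The snippet below is a sketch that assumes the value paths exposed by the upstream kube-prometheus-stack chart that `rancher-monitoring` is based on (the numbers are examples, not recommendations), so double-check them against the chart's README:
+
+```yaml
+prometheus:
+  prometheusSpec:
+    resources:
+      requests:
+        cpu: 750m
+        memory: 750Mi
+      limits:
+        cpu: 1000m
+        memory: 3000Mi
+```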
+ +For more information about the default limits, see [this page.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) + +# Install the Monitoring Application + + + + +### Enable Monitoring for use without SSL + +1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** +1. Click **Apps.** +1. Click the `rancher-monitoring` app. +1. Optional: Click **Chart Options** and configure alerting, Prometheus and Grafana. For help, refer to the [configuration reference.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md) +1. Scroll to the bottom of the Helm chart README and click **Install.** + +**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. + +### Enable Monitoring for use with SSL + +1. Follow the steps on [this page](../../new-user-guides/kubernetes-resources-setup/secrets.md) to create a secret in order for SSL to be used for alerts. + - The secret should be created in the `cattle-monitoring-system` namespace. If it doesn't exist, create it first. + - Add the `ca`, `cert`, and `key` files to the secret. +1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** +1. Click **Apps.** +1. Click the `rancher-monitoring` app. +1. Click **Alerting**. +1. Click **Additional Secrets** and add the secrets created earlier. + +**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. + +When [creating a receiver,](../../../reference-guides/monitoring-v2-configuration/receivers.md#creating-receivers-in-the-rancher-ui) SSL-enabled receivers such as email or webhook will have a **SSL** section with fields for **CA File Path**, **Cert File Path**, and **Key File Path**. Fill in these fields with the paths to each of `ca`, `cert`, and `key`. The path will be of the form `/etc/alertmanager/secrets/name-of-file-in-secret`. + +For example, if you created a secret with these key-value pairs: + +```yaml +ca.crt=`base64-content` +cert.pem=`base64-content` +key.pfx=`base64-content` +``` + +Then **Cert File Path** would be set to `/etc/alertmanager/secrets/cert.pem`. + + + + +1. In the Rancher UI, go to the cluster where you want to install monitoring and click **Cluster Explorer.** +1. Click **Apps.** +1. Click the `rancher-monitoring` app. +1. Optional: Click **Chart Options** and configure alerting, Prometheus and Grafana. For help, refer to the [configuration reference.](../../../reference-guides/monitoring-v2-configuration/helm-chart-options.md) +1. Scroll to the bottom of the Helm chart README and click **Install.** + +**Result:** The monitoring app is deployed in the `cattle-monitoring-system` namespace. 
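+
+To confirm that the installation finished, you can list the workloads in the monitoring namespace. This is a quick check only; the exact pod names vary by chart version:
+
+```
+kubectl -n cattle-monitoring-system get pods
+```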
+ + + diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md new file mode 100644 index 0000000000..f518bd8f00 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md @@ -0,0 +1,141 @@ +--- +title: Migrating to Rancher v2.5 Monitoring +weight: 9 +aliases: + - /rancher/v2.5/en/monitoring-alerting/migrating + - /rancher/v2.x/en/monitoring-alerting/v2.5/migrating/ +--- + +If you previously enabled Monitoring, Alerting, or Notifiers in Rancher before v2.5, there is no automatic upgrade path for switching to the new monitoring/alerting solution. Before deploying the new monitoring solution via Cluster Explore, you will need to disable and remove all existing custom alerts, notifiers and monitoring installations for the whole cluster and in all projects. + +- [Monitoring Before Rancher v2.5](#monitoring-before-rancher-v2-5) +- [Monitoring and Alerting via Cluster Explorer in Rancher v2.5](#monitoring-and-alerting-via-cluster-explorer-in-rancher-v2-5) +- [Changes to Role-based Access Control](#changes-to-role-based-access-control) +- [Migrating from Monitoring V1 to Monitoring V2](#migrating-from-monitoring-v1-to-monitoring-v2) + - [Migrating Grafana Dashboards](#migrating-grafana-dashboards) + - [Migrating Alerts](#migrating-alerts) + - [Migrating Notifiers](#migrating-notifiers) + - [Migrating for RKE Template Users](#migrating-for-rke-template-users) + +# Monitoring Before Rancher v2.5 + +As of v2.2.0, Rancher's Cluster Manager allowed users to enable Monitoring & Alerting V1 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) independently within a cluster. + +When Monitoring is enabled, Monitoring V1 deploys [Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/docs/grafana/latest/getting-started/what-is-grafana/) onto a cluster to monitor the state of processes of your cluster nodes, Kubernetes components, and software deployments and create custom dashboards to make it easy to visualize collected metrics. + +Monitoring V1 could be configured on both a cluster-level and on a project-level and would automatically scrape certain workloads deployed as Apps on the Rancher cluster. + +When Alerts or Notifiers are enabled, Alerting V1 deploys [Prometheus Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) and a set of Rancher controllers onto a cluster that allows users to define alerts and configure alert-based notifications via Email, Slack, PagerDuty, etc. Users can choose to create different types of alerts depending on what needs to be monitored (e.g. System Services, Resources, CIS Scans, etc.); however, PromQL Expression-based alerts can only be created if Monitoring V1 is enabled. + +# Monitoring and Alerting via Cluster Explorer in Rancher 2.5 + +As of v2.5.0, Rancher's Cluster Explorer now allows users to enable Monitoring & Alerting V2 (both powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)) together within a cluster. 
+ +Unlike in Monitoring & Alerting V1, both features are packaged in a single Helm chart found [here](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring). The behavior of this chart and configurable fields closely matches [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), a Prometheus Community Helm chart, and any deviations from the upstream chart can be found in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md) maintained with the chart. + +Monitoring V2 can only be configured on the cluster level. Project-level monitoring and alerting is no longer supported. + +For more information on how to configure Monitoring & Alerting V2, see [this page.](../../../pages-for-subheaders/monitoring-v2-configuration-guides.md) + +# Changes to Role-based Access Control + +Project owners and members no longer get access to Grafana or Prometheus by default. If view-only users had access to Grafana, they would be able to see data from any namespace. For Kiali, any user can edit things they don’t own in any namespace. + +For more information about role-based access control in `rancher-monitoring`, refer to [this page.](../../../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md) + +# Migrating from Monitoring V1 to Monitoring V2 + +While there is no automatic migration available, it is possible to manually migrate custom Grafana dashboards and alerts that were created in Monitoring V1 to Monitoring V2. + +Before you can install Monitoring V2, Monitoring V1 needs to be uninstalled completely. In order to uninstall Monitoring V1: + +* Remove all cluster and project specific alerts and alerts groups. +* Remove all notifiers. +* Disable all project monitoring installations under Cluster -> Project -> Tools -> Monitoring. +* Ensure that all project-monitoring apps in all projects have been removed and are not recreated after a few minutes +* Disable the cluster monitoring installation under Cluster -> Tools -> Monitoring. +* Ensure that the cluster-monitoring app and the monitoring-operator app in the System project have been removed and are not recreated after a few minutes. + +#### RKE Template Clusters + +To prevent V1 monitoring from being re-enabled, disable monitoring and in future RKE template revisions via modification of the RKE template yaml: + +```yaml +enable_cluster_alerting: false +enable_cluster_monitoring: false +``` + +#### Migrating Grafana Dashboards + +You can migrate any dashboard added to Grafana in Monitoring V1 to Monitoring V2. In Monitoring V1 you can export an existing dashboard like this: + +* Sign into Grafana +* Navigate to the dashboard you want to export +* Go to the dashboard settings +* Copy the [JSON Model](https://grafana.com/docs/grafana/latest/dashboards/json-model/) + +In the JSON Model, change all `datasource` fields from `RANCHER_MONITORING` to `Prometheus`. You can easily do this by replacing all occurrences of `"datasource": "RANCHER_MONITORING"` with `"datasource": "Prometheus"`. + +If Grafana is backed by a persistent volume, you can now [import](https://grafana.com/docs/grafana/latest/dashboards/export-import/) this JSON Model into the Monitoring V2 Grafana UI. 
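+
+If you saved the exported JSON Model to a file, the datasource replacement can be done in one pass. The command below is a sketch (GNU `sed` syntax) that assumes the export was saved as `dashboard.json`:
+
+```
+sed -i 's/"datasource": "RANCHER_MONITORING"/"datasource": "Prometheus"/g' dashboard.json
+```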
+It is recommended to provide the dashboard to Grafana with a ConfigMap in the `cattle-dashboards` namespace that has the label `grafana_dashboard: "1"`: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: custom-dashboard + namespace: cattle-dashboards + labels: + grafana_dashboard: "1" +data: + custom-dashboard.json: | + { + ... + } +``` + +Once this ConfigMap is created, the dashboard will automatically be added to Grafana. + +### Migrating Alerts + +It is only possible to directly migrate expression-based alerts to Monitoring V2. Fortunately, the event-based alerts that could be set up to alert on system component, node or workload events, are already covered out-of-the-box by the alerts that are part of Monitoring V2. So it is not necessary to migrate them. + +To migrate the following expression alert + +![](/img/monitoring/migration/alert_2.4_to_2.5_source.png) + +you have to either create a PrometheusRule configuration like this in any namespace + +```yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: custom-rules + namespace: default +spec: + groups: + - name: custom.rules + rules: + - alert: Custom Expression Alert + expr: prometheus_query > 5 + for: 5m + labels: + severity: critical + annotations: + summary: "The result of prometheus_query has been larger than 5 for 5m. Current value {{ $value }}" +``` + +or add the Prometheus Rule through the Cluster Explorer + +![](/img/monitoring/migration/alert_2.4_to_2.5_target.png) + +For more details on how to configure PrometheusRules in Monitoring V2 see [Monitoring Configuration](../../../pages-for-subheaders/monitoring-v2-configuration-guides.md#prometheusrules). + +### Migrating Notifiers + +There is no direct equivalent for how notifiers work in Monitoring V1. Instead you have to replicate the desired setup with [Routes and Receivers](../../../pages-for-subheaders/monitoring-v2-configuration-guides.md#alertmanager-config) in Monitoring V2. + + +### Migrating for RKE Template Users + +If the cluster is managed using an RKE template, you will need to disable monitoring in future RKE template revisions to prevent legacy monitoring from being re-enabled. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/monitoring-workloads/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/guides/monitoring-workloads/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/uninstall/_index.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/guides/uninstall/_index.md rename to versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md new file mode 100644 index 0000000000..f5b27efc8b --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md @@ -0,0 +1,40 @@ +--- +title: Alertmanager Configuration +weight: 1 +--- + +It is usually not necessary to directly edit the Alertmanager custom resource. For most use cases, you will only need to edit the Receivers and Routes to configure notifications. + +When Receivers and Routes are updated, the monitoring application will automatically update the Alertmanager custom resource to be consistent with those changes. + +> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#how-alertmanager-works) + +# About the Alertmanager Custom Resource + +By default, Rancher Monitoring deploys a single Alertmanager onto a cluster that uses a default Alertmanager Config Secret. + +You may want to edit the Alertmanager custom resource if you would like to take advantage of advanced options that are not exposed in the Rancher UI forms, such as the ability to create a routing tree structure that is more than two levels deep. + +It is also possible to create more than one Alertmanager in a cluster, which may be useful if you want to implement namespace-scoped monitoring. In this case, you should manage the Alertmanager custom resources using the same underlying Alertmanager Config Secret. + +### Deeply Nested Routes + +While the Rancher UI only supports a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager YAML. + +### Multiple Alertmanager Replicas + +As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret. 
+ +This Secret should be updated or modified any time you want to: + +- Add in new notifiers or receivers +- Change the alerts that should be sent to specific notifiers or receivers +- Change the group of alerts that are sent out + +By default, you can either choose to supply an existing Alertmanager Config Secret (i.e. any Secret in the `cattle-monitoring-system` namespace) or allow Rancher Monitoring to deploy a default Alertmanager Config Secret onto your cluster. + +By default, the Alertmanager Config Secret created by Rancher will never be modified or deleted on an upgrade or uninstall of the `rancher-monitoring` chart. This restriction prevents users from losing or overwriting their alerting configuration when executing operations on the chart. + +For more information on what fields can be specified in the Alertmanager Config Secret, please look at the [Prometheus Alertmanager docs.](https://prometheus.io/docs/alerting/latest/alertmanager/) + +The full spec for the Alertmanager configuration file and what it takes in can be found [here.](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md new file mode 100644 index 0000000000..7f6e8492f4 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md @@ -0,0 +1,19 @@ +--- +title: Prometheus Configuration +weight: 1 +aliases: + - /rancher/v2.5/en/monitoring-alerting/configuration/prometheusrules + - /rancher/v2.5/en/monitoring-alerting/configuration/prometheusrules + - /rancher/v2.5/en/monitoring-alerting/configuration/advanced/prometheusrules +--- + +It is usually not necessary to directly edit the Prometheus custom resource because the monitoring application automatically updates it based on changes to ServiceMonitors and PodMonitors. +> This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +# About the Prometheus Custom Resource + +The Prometheus CR defines a desired Prometheus deployment. The Prometheus Operator observes the Prometheus CR. When the CR changes, the Prometheus Operator creates `prometheus-rancher-monitoring-prometheus`, a Prometheus deployment based on the CR configuration. + +The Prometheus CR specifies details such as rules and what Alertmanagers are connected to Prometheus. Rancher builds this CR for you. + +Monitoring V2 only supports one Prometheus per cluster. However, you might want to edit the Prometheus CR if you want to limit monitoring to certain namespaces. 
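+
+For example, the Prometheus CR exposes namespace selectors that control where ServiceMonitors are discovered. The snippet below is a sketch of the relevant part of the spec using field names from the Prometheus Operator API; the CR name shown is an assumption, and because the CR is managed by Rancher, manual changes may be reverted on upgrade.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  name: rancher-monitoring-prometheus   # assumed default name created by the chart
+  namespace: cattle-monitoring-system
+spec:
+  # only scrape ServiceMonitors from namespaces carrying this label
+  serviceMonitorNamespaceSelector:
+    matchLabels:
+      monitoring: enabled
+```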
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md new file mode 100644 index 0000000000..dd89063ea9 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md @@ -0,0 +1,94 @@ +--- +title: Configuring PrometheusRules +weight: 3 +aliases: + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/prometheusrules/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +A PrometheusRule defines a group of Prometheus alerting and/or recording rules. + +> This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +### Creating PrometheusRules in the Rancher UI + +_Available as of v2.5.4_ + +> **Prerequisite:** The monitoring application needs to be installed. + +To create rule groups in the Rancher UI, + +1. Click **Cluster Explorer > Monitoring** and click **Prometheus Rules.** +1. Click **Create.** +1. Enter a **Group Name.** +1. Configure the rules. In Rancher's UI, we expect a rule group to contain either alert rules or recording rules, but not both. For help filling out the forms, refer to the configuration options below. +1. Click **Create.** + +**Result:** Alerts can be configured to send notifications to the receiver(s). + +### About the PrometheusRule Custom Resource + +When you define a Rule (which is declared within a RuleGroup in a PrometheusRule resource), the [spec of the Rule itself](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#rule) contains labels that are used by Alertmanager to figure out which Route should receive this Alert. For example, an Alert with the label `team: front-end` will be sent to all Routes that match on that label. + +Prometheus rule files are held in PrometheusRule custom resources. A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: + +- The name of the new alert or record +- A PromQL expression for the new alert or record +- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) +- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. + +For more information on what fields can be specified, please look at the [Prometheus Operator spec.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec) + +Use the label selector field `ruleSelector` in the Prometheus object to define the rule files that you want to be mounted into Prometheus. 
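+
+Putting those fields together, a minimal PrometheusRule with a single alerting rule might look like the sketch below. The metric, threshold, and labels are placeholders; in particular, the `severity` label should match what your Routes and Receivers expect.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: example-rules
+  namespace: cattle-monitoring-system
+spec:
+  groups:
+    - name: example.rules
+      rules:
+        - alert: HighErrorRate
+          # fire when more than one 5xx response per second is observed for 10 minutes
+          expr: sum(rate(http_requests_total{code=~"5.."}[5m])) > 1
+          for: 10m
+          labels:
+            severity: warning
+          annotations:
+            summary: "More than one 5xx response per second for the last 10 minutes."
+```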
+ +For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) + +# Configuration + + + + +Rancher v2.5.4 introduced the capability to configure PrometheusRules by filling out forms in the Rancher UI. + + +### Rule Group + +| Field | Description | +|-------|----------------| +| Group Name | The name of the group. Must be unique within a rules file. | +| Override Group Interval | Duration in seconds for how often rules in the group are evaluated. | + + +### Alerting Rules + +[Alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) allow you to define alert conditions based on PromQL (Prometheus Query Language) expressions and to send notifications about firing alerts to an external service. + +| Field | Description | +|-------|----------------| +| Alert Name | The name of the alert. Must be a valid label value. | +| Wait To Fire For | Duration in seconds. Alerts are considered firing once they have been returned for this long. Alerts which have not yet fired for long enough are considered pending. | +| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and all resultant time series will become pending/firing alerts. For more information, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md) | +| Labels | Labels to add or overwrite for each alert. | +| Severity | When enabled, labels are attached to the alert or record that identify it by the severity level. | +| Severity Label Value | Critical, warning, or none | +| Annotations | Annotations are a set of informational labels that can be used to store longer additional information, such as alert descriptions or runbook links. A [runbook](https://en.wikipedia.org/wiki/Runbook) is a set of documentation about how to handle alerts. The annotation values can be [templated.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/#templating) | + +### Recording Rules + +[Recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) allow you to precompute frequently needed or computationally expensive PromQL (Prometheus Query Language) expressions and save their result as a new set of time series. + +| Field | Description | +|-------|----------------| +| Time Series Name | The name of the time series to output to. Must be a valid metric name. | +| PromQL Expression | The PromQL expression to evaluate. Prometheus will evaluate the current value of this PromQL expression on every evaluation cycle and the result will be recorded as a new set of time series with the metric name as given by 'record'. 
For more information about expressions, refer to the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/) or our [example PromQL expressions.](../../../../explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions.md) | +| Labels | Labels to add or overwrite before storing the result. | + + + + +For Rancher v2.5.0-v2.5.3, PrometheusRules must be configured in YAML. For examples, refer to the Prometheus documentation on [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [alerting rules.](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) + + + diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md new file mode 100644 index 0000000000..b31c1ccf05 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md @@ -0,0 +1,79 @@ +--- +title: Backing up Rancher Installed with Docker +shortTitle: Backups +weight: 3 +aliases: + - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.5/en/backups/backups/single-node-backups/ + - /rancher/v2.5/en/backups/legacy/backup/single-node-backups/ + - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-backups/ + - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-backups/ +--- + + +After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. + +## Before You Start + +During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher +``` + +In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. + +## Creating a Backup + +This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. + + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data- rancher/rancher: + ``` + +1. From the data container that you just created (rancher-data-<DATE>), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). Use the following command, replacing each placeholder: + + ``` + docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** A stream of commands runs on the screen. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. + +1. Restart Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker start + ``` + +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs](./restore-docker-installed-rancher.md) if you need to restore backup data. diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md new file mode 100644 index 0000000000..11f44ddff2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md @@ -0,0 +1,168 @@ +--- +title: Backing up a Cluster +weight: 2045 +aliases: + - /rancher/v2.x/en/cluster-admin/backing-up-etcd/ +--- + +In the Rancher UI, etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. + +Rancher recommends configuring recurrent `etcd` snapshots for all production clusters. Additionally, one-time snapshots can easily be taken as well. + +Snapshots of the etcd database are taken and saved either [locally onto the etcd nodes](#local-backup-target) or to a [S3 compatible target](#s3-backup-target). The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. 
+ +This section covers the following topics: + +- [How snapshots work](#how-snapshots-work) +- [Configuring recurring snapshots](#configuring-recurring-snapshots) +- [One-time snapshots](#one-time-snapshots) +- [Snapshot backup targets](#snapshot-backup-targets) + - [Local backup target](#local-backup-target) + - [S3 backup target](#s3-backup-target) + - [Using a custom CA certificate for S3](#using-a-custom-ca-certificate-for-s3) + - [IAM Support for storing snapshots in S3](#iam-support-for-storing-snapshots-in-s3) +- [Viewing available snapshots](#viewing-available-snapshots) +- [Safe timestamps](#safe-timestamps) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +# How Snapshots Work + +### Snapshot Components + +When Rancher creates a snapshot, it includes three components: + +- The cluster data in etcd +- The Kubernetes version +- The cluster configuration in the form of the `cluster.yml` + +Because the Kubernetes version is now included in the snapshot, it is possible to restore a cluster to a prior Kubernetes version. + +The multiple components of the snapshot allow you to select from the following options if you need to restore a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. + +It's always recommended to take a new snapshot before any upgrades. + +### Generating the Snapshot from etcd Nodes + +For each etcd node in the cluster, the etcd cluster health is checked. If the node reports that the etcd cluster is healthy, a snapshot is created from it and optionally uploaded to S3. + +The snapshot is stored in `/opt/rke/etcd-snapshots`. If the directory is configured on the nodes as a shared mount, it will be overwritten. On S3, the snapshot will always be from the last node that uploads it, as all etcd nodes upload it and the last will remain. + +In the case when multiple etcd nodes exist, any created snapshot is created after the cluster has been health checked, so it can be considered a valid snapshot of the data in the etcd cluster. + +### Snapshot Naming Conventions + +The name of the snapshot is auto-generated. The `--name` option can be used to override the name of the snapshot when creating one-time snapshots with the RKE CLI. + +When Rancher creates a snapshot of an RKE cluster, the snapshot name is based on the type (whether the snapshot is manual or recurring) and the target (whether the snapshot is saved locally or uploaded to S3). The naming convention is as follows: + +- `m` stands for manual +- `r` stands for recurring +- `l` stands for local +- `s` stands for S3 + +Some example snapshot names are: + +- c-9dmxz-rl-8b2cx +- c-9dmxz-ml-kr56m +- c-9dmxz-ms-t6bjb +- c-9dmxz-rs-8gxc8 + +### How Restoring from a Snapshot Works + +On restore, the following process is used: + +1. The snapshot is retrieved from S3, if S3 is configured. +2. The snapshot is unzipped (if zipped). +3. One of the etcd nodes in the cluster serves that snapshot file to the other nodes. +4. 
The other etcd nodes download the snapshot and validate the checksum so that they all use the same snapshot for the restore. +5. The cluster is restored and post-restore actions will be done in the cluster. + +# Configuring Recurring Snapshots + +Select how often you want recurring snapshots to be taken as well as how many snapshots to keep. The amount of time is measured in hours. With timestamped snapshots, the user has the ability to do a point-in-time recovery. + +By default, [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) are configured to take recurring snapshots (saved to local disk). To protect against local disk failure, using the [S3 Target](#s3-backup-target) or replicating the path on disk is advised. + +During cluster provisioning or editing the cluster, the configuration for snapshots can be found in the advanced section for **Cluster Options**. Click on **Show advanced options**. + +In the **Advanced Cluster Options** section, there are several options available to configure: + +| Option | Description | Default Value| +| --- | ---| --- | +| etcd Snapshot Backup Target | Select where you want the snapshots to be saved. Options are either local or in S3 | local| +|Recurring etcd Snapshot Enabled| Enable/Disable recurring snapshots | Yes| +| Recurring etcd Snapshot Creation Period | Time in hours between recurring snapshots| 12 hours | +| Recurring etcd Snapshot Retention Count | Number of snapshots to retain| 6 | + +# One-Time Snapshots + +In addition to recurring snapshots, you may want to take a "one-time" snapshot. For example, before upgrading the Kubernetes version of a cluster it's best to backup the state of the cluster to protect against upgrade failure. + +1. In the **Global** view, navigate to the cluster that you want to take a one-time snapshot. + +2. Click the **⋮ > Snapshot Now**. + +**Result:** Based on your [snapshot backup target](#snapshot-backup-targets), a one-time snapshot will be taken and saved in the selected backup target. + +# Snapshot Backup Targets + +Rancher supports two different backup targets: + +* [Local Target](#local-backup-target) +* [S3 Target](#s3-backup-target) + +### Local Backup Target + +By default, the `local` backup target is selected. The benefits of this option is that there is no external configuration. Snapshots are automatically saved locally to the etcd nodes in the [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) in `/opt/rke/etcd-snapshots`. All recurring snapshots are taken at configured intervals. The downside of using the `local` backup target is that if there is a total disaster and _all_ etcd nodes are lost, there is no ability to restore the cluster. + +### S3 Backup Target + +The `S3` backup target allows users to configure a S3 compatible backend to store the snapshots. The primary benefit of this option is that if the cluster loses all the etcd nodes, the cluster can still be restored as the snapshots are stored externally. Rancher recommends external targets like `S3` backup, however its configuration requirements do require additional effort that should be considered. 
+ +| Option | Description | Required| +|---|---|---| +|S3 Bucket Name| S3 bucket name where backups will be stored| *| +|S3 Region|S3 region for the backup bucket| | +|S3 Region Endpoint|S3 regions endpoint for the backup bucket|* | +|S3 Access Key|S3 access key with permission to access the backup bucket|*| +|S3 Secret Key|S3 secret key with permission to access the backup bucket|*| +| Custom CA Certificate | A custom certificate used to access private S3 backends || + +### Using a custom CA certificate for S3 + +The backup snapshot can be stored on a custom `S3` backup like [minio](https://min.io/). If the S3 back end uses a self-signed or custom certificate, provide a custom certificate using the `Custom CA Certificate` option to connect to the S3 backend. + +### IAM Support for Storing Snapshots in S3 + +The `S3` backup target supports using IAM authentication to AWS API in addition to using API credentials. An IAM role gives temporary permissions that an application can use when making API calls to S3 storage. To use IAM authentication, the following requirements must be met: + + - The cluster etcd nodes must have an instance role that has read/write access to the designated backup bucket. + - The cluster etcd nodes must have network access to the specified S3 endpoint. + - The Rancher Server worker node(s) must have an instance role that has read/write to the designated backup bucket. + - The Rancher Server worker node(s) must have network access to the specified S3 endpoint. + + To give an application access to S3, refer to the AWS documentation on [Using an IAM Role to Grant Permissions to Applications Running on Amazon EC2 Instances.](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html) + +# Viewing Available Snapshots + +The list of all available snapshots for the cluster is available in the Rancher UI. + +1. In the **Global** view, navigate to the cluster that you want to view snapshots. + +2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. + +# Safe Timestamps + +Snapshot files are timestamped to simplify processing the files using external tools and scripts, but in some S3 compatible backends, these timestamps were unusable. + +The option `safe_timestamp` is added to support compatible file names. When this flag is set to `true`, all special characters in the snapshot filename timestamp are replaced. + +This option is not available directly in the UI, and is only available through the `Edit as Yaml` interface. + +# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0 + +If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it, in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step as the older snapshots will not be available to use to [back up and restore etcd through the UI](restore-rancher-launched-kubernetes-clusters-from-backup.md). 
diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md new file mode 100644 index 0000000000..a26e8b70b2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md @@ -0,0 +1,73 @@ +--- +title: Backing up Rancher +weight: 1 +aliases: + - /rancher/v2.5/en/backups/v2.5/back-up-rancher + - /rancher/v2.x/en/backups/ + - /rancher/v2.x/en/backups/v2.5/back-up-rancher/ +--- + +In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer to the instructions for [single node backups](./back-up-docker-installed-rancher.md). + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. + +Note that the rancher-backup operator version 1.x.x is for Rancher v2.5.x. + +> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. The Kubernetes version should also be considered when restoring a backup, since the supported apiVersion in the cluster and in the backup file could be different. + +### Prerequisites + +Rancher version must be v2.5.0 and up + +### 1. Install the `rancher-backup` operator + +The backup storage location is an operator-level setting, so it needs to be configured when `rancher-backup` is installed or upgraded. + +Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. + +1. In the Rancher UI, go to the **Cluster Explorer** view for the local cluster. +1. Click **Apps.** +1. Click **Rancher Backups.** +1. Configure the default storage location. For help, refer to the [storage configuration section.](../../../reference-guides/backup-restore-configuration/storage-configuration.md) + +>**NOTE:** There are two known issues in Fleet that occur after performing a restoration using the backup-restore-operator: Fleet agents are inoperable and clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here](../deploy-apps-across-clusters/fleet.md#troubleshooting) for workarounds. + +### 2. Perform a Backup + +To perform a backup, a custom resource of type Backup must be created. + +1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.** +1. Click **Backup.** +1. Create the Backup with the form, or with the YAML editor. +1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/backup-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md#backup) +1. For using the YAML editor, we can click **Create > Create from YAML.** Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3. 
The app uses the `credentialSecretNamespace` value to determine where to look for the S3 backup secret: + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Backup + metadata: + name: s3-recurring-backup + spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 10 + ``` + + > **Note:** When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` + + For help configuring the Backup, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/backup-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md#backup) + + > **Important:** The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. +1. Click **Create.** + +**Result:** The backup file is created in the storage location configured in the Backup custom resource. The name of this file is used when performing a restore. + diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md new file mode 100644 index 0000000000..df35047fbf --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md @@ -0,0 +1,105 @@ +--- +title: Migrating Rancher to a New Cluster +weight: 3 +aliases: + - /rancher/v2.x/en/backups/v2.5/migrating-rancher/ +--- + +If you are migrating Rancher to a new Kubernetes cluster, you don't need to install Rancher on the new cluster first. If Rancher is restored to a new cluster with Rancher already installed, it can cause problems. + +### Prerequisites + +These instructions assume you have [created a backup](back-up-rancher.md) and you have already installed a new Kubernetes cluster where Rancher will be deployed. + +It is required to use the same hostname that was set as the server URL in the first cluster. + +Rancher version must be v2.5.0 and up + +Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes clusters such as Amazon EKS clusters. For help installing Kubernetes, refer to the documentation of the Kubernetes distribution. One of Rancher's Kubernetes distributions may also be used: + +- [RKE Kubernetes installation docs](https://rancher.com/docs/rke/latest/en/installation/) +- [K3s Kubernetes installation docs](https://rancher.com/docs/k3s/latest/en/installation/) + +### 1. Install the rancher-backup Helm chart +Install version 1.x.x of the rancher-backup chart. 
The following assumes a connected environment with access to DockerHub: + +``` +helm repo add rancher-charts https://charts.rancher.io +helm repo update +helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace --version $CHART_VERSION +helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system --version $CHART_VERSION +``` +
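+
+If you are unsure which chart version to pass as `$CHART_VERSION`, you can list the versions published in the repository first and pick a 1.x.x release, which is the line that pairs with Rancher v2.5.x:
+
+```
+helm search repo rancher-charts/rancher-backup --versions
+```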
    +For an **air-gapped environment**, use the option below to pull the `backup-restore-operator` image from your private registry when installing the rancher-backup-crd helm chart. +``` +--set image.repository $REGISTRY/rancher/backup-restore-operator +``` + +### 2. Restore from backup using a Restore custom resource + +If you are using an S3 store as the backup source, and need to use your S3 credentials for restore, create a secret in this cluster using your S3 credentials. The Secret data must have two keys, `accessKey` and `secretKey` containing the s3 credentials like this: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: s3-creds +type: Opaque +stringData: + accessKey: + secretKey: +``` + +This secret can be created in any namespace, with the above example it will get created in the default namespace + +In the Restore custom resource, `prune` must be set to false. + +Create a Restore custom resource like the example below: + +```yaml +# migrationResource.yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-migration +spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + prune: false + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: backup-test + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com +``` + +>**Important:** The field `encryptionConfigSecretName` must be set only if your backup was created with encryption enabled. Provide the name of the Secret containing the encryption config file. If you only have the encryption config file, but don't have a secret created with it in this cluster, use the following steps to create the secret: + +1. The encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret. So save your `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command: + ``` + kubectl create secret generic encryptionconfig \ + --from-file=./encryption-provider-config.yaml \ + -n cattle-resources-system + ``` + +1. Then apply the resource: + ``` + kubectl apply -f migrationResource.yaml + ``` + +### 3. Install cert-manager + +Follow the steps to [install cert-manager](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md#5-install-cert-manager) in the documentation about installing cert-manager on Kubernetes. + +### 4. Bring up Rancher with Helm + +Use the same version of Helm to install Rancher, that was used on the first cluster. 
+ +``` +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname= \ +``` diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md new file mode 100644 index 0000000000..c2f0feccb4 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md @@ -0,0 +1,75 @@ +--- +title: Restoring Backups—Docker Installs +shortTitle: Restores +weight: 3 +aliases: + - /rancher/v2.5/en/installation/after-installation/single-node-backup-and-restoration/ + - /rancher/v2.5/en/backups/restorations/single-node-restoration + - /rancher/v2.5/en/backups/v2.5/docker-installs/docker-restores + - /rancher/v2.x/en/backups/v2.5/docker-installs/docker-restores/ +--- + +If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. + +## Before You Start + +During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from -v $PWD:/backup \ +busybox sh -c "rm /var/lib/rancher/* -rf && \ +tar pzxvf /backup/rancher-data-backup--" +``` + +In this command, `` and `-` are environment variables for your Rancher deployment. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference](/img/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version number for your Rancher backup. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Restoring Backups + +Using a [backup](back-up-docker-installed-rancher.md) that you created earlier, restore Rancher to its last known healthy state. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker stop + ``` +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs](back-up-docker-installed-rancher.md) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Creating Backups—Docker Installs](back-up-docker-installed-rancher.md), it will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. + + >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. + + ``` + docker run --volumes-from -v $PWD:/backup \ + busybox sh -c "rm /var/lib/rancher/* -rf && \ + tar pzxvf /backup/rancher-data-backup--.tar.gz" + ``` + + **Step Result:** A series of commands should run. + +1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. + + ``` + docker start + ``` + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md new file mode 100644 index 0000000000..04485d9db2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md @@ -0,0 +1,87 @@ +--- +title: Restoring a Cluster from Backup +weight: 2050 +aliases: + - /rancher/v2.x/en/cluster-admin/restoring-etcd/ +--- + +etcd backup and recovery for [Rancher launched Kubernetes clusters](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) can be easily performed. Snapshots of the etcd database are taken and saved either locally onto the etcd nodes or to a S3 compatible target. The advantages of configuring S3 is that if all etcd nodes are lost, your snapshot is saved remotely and can be used to restore the cluster. + +Rancher recommends enabling the [ability to set up recurring snapshots of etcd](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots), but [one-time snapshots](back-up-rancher-launched-kubernetes-clusters.md#one-time-snapshots) can easily be taken as well. Rancher allows restore from [saved snapshots](#restoring-a-cluster-from-a-snapshot) or if you don't have any snapshots, you can still [restore etcd](#recovering-etcd-without-a-snapshot). 
+ +Clusters can also be restored to a prior Kubernetes version and cluster configuration. + +This section covers the following topics: + +- [Viewing Available Snapshots](#viewing-available-snapshots) +- [Restoring a Cluster from a Snapshot](#restoring-a-cluster-from-a-snapshot) +- [Recovering etcd without a Snapshot](#recovering-etcd-without-a-snapshot) +- [Enabling snapshot features for clusters created before Rancher v2.2.0](#enabling-snapshot-features-for-clusters-created-before-rancher-v2-2-0) + +## Viewing Available Snapshots + +The list of all available snapshots for the cluster is available. + +1. In the **Global** view, navigate to the cluster that you want to view snapshots. + +2. Click **Tools > Snapshots** from the navigation bar to view the list of saved snapshots. These snapshots include a timestamp of when they were created. + +## Restoring a Cluster from a Snapshot + +If your Kubernetes cluster is broken, you can restore the cluster from a snapshot. + +Snapshots are composed of the cluster data in etcd, the Kubernetes version, and the cluster configuration in the `cluster.yml.` These components allow you to select from the following options when restoring a cluster from a snapshot: + +- **Restore just the etcd contents:** This restore is similar to restoring to snapshots in Rancher before v2.4.0. +- **Restore etcd and Kubernetes version:** This option should be used if a Kubernetes upgrade is the reason that your cluster is failing, and you haven't made any cluster configuration changes. +- **Restore etcd, Kubernetes versions and cluster configuration:** This option should be used if you changed both the Kubernetes version and cluster configuration when upgrading. + +When rolling back to a prior Kubernetes version, the [upgrade strategy options](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md#configuring-the-upgrade-strategy) are ignored. Worker nodes are not cordoned or drained before being reverted to the older Kubernetes version, so that an unhealthy cluster can be more quickly restored to a healthy state. + +> **Prerequisite:** To restore snapshots from S3, the cluster needs to be configured to [take recurring snapshots on S3.](back-up-rancher-launched-kubernetes-clusters.md#configuring-recurring-snapshots) + +1. In the **Global** view, navigate to the cluster that you want to restore from a snapshots. + +2. Click the **⋮ > Restore Snapshot**. + +3. Select the snapshot that you want to use for restoring your cluster from the dropdown of available snapshots. + +4. In the **Restoration Type** field, choose one of the restore options described above. + +5. Click **Save**. + +**Result:** The cluster will go into `updating` state and the process of restoring the `etcd` nodes from the snapshot will start. The cluster is restored when it returns to an `active` state. + +## Recovering etcd without a Snapshot + +If the group of etcd nodes loses quorum, the Kubernetes cluster will report a failure because no operations, e.g. deploying workloads, can be executed in the Kubernetes cluster. The cluster should have three etcd nodes to prevent a loss of quorum. If you want to recover your set of etcd nodes, follow these instructions: + +1. Keep only one etcd node in the cluster by removing all other etcd nodes. + +2. 
On the single remaining etcd node, run the following command:
+
+   ```
+   $ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock assaflavie/runlike etcd
+   ```
+
+   This command outputs the running command for etcd. Save this command to use later.
+
+3. Stop the etcd container that you launched in the previous step and rename it to `etcd-old`.
+
+   ```
+   $ docker stop etcd
+   $ docker rename etcd etcd-old
+   ```
+
+4. Take the saved command from Step 2 and revise it:
+
+   - If you originally had more than 1 etcd node, then you need to change `--initial-cluster` to only contain the node that remains.
+   - Add `--force-new-cluster` to the end of the command.
+
+5. Run the revised command.
+
+6. After the single node is up and running, Rancher recommends adding additional etcd nodes to your cluster. If you have a [custom cluster](../../../pages-for-subheaders/use-existing-nodes.md) and you want to reuse an old node, you are required to [clean up the nodes](../../advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before attempting to add them back into a cluster.
+
+# Enabling Snapshot Features for Clusters Created Before Rancher v2.2.0
+
+If you have any Rancher launched Kubernetes clusters that were created before v2.2.0, after upgrading Rancher, you must [edit the cluster](../../../pages-for-subheaders/cluster-configuration.md) and _save_ it in order to enable the updated snapshot features. Even if you were already creating snapshots before v2.2.0, you must do this step, as the older snapshots will not be available to use to [back up and restore etcd through the UI](restore-rancher-launched-kubernetes-clusters-from-backup.md).
diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
new file mode 100644
index 0000000000..e80b21e64d
--- /dev/null
+++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md
@@ -0,0 +1,72 @@
+---
+title: Restoring Rancher
+weight: 2
+aliases:
+  - /rancher/v2.x/en/installation/backups/restores
+  - /rancher/v2.x/en/backups/restoring-rancher
+  - /rancher/v2.x/en/backups/v2.5/restoring-rancher/
+---
+
+A restore is performed by creating a Restore custom resource.
+
+> **Important**
+>
+> * Follow the instructions on this page to restore Rancher on the same cluster where it was backed up. To migrate Rancher to a new cluster, follow the steps to [migrate Rancher.](migrate-rancher-to-new-cluster.md)
+> * When restoring Rancher on the same setup, the operator scales down the Rancher deployment when the restore starts and scales it back up once the restore completes, so Rancher will be unavailable during the restore.
+> * When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup was made.
+
+### Create the Restore Custom Resource
+
+1. In the **Cluster Explorer,** go to the dropdown menu in the upper left corner and click **Rancher Backups.**
+1. Click **Restore.**
+1. Create the Restore with the form, or with YAML. To create the Restore resource using the form, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md)
+1. 
For using the YAML editor, we can click **Create > Create from YAML.** Enter the Restore YAML. + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Restore + metadata: + name: restore-migration + spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + encryptionConfigSecretName: encryptionconfig + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + ``` + + For help configuring the Restore, refer to the [configuration reference](../../../reference-guides/backup-restore-configuration/restore-configuration.md) and to the [examples.](../../../reference-guides/backup-restore-configuration/examples.md) + +1. Click **Create.** + +**Result:** The rancher-operator scales down the rancher deployment during restore, and scales it back up once the restore completes. The resources are restored in this order: + +1. Custom Resource Definitions (CRDs) +2. Cluster-scoped resources +3. Namespaced resources + +### Logs + +To check how the restore is progressing, you can check the logs of the operator. Run this command to follow the logs: + +``` +kubectl logs -n cattle-resources-system -l app.kubernetes.io/name=rancher-backup -f +``` + +### Cleanup + +If you created the restore resource with kubectl, remove the resource to prevent a naming conflict with future restores. + +### Known Issues +In some cases, after restoring the backup, Rancher logs will show errors similar to the following: +``` +2021/10/05 21:30:45 [ERROR] error syncing 'c-89d82/m-4067aa68dd78': handler rke-worker-upgrader: clusters.management.cattle.io "c-89d82" not found, requeuing +``` +This happens because one of the resources that was just restored has finalizers but the related resources have been deleted so the handler cannot find it. + +To eliminate the errors, we need to find and delete the resource that causes the error. See more information [here](https://github.com/rancher/rancher/issues/35050#issuecomment-937968556) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md new file mode 100644 index 0000000000..bc7a8c1304 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md @@ -0,0 +1,93 @@ +--- +title: Fleet - GitOps at Scale +weight: 1 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/fleet/ +--- + +_Available as of Rancher v2.5_ + +Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. + +Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. 
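For standalone use outside of Rancher, a minimal installation sketch with Helm might look like the following. This assumes the upstream `fleet-helm-charts` repository and the `cattle-fleet-system` namespace; both are assumptions that can differ between Fleet releases (older releases used the `fleet-system` namespace), so check the Fleet documentation for your version.

```bash
# Hedged sketch: installing standalone Fleet with Helm on an existing cluster.
# Not needed when using the Fleet that ships with Rancher v2.5.
# Chart repository URL and namespace are assumptions; verify them for your Fleet version.
helm repo add fleet https://rancher.github.io/fleet-helm-charts/
helm repo update

# Install the CRDs first, then the Fleet controller itself.
helm -n cattle-fleet-system install --create-namespace --wait fleet-crd fleet/fleet-crd
helm -n cattle-fleet-system install --create-namespace --wait fleet fleet/fleet
```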
+ +- [Architecture](#architecture) +- [Accessing Fleet in the Rancher UI](#accessing-fleet-in-the-rancher-ui) +- [Windows Support](#windows-support) +- [GitHub Repository](#github-repository) +- [Using Fleet Behind a Proxy](#using-fleet-behind-a-proxy) +- [Helm Chart Dependencies](#helm-chart-dependencies) +- [Troubleshooting](#troubleshooting) +- [Documentation](#documentation) + +# Architecture + +For information about how Fleet works, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture.md) + +# Accessing Fleet in the Rancher UI + +Fleet comes preinstalled in Rancher v2.5. Users can leverage continuous delivery to deploy their applications to the Kubernetes clusters in the git repository without any manual operation by following **gitops** practice. For additional information on Continuous Delivery and other Fleet troubleshooting tips, refer [here](https://fleet.rancher.io/troubleshooting/). + +Follow the steps below to access Continuous Delivery in the Rancher UI: + +1. Click **Cluster Explorer** in the Rancher UI. + +1. In the top left dropdown menu, click **Cluster Explorer > Continuous Delivery.** + +1. Select your namespace at the top of the menu, noting the following: + - By default,`fleet-default` is selected which includes all downstream clusters that are registered through Rancher. + - You may switch to `fleet-local`, which only contains the `local` cluster, or you may create your own workspace to which you may assign and move clusters. + - You can then manage clusters by clicking on **Clusters** on the left navigation bar. + +1. Click on **Gitrepos** on the left navigation bar to deploy the gitrepo into your clusters in the current workspace. + +1. Select your [git repository](https://fleet.rancher.io/gitrepo-add/) and [target clusters/cluster group](https://fleet.rancher.io/gitrepo-structure/). You can also create the cluster group in the UI by clicking on **Cluster Groups** from the left navigation bar. + +1. Once the gitrepo is deployed, you can monitor the application through the Rancher UI. + +# Windows Support + +_Available as of v2.5.6_ + +For details on support for clusters with Windows nodes, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support.md) + + +# GitHub Repository + +The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) + + +# Using Fleet Behind a Proxy + +_Available as of v2.5.8_ + +For details on using Fleet behind a proxy, see [this page.](../../../explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy.md) + +# Helm Chart Dependencies + +In order for Helm charts with dependencies to deploy successfully, you must run a manual command (as listed below), as it is up to the user to fulfill the dependency list. If you do not do this and proceed to clone your repository and run `helm install`, your installation will fail because the dependencies will be missing. + +The Helm chart in the git repository must include its dependencies in the charts subdirectory. You must either manually run `helm dependencies update $chart` OR run `helm dependencies build $chart` locally, then commit the complete charts directory to your git repository. Note that you will update your commands with the applicable parameters. 
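As a hedged illustration of that workflow, the following sketch uses a hypothetical chart at `charts/my-app` inside the repository that Fleet watches:

```bash
# Hypothetical chart path inside the git repository watched by Fleet.
CHART=charts/my-app

# Vendor the declared dependencies into the chart's charts/ subdirectory.
helm dependencies update "$CHART"   # or: helm dependencies build "$CHART"

# Commit the vendored dependencies (and the lock file, if one was generated)
# so Fleet can deploy the chart without fetching anything at deploy time.
git add "$CHART"
git commit -m "Vendor Helm chart dependencies"
git push
```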
+ +# Troubleshooting +--- +* **Known Issue:** Fleet becomes inoperable after a restore using the [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. + +* **Temporary Workaround:**
    + 1. Find the two service account tokens listed in the fleet-controller and the fleet-controller-bootstrap service accounts. These are under the fleet-system namespace of the local cluster.
    + 2. Remove the non-existent token secret. Doing so allows for only one entry to be present for the service account token secret that actually exists.
    + 3. Delete the fleet-controller Pod in the fleet-system namespace to reschedule.
    + 4. After the service account token issue is resolved, you can force redeployment of the fleet-agents. In the Rancher UI, go to **☰ > Cluster Management**, click on **Clusters** page, then click **Force Update**.
    + 5. If the fleet-agent bundles remain in a `Modified` state after Step 4, update the field `spec.forceSyncGeneration` for the fleet-agent bundle to force re-creation. + +--- +* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup nor restore created by the [backup-restore-operator](../backup-restore-and-disaster-recovery/back-up-rancher.md#1-install-the-rancher-backup-operator). We will update the community once a permanent solution is in place. + +* **Temporary Workaround:**
    +By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify resourceSet to include extra resources you want to backup, refer to docs [here](https://github.com/rancher/backup-restore-operator#user-flow). + +--- + +# Documentation + +The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md new file mode 100644 index 0000000000..63f2a4300d --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md @@ -0,0 +1,164 @@ +--- +title: Multi-cluster Apps +weight: 2 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/multi-cluster-apps/ +--- + +> As of Rancher v2.5, we now recommend using [Fleet](fleet.md) for deploying apps across clusters. + +Typically, most applications are deployed on a single Kubernetes cluster, but there will be times you might want to deploy multiple copies of the same application across different clusters and/or projects. In Rancher, a _multi-cluster application_, is an application deployed using a Helm chart across multiple clusters. With the ability to deploy the same application across multiple clusters, it avoids the repetition of the same action on each cluster, which could introduce user error during application configuration. With multi-cluster applications, you can customize to have the same configuration across all projects/clusters as well as have the ability to change the configuration based on your target project. Since multi-cluster application is considered a single application, it's easy to manage and maintain this application. + +Any Helm charts from a global catalog can be used to deploy and manage multi-cluster applications. + +After creating a multi-cluster application, you can program a global DNS entry to make it easier to access the application. 
+ +- [Prerequisites](#prerequisites) +- [Launching a multi-cluster app](#launching-a-multi-cluster-app) +- [Multi-cluster app configuration options](#multi-cluster-app-configuration-options) + - [Targets](#targets) + - [Upgrades](#upgrades) + - [Roles](#roles) +- [Application configuration options](#application-configuration-options) + - [Using a questions.yml file](#using-a-questions-yml-file) + - [Key value pairs for native Helm charts](#key-value-pairs-for-native-helm-charts) + - [Members](#members) + - [Overriding application configuration options for specific projects](#overriding-application-configuration-options-for-specific-projects) +- [Upgrading multi-cluster app roles and projects](#upgrading-multi-cluster-app-roles-and-projects) +- [Multi-cluster application management](#multi-cluster-application-management) +- [Deleting a multi-cluster application](#deleting-a-multi-cluster-application) + +# Prerequisites + +To create a multi-cluster app in Rancher, you must have at least one of the following permissions: + +- A [project-member role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) in the target cluster(s), which gives you the ability to create, read, update, and delete the workloads +- A [cluster owner role](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) for the clusters(s) that include the target project(s) + +# Launching a Multi-Cluster App + +1. From the **Global** view, choose **Apps** in the navigation bar. Click **Launch**. + +2. Find the application that you want to launch, and then click **View Details**. + +3. (Optional) Review the detailed descriptions, which are derived from the Helm chart's `README`. + +4. Under **Configuration Options** enter a **Name** for the multi-cluster application. By default, this name is also used to create a Kubernetes namespace in each [target project](#targets) for the multi-cluster application. The namespace is named as `-`. + +5. Select a **Template Version**. + +6. Complete the [multi-cluster applications specific configuration options](#multi-cluster-app-configuration-options) as well as the [application configuration options](#application-configuration-options). + +7. Select the **Members** who can [interact with the multi-cluster application](#members). + +8. Add any [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects) that would change the configuration for specific project(s) from the default application configuration answers. + +7. Review the files in the **Preview** section. When you're satisfied, click **Launch**. + +**Result**: Your application is deployed to your chosen namespace. You can view the application status from the project's: + +# Multi-cluster App Configuration Options + +Rancher has divided the configuration option for the multi-cluster application into several sections. + +### Targets + +In the **Targets** section, select the projects that you want the application to be deployed in. The list of projects is based on what projects you have access to. For each project that you select, it will be added to the list, which shows the cluster name and project name that were selected. To remove a target project, click on **-**. 
+ +### Upgrades + +In the **Upgrades** section, select the upgrade strategy to use, when you decide to upgrade your application. + +* **Rolling Update (batched):** When selecting this upgrade strategy, the number of applications upgraded at a time is based on the selected **Batch size** and the **Interval** specifies how many seconds to wait before starting the next batch of updates. + +* **Upgrade all apps simultaneously:** When selecting this upgrade strategy, all applications across all projects will be upgraded at the same time. + +### Roles + +In the **Roles** section, you define the role of the multi-cluster application. Typically, when a user [launches catalog applications](../../../pages-for-subheaders/helm-charts-in-rancher.md), that specific user's permissions are used for creation of all workloads/resources that is required by the app. + +For multi-cluster applications, the application is deployed by a _system user_ and is assigned as the creator of all underlying resources. A _system user_ is used instead of the actual user due to the fact that the actual user could be removed from one of the target projects. If the actual user was removed from one of the projects, then that user would no longer be able to manage the application for the other projects. + +Rancher will let you select from two options for Roles, **Project** and **Cluster**. Rancher will allow creation using any of these roles based on the user's permissions. + +- **Project** - This is the equivalent of a [project member](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [project member](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. While the user might not be explicitly granted the _project member_ role, if the user is an [administrator](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), a [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or a [project owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles), then the user is considered to have the appropriate level of permissions. + +- **Cluster** - This is the equivalent of a [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles). If you select this role, Rancher will check that in all the target projects, the user has minimally the [cluster owner](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) role. While the user might not be explicitly granted the _cluster owner_ role, if the user is an [administrator](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), then the user is considered to have the appropriate level of permissions. 
+ +When launching the application, Rancher will confirm if you have these permissions in the target projects before launching the application. + +> **Note:** There are some applications like _Grafana_ or _Datadog_ that require access to specific cluster-scoped resources. These applications will require the _Cluster_ role. If you find out later that the application requires cluster roles, the multi-cluster application can be upgraded to update the roles. + +# Application Configuration Options + +For each Helm chart, there are a list of desired answers that must be entered in order to successfully deploy the chart. When entering answers, you must format them using the syntax rules found in [Using Helm: The format and limitations of –set](https://helm.sh/docs/intro/using_helm/#the-format-and-limitations-of---set), as Rancher passes them as `--set` flags to Helm. + +> For example, when entering an answer that includes two values separated by a comma (i.e. `abc, bcd`), it is required to wrap the values with double quotes (i.e., ``"abc, bcd"``). + +### Using a questions.yml file + +If the Helm chart that you are deploying contains a `questions.yml` file, Rancher's UI will translate this file to display an easy to use UI to collect the answers for the questions. + +### Key Value Pairs for Native Helm Charts + +For native Helm charts (i.e., charts from the **Helm Stable** or **Helm Incubator** catalogs or a custom Helm chart repository, answers are provided as key value pairs in the **Answers** section. These answers are used to override the default values. + +### Members + +By default, multi-cluster applications can only be managed by the user who created it. In the **Members** section, other users can be added so that they can also help manage or view the multi-cluster application. + +1. Find the user that you want to add by typing in the member's name in the **Member** search box. + +2. Select the **Access Type** for that member. There are three access types for a multi-cluster project, but due to how the permissions of a multi-cluster application are launched, please read carefully to understand what these access types mean. + + - **Owner**: This access type can manage any configuration part of the multi-cluster application including the template version, the [multi-cluster applications specific configuration options](#Multi-cluster App Configuration Options), the [application specific configuration options](#application-configuration-options), the members who can interact with the multi-cluster application and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _owner_ of the multi-cluster application can manage/remove applications in [target projects](#targets) without explicitly having access to these project(s). Only trusted users should be provided with this access type. + + - **Member**: This access type can only modify the template version, the [application specific configuration options](#application-configuration-options) and the [custom application configuration answers](#overriding-application-configuration-options-for-specific-projects). Since a multi-cluster application is created with a different set of permissions from the user, any _member_ of the multi-cluster application can modify the application without explicitly having access to these project(s). 
Only trusted users should be provided with this access type.
+
+   - **Read-only**: This access type cannot modify any configuration option for the multi-cluster application. Users can only view these applications.
+
+   > **Note:** Please ensure only trusted users are given _Owner_ or _Member_ access, as they will automatically be able to manage applications created for this multi-cluster application in target projects they might not have direct access to.
+
+### Overriding Application Configuration Options for Specific Projects
+
+The ability to use the same configuration to deploy the same application across multiple clusters/projects is one of the main benefits of multi-cluster applications. There might be a specific project that requires a slightly different configuration option, but you want to manage that application with all the other matching applications. Instead of creating a brand new application, you can override specific [application specific configuration options](#application-configuration-options) for specific projects.
+
+1. In the **Answer Overrides** section, click **Add Override**.
+
+2. For each override, you can select the following:
+
+   - **Scope**: Select the target project(s) in which you want to override the answer for a configuration option.
+
+   - **Question**: Select which question you want to override.
+
+   - **Answer**: Enter the answer that you want to be used instead.
+
+# Upgrading Multi-Cluster App Roles and Projects
+
+- **Changing Roles on an existing Multi-Cluster app**
+The creator and any users added with the access type "owner" to a multi-cluster app can upgrade its Roles. When adding a new Role, we check if the user has that exact role in all current target projects. These checks allow the same relaxations for global admins, cluster owners and project owners as described in the installation section for the field `Roles`.
+
+- **Adding/Removing target projects**
+1. The creator and any users added with access type "owner" to a multi-cluster app can add or remove its target projects. When adding a new project, we check if the caller of this request has all Roles defined on the multi-cluster app in the new projects they want to add. The role checks are again relaxed for global admins, cluster owners and project owners.
+2. We do not do these membership checks when removing target projects. This is because the caller's permissions could have changed with respect to the target project, or the project could have been deleted, in which case the caller wants to remove it from the targets list.
+
+# Multi-Cluster Application Management
+
+One of the benefits of using a multi-cluster application, as opposed to multiple individual applications of the same type, is the ease of management. Multi-cluster applications can be cloned, upgraded or rolled back.
+
+1. From the **Global** view, choose **Apps** in the navigation bar.
+
+2. Choose the multi-cluster application you want to take one of these actions on and click the **⋮**. Select one of the following options:
+
+   * **Clone**: Creates another multi-cluster application with the same configuration. By using this option, you can easily duplicate a multi-cluster application.
+   * **Upgrade**: Upgrade your multi-cluster application to change some part of the configuration. When performing an upgrade for a multi-cluster application, the [upgrade strategy](#upgrades) can be modified if you have the correct [access type](#members).
+   * **Rollback**: Roll back your application to a specific version. 
If after an upgrade, there are issues for your multi-cluster application for one or more of your [targets](#targets), Rancher has stored up to 10 versions of the multi-cluster application. Rolling back a multi-cluster application reverts the application for **all** target clusters and projects, not just the targets(s) affected by the upgrade issue. + +# Deleting a Multi-Cluster Application + +1. From the **Global** view, choose **Apps** in the navigation bar. + +2. Choose the multi-cluster application you want to delete and click the **⋮ > Delete**. When deleting the multi-cluster application, all applications and namespaces are deleted in all of the target projects. + + > **Note:** The applications in the target projects, that are created for a multi-cluster application, cannot be deleted individually. The applications can only be deleted when the multi-cluster application is deleted. diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md new file mode 100644 index 0000000000..3067f1d706 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md @@ -0,0 +1,184 @@ +--- +title: Setting up Amazon ELB Network Load Balancer +weight: 5 +aliases: + - /rancher/v2.5/en/installation/ha/create-nodes-lb/nlb + - /rancher/v2.5/en/installation/k8s-install/create-nodes-lb/nlb + - /rancher/v2.5/en/installation/options/nlb + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/ +--- + +This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. + +These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. + +This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. + +Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. + +# Setting up the Load Balancer + +Configuring an Amazon NLB is a multistage process: + +1. [Create Target Groups](#1-create-target-groups) +2. [Register Targets](#2-register-targets) +3. [Create Your NLB](#3-create-your-nlb) +4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) + +# Requirements + +These instructions assume you have already created Linux instances in EC2. The load balancer will direct traffic to these nodes. + +# 1. Create Target Groups + +Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +Your first NLB configuration step is to create two target groups. 
Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. + +Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. + +1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. +1. Click **Create target group** to create the first target group, regarding TCP port 443. + +> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. + +| Option | Setting | +|-------------------|-------------------| +| Target Group Name | `rancher-tcp-443` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `443` | +| VPC | Choose your VPC | + +Health check settings: + +| Option | Setting | +|---------------------|-----------------| +| Protocol | TCP | +| Port | `override`,`80` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. + +| Option | Setting | +|-------------------|------------------| +| Target Group Name | `rancher-tcp-80` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `80` | +| VPC | Choose your VPC | + + +Health check settings: + +| Option |Setting | +|---------------------|----------------| +| Protocol | TCP | +| Port | `traffic port` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +# 2. Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +![](/img/ha/nlb/edit-targetgroup-443.png) + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
    + +![](/img/ha/nlb/add-targets-targetgroup-443.png) + +*** +**Screenshot Added targets to target group TCP port 443**
    + +![](/img/ha/nlb/added-targets-targetgroup-443.png) + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +# 3. Create Your NLB + +Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. Then complete each form. + +- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) +- [Step 2: Configure Routing](#step-2-configure-routing) +- [Step 3: Register Targets](#step-3-register-targets) +- [Step 4: Review](#step-4-review) + +### Step 1: Configure Load Balancer + +Set the following fields in the form: + +- **Name:** `rancher` +- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. +- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. +- **Availability Zones:** Select Your **VPC** and **Availability Zones**. + +### Step 2: Configure Routing + +1. From the **Target Group** drop-down, choose **Existing target group**. +1. From the **Name** drop-down, choose `rancher-tcp-443`. +1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +### Step 3: Register Targets + +Since you registered your targets earlier, all you have to do is click **Next: Review**. + +### Step 4: Review + +Look over the load balancer details and click **Create** when you're satisfied. + +After AWS creates the NLB, click **Close**. + +# 4. Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to...** + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. + +# Health Check Paths for NGINX Ingress and Traefik Ingresses + +K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. + +For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. + +- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. +- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. + +To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md new file mode 100644 index 0000000000..07cdc53e22 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md @@ -0,0 +1,70 @@ +--- +title: 'Set up Infrastructure for a High Availability K3s Kubernetes Cluster' +weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/ + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-with-external-db/ +--- + +This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. + +The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. + +For more information about each installation option, refer to [this page.](../../../pages-for-subheaders/installation-and-upgrade.md) + +> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +To install the Rancher management server on a high-availability K3s cluster, we recommend setting up the following infrastructure: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. We recommend MySQL. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. + +For a high-availability K3s installation, you will need to set a [MySQL](https://www.mysql.com/) external database. Rancher has been tested on K3s Kubernetes clusters using MySQL version 5.7 as the datastore. + +When you install Kubernetes using the K3s installation script, you will pass in details for K3s to connect to the database. 
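As a hedged example of what that looks like, the command below would be run on each of the two server nodes; the username, password, hostname, and database name are placeholders for the values you collect when creating the external database:

```bash
# Sketch only: install K3s as a server that uses an external MySQL datastore.
# Substitute your own credentials, endpoint, and database name.
curl -sfL https://get.k3s.io | sh -s - server \
  --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name"
```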
+ +For an example of one way to set up the MySQL database, refer to this [tutorial](../../../how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md) for setting up MySQL on Amazon's RDS service. + +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. 
Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md new file mode 100644 index 0000000000..bb1fc6b520 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md @@ -0,0 +1,60 @@ +--- +title: 'Set up Infrastructure for a High Availability RKE Kubernetes Cluster' +weight: 2 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/ +--- + +This tutorial is intended to help you create a high-availability RKE cluster that can be used to install a Rancher server. + +> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. 
Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on any of the three nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. 
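As a quick, optional sanity check (a hedged example; `rancher.example.com` is a placeholder hostname), you can confirm that the record resolves to your load balancer before moving on:

```bash
# Confirm the DNS record points at the load balancer.
# An A record should return the load balancer IP(s); a CNAME should return
# the load balancer hostname followed by its IP(s).
dig +short rancher.example.com
```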
+ +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md new file mode 100644 index 0000000000..56c53b2f86 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md @@ -0,0 +1,54 @@ +--- +title: 'Set up Infrastructure for a High Availability RKE2 Kubernetes Cluster' +weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/ +--- + +This tutorial is intended to help you provision the underlying infrastructure for a Rancher management server. + +The recommended infrastructure for the Rancher-only Kubernetes cluster differs depending on whether Rancher will be installed on a RKE2 Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. + +> **Note:** These nodes must be in the same region. You may place these servers in separate availability zones (datacenter). + +To install the Rancher management server on a high-availability RKE2 cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. + +### 1. Set up Linux Nodes + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +For an example of one way to set up Linux nodes, refer to this [tutorial](../../../how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on all nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE2 tool will deploy an Nginx Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Nginx Ingress controller to listen for traffic destined for the Rancher hostname. The Nginx Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. 
We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.](../../../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.](../../../how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer.md) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. 
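To show where this hostname (and, for layer-7 load balancers, the external TLS option) is eventually used, here is a hedged sketch of the later Rancher install step; the chart repository, namespace, and `rancher.example.com` hostname are placeholders, and `--set tls=external` applies only if TLS is terminated on the load balancer:

```bash
# Hedged sketch of the later Rancher installation, shown only to illustrate
# where the chosen hostname and external TLS termination are supplied.
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
kubectl create namespace cattle-system

helm install rancher rancher-latest/rancher \
  --namespace cattle-system \
  --set hostname=rancher.example.com \
  --set tls=external   # only when a layer-7 load balancer terminates TLS
```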
+ +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md new file mode 100644 index 0000000000..16b26d4df2 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds.md @@ -0,0 +1,37 @@ +--- +title: Setting up a MySQL Database in Amazon RDS +weight: 4 +aliases: + - /rancher/v2.5/en/installation/options/rds + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/ +--- +This tutorial describes how to set up a MySQL database in Amazon's RDS. + +This database can later be used as an external datastore for a high-availability K3s Kubernetes cluster. + +1. Log into the [Amazon AWS RDS Console](https://console.aws.amazon.com/rds/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. In the left panel, click **Databases.** +1. Click **Create database.** +1. In the **Engine type** section, click **MySQL.** +1. In the **Version** section, choose **MySQL 5.7.22.** +1. In **Settings** section, under **Credentials Settings,** enter a master password for the **admin** master username. Confirm the password. +1. Expand the **Additional configuration** section. In the **Initial database name** field, enter a name. The name can have only letters, numbers, and underscores. This name will be used to connect to the database. +1. Click **Create database.** + +You'll need to capture the following information about the new database so that the K3s Kubernetes cluster can connect to it. + +To see this information in the Amazon RDS console, click **Databases,** and click the name of the database that you created. + +- **Username:** Use the admin username. +- **Password:** Use the admin password. +- **Hostname:** Use the **Endpoint** as the hostname. The endpoint is available in the **Connectivity & security** section. +- **Port:** The port should be 3306 by default. You can confirm it in the **Connectivity & security** section. +- **Database name:** Confirm the name by going to the **Configuration** tab. 
The name is listed under **DB name.** + +This information will be used to connect to the database in the following format: + +``` +mysql://username:password@tcp(hostname:3306)/database-name +``` + +For more information on configuring the datastore for K3s, refer to the [K3s documentation.](https://rancher.com/docs/k3s/latest/en/installation/datastore/) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md new file mode 100644 index 0000000000..0d9d74d874 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2.md @@ -0,0 +1,70 @@ +--- +title: Setting up Nodes in Amazon EC2 +weight: 3 +aliases: + - /rancher/v2.5/en/installation/options/ec2-node + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ +--- + +In this tutorial, you will learn one way to set up Linux nodes for the Rancher management server. These nodes will fulfill the node requirements for [OS, Docker, hardware, and networking.](../../../pages-for-subheaders/installation-requirements.md) + +If the Rancher server will be installed on an RKE Kubernetes cluster, you should provision three instances. + +If the Rancher server will be installed on a K3s Kubernetes cluster, you only need to provision two instances. + +If the Rancher server is installed in a single Docker container, you only need one instance. + +### 1. Optional Preparation + +- **Create IAM role:** To allow Rancher to manipulate AWS resources, such as provisioning new storage or new nodes, you will need to configure Amazon as a cloud provider. There are several things you'll need to do to set up the cloud provider on EC2, but part of this process is setting up an IAM role for the Rancher server nodes. For the full details on setting up the cloud provider, refer to this [page.](../../../pages-for-subheaders/set-up-cloud-providers.md) +- **Create security group:** We also recommend setting up a security group for the Rancher nodes that complies with the [port requirements for Rancher nodes.](../../../pages-for-subheaders/installation-requirements.md#port-requirements) + +### 2. Provision Instances + +1. Log into the [Amazon AWS EC2 Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to take note of the **Region** where your EC2 instances (Linux nodes) are created, because all of the infrastructure for the Rancher management server should be in the same region. +1. In the left panel, click **Instances.** +1. Click **Launch Instance.** +1. In the section called **Step 1: Choose an Amazon Machine Image (AMI),** we will use Ubuntu 18.04 as the Linux OS, using `ami-0d1cd67c26f5fca19 (64-bit x86)`. Go to the Ubuntu AMI and click **Select.** +1. In the **Step 2: Choose an Instance Type** section, select the `t2.medium` type. +1. 
Click **Next: Configure Instance Details.** +1. In the **Number of instances** field, enter the number of instances. A high-availability K3s cluster requires only two instances, while a high-availability RKE cluster requires three instances. +1. Optional: If you created an IAM role for Rancher to manipulate AWS resources, select the new IAM role in the **IAM role** field. +1. Click **Next: Add Storage,** **Next: Add Tags,** and **Next: Configure Security Group.** +1. In **Step 6: Configure Security Group,** select a security group that complies with the [port requirements](../../../pages-for-subheaders/installation-requirements.md#port-requirements) for Rancher nodes. +1. Click **Review and Launch.** +1. Click **Launch.** +1. Choose a new or existing key pair that you will use to connect to your instance later. If you are using an existing key pair, make sure you already have access to the private key. +1. Click **Launch Instances.** + + +**Result:** You have created Rancher nodes that satisfy the requirements for OS, hardware, and networking. + +**Note:** If the nodes are being used for an RKE Kubernetes cluster, install Docker on each node in the next step. For a K3s Kubernetes cluster, the nodes are now ready to install K3s. + +### 3. Install Docker and Create User for RKE Kubernetes Cluster Nodes + +1. From the [AWS EC2 console,](https://console.aws.amazon.com/ec2/) click **Instances** in the left panel. +1. Go to the instance that you want to install Docker on. Select the instance and click **Actions > Connect.** +1. Connect to the instance by following the instructions on the screen that appears. Copy the Public DNS of the instance. An example command to SSH into the instance is as follows: +``` +sudo ssh -i [path-to-private-key] ubuntu@[public-DNS-of-instance] +``` +1. Run the following command on the instance to install Docker with one of Rancher's installation scripts: +``` +curl https://releases.rancher.com/install-docker/18.09.sh | sh +``` +1. When you are connected to the instance, run the following command on the instance to create a user: +``` +sudo usermod -aG docker ubuntu +``` +1. Repeat these steps so that Docker is installed on each node that will eventually run the Rancher management server. + +> To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher’s Docker installation scripts. + +**Result:** You have set up Rancher server nodes that fulfill all the node requirements for OS, Docker, hardware and networking. + +### Next Steps for RKE Kubernetes Cluster Nodes + +If you are going to install an RKE cluster on the new nodes, take note of the **IPv4 Public IP** and **Private IP** of each node. This information can be found on the **Description** tab for each node after it is created. The public and private IP will be used to populate the `address` and `internal_address` of each node in the RKE cluster configuration file, `rancher-cluster.yml`. + +RKE will also need access to the private key to connect to each node. Therefore, you might want to take note of the path to your private keys to connect to the nodes, which can also be included in the `rancher-cluster.yml` under the `ssh_key_path` directive for each node. 
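As an illustration of how these values fit together, a single entry in the `nodes` list of `rancher-cluster.yml` might look like the sketch below; the IP addresses and key path are placeholders, and the full configuration file is covered in the RKE cluster setup guide:

```yaml
nodes:
  - address: 203.0.113.10          # IPv4 Public IP of the EC2 instance
    internal_address: 172.31.5.20  # Private IP, used for intra-cluster traffic
    user: ubuntu                   # User that can run Docker commands
    role: [controlplane, worker, etcd]
    ssh_key_path: ~/.ssh/my-ec2-key.pem  # Private key for the chosen key pair
```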
diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md new file mode 100644 index 0000000000..05a2f1553f --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs.md @@ -0,0 +1,27 @@ +--- +title: About High-availability Installations +weight: 1 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/how-ha-works/ +--- + +We recommend using Helm, a Kubernetes package manager, to install Rancher on a dedicated Kubernetes cluster. This is called a high-availability Kubernetes installation because increased availability is achieved by running Rancher on multiple nodes. + +In a standard installation, Kubernetes is first installed on three nodes that are hosted in an infrastructure provider such as Amazon's EC2 or Google Compute Engine. + +Then Helm is used to install Rancher on top of the Kubernetes cluster. Helm uses Rancher's Helm chart to install a replica of Rancher on each of the three nodes in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster, in order to increase Rancher's availability. + +The Rancher server data is stored on etcd. This etcd database also runs on all three nodes, and requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can fail, requiring the cluster to be restored from backup. + +For information on how Rancher works, regardless of the installation method, refer to the [architecture section.](../../../pages-for-subheaders/rancher-manager-architecture.md) + +### Recommended Architecture + +- DNS for Rancher should resolve to a layer 4 load balancer +- The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +- The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +- The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
    +![High-availability Kubernetes Installation of Rancher](/img/ha/rancher2ha.svg) +Kubernetes Rancher install with Layer 4 load balancer (TCP), depicting SSL termination at ingress controllers diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md new file mode 100644 index 0000000000..ef6e6daa84 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md @@ -0,0 +1,120 @@ +--- +title: Setting up a High-availability K3s Kubernetes Cluster for Rancher +shortTitle: Set up K3s for Rancher +weight: 2 +--- + +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) + +For systems without direct internet access, refer to the air gap installation instructions. + +> **Single-node Installation Tip:** +> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. +> +> To set up a single-node K3s cluster, run the Rancher server installation command on just one node instead of two nodes. +> +> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +# Prerequisites + +These instructions assume you have set up two nodes, a load balancer, a DNS record, and an external MySQL database as described in [this section.](../infrastructure-setup/ha-k3s-kubernetes-cluster.md) + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. +# Installing Kubernetes + +### 1. Install Kubernetes and Set up the K3s Server + +When running the command to start the K3s Kubernetes API server, you will pass in an option to use the external datastore that you set up earlier. + +1. Connect to one of the Linux nodes that you have prepared to run the Rancher server. +1. On the Linux node, run this command to start the K3s server and connect it to the external datastore: + ``` + curl -sfL https://get.k3s.io | sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + To specify the K3s version, use the INSTALL_K3S_VERSION environment variable: + ```sh + curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z sh -s - server \ + --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/database-name" + ``` + Note: The datastore endpoint can also be passed in using the environment variable `$K3S_DATASTORE_ENDPOINT`. + +1. Repeat the same command on your second K3s server node. + +### 2. 
Confirm that K3s is Running + +To confirm that K3s has been set up successfully, run the following command on either of the K3s server nodes: +``` +sudo k3s kubectl get nodes +``` + +Then you should see two nodes with the master role: +``` +ubuntu@ip-172-31-60-194:~$ sudo k3s kubectl get nodes +NAME STATUS ROLES AGE VERSION +ip-172-31-60-194 Ready master 44m v1.17.2+k3s1 +ip-172-31-63-88 Ready master 6m8s v1.17.2+k3s1 +``` + +Then test the health of the cluster pods: +``` +sudo k3s kubectl get pods --all-namespaces +``` + +**Result:** You have successfully set up a K3s Kubernetes cluster. + +### 3. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +```yml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation](https://rancher.com/docs/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### 4. Check the Health of Your Cluster Pods + +Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. + +Check that all the required pods and containers are healthy are ready to continue: + +``` +ubuntu@ip-172-31-60-194:~$ sudo kubectl get pods --all-namespaces +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system metrics-server-6d684c7b5-bw59k 1/1 Running 0 8d +kube-system local-path-provisioner-58fb86bdfd-fmkvd 1/1 Running 0 8d +kube-system coredns-d798c9dd-ljjnf 1/1 Running 0 8d +``` + +**Result:** You have confirmed that you can access the cluster with `kubectl` and the K3s cluster is running successfully. Now the Rancher management server can be installed on the cluster. 
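Before moving on to installing Rancher, you can optionally confirm that `kubectl` is reaching the cluster through the load balancer DNS rather than localhost. A minimal check:

```
# Should report the Kubernetes control plane at the load balancer DNS on port 6443
kubectl cluster-info
```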
diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md new file mode 100644 index 0000000000..7f46595de6 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md @@ -0,0 +1,170 @@ +--- +title: Setting up a High-availability RKE Kubernetes Cluster +shortTitle: Set up RKE Kubernetes +weight: 3 +aliases: + - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-rke/ +--- + + +This section describes how to install a Kubernetes cluster. This cluster should be dedicated to run only the Rancher server. + +> As of Rancher v2.5, Rancher can run on any Kubernetes cluster, included hosted Kubernetes solutions such as Amazon EKS. The below instructions represent only one possible way to install Kubernetes. + +For systems without direct internet access, refer to [Air Gap: Kubernetes install.](../../../pages-for-subheaders/air-gapped-helm-cli-install.md) + +> **Single-node Installation Tip:** +> In a single-node Kubernetes cluster, the Rancher server does not have high availability, which is important for running Rancher in production. However, installing Rancher on a single-node cluster can be useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. +> +> To set up a single-node RKE cluster, configure only one node in the `cluster.yml` . The single node should have all three roles: `etcd`, `controlplane`, and `worker`. +> +> In both single-node setups, Rancher can be installed with Helm on the Kubernetes cluster in the same way that it would be installed on any other cluster. + +# Installing Kubernetes + +### Required CLI Tools + +Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. + +Also install [RKE,](https://rancher.com/docs/rke/latest/en/installation/) the Rancher Kubernetes Engine, a Kubernetes distribution and command-line tool. + +### 1. Create the cluster configuration file + +In this section, you will create a Kubernetes cluster configuration file called `rancher-cluster.yml`. In a later step, when you set up the cluster with an RKE command, it will use this file to install Kubernetes on your nodes. + +Using the sample below as a guide, create the `rancher-cluster.yml` file. Replace the IP addresses in the `nodes` list with the IP address or DNS names of the 3 nodes you created. + +If your node has public and internal addresses, it is recommended to set the `internal_address:` so Kubernetes will use it for intra-cluster communication. Some services like AWS EC2 require setting the `internal_address:` if you want to use self-referencing security groups or firewalls. + +RKE will need to connect to each node over SSH, and it will look for a private key in the default location of `~/.ssh/id_rsa`. If your private key for a certain node is in a different location than the default, you will also need to configure the `ssh_key_path` option for that node. 
+ +```yaml +nodes: + - address: 165.227.114.63 + internal_address: 172.16.22.12 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.116.167 + internal_address: 172.16.32.37 + user: ubuntu + role: [controlplane, worker, etcd] + - address: 165.227.127.226 + internal_address: 172.16.42.73 + user: ubuntu + role: [controlplane, worker, etcd] + +services: + etcd: + snapshot: true + creation: 6h + retention: 24h + +# Required for external TLS termination with +# ingress-nginx v0.22+ +ingress: + provider: nginx + options: + use-forwarded-headers: "true" +``` + +
**Common RKE Nodes Options**
    + +| Option | Required | Description | +| ------------------ | -------- | -------------------------------------------------------------------------------------- | +| `address` | yes | The public DNS or IP address | +| `user` | yes | A user that can run docker commands | +| `role` | yes | List of Kubernetes roles assigned to the node | +| `internal_address` | no | The private DNS or IP address for internal cluster traffic | +| `ssh_key_path` | no | Path to SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`) | + +> **Advanced Configurations:** RKE has many configuration options for customizing the install to suit your specific environment. +> +> Please see the [RKE Documentation](https://rancher.com/docs/rke/latest/en/config-options/) for the full list of options and capabilities. +> +> For tuning your etcd cluster for larger Rancher installations, see the [etcd settings guide](../../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md). + +### 2. Run RKE + +``` +rke up --config ./rancher-cluster.yml +``` + +When finished, it should end with the line: `Finished building Kubernetes cluster successfully`. + +### 3. Test Your Cluster + +This section describes how to set up your workspace so that you can interact with this cluster using the `kubectl` command-line tool. + +Assuming you have installed `kubectl`, you need to place the `kubeconfig` file in a location where `kubectl` can reach it. The `kubeconfig` file contains the credentials necessary to access your cluster with `kubectl`. + +When you ran `rke up`, RKE should have created a `kubeconfig` file named `kube_config_cluster.yml`. This file has the credentials for `kubectl` and `helm`. + +> **Note:** If you have used a different file name from `rancher-cluster.yml`, then the kube config file will be named `kube_config_.yml`. + +Move this file to `$HOME/.kube/config`, or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_cluster.yml`: + +``` +export KUBECONFIG=$(pwd)/kube_config_cluster.yml +``` + +Test your connectivity with `kubectl` and see if all your nodes are in `Ready` state: + +``` +kubectl get nodes + +NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.13.5 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.13.5 +``` + +### 4. Check the Health of Your Cluster Pods + +Check that all the required pods and containers are healthy are ready to continue. + +- Pods are in `Running` or `Completed` state. +- `READY` column shows all the containers are running (i.e. `3/3`) for pods with `STATUS` `Running` +- Pods with `STATUS` `Completed` are run-once Jobs. For these pods `READY` should be `0/1`. 
+ +``` +kubectl get pods --all-namespaces + +NAMESPACE NAME READY STATUS RESTARTS AGE +ingress-nginx nginx-ingress-controller-tnsn4 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-tw2ht 1/1 Running 0 30s +ingress-nginx nginx-ingress-controller-v874b 1/1 Running 0 30s +kube-system canal-jp4hz 3/3 Running 0 30s +kube-system canal-z2hg8 3/3 Running 0 30s +kube-system canal-z6kpw 3/3 Running 0 30s +kube-system kube-dns-7588d5b5f5-sf4vh 3/3 Running 0 30s +kube-system kube-dns-autoscaler-5db9bbb766-jz2k6 1/1 Running 0 30s +kube-system metrics-server-97bc649d5-4rl2q 1/1 Running 0 30s +kube-system rke-ingress-controller-deploy-job-bhzgm 0/1 Completed 0 30s +kube-system rke-kubedns-addon-deploy-job-gl7t4 0/1 Completed 0 30s +kube-system rke-metrics-addon-deploy-job-7ljkc 0/1 Completed 0 30s +kube-system rke-network-plugin-deploy-job-6pbgj 0/1 Completed 0 30s +``` + +This confirms that you have successfully installed a Kubernetes cluster that the Rancher server will run on. + +### 5. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file](https://rancher.com/docs/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file](https://rancher.com/docs/rke/latest/en/installation/#kubernetes-cluster-state), this file contains credentials for full access to the cluster.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting](../../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) page. + + +### [Next: Install Rancher](../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md new file mode 100644 index 0000000000..c35f995f5b --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md @@ -0,0 +1,185 @@ +--- +title: Setting up a High-availability RKE2 Kubernetes Cluster for Rancher +shortTitle: Set up RKE2 for Rancher +weight: 2 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2 + - /rancher/v2.x/en/installation/resources/k8s-tutorials/ha-RKE2/ +--- +_Tested on v2.5.6_ + +This section describes how to install a Kubernetes cluster according to the [best practices for the Rancher server environment.](../../../reference-guides/rancher-manager-architecture/architecture-recommendations.md#environment-for-kubernetes-installations) + +# Prerequisites + +These instructions assume you have set up three nodes, a load balancer, and a DNS record, as described in [this section.](../infrastructure-setup/ha-rke2-kubernetes-cluster.md) + +Note that in order for RKE2 to work correctly with the load balancer, you need to set up two listeners: one for the supervisor on port 9345, and one for the Kubernetes API on port 6443. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the RKE2 version, use the INSTALL_RKE2_VERSION environment variable when running the RKE2 installation script. +# Installing Kubernetes + +### 1. Install Kubernetes and Set up the RKE2 Server + +RKE2 server runs with embedded etcd so you will not need to set up an external datastore to run in HA mode. + +On the first node, you should set up the configuration file with your own pre-shared secret as the token. The token argument can be set on startup. + +If you do not specify a pre-shared secret, RKE2 will generate one and place it at /var/lib/rancher/rke2/server/node-token. + +To avoid certificate errors with the fixed registration address, you should launch the server with the tls-san parameter set. This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access via both the IP and the hostname. 
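If you rely on the automatically generated token rather than your own pre-shared secret, you can read it back from the first server node after it has started (a later step in this section) and use that value in the `token` field on the other nodes. For example:

```
sudo cat /var/lib/rancher/rke2/server/node-token
```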
+ +First, you must create the directory where the RKE2 config file is going to be placed: + +``` +mkdir -p /etc/rancher/rke2/ +``` + +Next, create the RKE2 config file at `/etc/rancher/rke2/config.yaml` using the following example: + +``` +token: my-shared-secret +tls-san: + - my-kubernetes-domain.com + - another-kubernetes-domain.com +``` +After that, you need to run the install command and enable and start rke2: + +``` +curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.20 sh - +systemctl enable rke2-server.service +systemctl start rke2-server.service +``` +1. To join the rest of the nodes, you need to configure each additional node with the same shared token or the one generated automatically. Here is an example of the configuration file: + + token: my-shared-secret + server: https://:9345 + tls-san: + - my-kubernetes-domain.com + - another-kubernetes-domain.com +After that, you need to run the installer and enable, then start, rke2: + + curl -sfL https://get.rke2.io | sh - + systemctl enable rke2-server.service + systemctl start rke2-server.service + + +1. Repeat the same command on your third RKE2 server node. + +### 2. Confirm that RKE2 is Running + +Once you've launched the rke2 server process on all server nodes, ensure that the cluster has come up properly with + +``` +/var/lib/rancher/rke2/bin/kubectl \ + --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes +You should see your server nodes in the Ready state. +``` + +Then test the health of the cluster pods: +``` +/var/lib/rancher/rke2/bin/kubectl \ + --kubeconfig /etc/rancher/rke2/rke2.yaml get pods --all-namespaces +``` + +**Result:** You have successfully set up a RKE2 Kubernetes cluster. + +### 3. Save and Start Using the kubeconfig File + +When you installed RKE2 on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/rke2/rke2.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/rke2/rke2.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `rke2.yaml`: + +```yml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your RKE2 cluster. 
If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/rke2.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [RKE2 documentation](https://docs.rke2.io/cluster_access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### 4. Check the Health of Your Cluster Pods + +Now that you have set up the `kubeconfig` file, you can use `kubectl` to access the cluster from your local machine. + +Check that all the required pods and containers are healthy are ready to continue: + +``` + /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get pods -A +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system etcd-ip-172-31-18-145 1/1 Running 0 4m37s +kube-system etcd-ip-172-31-25-73 1/1 Running 0 20m +kube-system etcd-ip-172-31-31-210 1/1 Running 0 9m12s +kube-system helm-install-rke2-canal-th9k9 0/1 Completed 0 21m +kube-system helm-install-rke2-coredns-6njr6 0/1 Completed 0 21m +kube-system helm-install-rke2-ingress-nginx-vztsd 0/1 Completed 0 21m +kube-system helm-install-rke2-kube-proxy-6std5 0/1 Completed 0 21m +kube-system helm-install-rke2-metrics-server-9sl7m 0/1 Completed 0 21m +kube-system kube-apiserver-ip-172-31-18-145 1/1 Running 0 4m22s +kube-system kube-apiserver-ip-172-31-25-73 1/1 Running 0 20m +kube-system kube-apiserver-ip-172-31-31-210 1/1 Running 0 9m8s +kube-system kube-controller-manager-ip-172-31-18-145 1/1 Running 0 4m8s +kube-system kube-controller-manager-ip-172-31-25-73 1/1 Running 0 21m +kube-system kube-controller-manager-ip-172-31-31-210 1/1 Running 0 8m55s +kube-system kube-proxy-57twm 1/1 Running 0 10m +kube-system kube-proxy-f7pc6 1/1 Running 0 5m24s +kube-system kube-proxy-rj4t5 1/1 Running 0 21m +kube-system kube-scheduler-ip-172-31-18-145 1/1 Running 0 4m15s +kube-system kube-scheduler-ip-172-31-25-73 1/1 Running 0 21m +kube-system kube-scheduler-ip-172-31-31-210 1/1 Running 0 8m48s +kube-system rke2-canal-4x972 2/2 Running 0 10m +kube-system rke2-canal-flh8m 2/2 Running 0 5m24s +kube-system rke2-canal-zfhkr 2/2 Running 0 21m +kube-system rke2-coredns-rke2-coredns-6cd96645d6-cmstq 1/1 Running 0 21m +kube-system rke2-ingress-nginx-controller-54946dd48f-6mp76 1/1 Running 0 20m +kube-system rke2-ingress-nginx-default-backend-5795954f8-p92xx 1/1 Running 0 20m +kube-system rke2-metrics-server-5f9b5757dc-k5sgh 1/1 Running 0 20m +``` + +**Result:** You have confirmed that you can access the cluster with `kubectl` and the RKE2 cluster is running successfully. Now the Rancher management server can be installed on the cluster. + +### 5. Configure nginx to be a daemonset + +Currently, RKE2 deploys nginx-ingress as a deployment, and that can impact the Rancher deployment so that you cannot use all servers to proxy requests to the Rancher pods. 
+ +To rectify that, place the following file in /var/lib/rancher/rke2/server/manifests on any of the server nodes: + +```yaml +apiVersion: helm.cattle.io/v1 +kind: HelmChartConfig +metadata: + name: rke2-ingress-nginx + namespace: kube-system +spec: + valuesContent: |- + controller: + kind: DaemonSet + daemonset: + useHostPort: true +``` diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md new file mode 100644 index 0000000000..af64ebac09 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture.md @@ -0,0 +1,76 @@ +--- +title: Recommended Cluster Architecture +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/recommended-architecture/ +--- + +There are three roles that can be assigned to nodes: `etcd`, `controlplane` and `worker`. + +# Separating Worker Nodes from Nodes with Other Roles + +When designing your cluster(s), you have two options: + +* Use dedicated nodes for each role. This ensures resource availability for the components needed for the specified role. It also strictly isolates network traffic between each of the roles according to the [port requirements](../node-requirements-for-rancher-managed-clusters.md#networking-requirements). +* Assign the `etcd` and `controlplane` roles to the same nodes. These nodes must meet the hardware requirements for both roles. + +In either case, the `worker` role should not be used or added to nodes with the `etcd` or `controlplane` role. + +Therefore, each node should have one of the following role configurations: + + * `etcd` + * `controlplane` + * Both `etcd` and `controlplane` + * `worker` + +# Recommended Number of Nodes with Each Role + +The cluster should have: + +- At least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +- At least two nodes with the role `controlplane` for master component high availability. +- At least two nodes with the role `worker` for workload rescheduling upon node failure. + +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](roles-for-nodes-in-kubernetes.md) + + +### Number of Controlplane Nodes + +Adding more than one node with the `controlplane` role makes every master component highly available. + +### Number of etcd Nodes + +The number of nodes that you can lose at once while maintaining cluster availability is determined by the number of nodes assigned the `etcd` role. For a cluster with n members, the minimum is (n/2)+1. Therefore, we recommend creating an `etcd` node in 3 different availability zones within a region to survive the loss of one availability zone. If you use only two zones, you can only survive the loss of the zone where you don't lose the majority of nodes. 
+ +| Nodes with `etcd` role | Majority | Failure Tolerance | +|--------------|------------|-------------------| +| 1 | 1 | 0 | +| 2 | 2 | 0 | +| 3 | 2 | **1** | +| 4 | 3 | 1 | +| 5 | 3 | **2** | +| 6 | 4 | 2 | +| 7 | 4 | **3** | +| 8 | 5 | 3 | +| 9 | 5 | **4** | + +References: + +* [Official etcd documentation on optimal etcd cluster size](https://etcd.io/docs/v3.4.0/faq/#what-is-failure-tolerance) +* [Official Kubernetes documentation on operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) + +### Number of Worker Nodes + +Adding more than one node with the `worker` role will make sure your workloads can be rescheduled if a node fails. + +### Why Production Requirements are Different for the Rancher Cluster and the Clusters Running Your Applications + +You may have noticed that our [Kubernetes Install](../../../../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) instructions do not meet our definition of a production-ready cluster, as there are no dedicated nodes for the `worker` role. However, for your Rancher installation, this three node cluster is valid, because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. + +# References + +* [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md new file mode 100644 index 0000000000..b689c85901 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md @@ -0,0 +1,45 @@ +--- +title: Roles for Nodes in Kubernetes +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/nodes-and-roles/ +--- + +This section describes the roles for etcd nodes, controlplane nodes, and worker nodes in Kubernetes, and how the roles work together in a cluster. + +This diagram is applicable to Kubernetes clusters [launched with Rancher using RKE.](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md). + +![Cluster diagram](/img/clusterdiagram.svg)
    +Lines show the traffic flow between components. Colors are used purely for visual aid + +# etcd + +Nodes with the `etcd` role run etcd, which is a consistent and highly available key value store used as Kubernetes’ backing store for all cluster data. etcd replicates the data to each node. + +>**Note:** Nodes with the `etcd` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +# controlplane + +Nodes with the `controlplane` role run the Kubernetes master components (excluding `etcd`, as it's a separate role). See [Kubernetes: Master Components](https://kubernetes.io/docs/concepts/overview/components/#master-components) for a detailed list of components. + +>**Note:** Nodes with the `controlplane` role are shown as `Unschedulable` in the UI, meaning no pods will be scheduled to these nodes by default. + +### kube-apiserver + +The Kubernetes API server (`kube-apiserver`) scales horizontally. Each node with the role `controlplane` will be added to the NGINX proxy on the nodes with components that need to access the Kubernetes API server. This means that if a node becomes unreachable, the local NGINX proxy on the node will forward the request to another Kubernetes API server in the list. + +### kube-controller-manager + +The Kubernetes controller manager uses leader election using an endpoint in Kubernetes. One instance of the `kube-controller-manager` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +### kube-scheduler + +The Kubernetes scheduler uses leader election using an endpoint in Kubernetes. One instance of the `kube-scheduler` will create an entry in the Kubernetes endpoints and updates that entry in a configured interval. Other instances will see an active leader and wait for that entry to expire (for example, when a node is unresponsive). + +# worker + +Nodes with the `worker` role run the Kubernetes node components. See [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) for a detailed list of components. 
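As an illustrative check of how these roles appear on a running cluster (assuming you have `kubectl` access to it), the `ROLES` column of the node listing reflects the roles assigned to each node:

```
# The ROLES column shows the etcd, controlplane, and worker roles per node
kubectl get nodes
```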
+ +# References + +* [Kubernetes: Node Components](https://kubernetes.io/docs/concepts/overview/components/#node-components) \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md new file mode 100644 index 0000000000..fefc518065 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents.md @@ -0,0 +1,61 @@ +--- +title: Rancher Agents +weight: 2400 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/rancher-agents/ +--- + +There are two different agent resources deployed on Rancher managed clusters: + +- [cattle-cluster-agent](#cattle-cluster-agent) +- [cattle-node-agent](#cattle-node-agent) + +For a conceptual overview of how the Rancher server provisions clusters and communicates with them, refer to the [architecture](../../../../pages-for-subheaders/rancher-manager-architecture.md) + +### cattle-cluster-agent + +The `cattle-cluster-agent` is used to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. The `cattle-cluster-agent` is deployed using a Deployment resource. + +### cattle-node-agent + +The `cattle-node-agent` is used to interact with nodes in a [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) cluster when performing cluster operations. Examples of cluster operations are upgrading Kubernetes version and creating/restoring etcd snapshots. The `cattle-node-agent` is deployed using a DaemonSet resource to make sure it runs on every node. The `cattle-node-agent` is used as fallback option to connect to the Kubernetes API of [Rancher Launched Kubernetes](../../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters when `cattle-cluster-agent` is unavailable. + +### Scheduling rules + +_Applies to v2.5.4 and higher_ + +Starting with Rancher v2.5.4, the tolerations for the `cattle-cluster-agent` changed from `operator:Exists` (allowing all taints) to a fixed set of tolerations (listed below, if no controlplane nodes are visible in the cluster) or dynamically added tolerations based on taints applied to the controlplane nodes. This change was made to allow [Taint based Evictions](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions) to work properly for `cattle-cluster-agent`. The default tolerations are described below. If controlplane nodes are present the cluster, the tolerations will be replaced with tolerations matching the taints on the controlplane nodes. + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | **Note:** These are the default tolerations, and will be replaced by tolerations matching taints applied to controlplane nodes.

`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/controlplane`<br/>`value:true`<br/><br/>`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/control-plane`<br/>`operator:Exists`<br/><br/>`effect:NoSchedule`<br/>`key:node-role.kubernetes.io/master`<br/>
    `operator:Exists` | +| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | + +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. When there are no controlplane nodes visible in the cluster (this is usually the case when using [Clusters from Hosted Kubernetes Providers](../../../../pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md)), you can add the label `cattle.io/cluster-agent=true` on a node to prefer scheduling the `cattle-cluster-agent` pod to that node. + +See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. + +The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: + +| Weight | Expression | +| ------ | ------------------------------------------------ | +| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | +| 100 | `node-role.kubernetes.io/control-plane:In:"true"` | +| 100 | `node-role.kubernetes.io/master:In:"true"` | +| 1 | `cattle.io/cluster-agent:In:"true"` | + +_Applies to v2.3.0 up to v2.5.3_ + +| Component | nodeAffinity nodeSelectorTerms | nodeSelector | Tolerations | +| ---------------------- | ------------------------------------------ | ------------ | ------------------------------------------------------------------------------ | +| `cattle-cluster-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | +| `cattle-node-agent` | `beta.kubernetes.io/os:NotIn:windows` | none | `operator:Exists` | + +The `cattle-cluster-agent` Deployment has preferred scheduling rules using `preferredDuringSchedulingIgnoredDuringExecution`, favoring to be scheduled on nodes with the `controlplane` node. See [Kubernetes: Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) to find more information about scheduling rules. + +The `preferredDuringSchedulingIgnoredDuringExecution` configuration is shown in the table below: + +| Weight | Expression | +| ------ | ------------------------------------------------ | +| 100 | `node-role.kubernetes.io/controlplane:In:"true"` | +| 1 | `node-role.kubernetes.io/etcd:In:"true"` | diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md new file mode 100644 index 0000000000..074c965e33 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md @@ -0,0 +1,152 @@ +--- +title: Setting up the Amazon Cloud Provider +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/ +--- + +When using the `Amazon` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an AWS Elastic Load Balancer (ELB) when choosing `Layer-4 Load Balancer` in **Port Mapping** or when launching a `Service` with `type: LoadBalancer`. 
+- **Persistent Volumes**: Allows you to use AWS Elastic Block Stores (EBS) for persistent volumes. + +See [cloud-provider-aws README](https://kubernetes.github.io/cloud-provider-aws/) for all information regarding the Amazon cloud provider. + +To set up the Amazon cloud provider, + +1. [Create an IAM role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) +2. [Configure the ClusterID](#2-configure-the-clusterid) + +### 1. Create an IAM Role and attach to the instances + +All nodes added to the cluster must be able to interact with EC2 so that they can create and remove resources. You can enable this interaction by using an IAM role attached to the instance. See [Amazon documentation: Creating an IAM Role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#create-iam-role) how to create an IAM role. There are two example policies: + +* The first policy is for the nodes with the `controlplane` role. These nodes have to be able to create/remove EC2 resources. The following IAM policy is an example, please remove any unneeded permissions for your use case. +* The second policy is for the nodes with the `etcd` or `worker` role. These nodes only have to be able to retrieve information from EC2. + +While creating an [Amazon EC2 cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), you must fill in the **IAM Instance Profile Name** (not ARN) of the created IAM role when creating the **Node Template**. + +While creating a [Custom cluster](../../../../../../pages-for-subheaders/use-existing-nodes.md), you must manually attach the IAM role to the instance(s). + +IAM Policy for nodes with the `controlplane` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + 
"elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } +] +} +``` + +IAM policy for nodes with the `etcd` or `worker` role: + +```json +{ +"Version": "2012-10-17", +"Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } +] +} +``` + +### 2. Configure the ClusterID + +The following resources need to tagged with a `ClusterID`: + +- **Nodes**: All hosts added in Rancher. +- **Subnet**: The subnet used for your cluster. +- **Security Group**: The security group used for your cluster. + +>**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating an Elastic Load Balancer (ELB). + +When you create an [Amazon EC2 Cluster](../../use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md), the `ClusterID` is automatically configured for the created nodes. Other resources still need to be tagged manually. + +Use the following tag: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `owned` + +`CLUSTERID` can be any string you like, as long as it is equal across all tags set. + +Setting the value of the tag to `owned` tells the cluster that all resources with this tag are owned and managed by this cluster. If you share resources between clusters, you can change the tag to: + +**Key** = `kubernetes.io/cluster/CLUSTERID` **Value** = `shared`. + +### Using Amazon Elastic Container Registry (ECR) + +The kubelet component has the ability to automatically obtain ECR credentials, when the IAM profile mentioned in [Create an IAM Role and attach to the instances](#1-create-an-iam-role-and-attach-to-the-instances) is attached to the instance(s). When using a Kubernetes version older than v1.15.0, the Amazon cloud provider needs be configured in the cluster. Starting with Kubernetes version v1.15.0, the kubelet can obtain ECR credentials without having the Amazon cloud provider configured in the cluster. 
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md new file mode 100644 index 0000000000..66e1ae08b1 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md @@ -0,0 +1,72 @@ +--- +title: Setting up the Azure Cloud Provider +weight: 2 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/azure/ +--- + +When using the `Azure` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. + +- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. + +- **Network Storage:** Support Azure Files via CIFS mounts. + +The following account types are not supported for Azure Subscriptions: + +- Single tenant accounts (i.e. accounts with no subscriptions). +- Multi-subscription accounts. + +To set up the Azure cloud provider following credentials need to be configured: + +1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) +2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) +3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) +4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) + +### 1. Set up the Azure Tenant ID + +Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). + +If you want to use the Azure CLI, you can run the command `az account show` to get the information. + +### 2. Set up the Azure Client ID and Azure Client Secret + +Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). + +1. Select **Azure Active Directory**. +1. Select **App registrations**. +1. Select **New application registration**. +1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. +1. Select **Create**. + +In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. + +The next step is to generate the **Azure Client Secret**: + +1. Open your created App registration. +1. In the **Settings** view, open **Keys**. +1. Enter a **Key description**, select an expiration time and select **Save**. +1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. + +### 3. Configure App Registration Permissions + +The last thing you will need to do, is assign the appropriate permissions to your App registration. + +1. Go to **More services**, search for **Subscriptions** and open it. +1. 
Open **Access control (IAM)**. +1. Select **Add**. +1. For **Role**, select `Contributor`. +1. For **Select**, select your created App registration name. +1. Select **Save**. + +### 4. Set up Azure Network Security Group Name + +A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. + +If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. + +You should already assign custom hosts to this Network Security Group during provisioning. + +Only hosts expected to be load balancer back ends need to be in this group. diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md new file mode 100644 index 0000000000..82c6e99a25 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md @@ -0,0 +1,26 @@ +--- +title: How to Configure In-tree vSphere Cloud Provider +shortTitle: In-tree Cloud Provider +weight: 10 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/ +--- + +To set up the in-tree vSphere cloud provider, follow these steps while creating the vSphere cluster in Rancher: + +1. Set **Cloud Provider** option to `Custom` or `Custom (In-Tree)`. + + ![](/img/vsphere-node-driver-cloudprovider.png) + +1. Click on **Edit as YAML** +1. Insert the following structure to the pre-populated cluster YAML. This structure must be placed under `rancher_kubernetes_engine_config`. Note that the `name` *must* be set to `vsphere`. + + ```yaml + rancher_kubernetes_engine_config: + cloud_provider: + name: vsphere + vsphereCloudProvider: + [Insert provider configuration] + ``` + +Rancher uses RKE (the Rancher Kubernetes Engine) to provision Kubernetes clusters. Refer to the [vSphere configuration reference in the RKE documentation](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/vsphere/config-reference/) for details about the properties of the `vsphereCloudProvider` directive. 
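+
+As a rough sketch of what a filled-in configuration could look like, the block below uses placeholder values (`vcenter.example.com` and the credentials, datacenter, folder, and datastore names are all hypothetical); the RKE reference linked above remains the authoritative description of the available properties:
+
+```yaml
+rancher_kubernetes_engine_config:
+  cloud_provider:
+    name: vsphere
+    vsphereCloudProvider:
+      global:
+        insecure-flag: false
+      virtual_center:
+        # One entry per vCenter server, keyed by its address.
+        vcenter.example.com:
+          user: placeholder-user
+          password: placeholder-password
+          port: 443
+          datacenters: /placeholder-datacenter
+      workspace:
+        server: vcenter.example.com
+        datacenter: /placeholder-datacenter
+        folder: placeholder-vm-folder
+        default-datastore: placeholder-datastore
+```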
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md new file mode 100644 index 0000000000..778d225382 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md @@ -0,0 +1,66 @@ +--- +title: Creating a DigitalOcean Cluster +shortTitle: DigitalOcean +weight: 2215 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-digital-ocean/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/ +--- +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. + +First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. + +Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. 
Enter a name for the cloud credential.
+1. In the **Cloud Credential Type** field, select **DigitalOcean**.
+1. Enter your DigitalOcean credentials.
+1. Click **Create.**
+
+**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters.
+
+### 2. Create a node template with your cloud credentials
+
+Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters.
+
+1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.**
+1. Click **Add Template.**
+1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md)
+
+### 3. Create a cluster with node pools using the node template
+
+Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present.
+
+1. From the **Clusters** page, click **Add Cluster**.
+1. Choose **DigitalOcean**.
+1. Enter a **Cluster Name**.
+1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user.
+1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md)
+1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md)
+1. Review your options to confirm they're correct. Then click **Create**.
+
+**Result:**
+
+Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster.
+
+You can access your cluster after its state is updated to **Active.**
+
+**Active** clusters are assigned two Projects:
+
+- `Default`, containing the `default` namespace
+- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces
+
+# Optional Next Steps
+
+After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster:
+
+- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI.
+- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md new file mode 100644 index 0000000000..ce26bee5c7 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md @@ -0,0 +1,233 @@ +--- +title: Creating an Amazon EC2 Cluster +shortTitle: Amazon EC2 +description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher +weight: 2210 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ +--- +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. + +First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. + +Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +### Prerequisites + +- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. +- **IAM Policy created** to add to the user of the Access Key And Secret Key. See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: + - [Example IAM Policy](#example-iam-policy) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](../../../../../pages-for-subheaders/set-up-cloud-providers.md) or want to pass an IAM Profile to an instance) + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) +- **IAM Policy added as Permission** to the user. 
See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. + +# Creating an EC2 Cluster + +The steps to create a cluster differ based on your Rancher version. + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **Amazon.** +1. In the **Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key.** +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials and information from EC2 + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md) + +### 3. Create a cluster with node pools using the node template + +Add one or more node pools to your cluster. For more information about node pools, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) + +Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Amazon EC2**. +1. Enter a **Cluster Name**. +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers](../../../../../pages-for-subheaders/set-up-cloud-providers.md) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Click **Create**. 
+ +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# IAM Policies + +### Example IAM Policy + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` + +### Example IAM Policy with PassRole + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + 
"arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", + "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md new file mode 100644 index 0000000000..1de381dd96 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md @@ -0,0 +1,103 @@ +--- +title: Creating an Azure Cluster +shortTitle: Azure +weight: 2220 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-azure/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/azure/ +--- + +In this section, you'll learn how to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Azure through Rancher. + +First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. + +Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +>**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. The workaround for this problem is as follows: + +> - Terminate the SSL/TLS on the internal load balancer +> - Use the L7 load balancer + +> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). 
+ +For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +- [Preparation in Azure](#preparation-in-azure) +- [Creating an Azure Cluster](#creating-an-azure-cluster) + +# Preparation in Azure + +Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. + +To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. + +The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: + +``` +az ad sp create-for-rbac \ + --name="" \ + --role="Contributor" \ + --scopes="/subscriptions/" +``` + +The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, *The client secret*, and *The tenant ID*. This information will be used when you create a node template for Azure. + +# Creating an Azure Cluster + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for the cloud credential. +1. In the **Cloud Credential Type** field, select **Azure**. +1. Enter your Azure credentials. +1. Click **Create.** + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. + +1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.** +1. Click **Add Template.** +1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md) + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in Azure. + +Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Azure**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. 
Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices, see [this section.](../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
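+
+For example, after downloading the cluster's kubeconfig file from the Rancher UI (saved below under the hypothetical name `azure-cluster.yaml`), you can quickly confirm that access from your workstation works:
+
+```
+# List the available contexts, then check that the nodes are reachable.
+kubectl --kubeconfig azure-cluster.yaml config get-contexts
+kubectl --kubeconfig azure-cluster.yaml get nodes
+```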
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md new file mode 100644 index 0000000000..6cace56955 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials.md @@ -0,0 +1,44 @@ +--- +title: Creating Credentials in the vSphere Console +weight: 3 +aliases: + - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/creating-credentials + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/ +--- + +This section describes how to create a vSphere username and password. You will need to provide these vSphere credentials to Rancher, which allows Rancher to provision resources in vSphere. + +The following table lists the permissions required for the vSphere user account: + +| Privilege Group | Operations | +|:----------------------|:-----------------------------------------------------------------------| +| Datastore | AllocateSpace
    Browse
    FileManagement (Low level file operations)
    UpdateVirtualMachineFiles
    UpdateVirtualMachineMetadata | +| Network | Assign | +| Resource | AssignVMToPool | +| Virtual Machine | Config (All)
    GuestOperations (All)
    Interact (All)
    Inventory (All)
    Provisioning (All) | + +The following steps create a role with the required privileges and then assign it to a new user in the vSphere console: + +1. From the **vSphere** console, go to the **Administration** page. + +2. Go to the **Roles** tab. + +3. Create a new role. Give it a name and select the privileges listed in the permissions table above. + + ![](/img/rancherroles1.png) + +4. Go to the **Users and Groups** tab. + +5. Create a new user. Fill out the form and then click **OK**. Make sure to note the username and password, because you will need it when configuring node templates in Rancher. + + ![](/img/rancheruser.png) + +6. Go to the **Global Permissions** tab. + +7. Create a new Global Permission. Add the user you created earlier and assign it the role you created earlier. Click **OK**. + + ![](/img/globalpermissionuser.png) + + ![](/img/globalpermissionrole.png) + +**Result:** You now have credentials that Rancher can use to manipulate vSphere resources. diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md new file mode 100644 index 0000000000..f657c8ca47 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md @@ -0,0 +1,111 @@ +--- +title: Provisioning Kubernetes Clusters in vSphere +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/ +--- + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere. + +First, you will set up your vSphere cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision nodes in vSphere. + +Then you will create a vSphere cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +For details on configuring the vSphere node template, refer to the [vSphere node template configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md) + +For details on configuring RKE Kubernetes clusters in Rancher, refer to the [cluster configuration reference.](../../../../../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) + +- [Preparation in vSphere](#preparation-in-vsphere) +- [Creating a vSphere Cluster](#creating-a-vsphere-cluster) + +# Preparation in vSphere + +This section describes the requirements for setting up vSphere so that Rancher can provision VMs and clusters. + +The node templates are documented and tested with the vSphere Web Services API version 6.5. 
+ 
+### Create Credentials in vSphere
+
+Before proceeding to create a cluster, you must ensure that you have a vSphere user with sufficient permissions. When you set up a node template, the template will need to use these vSphere credentials.
+
+Refer to this [how-to guide](./create-credentials.md) for instructions on how to create a user in vSphere with the required permissions. These steps result in a username and password that you will need to provide to Rancher, which allows Rancher to provision resources in vSphere.
+
+### Network Permissions
+
+Make sure that the hosts running the Rancher server can establish the following network connections:
+
+- To the vSphere API on the vCenter server (usually port 443/TCP).
+- To the Host API (port 443/TCP) on all ESXi hosts used to instantiate virtual machines for the clusters (*only required when using the ISO creation method*).
+- To port 22/TCP and 2376/TCP on the created VMs.
+
+See [Node Networking Requirements](../../../node-requirements-for-rancher-managed-clusters.md#networking-requirements) for a detailed list of port requirements applicable for creating nodes on an infrastructure provider.
+
+### Valid ESXi License for vSphere API Access
+
+The free ESXi license does not support API access. The vSphere servers must have a valid or evaluation ESXi license.
+
+### VM-VM Affinity Rules for Clusters with DRS
+
+If you have a cluster with DRS enabled, setting up [VM-VM Affinity Rules](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.resmgmt.doc/GUID-7297C302-378F-4AF2-9BD6-6EDB1E0A850A.html) is recommended. These rules allow VMs assigned the etcd and control-plane roles to operate on separate ESXi hosts when they are assigned to different node pools. This practice ensures that the failure of a single physical machine does not affect the availability of those planes.
+
+# Creating a vSphere Cluster
+
+The way a vSphere cluster is created in Rancher depends on the Rancher version.
+
+1. [Create your cloud credentials](#1-create-your-cloud-credentials)
+2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials)
+3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template)
+
+### 1. Create your cloud credentials
+
+1. In the Rancher UI, click the user profile button in the upper right corner, and click **Cloud Credentials.**
+1. Click **Add Cloud Credential.**
+1. Enter a name for the cloud credential.
+1. In the **Cloud Credential Type** field, select **VMware vSphere**.
+1. Enter your vSphere credentials. For help, refer to **Account Access** in the [node template configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md)
+1. Click **Create.**
+
+**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters.
+
+### 2. Create a node template with your cloud credentials
+
+Creating a [node template](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for vSphere will allow Rancher to provision new nodes in vSphere. Node templates can be reused for other clusters.
+
+1. In the Rancher UI, click the user profile button in the upper right corner, and click **Node Templates.**
+1. Click **Add Template.**
+1. 
Fill out a node template for vSphere. For help filling out the form, refer to the vSphere node template [configuration reference.](../../../../../../reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md). + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in vSphere. + +Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. + +1. Navigate to **Clusters** in the **Global** view. +1. Click **Add Cluster** and select the **vSphere** infrastructure provider. +1. Enter a **Cluster Name.** +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** For help configuring the cluster, refer to the [RKE cluster configuration reference.](cluster-provisioning/rke-clusters/options) +1. If you want to dynamically provision persistent storage or other infrastructure later, you will need to enable the vSphere cloud provider by modifying the cluster YAML file. For details, refer to [this section.](../../../../../../pages-for-subheaders/vsphere-cloud-provider.md) +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to the nodes, see [this section.](../../../../../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-pools) +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../../../../../advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. 
+- **Provision Storage:** For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../../../../../advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](../../../../../../pages-for-subheaders/vsphere-cloud-provider.md) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md new file mode 100644 index 0000000000..81baadf879 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md @@ -0,0 +1,43 @@ +--- +title: Configuration for Storage Classes in Azure +weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/ +--- + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. + +In order to have the Azure platform create the required storage resources, follow these steps: + +1. [Configure the Azure cloud provider.](../set-up-cloud-providers/other-cloud-providers/azure.md) +1. Configure `kubectl` to connect to your cluster. +1. Copy the `ClusterRole` and `ClusterRoleBinding` manifest for the service account: + + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:azure-cloud-provider + rules: + - apiGroups: [''] + resources: ['secrets'] + verbs: ['get','create'] + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:azure-cloud-provider + roleRef: + kind: ClusterRole + apiGroup: rbac.authorization.k8s.io + name: system:azure-cloud-provider + subjects: + - kind: ServiceAccount + name: persistent-volume-binder + namespace: kube-system + +1. Create these in your cluster using one of the follow command. 
+ + ``` + # kubectl create -f + ``` diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md new file mode 100644 index 0000000000..cb28379dae --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md @@ -0,0 +1,130 @@ +--- +title: Node Requirements for Rancher Managed Clusters +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/node-requirements/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. + +> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.](../../../pages-for-subheaders/installation-requirements.md) + +Make sure the nodes for the Rancher server fulfill the following requirements: + +- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) +- [Optional: Security Considerations](#optional-security-considerations) + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. 
Worker nodes may run Linux or [Windows Server.](#windows-nodes) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +If you plan to use ARM64, see [Running on ARM64 (Experimental).](../../../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md) + +For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) + +### Oracle Linux and RHEL Derived Linux Nodes + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. + +>**Note:** In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: +> +> ``` + systemctl disable nm-cloud-setup.service nm-cloud-setup.timer + reboot + ``` + +### SUSE Linux Nodes + +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. + +### Flatcar Container Linux Nodes + +When [Launching Kubernetes with Rancher](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File](cluster-provisioning/rke-clusters/options/#cluster-config-file) + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: canal + options: + canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: calico + options: + calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +It is also required to enable the Docker service, you can enable the Docker service using the following command: + +``` +systemctl enable docker.service +``` + +The Docker service is enabled automatically when using [Node Drivers](../../../pages-for-subheaders/about-provisioning-drivers.md#node-drivers). + +### Windows Nodes + +Nodes with Windows Server must run Docker Enterprise Edition. + +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows](../../../pages-for-subheaders/use-windows-clusters.md) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. 
+ +Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. + +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +IPv6 should be disabled at the OS level. Unless you specifically intend to utilize IPv6, you should disable it on your nodes. IPv6 is not yet fully supported and often times it is not enough to disable IPv6 on the NICs to avoid complications. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.](https://rancher.com/docs/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements](../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#downstream-kubernetes-cluster-nodes). + +# Optional: Security Considerations + +If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. + +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.](../../../pages-for-subheaders/rancher-security.md#rancher-hardening-guide) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md new file mode 100644 index 0000000000..ac9e0b950f --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md @@ -0,0 +1,330 @@ +--- +title: Registering Existing Clusters +weight: 6 +aliases: + - /rancher/v2.5/en/cluster-provisioning/imported-clusters + - /rancher/v2.x/en/cluster-provisioning/imported-clusters/ + - /rancher/v2.x/en/cluster-provisioning/registered-clusters/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The cluster registration feature replaced the feature to import clusters. + +The control that Rancher has to manage a registered cluster depends on the type of cluster. 
For details, see [Management Capabilities for Registered Clusters.](#management-capabilities-for-registered-clusters) + +- [Prerequisites](#prerequisites) +- [Registering a Cluster](#registering-a-cluster) +- [Management Capabilities for Registered Clusters](#management-capabilities-for-registered-clusters) +- [Configuring K3s Cluster Upgrades](#configuring-k3s-cluster-upgrades) +- [Debug Logging and Troubleshooting for Registered K3s Clusters](#debug-logging-and-troubleshooting-for-registered-k3s-clusters) +- [Annotating Registered Clusters](#annotating-registered-clusters) + +# Prerequisites + + + + +### Kubernetes Node Roles + +Registered RKE Kubernetes clusters must have all three node roles - etcd, controlplane and worker. A cluster with only controlplane components cannot be registered in Rancher. + +For more information on RKE node roles, see the [best practices.](../../../pages-for-subheaders/checklist-for-production-ready-clusters.md#cluster-architecture) + +### Permissions + +If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. + +In order to apply the privilege, you need to run: + +```plain +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user [USER_ACCOUNT] +``` + +before running the `kubectl` command to register the cluster. + +By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). + +If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) + +### EKS Clusters + +EKS clusters must have at least one managed node group to be imported into Rancher or provisioned from Rancher successfully. + + + + +### Permissions + +If your existing Kubernetes cluster already has a `cluster-admin` role defined, you must have this `cluster-admin` privilege to register the cluster in Rancher. + +In order to apply the privilege, you need to run: + +```plain +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user [USER_ACCOUNT] +``` + +before running the `kubectl` command to register the cluster. + +By default, GKE users are not given this privilege, so you will need to run the command before registering GKE clusters. To learn more about role-based access control for GKE, please click [here](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control). + +If you are registering a K3s cluster, make sure the `cluster.yml` is readable. It is protected by default. For details, refer to [Configuring a K3s cluster to enable importation to Rancher.](#configuring-a-k3s-cluster-to-enable-registration-in-rancher) + +### EKS Clusters + +EKS clusters must have at least one managed node group to be imported into Rancher or provisioned from Rancher successfully. + + + + +# Registering a Cluster + +1. From the **Clusters** page, click **Add Cluster**. +2. Under **Register an existing Kubernetes cluster**, click the type of Kubernetes cluster you want to register. +3. Enter a **Cluster Name**. +4. 
Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +5. For Rancher v2.5.6+, use **Agent Environment Variables** under **Cluster Options** to set environment variables for [rancher cluster agent](launch-kubernetes-with-rancher/about-rancher-agents.md). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. +6. Click **Create**. +7. The prerequisite for `cluster-admin` privileges is shown (see **Prerequisites** above), including an example command to fulfil the prerequisite. +8. Copy the `kubectl` command to your clipboard and run it on a node where kubeconfig is configured to point to the cluster you want to import. If you are unsure it is configured correctly, run `kubectl get nodes` to verify before running the command shown in Rancher. +9. If you are using self signed certificates, you will receive the message `certificate signed by unknown authority`. To work around this validation, copy the command starting with `curl` displayed in Rancher to your clipboard. Then run the command on a node where kubeconfig is configured to point to the cluster you want to import. +10. When you finish running the command(s) on your node, click **Done**. + + +**Result:** + +- Your cluster is registered and assigned a state of **Pending.** Rancher is deploying resources to manage your cluster. +- You can access your cluster after its state is updated to **Active.** +- **Active** clusters are assigned two Projects: `Default` (containing the namespace `default`) and `System` (containing the namespaces `cattle-system`, `ingress-nginx`, `kube-public` and `kube-system`, if present). + + +> **Note:** +> You can not re-register a cluster that is currently active in a Rancher setup. + +### Configuring a K3s Cluster to Enable Registration in Rancher + +The K3s server needs to be configured to allow writing to the kubeconfig file. + +This can be accomplished by passing `--write-kubeconfig-mode 644` as a flag during installation: + +``` +$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 +``` + +The option can also be specified using the environment variable `K3S_KUBECONFIG_MODE`: + +``` +$ curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" sh -s - +``` + +### Configuring an Imported EKS Cluster with Terraform + +You should define **only** the minimum fields that Rancher requires when importing an EKS cluster with Terraform. This is important as Rancher will overwrite what was in the EKS cluster with any config that the user has provided. + +>**Warning:** Even a small difference between the current EKS cluster and a user-provided config could have unexpected results. 
+ +The minimum config fields required by Rancher to import EKS clusters with Terraform using `eks_config_v2` are as follows: + +- cloud_credential_id +- name +- region +- imported (this field should always be set to `true` for imported clusters) + +Example Terraform configuration for imported EKS clusters: + +``` +resource "rancher2_cluster" "my-eks-to-import" { +  name = "my-eks-to-import" +  description = "Terraform EKS Cluster" +  eks_config_v2 { +    cloud_credential_id = rancher2_cloud_credential.aws.id +    name = var.aws_eks_name +    region = var.aws_region +    imported = true +  } +} +``` + +# Management Capabilities for Registered Clusters + +The control that Rancher has to manage a registered cluster depends on the type of cluster. + + + + +- [Changes in v2.5.8](#changes-in-v2-5-8) +- [Features for All Registered Clusters](#2-5-8-features-for-all-registered-clusters) +- [Additional Features for Registered K3s Clusters](#2-5-8-additional-features-for-registered-k3s-clusters) +- [Additional Features for Registered EKS and GKE Clusters](#additional-features-for-registered-eks-and-gke-clusters) + +### Changes in v2.5.8 + +Greater management capabilities are now available for [registered GKE clusters.](#additional-features-for-registered-eks-and-gke-clusters) The same configuration options are available for registered GKE clusters as for the GKE clusters created through the Rancher UI. + + +### Features for All Registered Clusters + +After registering a cluster, the cluster owner can: + +- [Manage cluster access](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) through role-based access control +- Enable [monitoring, alerts and notifiers](../../../pages-for-subheaders/monitoring-and-alerting.md) +- Enable [logging](../../../pages-for-subheaders/logging.md) +- Enable [Istio](../../../pages-for-subheaders/istio.md) +- Use [pipelines](../../advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- Manage projects and workloads + + +### Additional Features for Registered K3s Clusters + +[K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. + +When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: + +- The ability to [upgrade the K3s version](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- The ability to configure the maximum number of nodes that will be upgraded concurrently +- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster + +### Additional Features for Registered EKS and GKE Clusters + +Registering an Amazon EKS cluster or GKE cluster allows Rancher to treat it as though it were created in Rancher. + +Amazon EKS clusters and GKE clusters can now be registered in Rancher. For the most part, these registered clusters are treated the same way as clusters created in the Rancher UI, except for deletion. + +When you delete an EKS cluster or GKE cluster that was created in Rancher, the cluster is destroyed.
When you delete a cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +The capabilities for registered clusters are listed in the table on [this page.](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) + + + + +- [Features for All Registered Clusters](#before-2-5-8-features-for-all-registered-clusters) +- [Additional Features for Registered K3s Clusters](#before-2-5-8-additional-features-for-registered-k3s-clusters) +- [Additional Features for Registered EKS Clusters](#additional-features-for-registered-eks-clusters) + + +### Features for All Registered Clusters + +After registering a cluster, the cluster owner can: + +- [Manage cluster access](../../advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) through role-based access control +- Enable [monitoring, alerts and notifiers](../../../pages-for-subheaders/monitoring-and-alerting.md) +- Enable [logging](logging/v2.5/) +- Enable [Istio](../../../pages-for-subheaders/istio.md) +- Use [pipelines](../../advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- Manage projects and workloads + + +### Additional Features for Registered K3s Clusters + +[K3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight, fully compliant Kubernetes distribution. + +When a K3s cluster is registered in Rancher, Rancher will recognize it as K3s. The Rancher UI will expose the features for [all registered clusters,](#features-for-all-registered-clusters) in addition to the following features for editing and upgrading the cluster: + +- The ability to [upgrade the K3s version](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md) +- The ability to configure the maximum number of nodes that will be upgraded concurrently +- The ability to see a read-only version of the K3s cluster's configuration arguments and environment variables used to launch each node in the cluster + +### Additional Features for Registered EKS Clusters + +Registering an Amazon EKS cluster allows Rancher to treat it as though it were created in Rancher. + +Amazon EKS clusters can now be registered in Rancher. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. + +When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +The capabilities for registered EKS clusters are listed in the table on [this page.](../../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) + + + + +# Configuring K3s Cluster Upgrades + +> It is a Kubernetes best practice to back up the cluster before upgrading. When upgrading a high-availability K3s cluster with an external database, back up the database in whichever way is recommended by the relational database provider. + +The **concurrency** is the maximum number of nodes that are permitted to be unavailable during an upgrade. If number of unavailable nodes is larger than the **concurrency,** the upgrade will fail. 
If an upgrade fails, you may need to repair or remove failed nodes before the upgrade can succeed. + +- **Controlplane concurrency:** The maximum number of server nodes to upgrade at a single time; also the maximum number of unavailable server nodes +- **Worker concurrency:** The maximum number of worker nodes to upgrade at the same time; also the maximum number of unavailable worker nodes + +In the K3s documentation, controlplane nodes are called server nodes. These nodes run the Kubernetes master, which maintains the desired state of the cluster. In K3s, these controlplane nodes can have workloads scheduled to them by default. + +Also in the K3s documentation, nodes with the worker role are called agent nodes. Any workloads or pods that are deployed in the cluster can be scheduled to these nodes by default. + +# Debug Logging and Troubleshooting for Registered K3s Clusters + +Nodes are upgraded by the system upgrade controller running in the downstream cluster. Based on the cluster configuration, Rancher deploys two [plans](https://github.com/rancher/system-upgrade-controller#example-upgrade-plan) to upgrade K3s nodes: one for controlplane nodes and one for workers. The system upgrade controller follows the plans and upgrades the nodes. + +To enable debug logging on the system upgrade controller deployment, edit the [configmap](https://github.com/rancher/system-upgrade-controller/blob/50a4c8975543d75f1d76a8290001d87dc298bdb4/manifests/system-upgrade-controller.yaml#L32) to set the debug environment variable to true. Then restart the `system-upgrade-controller` pod. + +Logs created by the `system-upgrade-controller` can be viewed by running this command: + +``` +kubectl logs -n cattle-system system-upgrade-controller +``` + +The current status of the plans can be viewed with this command: + +``` +kubectl get plans -A -o yaml +``` + +If the cluster becomes stuck while upgrading, restart the `system-upgrade-controller`. + +To prevent issues when upgrading, follow the [Kubernetes upgrade best practices](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/). + + + + +# Annotating Registered Clusters + +For all types of registered Kubernetes clusters except for K3s Kubernetes clusters, Rancher doesn't have any information about how the cluster is provisioned or configured. + +Therefore, when Rancher registers a cluster, it assumes that several capabilities are disabled by default. Rancher assumes this in order to avoid exposing UI options to the user even when the capabilities are not enabled in the registered cluster. + +However, if the cluster has a certain capability, such as the ability to use a pod security policy, a user of that cluster might still want to select pod security policies for the cluster in the Rancher UI. In order to do that, the user will need to manually indicate to Rancher that pod security policies are enabled for the cluster. + +By annotating a registered cluster, it is possible to indicate to Rancher that a cluster was given a pod security policy, or another capability, outside of Rancher. + +This example annotation indicates that a pod security policy is enabled: + +``` +"capabilities.cattle.io/pspEnabled": "true" +``` + +The following annotation indicates Ingress capabilities. Note that the values of non-primitive objects need to be JSON encoded, with quotations escaped.
+ +``` +"capabilities.cattle.io/ingressCapabilities": "[ + { + "customDefaultBackend":true, + "ingressProvider":"asdf" + } +]" +``` + +These capabilities can be annotated for the cluster: + +- `ingressCapabilities` +- `loadBalancerCapabilities` +- `nodePoolScalingSupported` +- `nodePortRange` +- `pspEnabled` +- `taintSupport` + +All the capabilities and their type definitions can be viewed in the Rancher API view, at `[Rancher Server URL]/v3/schemas/capabilities`. + +To annotate a registered cluster, + +1. Go to the cluster view in Rancher and select **⋮ > Edit.** +1. Expand the **Labels & Annotations** section. +1. Click **Add Annotation.** +1. Add an annotation to the cluster with the format `capabilities/: ` where `value` is the cluster capability that will be overridden by the annotation. In this scenario, Rancher is not aware of any capabilities of the cluster until you add the annotation. +1. Click **Save.** + +**Result:** The annotation does not give the capabilities to the cluster, but it does indicate to Rancher that the cluster has those capabilities. + diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md new file mode 100644 index 0000000000..bd58055586 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md @@ -0,0 +1,57 @@ +--- +title: Creating an Aliyun ACK Cluster +shortTitle: Alibaba Cloud Container Service for Kubernetes +weight: 2120 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ack/ +--- + +You can use Rancher to create a cluster hosted in Alibaba Cloud Kubernetes (ACK). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for ACK, but by default, this cluster driver is `inactive`. In order to launch ACK clusters, you will need to [enable the ACK cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning ACK clusters. + +## Prerequisites + +>**Note** +>Deploying to ACK will incur charges. + +1. In Aliyun, activate the following services in their respective consoles. + + - [Container Service](https://cs.console.aliyun.com) + - [Resource Orchestration Service](https://ros.console.aliyun.com) + - [RAM](https://ram.console.aliyun.com) + +2. 
Make sure that the account you will be using to create the ACK cluster has the appropriate permissions. Referring to the official Alibaba Cloud documentation about [Role authorization](https://www.alibabacloud.com/help/doc-detail/86483.htm) and [Use the Container Service console as a RAM user](https://www.alibabacloud.com/help/doc-detail/86484.htm) for details. + +3. In Alibaba Cloud, create an [access key](https://www.alibabacloud.com/help/doc-detail/53045.html). + +4. In Alibaba Cloud, create an [SSH key pair](https://www.alibabacloud.com/help/doc-detail/51793.html). This key is used to access nodes in the Kubernetes cluster. + +## Create an ACK Cluster + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Alibaba ACK**. + +1. Enter a **Cluster Name**. + +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +1. Configure **Account Access** for the ACK cluster. Choose the geographical region in which to build your cluster, and input the access key that was created as part of the prerequisite steps. + +1. Click **Next: Configure Cluster**, then choose cluster type, the version of Kubernetes and the availability zone. + +1. If you choose **Kubernetes** as the cluster type, Click **Next: Configure Master Nodes**, then complete the **Master Nodes** form. + +1. Click **Next: Configure Worker Nodes**, then complete the **Worker Nodes** form. + +1. Review your options to confirm they're correct. Then click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md new file mode 100644 index 0000000000..73e4c883aa --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md @@ -0,0 +1,165 @@ +--- +title: Managing GKE Clusters +shortTitle: Google Kubernetes Engine +weight: 2105 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-gke/ + - /rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/gke + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/gke/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +- [Prerequisites](#prerequisites) +- [Provisioning a GKE Cluster](#provisioning-a-gke-cluster) +- [Private Clusters](#private-clusters) +- [Configuration Reference](#configuration-reference) +- [Updating Kubernetes Version](#updating-kubernetes-version) +- [Syncing](#syncing) + +# Prerequisites + +Some setup in Google Kubernetes Engine is required. 
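+
+The sections below describe what Rancher needs. As an alternative to the console, the same service account and key can be sketched with the `gcloud` CLI (the account name `rancher-gke` and project ID `my-project-id` are placeholders, and each role listed under **Service Account Token** below must be granted):
+
+```
+gcloud iam service-accounts create rancher-gke --project my-project-id
+gcloud projects add-iam-policy-binding my-project-id \
+  --member "serviceAccount:rancher-gke@my-project-id.iam.gserviceaccount.com" \
+  --role roles/container.admin
+# Repeat the binding for roles/compute.viewer, roles/viewer and roles/iam.serviceAccountUser
+gcloud iam service-accounts keys create key.json \
+  --iam-account rancher-gke@my-project-id.iam.gserviceaccount.com
+```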
+ +### Service Account Token + +Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. + +The service account requires the following roles: + +- **Compute Viewer:** `roles/compute.viewer` +- **Project Viewer:** `roles/viewer` +- **Kubernetes Engine Admin:** `roles/container.admin` +- **Service Account User:** `roles/iam.serviceAccountUser` + +[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + +For help obtaining a private key for your service account, refer to the Google cloud documentation [here.](https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys) You will need to save the key in JSON format. + +### Google Project ID + +Your cluster will need to be part of a Google Project. + +To create a new project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) + +To get the project ID of an existing project, refer to the Google cloud documentation [here.](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) + +# Provisioning a GKE Cluster + +>**Note** +>Deploying to GKE will incur charges. + +### 1. Create a Cloud Credential + +1. In the upper right corner, click the user profile dropdown menu and click **Cloud Credentials.** +1. Click **Add Cloud Credential.** +1. Enter a name for your Google cloud credentials. +1. In the **Cloud Credential Type** field, select **Google.** +1. In the **Service Account** text box, paste your service account private key JSON, or upload the JSON file. +1. Click **Create.** + +**Result:** You have created credentials that Rancher will use to provision the new GKE cluster. + +### 2. Create the GKE Cluster +Use Rancher to set up and configure your Kubernetes cluster. + +1. From the **Clusters** page, click **Add Cluster**. +1. Under **With a hosted Kubernetes provider,** click **Google GKE**. +1. Enter a **Cluster Name**. +1. Optional: Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Optional: Add Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to the cluster. +1. Enter your Google project ID and your Google cloud credentials. +1. Fill out the rest of the form. For help, refer to the [GKE cluster configuration reference.](../../../../pages-for-subheaders/gke-cluster-configuration.md) +1. Click **Create.** + +**Result:** You have successfully deployed a GKE cluster. + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. 
+ +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# Private Clusters + +Private GKE clusters are supported. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md) + +# Configuration Reference + +For details on configuring GKE clusters in Rancher, see [this page.](../../../../pages-for-subheaders/gke-cluster-configuration.md) +# Updating Kubernetes Version + +The Kubernetes version of a cluster can be upgraded to any version available in the region or zone fo the GKE cluster. Upgrading the master Kubernetes version does not automatically upgrade worker nodes. Nodes can be upgraded independently. + +>**Note** +>GKE has removed basic authentication in 1.19+. In order to upgrade a cluster to 1.19+, basic authentication must be disabled in the Google Cloud. Otherwise, an error will appear in Rancher when an upgrade to 1.19+ is attempted. You can follow the [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication#disabling_authentication_with_a_static_password). After this, the Kubernetes version can be updated to 1.19+ via Rancher. + +# Syncing + +The GKE provisioner can synchronize the state of a GKE cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../../../../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md) + +For information on configuring the refresh interval, see [this section.](../../../../pages-for-subheaders/gke-cluster-configuration.md#configuring-the-refresh-interval) + + + + +# Prerequisites + +Some setup in Google Kubernetes Engine is required. + +### Service Account Token + +Create a service account using [Google Kubernetes Engine](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts). GKE uses this account to operate your cluster. Creating this account also generates a private key used for authentication. + +The service account requires the following roles: + +- **Compute Viewer:** `roles/compute.viewer` +- **Project Viewer:** `roles/viewer` +- **Kubernetes Engine Admin:** `roles/container.admin` +- **Service Account User:** `roles/iam.serviceAccountUser` + +[Google Documentation: Creating and Enabling Service Accounts](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + + +>**Note** +>Deploying to GKE will incur charges. + +# Create the GKE Cluster + +Use Rancher to set up and configure your Kubernetes cluster. + +1. From the **Clusters** page, click **Add Cluster**. + +2. Choose **Google Kubernetes Engine**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Either paste your service account private key in the **Service Account** text box or **Read from a file**. Then click **Next: Configure Nodes**. 
+ + >**Note:** After submitting your private key, you may have to enable the Google Kubernetes Engine API. If prompted, browse to the URL displayed in the Rancher UI to enable the API. + +6. Select your cluster options, node options and security options. For help, refer to the [GKE Cluster Configuration Reference.](#gke-before-v2-5-8) +9. Review your options to confirm they're correct. Then click **Create**. + +**Result:** You have successfully deployed a GKE cluster. + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + + diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md new file mode 100644 index 0000000000..05969b923b --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md @@ -0,0 +1,87 @@ +--- +title: Creating a Huawei CCE Cluster +shortTitle: Huawei Cloud Kubernetes Service +weight: 2130 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/cce/ +--- + +You can use Rancher to create a cluster hosted in Huawei Cloud Container Engine (CCE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for CCE, but by default, this cluster driver is `inactive`. In order to launch CCE clusters, you will need to [enable the CCE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning CCE clusters. + +## Prerequisites in Huawei + +>**Note** +>Deploying to CCE will incur charges. + +1. Find your project ID in Huawei CCE portal. See the CCE documentation on how to [manage your projects](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0066738518.html). + +2. Create an [Access Key ID and Secret Access Key](https://support.huaweicloud.com/en-us/usermanual-iam/en-us_topic_0079477318.html). + +## Limitations + +Huawei CCE service doesn't support the ability to create clusters with public access through their API. You are required to run Rancher in the same VPC as the CCE clusters that you want to provision. + +## Create the CCE Cluster + +1. From the **Clusters** page, click **Add Cluster**. +1. Choose **Huawei CCE**. +1. Enter a **Cluster Name**. +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Enter **Project Id**, Access Key ID as **Access Key** and Secret Access Key **Secret Key**. Then Click **Next: Configure cluster**. Fill in the cluster configuration. 
For help filling out the form, refer to [Huawei CCE Configuration.](#huawei-cce-configuration) +1. Fill in the following node configuration for the cluster. For help filling out the form, refer to [Node Configuration.](#node-configuration) +1. Click **Create** to create the CCE cluster. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# Huawei CCE Configuration + +|Settings|Description| +|---|---| +| Cluster Type | The type of node you want to include in the cluster, `VirtualMachine` or `BareMetal`. | +| Description | The description of the cluster. | +| Master Version | The Kubernetes version. | +| Management Scale Count | The maximum node count of the cluster. The options are 50, 200 and 1000. The larger the scale count, the higher the cost. | +| High Availability | Enable master node high availability. A cluster with high availability enabled costs more. | +| Container Network Mode | The network mode used in the cluster. `overlay_l2` and `vpc-router` are supported in the `VirtualMachine` type, and `underlay_ipvlan` is supported in the `BareMetal` type. | +| Container Network CIDR | Network CIDR for the cluster. | +| VPC Name | The name of the VPC that the cluster will be deployed into. Rancher will create one if it is left blank. | +| Subnet Name | The name of the subnet that the cluster will be deployed into. Rancher will create one if it is left blank. | +| External Server | This option is reserved for the future, when CCE cluster public access can be enabled via the API. For now, it is always disabled. | +| Cluster Label | The labels for the cluster. | +| Highway Subnet | This option is only supported in the `BareMetal` type. It requires you to select a VPC with high network speed for the bare metal machines. | + +**Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure.](cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + +# Node Configuration + +|Settings|Description| +|---|---| +| Zone | The availability zone where the cluster node(s) are deployed. | +| Billing Mode | The billing mode for the cluster node(s). In the `VirtualMachine` type, only `Pay-per-use` is supported. In the `BareMetal` type, you can choose `Pay-per-use` or `Yearly/Monthly`. | +| Validity Period | This option only shows in the `Yearly/Monthly` billing mode. It specifies how long you want to pay for the cluster node(s). | +| Auto Renew | This option only shows in the `Yearly/Monthly` billing mode. It specifies whether the cluster node(s) renew the `Yearly/Monthly` payment automatically. | +| Data Volume Type | Data volume type for the cluster node(s). The options are `SATA`, `SSD` or `SAS`. | +| Data Volume Size | Data volume size for the cluster node(s). | +| Root Volume Type | Root volume type for the cluster node(s). The options are `SATA`, `SSD` or `SAS`. | +| Root Volume Size | Root volume size for the cluster node(s). | +| Node Flavor | The node flavor of the cluster node(s). The flavor list in the Rancher UI is fetched from Huawei Cloud.
It includes all the supported node flavors. | +| Node Count | The node count of the cluster | +| Node Operating System | The operating system for the cluster node(s). Only `EulerOS 2.2` and `CentOS 7.4` are supported right now. | +| SSH Key Name | The ssh key for the cluster node(s) | +| EIP | The public IP options for the cluster node(s). `Disabled` means that the cluster node(s) are not going to bind a public IP. `Create EIP` means that the cluster node(s) will bind one or many newly created Eips after provisioned and more options will be shown in the UI to set the to-create EIP parameters. And `Select Existed EIP` means that the node(s) will bind to the EIPs you select. | +| EIP Count | This option will only be shown when `Create EIP` is selected. It means how many EIPs you want to create for the node(s). | +| EIP Type | This option will only be shown when `Create EIP` is selected. The options are `5_bgp` and `5_sbgp`. | +| EIP Share Type | This option will only be shown when `Create EIP` is selected. The only option is `PER`. | +| EIP Charge Mode | This option will only be shown when `Create EIP` is selected. The options are pay by `BandWidth` and pay by `Traffic`. | +| EIP Bandwidth Size | This option will only be shown when `Create EIP` is selected. The BandWidth of the EIPs. | +| Authentication Mode | It means enabling `RBAC` or also enabling `Authenticating Proxy`. If you select `Authenticating Proxy`, the certificate which is used for authenticating proxy will be also required. | +| Node Label | The labels for the cluster node(s). Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) | \ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md new file mode 100644 index 0000000000..60e1722261 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md @@ -0,0 +1,86 @@ +--- +title: Creating a Tencent TKE Cluster +shortTitle: Tencent Kubernetes Engine +weight: 2125 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/tke/ +--- + +You can use Rancher to create a cluster hosted in Tencent Kubernetes Engine (TKE). Rancher has already implemented and packaged the [cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md) for TKE, but by default, this cluster driver is `inactive`. In order to launch TKE clusters, you will need to [enable the TKE cluster driver](../../../advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers.md#activating-deactivating-cluster-drivers). After enabling the cluster driver, you can start provisioning TKE clusters. + +## Prerequisites in Tencent + +>**Note** +>Deploying to TKE will incur charges. + +1. 
Make sure that the account you will be using to create the TKE cluster has the appropriate permissions. Refer to the [Cloud Access Management](https://intl.cloud.tencent.com/document/product/598/10600) documentation for details. + +2. Create a [Cloud API Secret ID and Secret Key](https://console.cloud.tencent.com/capi). + +3. Create a [Private Network and Subnet](https://intl.cloud.tencent.com/document/product/215/4927) in the region where you want to deploy your Kubernetes cluster. + +4. Create an [SSH key pair](https://intl.cloud.tencent.com/document/product/213/6092). This key is used to access the nodes in the Kubernetes cluster. + +## Create a TKE Cluster + +1. From the **Clusters** page, click **Add Cluster**. + +2. Choose **Tencent TKE**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Configure **Account Access** for the TKE cluster. Complete each drop-down and field using the information obtained in [Prerequisites](#prerequisites-in-tencent). + +   | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Region | From the drop-down, choose the geographical region in which to build your cluster. | + | Secret ID | Enter the Secret ID that you obtained from the Tencent Cloud Console. | + | Secret Key | Enter the Secret Key that you obtained from the Tencent Cloud Console. | + +6. Click `Next: Configure Cluster` to set your TKE cluster configurations. + + | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Kubernetes Version | TKE currently only supports Kubernetes version 1.10.5. | + | Node Count | Enter the number of worker nodes you want to purchase for your Kubernetes cluster, up to 100. | + | VPC | Select the VPC name that you have created in the Tencent Cloud Console. | + | Container Network CIDR | Enter the CIDR range of your Kubernetes cluster. You can check the available CIDR ranges in the VPC service of the Tencent Cloud Console. Defaults to 172.16.0.0/16. | + + **Note:** If you are editing the cluster in the `cluster.yml` instead of the Rancher UI, note that cluster configuration directives must be nested under the `rancher_kubernetes_engine_config` directive in `cluster.yml`. For more information, refer to the section on [the config file structure in Rancher v2.3.0+.](cluster-provisioning/rke-clusters/options/#config-file-structure-in-rancher-v2-3-0) + +7. Click `Next: Select Instance Type` to choose the instance type that will be used for your TKE cluster. + + | Option | Description | + | ---------- | -------------------------------------------------------------------------------------------------------------------- | + | Availability Zone | Choose the availability zone of the VPC region. | + | Subnet | Select the Subnet that you have created within the VPC, and add a new one if you don't have one in the chosen availability zone. | + | Instance Type | From the drop-down, choose the VM instance type that you want to use for the TKE cluster. Defaults to S2.MEDIUM4 (2 CPUs, 4 GiB memory). | + +8. 
Click `Next: Configure Instance` to configure the VM instance that will use for your TKE cluster. + + Option | Description + -------|------------ + Operating System | The name of the operating system, currently supports Centos7.2x86_64 or ubuntu16.04.1 LTSx86_64 + Security Group | Security group ID, default does not bind any security groups. + Root Disk Type | System disk type. System disk type restrictions are detailed in the [CVM instance configuration](https://cloud.tencent.com/document/product/213/11518). + Root Disk Size | System disk size. Linux system adjustment range is 20 - 50G, step size is 1. + Data Disk Type | Data disk type, default value to the SSD cloud drive + Data Disk Size | Data disk size (GB), the step size is 10 + Band Width Type | Type of bandwidth, PayByTraffic or PayByHour + Band Width | Public network bandwidth (Mbps) + Key Pair | Key id, after associating the key can be used to logging to the VM node + +9. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md new file mode 100644 index 0000000000..9ea9d13256 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md @@ -0,0 +1,45 @@ +--- +title: ConfigMaps +weight: 3061 +aliases: + - /rancher/v2.5/en/tasks/projects/add-configmaps + - /rancher/v2.5/en/k8s-in-rancher/configmaps + - /rancher/v2.x/en/k8s-in-rancher/configmaps/ +--- + +While most types of Kubernetes secrets store sensitive information, [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) store general configuration information, such as a group of config files. Because ConfigMaps don't store sensitive information, they can be updated automatically, and therefore don't require their containers to be restarted following update (unlike most secret types, which require manual updates and a container restart to take effect). + +ConfigMaps accept key value pairs in common string formats, like config files or JSON blobs. After you upload a config map, any workload can reference it as either an environment variable or a volume mount. + +>**Note:** ConfigMaps can only be applied to namespaces and not projects. + +1. From the **Global** view, select the project containing the namespace that you want to add a ConfigMap to. + +1. From the main menu, select **Resources > Config Maps**. Click **Add Config Map**. + +1. Enter a **Name** for the Config Map. + + >**Note:** Kubernetes classifies ConfigMaps as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. Therefore, to prevent conflicts, your ConfigMaps must have a unique name among the other certificates, registries, and secrets within your workspace. + +1. Select the **Namespace** you want to add Config Map to. 
You can also add a new namespace on the fly by clicking **Add to a new namespace**. + +1. From **Config Map Values**, click **Add Config Map Value** to add a key value pair to your ConfigMap. Add as many values as you need. + +1. Click **Save**. + + >**Note:** Don't use ConfigMaps to store sensitive data [use a secret](secrets.md). + > + >**Tip:** You can add multiple key value pairs to the ConfigMap by copying and pasting. + > + > ![](/img/bulk-key-values.gif) + +**Result:** Your ConfigMap is added to the namespace. You can view it in the Rancher UI from the **Resources > Config Maps** view. + +## What's Next? + +Now that you have a ConfigMap added to a namespace, you can add it to a workload that you deploy from the namespace of origin. You can use the ConfigMap to specify information for you application to consume, such as: + +- Application environment variables. +- Specifying parameters for a Volume mounted to the workload. + +For more information on adding ConfigMaps to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). diff --git a/content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md similarity index 100% rename from content/rancher/v2.5/en/k8s-in-rancher/service-discovery/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md new file mode 100644 index 0000000000..72a8bf6f7d --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md @@ -0,0 +1,47 @@ +--- +title: Encrypting HTTP Communication +description: Learn how to add an SSL (Secure Sockets Layer) certificate or TLS (Transport Layer Security) certificate to either a project, a namespace, or both, so that you can add it to deployments +weight: 3060 +aliases: + - /rancher/v2.5/en/tasks/projects/add-ssl-certificates/ + - /rancher/v2.5/en/k8s-in-rancher/certificates + - /rancher/v2.x/en/k8s-in-rancher/certificates/ +--- + +When you create an ingress within Rancher/Kubernetes, you must provide it with a secret that includes a TLS private key and certificate, which are used to encrypt and decrypt communications that come through the ingress. You can make certificates available for ingress use by navigating to its project or namespace, and then uploading the certificate. You can then add the certificate to the ingress deployment. + +Add SSL certificates to either projects, namespaces, or both. A project scoped certificate will be available in all its namespaces. + +>**Prerequisites:** You must have a TLS private key and certificate available to upload. + +1. From the **Global** view, select the project where you want to deploy your ingress. + +1. From the main menu, select **Resources > Secrets > Certificates**. Click **Add Certificate**. + +1. Enter a **Name** for the certificate. + + >**Note:** Kubernetes classifies SSL certificates as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. 
Therefore, to prevent conflicts, your SSL certificate must have a unique name among the other certificates, registries, and secrets within your project/workspace. + +1. Select the **Scope** of the certificate. + + - **Available to all namespaces in this project:** The certificate is available for any deployment in any namespaces in the project. + + - **Available to a single namespace:** The certificate is only available for the deployments in one namespace. If you choose this option, select a **Namespace** from the drop-down list or click **Add to a new namespace** to add the certificate to a namespace you create on the fly. + +1. From **Private Key**, either copy and paste your certificate's private key into the text box (include the header and footer), or click **Read from a file** to browse to the private key on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. + + Private key files end with an extension of `.key`. + +1. From **Certificate**, either copy and paste your certificate into the text box (include the header and footer), or click **Read from a file** to browse to the certificate on your file system. If possible, we recommend using **Read from a file** to reduce likelihood of error. + + Certificate files end with an extension of `.crt`. + +**Result:** Your certificate is added to the project or namespace. You can now add it to deployments. + +- If you added an SSL certificate to the project, the certificate is available for deployments created in any project namespace. +- If you added an SSL certificate to a namespace, the certificate is available only for deployments in that namespace. +- Your certificate is added to the **Resources > Secrets > Certificates** view. + +## What's Next? + +Now you can add the certificate when launching an ingress within the current project or namespace. For more information, see [Adding Ingress](load-balancer-and-ingress-controller/add-ingresses.md). diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md new file mode 100644 index 0000000000..1a2c9bc858 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas.md @@ -0,0 +1,43 @@ +--- +title: Background Information on HPAs +weight: 3027 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/hpa-background + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/ +--- + +The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down. This section provides explanation on how HPA works with Kubernetes. + +## Why Use Horizontal Pod Autoscaler? + +Using HPA, you can automatically scale the number of pods within a replication controller, deployment, or replica set up or down. HPA automatically scales the number of pods that are running for maximum efficiency. Factors that affect the number of pods include: + +- A minimum and maximum number of pods allowed to run, as defined by the user. +- Observed CPU/memory use, as reported in resource metrics. 
+- Custom metrics provided by third-party metrics application like Prometheus, Datadog, etc. + +HPA improves your services by: + +- Releasing hardware resources that would otherwise be wasted by an excessive number of pods. +- Increase/decrease performance as needed to accomplish service level agreements. + +## How HPA Works + +![HPA Schema](/img/horizontal-pod-autoscaler.jpg) + +HPA is implemented as a control loop, with a period controlled by the `kube-controller-manager` flags below: + +Flag | Default | Description | +---------|----------|----------| + `--horizontal-pod-autoscaler-sync-period` | `30s` | How often HPA audits resource/custom metrics in a deployment. + `--horizontal-pod-autoscaler-downscale-delay` | `5m0s` | Following completion of a downscale operation, how long HPA must wait before launching another downscale operations. + `--horizontal-pod-autoscaler-upscale-delay` | `3m0s` | Following completion of an upscale operation, how long HPA must wait before launching another upscale operation. + + +For full documentation on HPA, refer to the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +## Horizontal Pod Autoscaler API Objects + +HPA is an API resource in the Kubernetes `autoscaling` API group. The current stable version is `autoscaling/v1`, which only includes support for CPU autoscaling. To get additional support for scaling based on memory and custom metrics, use the beta version instead: `autoscaling/v2beta1`. + +For more information about the HPA API object, see the [HPA GitHub Readme](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md new file mode 100644 index 0000000000..95f6fc6b7a --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md @@ -0,0 +1,208 @@ +--- +title: Managing HPAs with kubectl +weight: 3029 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-kubectl + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/ +--- + +This section describes HPA management with `kubectl`. This document has instructions for how to: + +- Create an HPA +- Get information on HPAs +- Delete an HPA +- Configure your HPAs to scale with CPU or memory utilization +- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics + + +You can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI](manage-hpas-with-ui.md). For scaling HPAs based on other metrics than CPU or memory, you still need `kubectl`. 
+
+##### Basic kubectl Commands for Managing HPAs
+
+If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`:
+
+- Creating HPA
+
+  - With manifest: `kubectl create -f `
+
+  - Without a manifest (CPU only): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50`
+
+- Getting HPA info
+
+  - Basic: `kubectl get hpa hello-world`
+
+  - Detailed description: `kubectl describe hpa hello-world`
+
+- Deleting HPA
+
+  - `kubectl delete hpa hello-world`
+
+##### HPA Manifest Definition Example
+
+The HPA manifest is the config file used for managing an HPA with `kubectl`.
+
+The following snippet demonstrates the use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive.
+
+```yml
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: hello-world
+spec:
+  scaleTargetRef:
+    apiVersion: extensions/v1beta1
+    kind: Deployment
+    name: hello-world
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      targetAverageUtilization: 50
+  - type: Resource
+    resource:
+      name: memory
+      targetAverageValue: 100Mi
+```
+
+
+Directive | Description
+---------|----------|
+ `apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled. |
+ `name: hello-world` | Indicates that HPA is performing autoscaling for the `hello-world` deployment. |
+ `minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1. |
+ `maxReplicas: 10` | Indicates the maximum number of replicas in the deployment can't go above 10.
+ `targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU.
+ `targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory.
+
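+If you only need CPU-based scaling, the same target utilization as the manifest above can also be created imperatively and then exported for inspection; memory and custom metrics still require a manifest. The following is a sketch using the `hello-world` deployment from the example above:
+
+```
+# CPU-only equivalent of the example manifest, created without a file
+kubectl autoscale deployment hello-world --min=1 --max=10 --cpu-percent=50
+
+# Export the generated HorizontalPodAutoscaler to compare it with the manifest
+kubectl get hpa hello-world -o yaml
+
+# Remove it again before applying the full CPU + memory manifest
+kubectl delete hpa hello-world
+```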
    + +##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) + +Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. + +Run the following commands to check if metrics are available in your installation: + +``` +$ kubectl top nodes +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +node-controlplane 196m 9% 1623Mi 42% +node-etcd 80m 4% 1090Mi 28% +node-worker 64m 3% 1146Mi 29% +$ kubectl -n kube-system top pods +NAME CPU(cores) MEMORY(bytes) +canal-pgldr 18m 46Mi +canal-vhkgr 20m 45Mi +canal-x5q5v 17m 37Mi +canal-xknnz 20m 37Mi +kube-dns-7588d5b5f5-298j2 0m 22Mi +kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi +metrics-server-97bc649d5-jxrlt 0m 12Mi +$ kubectl -n kube-system logs -l k8s-app=metrics-server +I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true +I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 +I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version +I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 +I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink +I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) +I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ +I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 +``` + + +##### Configuring HPA to Scale Using Custom Metrics with Prometheus + +You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. + +For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: + +- Prometheus is deployed in the cluster. +- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. +- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` + +Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. + +For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). + +1. Initialize Helm in your cluster. 
+ ``` + # kubectl -n kube-system create serviceaccount tiller + kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + helm init --service-account tiller + ``` + +1. Clone the `banzai-charts` repo from GitHub: + ``` + # git clone https://github.com/banzaicloud/banzai-charts + ``` + +1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. + ``` + # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system + ``` + +1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check that the service pod is `Running`. Enter the following command. + ``` + # kubectl get pods -n kube-system + ``` + From the resulting output, look for a status of `Running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h + ... + ``` + 1. Check the service logs to make sure the service is running correctly by entering the command that follows. + ``` + # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system + ``` + Then review the log output to confirm the service is running. + +
    + Prometheus Adaptor Logs + + ... + I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds + I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: + I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT + I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json + I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 + I0724 10:18:45.696766 1 request.go:836] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} + I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.sslip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK + 
I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} + I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] + I0724 10:18:51.727845 1 request.go:836] Request Body: {"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} + ... +
    + + + +1. Check that the metrics API is accessible from kubectl. + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
+
+  - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to the API path.
+    ```
+    # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1
+    ```
+    If the API is accessible, you should receive output that's similar to what follows.
+
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} +
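+Once the discovery endpoint responds, you can also query an individual metric to confirm that the adapter returns values for your pods. The sketch below reuses the `default` namespace, the `app=hello-world` label, and the `cpu_system` metric that appear elsewhere on this page; which metrics are actually exposed depends on your Prometheus adapter configuration.
+
+```
+# Query the cpu_system custom metric for all pods matching the hello-world label
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/cpu_system?labelSelector=app%3Dhello-world"
+```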
    diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md new file mode 100644 index 0000000000..140d748429 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md @@ -0,0 +1,56 @@ +--- +title: Managing HPAs with the Rancher UI +weight: 3028 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/manage-hpa-with-rancher-ui + - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/ +--- + +The Rancher UI supports creating, managing, and deleting HPAs. You can configure CPU or memory usage as the metric that the HPA uses to scale. + +If you want to create HPAs that scale based on other metrics than CPU and memory, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +## Creating an HPA + +1. From the **Global** view, open the project that you want to deploy a HPA to. + +1. Click **Resources > HPA.** + +1. Click **Add HPA.** + +1. Enter a **Name** for the HPA. + +1. Select a **Namespace** for the HPA. + +1. Select a **Deployment** as scale target for the HPA. + +1. Specify the **Minimum Scale** and **Maximum Scale** for the HPA. + +1. Configure the metrics for the HPA. You can choose memory or CPU usage as the metric that will cause the HPA to scale the service up or down. In the **Quantity** field, enter the percentage of the workload's memory or CPU usage that will cause the HPA to scale the service. To configure other HPA metrics, including metrics available from Prometheus, you need to [manage HPAs using kubectl](manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus). + +1. Click **Create** to create the HPA. + +> **Result:** The HPA is deployed to the chosen namespace. You can view the HPA's status from the project's Resources > HPA view. + +## Get HPA Metrics and Status + +1. From the **Global** view, open the project with the HPAs you want to look at. + +1. Click **Resources > HPA.** The **HPA** tab shows the number of current replicas. + +1. For more detailed metrics and status of a specific HPA, click the name of the HPA. This leads to the HPA detail page. + + +## Deleting an HPA + +1. From the **Global** view, open the project that you want to delete an HPA from. + +1. Click **Resources > HPA.** + +1. Find the HPA which you would like to delete. + +1. Click **⋮ > Delete**. + +1. Click **Delete** to confirm. + +> **Result:** The HPA is deleted from the current cluster. 
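+Behind the scenes, the Rancher UI creates a standard Kubernetes `HorizontalPodAutoscaler` object in the namespace you selected. If you want to inspect that object, keep it in version control, or extend it later with custom metrics via `kubectl`, you can export it as YAML. The names below (the `default` namespace and an HPA called `hello-world`) are assumptions for illustration; substitute the namespace and name you chose in the UI.
+
+```
+# Export the HPA that was created through the Rancher UI
+kubectl -n default get hpa hello-world -o yaml
+```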
diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md
new file mode 100644
index 0000000000..3188cf97d6
--- /dev/null
+++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl.md
@@ -0,0 +1,534 @@
+---
+title: Testing HPAs with kubectl
+weight: 3031
+aliases:
+  - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler/testing-hpa
+  - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/
+---
+
+This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (Rancher v2.3.x and later), refer to [Managing HPAs with the Rancher UI](manage-hpas-with-ui.md).
+
+For HPA to work correctly, service deployments should have resource request definitions for their containers. Follow this hello-world example to test if HPA is working correctly.
+
+1. Configure `kubectl` to connect to your Kubernetes cluster.
+
+1. Copy the `hello-world` deployment manifest below.
+
    + Hello World Manifest + + ``` + apiVersion: apps/v1beta2 + kind: Deployment + metadata: + labels: + app: hello-world + name: hello-world + namespace: default + spec: + replicas: 1 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + resources: + requests: + cpu: 500m + memory: 64Mi + ports: + - containerPort: 80 + protocol: TCP + restartPolicy: Always + --- + apiVersion: v1 + kind: Service + metadata: + name: hello-world + namespace: default + spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: hello-world + ``` + +
    + +1. Deploy it to your cluster. + + ``` + # kubectl create -f + ``` + +1. Copy one of the HPAs below based on the metric type you're using: + +
    + Hello World HPA: Resource Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 1000Mi + ``` + +
    +
    + Hello World HPA: Custom Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi + - type: Pods + pods: + metricName: cpu_system + targetAverageValue: 20m + ``` + +
    + +1. View the HPA info and description. Confirm that metric data is shown. + +
    + Resource Metrics + + 1. Enter the following commands. + ``` + # kubectl get hpa + NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE + hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m + # kubectl describe hpa + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 1253376 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    +
    + Custom Metrics + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive the output that follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 3514368 / 100Mi + "cpu_system" on pods: 0 / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    + +1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). + +1. Test that pod autoscaling works as intended.

    + **To Test Autoscaling Using Resource Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to two pods based on CPU Usage. + + 1. View your HPA. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10928128 / 100Mi + resource cpu on pods (as a percentage of request): 56% (280m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm you've scaled to two pods. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Upscale to 3 pods: CPU Usage Up to Target + + Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 9424896 / 100Mi + resource cpu on pods (as a percentage of request): 66% (333m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + ``` + 2. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-f46kh 0/1 Running 0 1m + hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10070016 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + +
    + + **To Test Autoscaling Using Custom Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale two pods based on CPU usage. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8159232 / 100Mi + "cpu_system" on pods: 7m / 20m + resource cpu on pods (as a percentage of request): 64% (321m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm two pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 3 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows: + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + ``` + 1. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + # kubectl get pods + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 4 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm four pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m + hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive similar output to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8101888 / 100Mi + "cpu_system" on pods: 8m / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + 1. Enter the following command to confirm a single pods is running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
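+As a reference for the "Generate a load" step above, a typical `hey` invocation is sketched below. The URL is an assumed example value; point it at whatever ingress or node port exposes the `hello-world` service in your cluster, and tune the concurrency and duration until CPU usage crosses the HPA target.
+
+```
+# Send sustained traffic to the hello-world service for 5 minutes
+# using 50 concurrent workers (the hostname is an assumed example)
+hey -z 5m -c 50 http://hello-world.default.34.220.18.140.sslip.io
+```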
    diff --git a/content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md similarity index 100% rename from content/rancher/v2.5/en/k8s-in-rancher/registries/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md new file mode 100644 index 0000000000..3d830e855a --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md @@ -0,0 +1,73 @@ +--- +title: Adding Ingresses to Your Project +description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress to your project +weight: 3042 +aliases: + - /rancher/v2.5/en/tasks/workloads/add-ingress/ + - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/ingress + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ingress/ +--- + +Ingress can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. + +1. From the **Global** view, open the project that you want to add ingress to. +1. Click **Resources** in the main navigation bar. Click the **Load Balancing** tab. Then click **Add Ingress**. +1. Enter a **Name** for the ingress. +1. Select an existing **Namespace** from the drop-down list. Alternatively, you can create a new namespace on the fly by clicking **Add to a new namespace**. +1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. +1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. + +**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. + + +# Ingress Rule Configuration + +- [Automatically generate a sslip.io hostname](#automatically-generate-a-sslip-io-hostname) +- [Specify a hostname to use](#specify-a-hostname-to-use) +- [Use as the default backend](#use-as-the-default-backend) +- [Certificates](#certificates) +- [Labels and Annotations](#labels-and-annotations) + +### Automatically generate a sslip.io hostname + +If you choose this option, ingress routes requests to hostname to a DNS name that's automatically generated. Rancher uses [sslip.io](http://sslip.io/) to automatically generates the DNS name. This option is best used for testing, _not_ production environments. + +>**Note:** To use this option, you must be able to resolve to `sslip.io` addresses. + +1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. +1. 
**Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. +1. Select a workload or service from the **Target** drop-down list for each target you've added. +1. Enter the **Port** number that each target operates on. + +### Specify a hostname to use + +If you use this option, ingress routes requests for a hostname to the service or workload that you specify. + +1. Enter the hostname that your ingress will handle request forwarding for. For example, `www.mysite.com`. +1. Add a **Target Backend**. By default, a workload is added to the ingress, but you can add more targets by clicking either **Service** or **Workload**. +1. **Optional:** If you want specify a workload or service when a request is sent to a particular hostname path, add a **Path** for the target. For example, if you want requests for `www.mysite.com/contact-us` to be sent to a different service than `www.mysite.com`, enter `/contact-us` in the **Path** field. Typically, the first rule that you create does not include a path. +1. Select a workload or service from the **Target** drop-down list for each target you've added. +1. Enter the **Port** number that each target operates on. + +### Use as the default backend + +Use this option to set an ingress rule for handling requests that don't match any other ingress rules. For example, use this option to route requests that can't be found to a `404` page. + +>**Note:** If you deployed Rancher using RKE, a default backend for 404s and 202s is already configured. + +1. Add a **Target Backend**. Click either **Service** or **Workload** to add the target. +1. Select a service or workload from the **Target** drop-down list. + +### Certificates +>**Note:** You must have an SSL certificate that the ingress can use to encrypt/decrypt communications. For more information see [Adding SSL Certificates](../encrypt-http-communication.md). + +1. Click **Add Certificate**. +1. Select a **Certificate** from the drop-down list. +1. Enter the **Host** using encrypted communication. +1. To add additional hosts that use the certificate, click **Add Hosts**. + +### Labels and Annotations + +Add [Labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) and/or [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to provide metadata for your ingress. + +For a list of annotations available for use, see the [Nginx Ingress Controller Documentation](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). 
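+The ingress rules described above correspond to a standard Kubernetes Ingress object, which you can also manage directly with `kubectl`. The sketch below is a rough, assumed example of a rule that forwards `www.mysite.com/contact-us` to a backend service and terminates TLS with a certificate stored as a secret; the resource names are placeholders, and on older clusters the `apiVersion` may need to be `networking.k8s.io/v1beta1` instead.
+
+```
+kubectl apply -f - <<'EOF'
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: mysite
+  namespace: default
+spec:
+  tls:
+  - hosts:
+    - www.mysite.com
+    secretName: mysite-cert   # certificate added under Resources > Secrets > Certificates
+  rules:
+  - host: www.mysite.com
+    http:
+      paths:
+      - path: /contact-us
+        pathType: Prefix
+        backend:
+          service:
+            name: contact-service
+            port:
+              number: 80
+EOF
+```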
\ No newline at end of file diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md new file mode 100644 index 0000000000..8da2331f9e --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md @@ -0,0 +1,68 @@ +--- +title: "Layer 4 and Layer 7 Load Balancing" +description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. Learn about the support for each way in different deployments" +weight: 3041 +aliases: + - /rancher/v2.5/en/concepts/load-balancing/ + - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/load-balancers/ +--- +Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. + +## Layer-4 Load Balancer + +Layer-4 load balancer (or the external load balancer) forwards traffic to Nodeports. Layer-4 load balancer allows you to forward both HTTP and TCP traffic. + +Often, the Layer-4 load balancer is supported by the underlying cloud provider, so when you deploy RKE clusters on bare-metal servers and vSphere clusters, Layer-4 load balancer is not supported. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or third-party ingress. + +> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE. + +### Support for Layer-4 Load Balancing + +Support for layer-4 load balancer varies based on the underlying cloud provider. + +Cluster Deployment | Layer-4 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GCE cloud provider +Azure AKS | Supported by Azure cloud provider +RKE on EC2 | Supported by AWS cloud provider +RKE on DigitalOcean | Limited NGINX or third-party Ingress* +RKE on vSphere | Limited NGINX or third party-Ingress* +RKE on Custom Hosts
    (e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) + +## Layer-7 Load Balancer + +Layer-7 load balancer (or the ingress controller) supports host and path-based load balancing and SSL termination. Layer-7 load balancer only forwards HTTP and HTTPS traffic and therefore they listen on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancer. In addition, RKE clusters deploys the Nginx Ingress Controller. + +### Support for Layer-7 Load Balancing + +Support for layer-7 load balancer varies based on the underlying cloud provider. + +Cluster Deployment | Layer-7 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GKE cloud provider +Azure AKS | Not Supported +RKE on EC2 | Nginx Ingress Controller +RKE on DigitalOcean | Nginx Ingress Controller +RKE on vSphere | Nginx Ingress Controller +RKE on Custom Hosts
    (e.g. bare-metal servers) | Nginx Ingress Controller + +### Host Names in Layer-7 Load Balancer + +Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. + +Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: + +1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposes by the Layer-7 load balancer. +2. Ask Rancher to generate an sslip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say a.b.c.d, and generate a host name `..a.b.c.d.sslip.io`. + +The benefit of using sslip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. + +## Related Links + +- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md new file mode 100644 index 0000000000..0a0688dd48 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md @@ -0,0 +1,48 @@ +--- +title: Secrets +weight: 3062 +aliases: + - /rancher/v2.5/en/tasks/projects/add-a-secret + - /rancher/v2.5/en/k8s-in-rancher/secrets + - /rancher/v2.x/en/k8s-in-rancher/secrets/ +--- + +[Secrets](https://kubernetes.io/docs/concepts/configuration/secret/#overview-of-secrets) store sensitive data like passwords, tokens, or keys. They may contain one or more key value pairs. + +> This page is about secrets in general. For details on setting up a private registry, refer to the section on [registries.](kubernetes-and-docker-registries.md) + +When configuring a workload, you'll be able to choose which secrets to include. Like config maps, secrets can be referenced by workloads as either an environment variable or a volume mount. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + +# Creating Secrets + +When creating a secret, you can make it available for any deployment within a project, or you can limit it to a single namespace. + +1. From the **Global** view, select the project containing the namespace(s) where you want to add a secret. + +2. From the main menu, select **Resources > Secrets**. Click **Add Secret**. + +3. Enter a **Name** for the secret. + + >**Note:** Kubernetes classifies secrets, certificates, and registries all as [secrets](https://kubernetes.io/docs/concepts/configuration/secret/), and no two secrets in a project or namespace can have duplicate names. 
Therefore, to prevent conflicts, your secret must have a unique name among all secrets within your workspace. + +4. Select a **Scope** for the secret. You can either make the registry available for the entire project or a single namespace. + +5. From **Secret Values**, click **Add Secret Value** to add a key value pair. Add as many values as you need. + + >**Tip:** You can add multiple key value pairs to the secret by copying and pasting. + > + > ![](/img/bulk-key-values.gif) + +1. Click **Save**. + +**Result:** Your secret is added to the project or namespace, depending on the scope you chose. You can view the secret in the Rancher UI from the **Resources > Secrets** view. + +Mounted secrets will be updated automatically unless they are mounted as subpath volumes. For details on how updated secrets are propagated, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/configuration/secret/#mounted-secrets-are-updated-automatically) + +# What's Next? + +Now that you have a secret added to the project or namespace, you can add it to a workload that you deploy. + +For more information on adding secret to a workload, see [Deploying Workloads](workloads-and-pods/deploy-workloads.md). diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md new file mode 100644 index 0000000000..ed9ce17e6d --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md @@ -0,0 +1,39 @@ +--- +title: Adding a Sidecar +weight: 3029 +aliases: + - /rancher/v2.5/en/tasks/workloads/add-a-sidecar/ + - /rancher/v2.5/en/k8s-in-rancher/workloads/add-a-sidecar + - /rancher/v2.x/en/k8s-in-rancher/workloads/add-a-sidecar/ +--- +A _sidecar_ is a container that extends or enhances the main container in a pod. The main container and the sidecar share a pod, and therefore share the same network space and storage. You can add sidecars to existing workloads by using the **Add a Sidecar** option. + +1. From the **Global** view, open the project running the workload you want to add a sidecar to. + +1. Click **Resources > Workloads.** + +1. Find the workload that you want to extend. Select **⋮ icon (...) > Add a Sidecar**. + +1. Enter a **Name** for the sidecar. + +1. Select a **Sidecar Type**. This option determines if the sidecar container is deployed before or after the main container is deployed. + + - **Standard Container:** + + The sidecar container is deployed after the main container. + + - **Init Container:** + + The sidecar container is deployed before the main container. + +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy in support of the main container. During deployment, Rancher pulls this image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears on Docker Hub. + +1. Set the remaining options. You can read about them in [Deploying Workloads](deploy-workloads.md). + +1. Click **Launch**. + +**Result:** The sidecar is deployed according to your parameters. Following its deployment, you can view the sidecar by selecting **⋮ icon (...) > Edit** for the main deployment. 
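+Under the hood, both sidecar types map onto fields of the workload's pod spec: a **Standard Container** becomes an additional entry in `containers`, and an **Init Container** becomes an entry in `initContainers`. The rough sketch below illustrates the result for a `hello-world` deployment; the sidecar names, images, and commands are assumptions for illustration only.
+
+```
+kubectl apply -f - <<'EOF'
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hello-world
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: hello-world
+  template:
+    metadata:
+      labels:
+        app: hello-world
+    spec:
+      # "Init Container" sidecar type: runs to completion before the main container starts
+      initContainers:
+      - name: init-config
+        image: busybox
+        command: ["sh", "-c", "echo preparing config"]
+      containers:
+      # Main container
+      - name: hello-world
+        image: rancher/hello-world
+        ports:
+        - containerPort: 80
+      # "Standard Container" sidecar type: runs alongside the main container
+      - name: log-tailer
+        image: busybox
+        command: ["sh", "-c", "tail -f /dev/null"]
+EOF
+```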
+ +## Related Links + +- [The Distributed System ToolKit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns/) diff --git a/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md new file mode 100644 index 0000000000..709e686d59 --- /dev/null +++ b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md @@ -0,0 +1,61 @@ +--- +title: Deploying Workloads +description: Read this step by step guide for deploying workloads. Deploy a workload to run an application in one or more containers. +weight: 3026 +aliases: + - /rancher/v2.5/en/tasks/workloads/deploy-workloads/ + - /rancher/v2.5/en/k8s-in-rancher/workloads/deploy-workloads + - /rancher/v2.x/en/k8s-in-rancher/workloads/deploy-workloads/ +--- + +Deploy a workload to run an application in one or more containers. + +1. From the **Global** view, open the project that you want to deploy a workload to. + +1. Click **Resources > Workloads.** From the **Workloads** view, click **Deploy**. + +1. Enter a **Name** for the workload. + +1. Select a [workload type](../../../../pages-for-subheaders/workloads-and-pods.md). The workload defaults to a scalable deployment, but you can change the workload type by clicking **More options.** + +1. From the **Docker Image** field, enter the name of the Docker image that you want to deploy to the project, optionally prefacing it with the registry host (e.g. `quay.io`, `registry.gitlab.com`, etc.). During deployment, Rancher pulls this image from the specified public or private registry. If no registry host is provided, Rancher will pull the image from [Docker Hub](https://hub.docker.com/explore/). Enter the name exactly as it appears in the registry server, including any required path, and optionally including the desired tag (e.g. `registry.gitlab.com/user/path/image:tag`). If no tag is provided, the `latest` tag will be automatically used. + +1. Either select an existing namespace, or click **Add to a new namespace** and enter a new namespace. + +1. Click **Add Port** to enter a port mapping, which enables access to the application inside and outside of the cluster. For more information, see [Services](../../../../pages-for-subheaders/workloads-and-pods.md#services). + +1. Configure the remaining options: + + - **Environment Variables** + + Use this section to either specify environment variables for your workload to consume on the fly, or to pull them from another source, such as a secret or [ConfigMap](../configmaps.md). + + - **Node Scheduling** + - **Health Check** + - **Volumes** + + Use this section to add storage for your workload. You can manually specify the volume that you want to add, use a persistent volume claim to dynamically create a volume for the workload, or read data for a volume to use from a file such as a [ConfigMap](../configmaps.md). + + When you are deploying a Stateful Set, you should use a Volume Claim Template when using Persistent Volumes. This will ensure that Persistent Volumes are created dynamically when you scale your Stateful Set.
+ + - **Scaling/Upgrade Policy** + + >**Amazon Note for Volumes:** + > + > To mount an Amazon EBS volume: + > + >- In [Amazon AWS](https://aws.amazon.com/), the nodes must be in the same Availability Zone and possess IAM permissions to attach/detach volumes. + > + >- The cluster must be using the [AWS cloud provider](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#aws) option. For more information on enabling this option, see [Creating an Amazon EC2 Cluster](../../kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) or [Creating a Custom Cluster](../../../../pages-for-subheaders/use-existing-nodes.md). + + +1. Click **Show Advanced Options** and configure: + + - **Command** + - **Networking** + - **Labels & Annotations** + - **Security and Host Config** + +1. Click **Launch**. + +**Result:** The workload is deployed to the chosen namespace. You can view the workload's status from the project's **Workloads** view. diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md similarity index 100% rename from content/rancher/v2.5/en/k8s-in-rancher/workloads/rollback-workloads/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md diff --git a/content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md b/versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md similarity index 100% rename from content/rancher/v2.5/en/k8s-in-rancher/workloads/upgrade-workloads/_index.md rename to versioned_docs/version-2.5/how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-authentication.md b/versioned_docs/version-2.5/pages-for-subheaders/about-authentication.md new file mode 100644 index 0000000000..0f00ff6a07 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-authentication.md @@ -0,0 +1,98 @@ +--- +title: Authentication +weight: 1115 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/authentication/ + - /rancher/v2.5/en/tasks/global-configuration/authentication/ + - /rancher/v2.x/en/admin-settings/authentication/ +--- + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows your users to use one set of credentials to authenticate with any of your Kubernetes clusters. + +This centralized user authentication is accomplished using the Rancher authentication proxy, which is installed along with the rest of Rancher. This proxy authenticates your users and forwards their requests to your Kubernetes clusters using a service account. + +## External vs. Local Authentication + +The Rancher authentication proxy integrates with the following external authentication services:
+ +| Auth Service | +| ------------------------------------------------------------------------------------------------ | +| [Microsoft Active Directory](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md) | +| [GitHub](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github.md) | +| [Microsoft Azure AD](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad.md) | +| [FreeIPA](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa.md) | +| [OpenLDAP](configure-openldap.md) | +| [Microsoft AD FS](configure-microsoft-ad-federation-service-saml.md) | +| [PingIdentity](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity.md) | +| [Keycloak](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak.md) | +| [Okta](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml.md) | +| [Google OAuth](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth.md) | +| [Shibboleth](configure-shibboleth-saml.md) | + +
    +However, Rancher also provides [local authentication](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users.md). + +In most cases, you should use an external authentication service over local authentication, as external authentication allows user management from a central location. However, you may want a few local authentication users for managing Rancher under rare circumstances, such as if your external authentication provider is unavailable or undergoing maintenance. + +## Users and Groups + +Rancher relies on users and groups to determine who is allowed to log in to Rancher and which resources they can access. When authenticating with an external provider, groups are provided from the external provider based on the user. These users and groups are given specific roles to resources like clusters, projects, multi-cluster apps, and global DNS providers and entries. When you give access to a group, all users who are a member of that group in the authentication provider will be able to access the resource with the permissions that you've specified. For more information on roles and permissions, see [Role Based Access Control](manage-role-based-access-control-rbac.md). + +> **Note:** Local authentication does not support creating or managing groups. + +For more information, see [Users and Groups](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups.md) + +## Scope of Rancher Authorization + +After you configure Rancher to allow sign on using an external authentication service, you should configure who should be allowed to log in and use Rancher. The following options are available: + +| Access Level | Description | +|----------------------------------------------|-------------| +| Allow any valid Users | _Any_ user in the authorization service can access Rancher. We generally discourage use of this setting! | +| Allow members of Clusters, Projects, plus Authorized Users and Organizations | Any user in the authorization service and any group added as a **Cluster Member** or **Project Member** can log in to Rancher. Additionally, any user in the authentication service or group you add to the **Authorized Users and Organizations** list may log in to Rancher. | +| Restrict access to only Authorized Users and Organizations | Only users in the authentication service or groups added to the Authorized Users and Organizations can log in to Rancher. | + +To set the Rancher access level for users in the authorization service, follow these steps: + +1. From the **Global** view, click **Security > Authentication.** + +1. Use the **Site Access** options to configure the scope of user authorization. The table above explains the access level for each option. + +1. Optional: If you choose an option other than **Allow any valid Users,** you can add users to the list of authorized users and organizations by searching for them in the text field that appears. + +1. Click **Save.** + +**Result:** The Rancher access configuration settings are applied. + +{{< saml_caveats >}} + +## External Authentication Configuration and Principal Users + +Configuration of external authentication requires: + +- A local user assigned the administrator role, called hereafter the _local principal_. +- An external user that can authenticate with your external authentication service, called hereafter the _external principal_. 
+ +Configuration of external authentication affects how principal users are managed within Rancher. Follow the list below to better understand these effects. + +1. Sign into Rancher as the local principal and complete configuration of external authentication. + + ![Sign In](/img/sign-in.png) + +2. Rancher associates the external principal with the local principal. These two users share the local principal's user ID. + + ![Principal ID Sharing](/img/principal-ID.png) + +3. After you complete configuration, Rancher automatically signs out the local principal. + + ![Sign Out Local Principal](/img/sign-out-local.png) + +4. Then, Rancher automatically signs you back in as the external principal. + + ![Sign In External Principal](/img/sign-in-external.png) + +5. Because the external principal and the local principal share an ID, no unique object for the external principal displays on the Users page. + + ![Sign In External Principal](/img/users-page.png) + +6. The external principal and the local principal share the same access rights. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md b/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md new file mode 100644 index 0000000000..dec80b0f83 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-provisioning-drivers.md @@ -0,0 +1,46 @@ +--- +title: Provisioning Drivers +weight: 1140 +aliases: + - /rancher/v2.x/en/admin-settings/drivers/ +--- + +Drivers in Rancher allow you to manage which providers can be used to deploy [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) to allow Rancher to deploy and manage Kubernetes. + +### Rancher Drivers + +With Rancher drivers, you can enable/disable existing built-in drivers that are packaged in Rancher. Alternatively, you can add your own driver if Rancher has not yet implemented it. + +There are two types of drivers within Rancher: + +* [Cluster Drivers](#cluster-drivers) +* [Node Drivers](#node-drivers) + +### Cluster Drivers + +Cluster drivers are used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md), such as GKE, EKS, AKS, etc.. The availability of which cluster driver to display when creating a cluster is defined based on the cluster driver's status. Only `active` cluster drivers will be displayed as an option for creating clusters for hosted Kubernetes clusters. By default, Rancher is packaged with several existing cluster drivers, but you can also create custom cluster drivers to add to Rancher. 
+ +By default, Rancher has activated several hosted Kubernetes cloud providers including: + +* [Amazon EKS](amazon-eks-permissions.md) +* [Google GKE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +* [Azure AKS](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) + +There are several other hosted Kubernetes cloud providers that are disabled by default, but are packaged in Rancher: + +* [Alibaba ACK](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +* [Huawei CCE](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) +* [Tencent](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) + +### Node Drivers + +Node drivers are used to provision hosts, which Rancher uses to launch and manage Kubernetes clusters. A node driver is the same as a [Docker Machine driver](https://docs.docker.com/machine/drivers/). The availability of which node driver to display when creating node templates is defined based on the node driver's status. Only `active` node drivers will be displayed as an option for creating node templates. By default, Rancher is packaged with many existing Docker Machine drivers, but you can also create custom node drivers to add to Rancher. + +If there are specific node drivers that you don't want to show to your users, you would need to de-activate these node drivers. + +Rancher supports several major cloud providers, but by default, these node drivers are active and available for deployment: + +* [Amazon EC2](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md) +* [Azure](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster.md) +* [Digital Ocean](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster.md) +* [vSphere](vsphere.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md b/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md new file mode 100644 index 0000000000..2d4f471a6f --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-rke1-templates.md @@ -0,0 +1,127 @@ +--- +title: RKE Templates +weight: 7010 +aliases: + - /rancher/v2.x/en/admin-settings/rke-templates/ +--- + +RKE templates are designed to allow DevOps and security teams to standardize and simplify the creation of Kubernetes clusters. + +RKE is the [Rancher Kubernetes Engine,](https://rancher.com/docs/rke/latest/en/) which is the tool that Rancher uses to provision Kubernetes clusters. + +With Kubernetes increasing in popularity, there is a trend toward managing a larger number of smaller clusters. When you want to create many clusters, it’s more important to manage them consistently. Multi-cluster management comes with challenges to enforcing security and add-on configurations that need to be standardized before turning clusters over to end users. 
+ +RKE templates help standardize these configurations. Regardless of whether clusters are created with the Rancher UI, the Rancher API, or an automated process, Rancher will guarantee that every cluster it provisions from an RKE template is uniform and consistent in the way it is produced. + +Admins control which cluster options can be changed by end users. RKE templates can also be shared with specific users and groups, so that admins can create different RKE templates for different sets of users. + +If a cluster was created with an RKE template, you can't change it to a different RKE template. You can only update the cluster to a new revision of the same template. + +You can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. The new template can also be used to launch new clusters. + +The core features of RKE templates allow DevOps and security teams to: + +- Standardize cluster configuration and ensure that Rancher-provisioned clusters are created following best practices +- Prevent less technical users from making uninformed choices when provisioning clusters +- Share different templates with different sets of users and groups +- Delegate ownership of templates to users who are trusted to make changes to them +- Control which users can create templates +- Require users to create clusters from a template + +# Configurable Settings + +RKE templates can be created in the Rancher UI or defined in YAML format. They can define all the same parameters that can be specified when you use Rancher to provision custom nodes or nodes from an infrastructure provider: + +- Cloud provider options +- Pod security options +- Network providers +- Ingress controllers +- Network security configuration +- Network plugins +- Private registry URL and credentials +- Add-ons +- Kubernetes options, including configurations for Kubernetes components such as kube-api, kube-controller, kubelet, and services + +The [add-on section](#add-ons) of an RKE template is especially powerful because it allows a wide range of customization options. + +# Scope of RKE Templates + +RKE templates are supported for Rancher-provisioned clusters. The templates can be used to provision custom clusters or clusters that are launched by an infrastructure provider. + +RKE templates are for defining Kubernetes and Rancher settings. Node templates are responsible for configuring nodes. For tips on how to use RKE templates in conjunction with hardware, refer to [RKE Templates and Hardware](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. 
+ +The settings of an existing cluster can be [saved as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) This creates a new template and binds the cluster settings to the template, so that the cluster can only be upgraded if the [template is updated](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#updating-a-template), and the cluster is upgraded to [use a newer version of the template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) The new template can also be used to create new clusters. + + +# Example Scenarios +When an organization has both basic and advanced Rancher users, administrators might want to give the advanced users more options for cluster creation, while restricting the options for basic users. + +These [example scenarios](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md) describe how an organization could use templates to standardize cluster creation. + +Some of the example scenarios include the following: + +- **Enforcing templates:** Administrators might want to [enforce one or more template settings for everyone](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#enforcing-a-template-setting-for-everyone) if they want all new Rancher-provisioned clusters to have those settings. +- **Sharing different templates with different users:** Administrators might give [different templates to basic and advanced users,](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#templates-for-basic-and-advanced-users) so that basic users can have more restricted options and advanced users can use more discretion when creating clusters. +- **Updating template settings:** If an organization's security and DevOps teams decide to embed best practices into the required settings for new clusters, those best practices could change over time. If the best practices change, [a template can be updated to a new revision](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#updating-templates-and-clusters-created-with-them) and clusters created from the template can [upgrade to the new version](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md#upgrading-a-cluster-to-use-a-new-template-revision) of the template. 
+- **Sharing ownership of a template:** When a template owner no longer wants to maintain a template, or wants to share ownership of the template, this scenario describes how [template ownership can be shared.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases.md#allowing-other-users-to-control-and-share-a-template) + +# Template Management + +When you create an RKE template, it is available in the Rancher UI from the **Global** view under **Tools > RKE Templates.** When you create a template, you become the template owner, which gives you permission to revise and share the template. You can share the RKE templates with specific users or groups, and you can also make it public. + +Administrators can turn on template enforcement to require users to always use RKE templates when creating a cluster. This allows administrators to guarantee that Rancher always provisions clusters with specific settings. + +RKE template updates are handled through a revision system. If you want to change or update a template, you create a new revision of the template. Then a cluster that was created with the older version of the template can be upgraded to the new template revision. + +In an RKE template, settings can be restricted to what the template owner chooses, or they can be open for the end user to select the value. The difference is indicated by the **Allow User Override** toggle over each setting in the Rancher UI when the template is created. + +For the settings that cannot be overridden, the end user will not be able to directly edit them. In order for a user to get different options of these settings, an RKE template owner would need to create a new revision of the RKE template, which would allow the user to upgrade and change that option. + +The documents in this section explain the details of RKE template management: + +- [Getting permission to create templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions.md) +- [Creating and revising templates](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates.md) +- [Enforcing template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates.md#requiring-new-clusters-to-use-an-rke-template) +- [Overriding template settings](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings.md) +- [Sharing templates with cluster creators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-templates-with-specific-users-or-groups) +- [Sharing ownership of a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md#sharing-ownership-of-templates) + +An [example YAML configuration file for a template](../reference-guides/rke1-template-example-yaml.md) is provided for reference. 
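As a rough orientation before diving into those documents, the sketch below shows the general shape of the cluster configuration carried by a template revision, with the RKE options nested under `rancher_kubernetes_engine_config` as described in the YAML Customization section later on this page. Every value shown is an illustrative placeholder; treat the linked example YAML and the RKE cluster configuration docs as the authoritative reference.

```yaml
# Illustrative sketch of the cluster configuration portion of an RKE template
# revision. All field values are placeholders.
rancher_kubernetes_engine_config:
  kubernetes_version: v1.20.15-rancher1-1   # placeholder version string
  network:
    plugin: canal
  ingress:
    provider: nginx
  services:
    etcd:
      snapshot: true
      retention: 24h
      creation: 6h
  addons: |-
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: team-tools
```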
+ +# Applying Templates + +You can [create a cluster from a template](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#creating-a-cluster-from-an-rke-template) that you created, or from a template that has been [shared with you.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates.md) + +If the RKE template owner creates a new revision of the template, you can [upgrade your cluster to that revision.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#updating-a-cluster-created-with-an-rke-template) + +RKE templates can be created from scratch to pre-define cluster configuration. They can be applied to launch new clusters, or templates can also be exported from existing running clusters. + +You can [save the configuration of an existing cluster as an RKE template.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates.md#converting-an-existing-cluster-to-use-an-rke-template) Then the cluster's settings can only be changed if the template is updated. + +# Standardizing Hardware + +RKE templates are designed to standardize Kubernetes and Rancher settings. If you want to standardize your infrastructure as well, you use RKE templates [in conjunction with other tools](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure.md). + +# YAML Customization + +If you define an RKE template as a YAML file, you can modify this [example RKE template YAML](../reference-guides/rke1-template-example-yaml.md). The YAML in the RKE template uses the same customization that Rancher uses when creating an RKE cluster, but since the YAML is located within the context of a Rancher provisioned cluster, you will need to nest the RKE template customization under the `rancher_kubernetes_engine_config` directive in the YAML. + +The RKE documentation also has [annotated](https://rancher.com/docs/rke/latest/en/example-yamls/) `cluster.yml` files that you can use for reference. + +For guidance on available options, refer to the RKE documentation on [cluster configuration.](https://rancher.com/docs/rke/latest/en/config-options/) + +### Add-ons + +The add-on section of the RKE template configuration file works the same way as the [add-on section of a cluster configuration file](https://rancher.com/docs/rke/latest/en/config-options/add-ons/). + +The user-defined add-ons directive allows you to either call out and pull down Kubernetes manifests or put them inline directly. If you include these manifests as part of your RKE template, Rancher will provision those in the cluster. + +Some things you could do with add-ons include: + +- Install applications on the Kubernetes cluster after it starts +- Install plugins on nodes that are deployed with a Kubernetes daemonset +- Automatically set up namespaces, service accounts, or role binding + +The RKE template configuration must be nested within the `rancher_kubernetes_engine_config` directive. 
To set add-ons, when creating the template, you will click **Edit as YAML.** Then use the `addons` directive to add a manifest, or the `addons_include` directive to set which YAML files are used for the add-ons. For more information on custom add-ons, refer to the [user-defined add-ons documentation.](https://rancher.com/docs/rke/latest/en/config-options/add-ons/user-defined-add-ons/) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md b/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md new file mode 100644 index 0000000000..65a0b42a06 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/about-the-api.md @@ -0,0 +1,68 @@ +--- +title: API +weight: 24 +aliases: + - /rancher/v2.x/en/api/ +--- + +## How to use the API + +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it, click on your user avatar in the upper right corner. Under **API & Keys**, you can find the URL endpoint as well as create [API keys](../reference-guides/user-settings/api-keys.md). + +## Authentication + +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys](../reference-guides/user-settings/api-keys.md). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page](../reference-guides/about-the-api/api-tokens.md). + +## Making requests + +The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). + +- Every type has a Schema which describes: + - The URL to get to the collection of this type of resources + - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. + - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). + - Every field that filtering is allowed on + - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. + + +- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. + +- In practice, you will probably just want to construct URL strings. 
We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. + +- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. + +- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. + +- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. + +- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. + +- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). + +## Filtering + +Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. + +## Sorting + +Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. + +## Pagination + +API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. + +## Capturing Rancher API Calls + +You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster: + +1. In the Rancher UI, go to **Cluster Management** and click **Create.** +1. Click one of the cluster types. This example uses Digital Ocean. +1. Fill out the form with a cluster name and node template, but don't click **Create**. +1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.** +1. In the developer tools, click the **Network** tab. +1. On the **Network** tab, make sure **Fetch/XHR** is selected. +1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`. +1. 
Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.** +1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials. \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md new file mode 100644 index 0000000000..defcce8474 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/access-clusters.md @@ -0,0 +1,34 @@ +--- +title: Cluster Access +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-access/ +--- + +This section is about what tools can be used to access clusters managed by Rancher. + +For information on how to give users permission to access a cluster, see the section on [adding users to clusters.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +For more information on roles-based access control, see [this section.](manage-role-based-access-control-rbac.md) + +For information on how to set up an authentication system, see [this section.](about-authentication.md) + + +### Rancher UI + +Rancher provides an intuitive user interface for interacting with your clusters. All options available in the UI use the Rancher API. Therefore any action possible in the UI is also possible in the Rancher CLI or Rancher API. + +### kubectl + +You can use the Kubernetes command-line tool, [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), to manage your clusters. You have two options for using kubectl: + +- **Rancher kubectl shell:** Interact with your clusters by launching a kubectl shell available in the Rancher UI. This option requires no configuration actions on your part. For more information, see [Accessing Clusters with kubectl Shell](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). +- **Terminal remote connection:** You can also interact with your clusters by installing [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your local desktop and then copying the cluster's kubeconfig file to your local `~/.kube/config` directory. For more information, see [Accessing Clusters with kubectl and a kubeconfig File](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md). + +### Rancher CLI + +You can control your clusters by downloading Rancher's own command-line interface, [Rancher CLI](cli-with-rancher.md). This CLI tool can interact directly with different clusters and projects or pass them `kubectl` commands. + +### Rancher API + +Finally, you can interact with your clusters over the Rancher API. Before you use the API, you must obtain an [API key](../reference-guides/user-settings/api-keys.md). To view the different resource fields and actions for an API object, open the API UI, which can be accessed by clicking on **View in API** for any Rancher UI object. 
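For orientation, a kubeconfig file downloaded from Rancher for the kubectl options above has roughly the following shape. The server URL, cluster ID, and token below are placeholders; always download the actual file from the cluster view in the Rancher UI rather than assembling it by hand.

```yaml
# Rough shape of a kubeconfig downloaded from Rancher; the server URL,
# cluster ID, and token are placeholders.
apiVersion: v1
kind: Config
clusters:
  - name: my-cluster
    cluster:
      server: https://rancher.example.com/k8s/clusters/c-xxxxx
users:
  - name: my-cluster
    user:
      token: kubeconfig-user-abcde:placeholder-token-value
contexts:
  - name: my-cluster
    context:
      cluster: my-cluster
      user: my-cluster
current-context: my-cluster
```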
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md new file mode 100644 index 0000000000..e2386f978a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/advanced-configuration.md @@ -0,0 +1,16 @@ +--- +title: Advanced Configuration +weight: 500 +--- + +### Alertmanager + +For information on configuring the Alertmanager custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Prometheus + +For information on configuring the Prometheus custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) + +### PrometheusRules + +For information on configuring the Prometheus custom resource, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) \ No newline at end of file diff --git a/content/rancher/v2.5/en/installation/resources/advanced/_index.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/advanced/_index.md rename to versioned_docs/version-2.5/pages-for-subheaders/advanced-options.md diff --git a/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/advanced-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md b/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md new file mode 100644 index 0000000000..5657bf9033 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/air-gapped-helm-cli-install.md @@ -0,0 +1,32 @@ +--- +title: Air Gapped Helm CLI Install +weight: 1 +aliases: + - /rancher/v2.5/en/installation/air-gap-installation/ + - /rancher/v2.5/en/installation/air-gap-high-availability/ + - /rancher/v2.5/en/installation/air-gap-single-node/ + - /rancher/v2.x/en/installation/other-installation-methods/air-gap/ +--- + +This section is about using the Helm CLI to install the Rancher server in an air gapped environment. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +The installation steps differ depending on whether Rancher is installed on an RKE Kubernetes cluster, a K3s Kubernetes cluster, or a single Docker container. + +For more information on each installation option, refer to [this page.](installation-and-upgrade.md) + +Throughout the installation instructions, there will be _tabs_ for each installation option. + +> **Important:** If you install Rancher following the Docker installation guide, there is no upgrade path to transition your Docker Installation to a Kubernetes Installation. + +# Installation Outline + +1. [Set up infrastructure and private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) +2. 
[Collect and publish images to your private registry](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images.md) +3. [Set up a Kubernetes cluster (Skip this step for Docker installations)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes.md) +4. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +# Upgrades + +To upgrade Rancher with Helm CLI in an air gap environment, follow [this procedure.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades.md) + +### [Next: Prepare your Node(s)](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/amazon-eks-permissions.md b/versioned_docs/version-2.5/pages-for-subheaders/amazon-eks-permissions.md new file mode 100644 index 0000000000..60d6b4258f --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/amazon-eks-permissions.md @@ -0,0 +1,118 @@ +--- +title: Creating an EKS Cluster +shortTitle: Amazon EKS +weight: 2110 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-eks/ + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/eks/ +--- +Amazon EKS provides a managed control plane for your Kubernetes cluster. Amazon EKS runs the Kubernetes control plane instances across multiple Availability Zones to ensure high availability. Rancher provides an intuitive user interface for managing and deploying the Kubernetes clusters you run in Amazon EKS. With this guide, you will use Rancher to quickly and easily launch an Amazon EKS Kubernetes cluster in your AWS account. For more information on Amazon EKS, see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/what-is-eks.html). + +- [Prerequisites in Amazon Web Services](#prerequisites-in-amazon-web-services) + - [Amazon VPC](#amazon-vpc) + - [IAM Policies](#iam-policies) +- [Create the EKS Cluster](#create-the-eks-cluster) +- [EKS Cluster Configuration Reference](#eks-cluster-configuration-reference) +- [Architecture](#architecture) +- [AWS Service Events](#aws-service-events) +- [Security and Compliance](#security-and-compliance) +- [Tutorial](#tutorial) +- [Minimum EKS Permissions](#minimum-eks-permissions) +- [Syncing](#syncing) +- [Troubleshooting](#troubleshooting) +# Prerequisites in Amazon Web Services + +>**Note** +>Deploying to Amazon AWS will incur charges. For more information, refer to the [EKS pricing page](https://aws.amazon.com/eks/pricing/). + +To set up a cluster on EKS, you will need to set up an Amazon VPC (Virtual Private Cloud). You will also need to make sure that the account you will be using to create the EKS cluster has the appropriate [permissions.](#minimum-eks-permissions) For details, refer to the official guide on [Amazon EKS Prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html#eks-prereqs). + +### Amazon VPC + +An Amazon VPC is required to launch the EKS cluster. The VPC enables you to launch AWS resources into a virtual network that you've defined. You can set one up yourself and provide it during cluster creation in Rancher. 
If you do not provide one during creation, Rancher will create one. For more information, refer to the [Tutorial: Creating a VPC with Public and Private Subnets for Your Amazon EKS Cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html). + +### IAM Policies + +Rancher needs access to your AWS account in order to provision and administer your Kubernetes clusters in Amazon EKS. You'll need to create a user for Rancher in your AWS account and define what that user can access. + +1. Create a user with programmatic access by following the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). + +2. Next, create an IAM policy that defines what this user has access to in your AWS account. It's important to only grant this user minimal access within your account. The minimum permissions required for an EKS cluster are listed [here.](#minimum-eks-permissions) Follow the steps [here](https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html) to create an IAM policy and attach it to your user. + +3. Finally, follow the steps [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) to create an access key and secret key for this user. + +> **Note:** It's important to regularly rotate your access and secret keys. See this [documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#rotating_access_keys_console) for more information. + +For more detailed information on IAM policies for EKS, refer to the official [documentation on Amazon EKS IAM Policies, Roles, and Permissions](https://docs.aws.amazon.com/eks/latest/userguide/IAM_policies.html). + + +# Create the EKS Cluster + +Use Rancher to set up and configure your Kubernetes cluster. + +1. From the **Clusters** page, click **Add Cluster**. + +1. Choose **Amazon EKS**. + +1. Enter a **Cluster Name.** + +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +1. Fill out the rest of the form. For help, refer to the [configuration reference.](#eks-cluster-configuration-reference) + +1. Click **Create**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +# EKS Cluster Configuration Reference + +For the full list of EKS cluster configuration options, see [this page.](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md) + +# Architecture + +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two Kubernetes clusters: one created by RKE and another created by EKS. + +
Managing Kubernetes Clusters through Rancher's Authentication Proxy
    + +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +# AWS Service Events + +To find information on any AWS Service events, please see [this page](https://status.aws.amazon.com/). + +# Security and Compliance + +By default only the IAM user or role that created a cluster has access to it. Attempting to access the cluster with any other user or role without additional configuration will lead to an error. In Rancher, this means using a credential that maps to a user or role that was not used to create the cluster will cause an unauthorized error. For example, an EKSCtl cluster will not register in Rancher unless the credentials used to register the cluster match the role or user used by EKSCtl. Additional users and roles can be authorized to access a cluster by being added to the aws-auth configmap in the kube-system namespace. For a more in-depth explanation and detailed instructions, please see this [documentation](https://aws.amazon.com/premiumsupport/knowledge-center/amazon-eks-cluster-access/). + +For more information on security and compliance with your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/shared-responsibilty.html). + +# Tutorial + +This [tutorial](https://aws.amazon.com/blogs/opensource/managing-eks-clusters-rancher/) on the AWS Open Source Blog will walk you through how to set up an EKS cluster with Rancher, deploy a publicly accessible app to test the cluster, and deploy a sample project to track real-time geospatial data using a combination of other open-source software such as Grafana and InfluxDB. + +# Minimum EKS Permissions + +See [this page](../reference-guides/amazon-eks-permissions/minimum-eks-permissions.md) for the minimum set of permissions necessary to use all functionality of the EKS driver in Rancher. + +# Syncing + +The EKS provisioner can synchronize the state of an EKS cluster between Rancher and the provider. For an in-depth technical explanation of how this works, see [Syncing.](../reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md) + +For information on configuring the refresh interval, refer to [this section.](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md#configuring-the-refresh-interval) + +# Troubleshooting + +If your changes were overwritten, it could be due to the way the cluster data is synced with EKS. Changes shouldn't be made to the cluster from another source, such as in the EKS console, and in Rancher within a five-minute span. For information on how this works and how to configure the refresh interval, refer to [Syncing.](#syncing) + +If an unauthorized error is returned while attempting to modify or register the cluster and the cluster was not created with the role or user that your credentials belong to, refer to [Security and Compliance.](#security-and-compliance) + +For any issues or troubleshooting details for your Amazon EKS Kubernetes cluster, please see this [documentation](https://docs.aws.amazon.com/eks/latest/userguide/troubleshooting.html). 
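As a concrete illustration of the `aws-auth` approach mentioned under Security and Compliance above, the sketch below grants an additional IAM user access to the cluster. The account ID, user name, and group are placeholders; follow the linked AWS documentation for the exact procedure.

```yaml
# Illustrative aws-auth ConfigMap entry granting an additional IAM user
# cluster-admin access. Account ID, user name, and groups are placeholders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapUsers: |
    - userarn: arn:aws:iam::111122223333:user/rancher-admin
      username: rancher-admin
      groups:
        - system:masters
```

In practice you would append an entry to the existing ConfigMap (for example with `kubectl edit configmap aws-auth -n kube-system`) rather than replacing it, so the node role mappings it already contains are preserved.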
\ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md b/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/authentication-config.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md new file mode 100644 index 0000000000..52a91d138a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/authentication-permissions-and-global-configuration.md @@ -0,0 +1,57 @@ +--- +title: Authentication, Permissions and Global Configuration +weight: 6 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/ + - /rancher/v2.5/en/tasks/global-configuration/ + - /rancher/v2.5/en/concepts/global-configuration/server-url/ + - /rancher/v2.5/en/tasks/global-configuration/server-url/ + - /rancher/v2.5/en/admin-settings/log-in/ + - /rancher/v2.x/en/admin-settings/ +--- + +After installation, the [system administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md) should configure Rancher to configure authentication, authorization, security, default settings, security policies, drivers and global DNS entries. + +## First Log In + +After you log into Rancher for the first time, Rancher will prompt you for a **Rancher Server URL**.You should set the URL to the main entry point to the Rancher Server. When a load balancer sits in front a Rancher Server cluster, the URL should resolve to the load balancer. The system will automatically try to infer the Rancher Server URL from the IP address or host name of the host running the Rancher Server. This is only correct if you are running a single node Rancher Server installation. In most cases, therefore, you need to set the Rancher Server URL to the correct value yourself. + +>**Important!** After you set the Rancher Server URL, we do not support updating it. Set the URL with extreme care. + +## Authentication + +One of the key features that Rancher adds to Kubernetes is centralized user authentication. This feature allows to set up local users and/or connect to an external authentication provider. By connecting to an external authentication provider, you can leverage that provider's user and groups. + +For more information how authentication works and how to configure each provider, see [Authentication](about-authentication.md). + +## Authorization + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by the user's role. Rancher provides built-in roles to allow you to easily configure a user's permissions to resources, but Rancher also provides the ability to customize the roles for each Kubernetes resource. + +For more information how authorization works and how to customize roles, see [Roles Based Access Control (RBAC)](manage-role-based-access-control-rbac.md). + +## Pod Security Policies + +_Pod Security Policies_ (or PSPs) are objects that control security-sensitive aspects of pod specification, e.g. root privileges. 
If a pod does not meet the conditions specified in the PSP, Kubernetes will not allow it to start, and Rancher will display an error message. + +For more information on how to create and use PSPs, see [Pod Security Policies](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md). + +## Provisioning Drivers + +Drivers in Rancher allow you to manage which providers can be used to provision [hosted Kubernetes clusters](set-up-clusters-from-hosted-kubernetes-providers.md) or [nodes in an infrastructure provider](use-new-nodes-in-an-infra-provider.md) so that Rancher can deploy and manage Kubernetes. + +For more information, see [Provisioning Drivers](about-provisioning-drivers.md). + +## Adding Kubernetes Versions into Rancher + +With this feature, you can upgrade to the latest version of Kubernetes as soon as it is released, without upgrading Rancher. This feature allows you to easily upgrade Kubernetes patch versions (i.e. `v1.15.X`), but it is not intended for upgrading Kubernetes minor versions (i.e. `v1.X.0`), as Kubernetes tends to deprecate or add APIs between minor versions. + +The information that Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md) is now located in the Rancher Kubernetes Metadata. For details on metadata configuration and how to change the Kubernetes version used for provisioning RKE clusters, see [Rancher Kubernetes Metadata.](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md) + +Rancher Kubernetes Metadata contains Kubernetes version information which Rancher uses to provision [RKE clusters](launch-kubernetes-with-rancher.md). + +For more information on how metadata works and how to configure it, see [Rancher Kubernetes Metadata](../getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher.md). + +## Enabling Experimental Features + +Rancher includes some features that are experimental and disabled by default. Feature flags were introduced to allow you to try these features. For more information, refer to the section about [feature flags.](./enable-experimental-features.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md new file mode 100644 index 0000000000..971d98e865 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-and-disaster-recovery.md @@ -0,0 +1,122 @@ +--- +title: Backups and Disaster Recovery +weight: 5 +aliases: + - /rancher/v2.5/en/backups/v2.5 + - /rancher/v2.x/en/backups/v2.5/ +--- + +In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. + +As of Rancher v2.5, the `rancher-backup` operator is used to back up and restore Rancher. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.5/charts/rancher-backup) + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. + +> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup was made.
+ +- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) + - [Backup and Restore for Rancher v2.5 installed with Docker](#backup-and-restore-for-rancher-v2-5-installed-with-docker) +- [How Backups and Restores Work](#how-backups-and-restores-work) +- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) + - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) + - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) + - [RBAC](#rbac) +- [Backing up Rancher](#backing-up-rancher) +- [Restoring Rancher](#restoring-rancher) +- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) +- [Default Storage Location Configuration](#default-storage-location-configuration) + - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) + +# Changes in Rancher v2.5 + +The new `rancher-backup` operator allows Rancher to be backed up and restored on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. + +Previously, the way that cluster data was backed up depended on the type of Kubernetes cluster that was used. + +In Rancher v2.4, it was only supported to install Rancher on two types of Kubernetes clusters: an RKE cluster, or a K3s cluster with an external database. If Rancher was installed on an RKE cluster, RKE would be used to take a snapshot of the etcd database and restore the cluster. If Rancher was installed on a K3s cluster with an external database, the database would need to be backed up and restored using the upstream documentation for the database. + +In Rancher v2.5, it is now supported to install Rancher hosted Kubernetes clusters, such as Amazon EKS clusters, which do not expose etcd to a degree that would allow snapshots to be created by an external tool. etcd doesn't need to be exposed for `rancher-backup` to work, because the operator gathers resources by making calls to `kube-apiserver`. + +### Backup and Restore for Rancher v2.5 installed with Docker + +For Rancher installed with Docker, refer to the same steps used up till 2.5 for [backups](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) and [restores.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md) + +# How Backups and Restores Work + +The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. The following cluster-scoped custom resource definitions are added to the cluster: + +- `backups.resources.cattle.io` +- `resourcesets.resources.cattle.io` +- `restores.resources.cattle.io` + +The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. + +When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. + +The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. 
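As a hedged illustration of that flow, a Backup custom resource might look like the sketch below. The resource name and the S3 details are placeholder assumptions; `resourceSetName: rancher-resource-set` refers to the predefined ResourceSet described above, and the backup configuration reference linked later on this page remains the authoritative field list.

```yaml
# Minimal sketch of a one-time Backup; the storage details are example values.
apiVersion: resources.cattle.io/v1
kind: Backup
metadata:
  name: rancher-backup-example            # hypothetical name
spec:
  resourceSetName: rancher-resource-set   # the predefined ResourceSet mentioned above
  storageLocation:
    s3:
      credentialSecretName: s3-creds        # assumed Secret holding S3 credentials
      credentialSecretNamespace: default
      bucketName: rancher-backups           # example bucket
      folder: rancher
      region: us-west-2
      endpoint: s3.us-west-2.amazonaws.com
```

If `storageLocation` is omitted, the operator is expected to fall back to the default storage location configured when the chart was installed (see the storage configuration section below).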
+ +When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. + +The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. + +# Installing the rancher-backup Operator + +The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin only feature and available only for the **local** cluster. (*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) + +>**NOTE:** There are two known issues in Fleet that occur after performing a restoration using the backup-restore-operator: Fleet agents are inoperable and clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md#troubleshooting) for workarounds. + +### Installing rancher-backup with the Rancher UI + +1. In the Rancher UI's Cluster Manager, choose the cluster named **local** +1. On the upper-right click on the **Cluster Explorer.** +1. Click **Apps.** +1. Click the `rancher-backup` operator. +1. Optional: Configure the default storage location. For help, refer to the [configuration section.](../reference-guides/backup-restore-configuration/storage-configuration.md) + +**Result:** The `rancher-backup` operator is installed. + +From the **Cluster Explorer,** you can see the `rancher-backup` operator listed under **Deployments.** + +To configure the backup app in Rancher, click **Cluster Explorer** in the upper left corner and click **Rancher Backups.** + +### Installing rancher-backup with the Helm CLI + +Install the backup app as a Helm chart: + +``` +helm repo add rancher-charts https://charts.rancher.io +helm repo update +helm install rancher-backup-crd rancher-charts/rancher-backup-crd -n cattle-resources-system --create-namespace +helm install rancher-backup rancher-charts/rancher-backup -n cattle-resources-system +``` + +### RBAC + +Only the rancher admins and the local cluster’s cluster-owner can: + +* Install the Chart +* See the navigation links for Backup and Restore CRDs +* Perform a backup or restore by creating a Backup CR and Restore CR respectively +* List backups/restores performed so far + +# Backing up Rancher + +A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) + +# Restoring Rancher + +A restore is performed by creating a Restore custom resource. For a tutorial, refer to [this page.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher.md) + +# Migrating Rancher to a New Cluster + +A migration is performed by following [these steps.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +# Default Storage Location Configuration + +Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. 
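As a rough sketch only, and assuming the chart exposes an `s3` block as shown in its documented example, a default S3 storage location set at install time might look like this in the chart's `values.yaml`; every value below is a placeholder.

```yaml
# Hedged example of rancher-backup chart values for a default S3 storage location.
s3:
  enabled: true
  credentialSecretName: s3-creds          # assumed Secret with the S3 access keys
  credentialSecretNamespace: default
  bucketName: rancher-backups             # example bucket name
  folder: rancher
  region: us-west-2
  endpoint: s3.us-west-2.amazonaws.com
```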
+ +For information on configuring these options, refer to [this page.](../reference-guides/backup-restore-configuration/storage-configuration.md) + +### Example values.yaml for the rancher-backup Helm Chart + +The example [values.yaml file](../reference-guides/backup-restore-configuration/storage-configuration.md#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md new file mode 100644 index 0000000000..a75e1288ff --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/backup-restore-configuration.md @@ -0,0 +1,13 @@ +--- +title: Rancher Backup Configuration Reference +shortTitle: Configuration +weight: 4 +aliases: + - /rancher/v2.5/en/backups/v2.5/configuration + - /rancher/v2.x/en/backups/v2.5/configuration/ +--- + +- [Backup configuration](../reference-guides/backup-restore-configuration/backup-configuration.md) +- [Restore configuration](../reference-guides/backup-restore-configuration/restore-configuration.md) +- [Storage location configuration](../reference-guides/backup-restore-configuration/storage-configuration.md) +- [Example Backup and Restore Custom Resources](../reference-guides/backup-restore-configuration/examples.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md b/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md new file mode 100644 index 0000000000..03b7c634f4 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/best-practices.md @@ -0,0 +1,24 @@ +--- +title: Best Practices Guide +weight: 4 +aliases: + - /rancher/v2.5/en/best-practices/v2.5 + - /rancher/v2.x/en/best-practices/ + - /rancher/v2.x/en/best-practices/v2.5/ +--- + +The purpose of this section is to consolidate best practices for Rancher implementations. This also includes recommendations for related technologies, such as Kubernetes, Docker, containers, and more. The objective is to improve the outcome of a Rancher implementation using the operational experience of Rancher and its customers. + +If you have any questions about how these might apply to your use case, please contact your Customer Success Manager or Support. + +Use the navigation bar on the left to find the current best practices for managing and deploying the Rancher Server. 
+ +For more guidance on best practices, you can consult these resources: + +- [Security](rancher-security.md) +- [Rancher Blog](https://rancher.com/blog/) + - [Articles about best practices on the Rancher blog](https://rancher.com/tags/best-practices/) + - [101 More Security Best Practices for Kubernetes](https://rancher.com/blog/2019/2019-01-17-101-more-kubernetes-security-best-practices/) +- [Rancher Forum](https://forums.rancher.com/) +- [Rancher Users Slack](https://slack.rancher.io/) +- [Rancher Labs YouTube Channel - Online Meetups, Demos, Training, and Webinars](https://www.youtube.com/channel/UCh5Xtp82q8wjijP8npkVTBA/featured) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md new file mode 100644 index 0000000000..cf5ca940d6 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/checklist-for-production-ready-clusters.md @@ -0,0 +1,52 @@ +--- +title: Checklist for Production-Ready Clusters +weight: 2 +aliases: + - /rancher/v2.x/en/cluster-provisioning/production/ +--- + +In this section, we recommend best practices for creating the production-ready Kubernetes clusters that will run your apps and services. + +For a list of requirements for your cluster, including the requirements for OS/Docker, hardware, and networking, refer to the section on [node requirements.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) + +This is a shortlist of best practices that we strongly recommend for all production clusters. + +For a full list of all the best practices that we recommend, refer to the [best practices section.](best-practices.md) + +### Node Requirements + +* Make sure your nodes fulfill all of the [node requirements,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) including the port requirements. + +### Back up etcd + +* Enable etcd snapshots. Verify that snapshots are being created, and run a disaster recovery scenario to verify the snapshots are valid. etcd is the location where the state of your cluster is stored, and losing etcd data means losing your cluster. Make sure you configure recurring snapshots of etcd for your cluster(s), and make sure the snapshots are stored externally (off the node) as well. + +### Cluster Architecture + +* Nodes should have one of the following role configurations: + * `etcd` + * `controlplane` + * `etcd` and `controlplane` + * `worker` (the `worker` role should not be used or added on nodes with the `etcd` or `controlplane` role) +* Have at least three nodes with the role `etcd` to survive losing one node. Increase this count for higher node fault toleration, and spread them across (availability) zones to provide even better fault tolerance. +* Assign two or more nodes the `controlplane` role for master component high availability. +* Assign two or more nodes the `worker` role for workload rescheduling upon node failure. 
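As a sketch of the role layout recommended in the list above (the addresses and SSH user are placeholders, and this assumes a cluster defined through an RKE `cluster.yml`), the node section might look like:

```yaml
nodes:
  # etcd/controlplane nodes, ideally spread across availability zones
  - address: 203.0.113.11        # placeholder address
    user: ubuntu                 # placeholder SSH user
    role: [controlplane, etcd]
  - address: 203.0.113.12
    user: ubuntu
    role: [controlplane, etcd]
  - address: 203.0.113.13
    user: ubuntu
    role: [controlplane, etcd]
  # worker nodes carry only the worker role
  - address: 203.0.113.21
    user: ubuntu
    role: [worker]
  - address: 203.0.113.22
    user: ubuntu
    role: [worker]
```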
+ +For more information on what each role is used for, refer to the [section on roles for nodes in Kubernetes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes.md) + +For more information about the +number of nodes for each Kubernetes role, refer to the section on [recommended architecture.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### Logging and Monitoring + +* Configure alerts/notifiers for Kubernetes components (System Service). +* Configure logging for cluster analysis and post-mortems. + +### Reliability + +* Perform load tests on your cluster to verify that its hardware can support your workloads. + +### Networking + +* Minimize network latency. Rancher recommends minimizing latency between the etcd nodes. The default setting for `heartbeat-interval` is `500`, and the default setting for `election-timeout` is `5000`. These [settings for etcd tuning](https://coreos.com/etcd/docs/latest/tuning.html) allow etcd to run in most networks (except really high latency networks). +* Cluster nodes should be located within a single region. Most cloud providers provide multiple availability zones within a region, which can be used to create higher availability for your cluster. Using multiple availability zones is fine for nodes with any role. If you are using [Kubernetes Cloud Provider](./set-up-cloud-providers.md) resources, consult the documentation for any restrictions (i.e. zone storage restrictions). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md new file mode 100644 index 0000000000..f4cd0736d9 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/cis-scan-guides.md @@ -0,0 +1,364 @@ +--- +title: CIS Scans +weight: 17 +aliases: + - /rancher/v2.5/en/cis-scans/v2.5 + - /rancher/v2.x/en/cis-scans/ + - /rancher/v2.x/en/cis-scans/v2.5/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. + +The `rancher-cis-benchmark` app leverages kube-bench, an open-source tool from Aqua Security, to check clusters for CIS Kubernetes Benchmark compliance. Also, to generate a cluster-wide report, the application utilizes Sonobuoy for report aggregation. 
+ +- [Changes in Rancher v2.5](#changes-in-rancher-v2-5) +- [About the CIS Benchmark](#about-the-cis-benchmark) +- [About the Generated Report](#about-the-generated-report) +- [Test Profiles](#test-profiles) +- [About Skipped and Not Applicable Tests](#about-skipped-and-not-applicable-tests) +- [Roles-based Access Control](../explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md) +- [Configuration](../explanations/integrations-in-rancher/cis-scans/configuration-reference.md) +- [How-to Guides](#how-to-guides) + - [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) + - [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) + - [Running a Scan](#running-a-scan) + - [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) + - [Skipping Tests](#skipping-tests) + - [Viewing Reports](#viewing-reports) + - [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) + - [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) + - [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) + +# Changes in Rancher v2.5 + +We now support running CIS scans on any Kubernetes cluster, including hosted Kubernetes providers such as EKS, AKS, and GKE. Previously it was only supported to run CIS scans on RKE Kubernetes clusters. + +In Rancher v2.4, the CIS scan tool was available from the **cluster manager** in the Rancher UI. Now it is available in the **Cluster Explorer** and it can be enabled and deployed using a Helm chart. It can be installed from the Rancher UI, but it can also be installed independently of Rancher. It deploys a CIS scan operator for the cluster, and deploys Kubernetes custom resources for cluster scans. The custom resources can be managed directly from the **Cluster Explorer.** + +In v1 of the CIS scan tool, which was available in Rancher v2.4 through the cluster manager, recurring scans could be scheduled. The ability to schedule recurring scans is now also available for CIS v2 from Rancher v2.5.4. + +Support for alerting for the cluster scan results is now also available from Rancher v2.5.4. + +In Rancher v2.4, permissive and hardened profiles were included. In Rancher v2.5.0 and in v2.5.4, more profiles were included. + + + + +- Generic CIS 1.5 +- Generic CIS 1.6 +- RKE permissive 1.5 +- RKE hardened 1.5 +- RKE permissive 1.6 +- RKE hardened 1.6 +- EKS +- GKE +- RKE2 permissive 1.5 +- RKE2 permissive 1.5 + + + + +- Generic CIS 1.5 +- RKE permissive +- RKE hardened +- EKS +- GKE + + + +
    + + +The default profile and the supported CIS benchmark version depends on the type of cluster that will be scanned and the Rancher version: + + + + +The `rancher-cis-benchmark` supports the CIS 1.6 Benchmark version. + +- For RKE Kubernetes clusters, the RKE Permissive 1.6 profile is the default. +- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. +- For RKE2 Kubernetes clusters, the RKE2 Permissive 1.5 profile is the default. +- For cluster types other than RKE, RKE2, EKS and GKE, the Generic CIS 1.5 profile will be used by default. + + + + +The `rancher-cis-benchmark` supports the CIS 1.5 Benchmark version. + +- For RKE Kubernetes clusters, the RKE permissive profile is the default. +- EKS and GKE have their own CIS Benchmarks published by `kube-bench`. The corresponding test profiles are used by default for those clusters. +- For cluster types other than RKE, EKS and GKE, the Generic CIS 1.5 profile will be used by default. + + + + +> **Note:** CIS v1 cannot run on a cluster when CIS v2 is deployed. In other words, after `rancher-cis-benchmark` is installed, you can't run scans by going to the Cluster Manager view in the Rancher UI and clicking Tools > CIS Scans. + +# About the CIS Benchmark + +The Center for Internet Security is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". The organization is headquartered in East Greenbush, New York, with members including large corporations, government agencies, and academic institutions. + +CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. + +The official Benchmark documents are available through the CIS website. The sign-up form to access the documents is +here. + +# About the Generated Report + +Each scan generates a report can be viewed in the Rancher UI and can be downloaded in CSV format. + +From Rancher v2.5.4, the scan uses the CIS Benchmark v1.6 by default. In Rancher v2.5.0-2.5.3, the CIS Benchmark v1.5. is used. + +The Benchmark version is included in the generated report. + +The Benchmark provides recommendations of two types: Automated and Manual. Recommendations marked as Manual in the Benchmark are not included in the generated report. + +Some tests are designated as "Not Applicable." These tests will not be run on any CIS scan because of the way that Rancher provisions RKE clusters. For information on how test results can be audited, and why some tests are designated to be not applicable, refer to Rancher's [self-assessment guide](./rancher-security.md#the-cis-benchmark-and-self-assessment) for the corresponding Kubernetes version. + +The report contains the following information: + +| Column in Report | Description | +|------------------|-------------| +| `id` | The ID number of the CIS Benchmark. | +| `description` | The description of the CIS Benchmark test. | +| `remediation` | What needs to be fixed in order to pass the test. | +| `state` | Indicates if the test passed, failed, was skipped, or was not applicable. | +| `node_type` | The node role, which affects which tests are run on the node. 
Master tests are run on controlplane nodes, etcd tests are run on etcd nodes, and node tests are run on the worker nodes. | +| `audit` | This is the audit check that `kube-bench` runs for this test. | +| `audit_config` | Any configuration applicable to the audit script. | +| `test_info` | Test-related info as reported by `kube-bench`, if any. | +| `commands` | Test-related commands as reported by `kube-bench`, if any. | +| `config_commands` | Test-related configuration data as reported by `kube-bench`, if any. | +| `actual_value` | The test's actual value, present if reported by `kube-bench`. | +| `expected_result` | The test's expected result, present if reported by `kube-bench`. | + +Refer to the [table in the cluster hardening guide](./rancher-security.md) for information on which versions of Kubernetes, the Benchmark, Rancher, and our cluster hardening guide correspond to each other. Also refer to the hardening guide for configuration files of CIS-compliant clusters and information on remediating failed tests. + +# Test Profiles + +The following profiles are available: + + + + +- Generic CIS 1.5 +- Generic CIS 1.6 +- RKE permissive 1.5 +- RKE hardened 1.5 +- RKE permissive 1.6 +- RKE hardened 1.6 +- EKS +- GKE +- RKE2 permissive 1.5 +- RKE2 permissive 1.5 + + + + +- Generic CIS 1.5 +- RKE permissive +- RKE hardened +- EKS +- GKE + + + + +You also have the ability to customize a profile by saving a set of tests to skip. + +All profiles will have a set of not applicable tests that will be skipped during the CIS scan. These tests are not applicable based on how a RKE cluster manages Kubernetes. + +There are two types of RKE cluster scan profiles: + +- **Permissive:** This profile has a set of tests that have been will be skipped as these tests will fail on a default RKE Kubernetes cluster. Besides the list of skipped tests, the profile will also not run the not applicable tests. +- **Hardened:** This profile will not skip any tests, except for the non-applicable tests. + +The EKS and GKE cluster scan profiles are based on CIS Benchmark versions that are specific to those types of clusters. + +In order to pass the "Hardened" profile, you will need to follow the steps on the [hardening guide](./rancher-security.md#rancher-hardening-guide) and use the `cluster.yml` defined in the hardening guide to provision a hardened cluster. + +# About Skipped and Not Applicable Tests + +For a list of skipped and not applicable tests, refer to [this page](../explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests.md). + +For now, only user-defined skipped tests are marked as skipped in the generated report. + +Any skipped tests that are defined as being skipped by one of the default profiles are marked as not applicable. + +# Roles-based Access Control + +For information about permissions, refer to [this page](../explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans.md). + +# Configuration + +For more information about configuring the custom resources for the scans, profiles, and benchmark versions, refer to [this page](../explanations/integrations-in-rancher/cis-scans/configuration-reference.md). 
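For orientation, a scheduled scan expressed as a ClusterScan custom resource might look roughly like the sketch below; the name, profile, and cron expression are placeholder assumptions, so rely on the configuration reference above for the exact fields supported by your version.

```yaml
# Hedged sketch of a scheduled ClusterScan.
apiVersion: cis.cattle.io/v1
kind: ClusterScan
metadata:
  name: daily-cis-scan                          # hypothetical name
spec:
  scanProfileName: rke-profile-permissive-1.6   # assumed profile; use one that exists in your cluster
  scheduledScanConfig:
    cronSchedule: "0 0 * * *"                   # run once a day at midnight
    retentionCount: 3                           # keep the three most recent reports
```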
+ +# How-to Guides + +- [Installing rancher-cis-benchmark](#installing-rancher-cis-benchmark) +- [Uninstalling rancher-cis-benchmark](#uninstalling-rancher-cis-benchmark) +- [Running a Scan](#running-a-scan) +- [Running a Scan Periodically on a Schedule](#running-a-scan-periodically-on-a-schedule) +- [Skipping Tests](#skipping-tests) +- [Viewing Reports](#viewing-reports) +- [Enabling Alerting for rancher-cis-benchmark](#enabling-alerting-for-rancher-cis-benchmark) +- [Configuring Alerts for a Periodic Scan on a Schedule](#configuring-alerts-for-a-periodic-scan-on-a-schedule) +- [Creating a Custom Benchmark Version for Running a Cluster Scan](#creating-a-custom-benchmark-version-for-running-a-cluster-scan) +### Installing rancher-cis-benchmark + +1. In the Rancher UI, go to the **Cluster Explorer.** +1. Click **Apps.** +1. Click `rancher-cis-benchmark`. +1. Click **Install.** + +**Result:** The CIS scan application is deployed on the Kubernetes cluster. + +### Uninstalling rancher-cis-benchmark + +1. From the **Cluster Explorer,** go to the top left dropdown menu and click **Apps & Marketplace.** +1. Click **Installed Apps.** +1. Go to the `cis-operator-system` namespace and check the boxes next to `rancher-cis-benchmark-crd` and `rancher-cis-benchmark`. +1. Click **Delete** and confirm **Delete.** + +**Result:** The `rancher-cis-benchmark` application is uninstalled. + +### Running a Scan + +When a ClusterScan custom resource is created, it launches a new CIS scan on the cluster for the chosen ClusterScanProfile. + +Note: There is currently a limitation of running only one CIS scan at a time for a cluster. If you create multiple ClusterScan custom resources, they will be run one after the other by the operator, and until one scan finishes, the rest of the ClusterScan custom resources will be in the "Pending" state. + +To run a scan, + +1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** +1. In the **Scans** section, click **Create.** +1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Click **Create.** + +**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. +### Running a Scan Periodically on a Schedule +_Available as of v2.5.4_ + +To run a ClusterScan on a schedule, + +1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** +1. In the **Scans** section, click **Create.** +1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Choose the option **Run scan on a schedule.** +1. Enter a valid cron schedule expression in the field **Schedule.** +1. Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. +1. Click **Create.** + +**Result:** The scan runs and reschedules to run according to the cron schedule provided. The **Next Scan** value indicates the next time this scan will run again. 
+ +A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. + +You can also see the previous reports by choosing the report from the **Reports** dropdown on the scan detail page. + +### Skipping Tests + +CIS scans can be run using test profiles with user-defined skips. + +To skip tests, you will create a custom CIS scan profile. A profile contains the configuration for the CIS scan, which includes the benchmark versions to use and any specific tests to skip in that benchmark. + +1. In the **Cluster Explorer,** go to the top-left dropdown menu and click **CIS Benchmark.** +1. Click **Profiles.** +1. From here, you can create a profile in multiple ways. To make a new profile, click **Create** and fill out the form in the UI. To make a new profile based on an existing profile, go to the existing profile, click the three vertical dots, and click **Clone as YAML.** If you are filling out the form, add the tests to skip using the test IDs, using the relevant CIS Benchmark as a reference. If you are creating the new test profile as YAML, you will add the IDs of the tests to skip in the `skipTests` directive. You will also give the profile a name: + + ```yaml + apiVersion: cis.cattle.io/v1 + kind: ClusterScanProfile + metadata: + annotations: + meta.helm.sh/release-name: clusterscan-operator + meta.helm.sh/release-namespace: cis-operator-system + labels: + app.kubernetes.io/managed-by: Helm + name: "" + spec: + benchmarkVersion: cis-1.5 + skipTests: + - "1.1.20" + - "1.1.21" + ``` +1. Click **Create.** + +**Result:** A new CIS scan profile is created. + +When you [run a scan](#running-a-scan) that uses this profile, the defined tests will be skipped during the scan. The skipped tests will be marked in the generated report as `Skip`. + +### Viewing Reports + +To view the generated CIS scan reports, + +1. In the **Cluster Explorer,** go to the top left dropdown menu and click **Cluster Explorer > CIS Benchmark.** +1. The **Scans** page will show the generated reports. To see a detailed report, go to a scan report and click the name. + +One can download the report from the Scans list or from the scan detail page. + +### Enabling Alerting for rancher-cis-benchmark +_Available as of v2.5.4_ + +Alerts can be configured to be sent out for a scan that runs on a schedule. + +> **Prerequisite:** +> +> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.](../reference-guides/monitoring-v2-configuration/receivers.md) +> +> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.](../reference-guides/monitoring-v2-configuration/receivers.md#example-route-config-for-cis-scan-alerts) + +While installing or upgrading the `rancher-cis-benchmark` application, set the following flag to `true` in the `values.yaml`: + +```yaml +alerts: + enabled: true +``` + +### Configuring Alerts for a Periodic Scan on a Schedule +_Available as of v2.5.4_ + +From Rancher v2.5.4, it is possible to run a ClusterScan on a schedule. + +A scheduled scan can also specify if you should receive alerts when the scan completes. + +Alerts are supported only for a scan that runs on a schedule. 
+ +The `rancher-cis-benchmark` application supports two types of alerts: + +- Alert on scan completion: This alert is sent out when the scan run finishes. The alert includes details such as the ClusterScan's name and the ClusterScanProfile name. +- Alert on scan failure: This alert is sent out if there are some test failures in the scan run or if the scan is in a `Fail` state. + +> **Prerequisite:** +> +> Before enabling alerts for `rancher-cis-benchmark`, make sure to install the `rancher-monitoring` application and configure the Receivers and Routes. For more information, see [this section.](../reference-guides/monitoring-v2-configuration/receivers.md) +> +> While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. An example route configuration is [here.](../reference-guides/monitoring-v2-configuration/receivers.md#example-route-config-for-cis-scan-alerts) + +To configure alerts for a scan that runs on a schedule, + +1. [Enable alerts on the `rancher-cis-benchmark` application.](#enabling-alerting-for-rancher-cis-benchmark) +1. Go to the **Cluster Explorer** in the Rancher UI. In the top left dropdown menu, click **Cluster Explorer > CIS Benchmark.** +1. In the **Scans** section, click **Create.** +1. Choose a cluster scan profile. The profile determines which CIS Benchmark version will be used and which tests will be performed. If you choose the Default profile, then the CIS Operator will choose a profile applicable to the type of Kubernetes cluster it is installed on. +1. Choose the option **Run scan on a schedule.** +1. Enter a valid [cron schedule expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) in the field **Schedule.** +1. Check the boxes next to the Alert types under **Alerting.** +1. Optional: Choose a **Retention** count, which indicates the number of reports maintained for this recurring scan. By default this count is 3. When this retention limit is reached, older reports will get purged. +1. Click **Create.** + +**Result:** The scan runs and reschedules to run according to the cron schedule provided. Alerts are sent out when the scan finishes if routes and receivers are configured under the `rancher-monitoring` application. + +A report is generated with the scan results every time the scan runs. To see the latest results, click the name of the scan that appears. + +### Creating a Custom Benchmark Version for Running a Cluster Scan +_Available as of v2.5.4_ + +There could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. + +It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application.
+ +For details, see [this page.](../explanations/integrations-in-rancher/cis-scans/custom-benchmark.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md b/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/cis-scans.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md new file mode 100644 index 0000000000..6f53dcb143 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/cli-with-rancher.md @@ -0,0 +1,129 @@ +--- +title: CLI with Rancher +description: Interact with Rancher using command line interface (CLI) tools from your workstation. +weight: 21 +--- + +- [Rancher CLI](#rancher-cli) + - [Download Rancher CLI](#download-rancher-cli) + - [Requirements](#requirements) + - [CLI Authentication](#cli-authentication) + - [Project Selection](#project-selection) + - [Commands](#commands) + - [Rancher CLI Help](#rancher-cli-help) + - [Limitations](#limitations) +- [kubectl](#kubectl) + - [kubectl Utility](#kubectl-utility) + - [Authentication with kubectl and kubeconfig Tokens with TTL](#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) + +# Rancher CLI + +The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher using a command line rather than the GUI. + +### Download Rancher CLI + +The binary can be downloaded directly from the UI. The link can be found in the right-hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. + +### Requirements + +After you download the Rancher CLI, you need to complete a few configuration steps. Rancher CLI requires: + +- Your Rancher Server URL, which is used to connect to Rancher Server. +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key](../reference-guides/user-settings/api-keys.md). + +### CLI Authentication + +Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `<SERVER_URL>` and `<BEARER_TOKEN>` with your information): + +```bash +$ ./rancher login https://<SERVER_URL> --token <BEARER_TOKEN> +``` + +If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. + +### Project Selection + +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project.
+ +**Example: `./rancher context switch` Output** +``` +User:rancher-cli-directory user$ ./rancher context switch +NUMBER CLUSTER NAME PROJECT ID PROJECT NAME +1 cluster-2 c-7q96s:p-h4tmb project-2 +2 cluster-2 c-7q96s:project-j6z6d Default +3 cluster-1 c-lchzv:p-xbpdt project-1 +4 cluster-1 c-lchzv:project-s2mch Default +Select a Project: +``` + +After you enter a number, the console displays a message that you've changed projects. + +``` +INFO[0005] Setting new context to project project-1 +INFO[0005] Saving config to /Users/markbishop/.ranchcli2.json +``` + +Ensure you can run `rancher kubectl get pods` successfully. + +### Commands + +The following commands are available for use in Rancher CLI. + +| Command | Result | +|---|---| +| `apps, [app]` | Performs operations on catalog applications (i.e., individual [Helm charts](https://docs.helm.sh/developing_charts/)) or Rancher charts. | +| `catalog` | Performs operations on [catalogs](./helm-charts-in-rancher.md). | +| `clusters, [cluster]` | Performs operations on your [clusters](kubernetes-clusters-in-rancher-setup.md). | +| `context` | Switches between Rancher [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) and [workloads](workloads-and-pods.md)). Specify resources by name or ID. | +| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | +| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | +| `namespaces, [namespace]` |Performs operations on namespaces. | +| `nodes, [node]` |Performs operations on nodes. | +| `projects, [project]` | Performs operations on [projects](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md). | +| `ps` | Displays [workloads](workloads-and-pods.md) in a project. | +| `settings, [setting]` | Shows the current settings for your Rancher Server. | +| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | +| `help, [h]` | Shows a list of commands or help for one command. | + + +### Rancher CLI Help + +Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. + +All commands accept the `--help` flag, which documents each command's usage. + +### Limitations + +The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](helm-charts-in-rancher.md). + +# kubectl + +Interact with Rancher using kubectl. + +### kubectl Utility + +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +Configure kubectl by visiting your cluster in the Rancher Web UI, clicking on `Kubeconfig`, copying contents, and putting them into your `~/.kube/config` file. + +Run `kubectl cluster-info` or `kubectl get pods` successfully. 
+ +### Authentication with kubectl and kubeconfig Tokens with TTL + +_Requirements_ + +If admins have [enforced TTL on kubeconfig tokens](../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher CLI](cli-with-rancher.md) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like: +`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. + +This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: + +1. Local +2. Active Directory (LDAP only) +3. FreeIPA +4. OpenLDAP +5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth + +When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. +The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../reference-guides/about-the-api/api-tokens.md#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../reference-guides/about-the-api/api-tokens.md#deleting-tokens). +Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md new file mode 100644 index 0000000000..2172d5f6be --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/cluster-configuration.md @@ -0,0 +1,30 @@ +--- +title: Cluster Configuration +weight: 2025 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/editing-clusters + - /rancher/v2.x/en/cluster-admin/editing-clusters/ +--- + +After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. + +For information on editing cluster membership, go to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) + +### Cluster Configuration References + +The cluster configuration options depend on the type of Kubernetes cluster: + +- [RKE Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md) +- [EKS Cluster Configuration](../reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md) +- [GKE Cluster Configuration](gke-cluster-configuration.md) + +### Cluster Management Capabilities by Cluster Type + +The options and settings available for an existing cluster change based on the method that you used to provision it. 
+ +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md b/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md new file mode 100644 index 0000000000..6814b2a16f --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/configuration-options.md @@ -0,0 +1,51 @@ +--- +title: Configuration Options +weight: 3 +aliases: + - /rancher/v2.5/en/istio/v2.5/configuration-reference + - /rancher/v2.x/en/istio/v2.5/configuration-reference/ +--- + +- [Egress Support](#egress-support) +- [Enabling Automatic Sidecar Injection](#enabling-automatic-sidecar-injection) +- [Overlay File](#overlay-file) +- [Selectors and Scrape Configs](#selectors-and-scrape-configs) +- [Enable Istio with Pod Security Policies](#enable-istio-with-pod-security-policies) +- [Additional Steps for Installing Istio on an RKE2 Cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) +- [Additional Steps for Project Network Isolation](#additional-steps-for-project-network-isolation) + +### Egress Support + +By default the Egress gateway is disabled, but can be enabled on install or upgrade through the values.yaml or via the [overlay file](#overlay-file). + +### Enabling Automatic Sidecar Injection + +Automatic sidecar injection is disabled by default. To enable this, set the `sidecarInjectorWebhook.enableNamespacesByDefault=true` in the values.yaml on install or upgrade. This automatically enables Istio sidecar injection into all new namespaces that are deployed. + +### Overlay File + +An Overlay File is designed to support extensive configuration of your Istio installation. It allows you to make changes to any values available in the [IstioOperator API](https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/). This will ensure you can customize the default installation to fit any scenario. + +The Overlay File will add configuration on top of the default installation that is provided from the Istio chart installation. This means you do not need to redefine the components that already defined for installation. + +For more information on Overlay Files, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/install/istioctl/#configure-component-settings) + +### Selectors and Scrape Configs + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which enables monitoring across all namespaces by default. This ensures you can view traffic, metrics and graphs for resources deployed in a namespace with `istio-injection=enabled` label. + +If you would like to limit Prometheus to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true`. Once you do this, you will need to add additional configuration to continue to monitor your resources. 
+ +For details, refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) + +### Enable Istio with Pod Security Policies + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies.md) + +### Additional Steps for Installing Istio on an RKE2 Cluster + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md) + +### Additional Steps for Project Network Isolation + +Refer to [this section.](../explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md new file mode 100644 index 0000000000..8b365c2d1a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-microsoft-ad-federation-service-saml.md @@ -0,0 +1,32 @@ +--- +title: Configuring Microsoft Active Directory Federation Service (SAML) +weight: 1205 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/microsoft-adfs/ +--- + +If your organization uses Microsoft Active Directory Federation Services (AD FS) for user authentication, you can configure Rancher to allow your users to log in using their AD FS credentials. + +## Prerequisites + +You must have Rancher installed. + +- Obtain your Rancher Server URL. During AD FS configuration, substitute this URL for the `` placeholder. +- You must have a global administrator account on your Rancher installation. + +You must have a [Microsoft AD FS Server](https://docs.microsoft.com/en-us/windows-server/identity/active-directory-federation-services) configured. + +- Obtain your AD FS Server IP/DNS name. During AD FS configuration, substitute this IP/DNS name for the `` placeholder. +- You must have access to add [Relying Party Trusts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/create-a-relying-party-trust) on your AD FS Server. + +## Setup Outline + +Setting up Microsoft AD FS with Rancher Server requires configuring AD FS on your Active Directory server, and configuring Rancher to utilize your AD FS server. The following pages serve as guides for setting up Microsoft AD FS authentication on your Rancher installation. + +- [1. Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) +- [2. 
Configuring Rancher for Microsoft AD FS](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs.md) + +{{< saml_caveats >}} + + +### [Next: Configuring Microsoft AD FS for Rancher](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md new file mode 100644 index 0000000000..8e64faf3b1 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-openldap.md @@ -0,0 +1,51 @@ +--- +title: Configuring OpenLDAP +weight: 1113 +aliases: + - /rancher/v2.5/en/tasks/global-configuration/authentication/openldap/ + - /rancher/v2.x/en/admin-settings/authentication/openldap/ +--- + +If your organization uses LDAP for user authentication, you can configure Rancher to communicate with an OpenLDAP server to authenticate users. This allows Rancher admins to control access to clusters and projects based on users and groups managed externally in the organisation's central user repository, while allowing end-users to authenticate with their LDAP credentials when logging in to the Rancher UI. + +## Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognised certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +## Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. + +### Test Authentication + +Once you have completed the configuration, proceed by testing the connection to the OpenLDAP server. Authentication with OpenLDAP will be enabled implicitly if the test is successful. + +> **Note:** +> +> The OpenLDAP user pertaining to the credentials entered in this step will be mapped to the local principal account and assigned administrator privileges in Rancher. You should therefore make a conscious decision on which LDAP account you use to perform this step. + +1. 
Enter the **username** and **password** for the OpenLDAP account that should be mapped to the local principal account. +2. Click **Authenticate With OpenLDAP** to test the OpenLDAP connection and finalise the setup. + +**Result:** + +- OpenLDAP authentication is configured. +- The LDAP user pertaining to the entered credentials is mapped to the local principal (administrative) account. + +> **Note:** +> +> You will still be able to login using the locally configured `admin` account and password in case of a disruption of LDAP services. + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md b/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md new file mode 100644 index 0000000000..2ac55c9745 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/configure-shibboleth-saml.md @@ -0,0 +1,109 @@ +--- +title: Configuring Shibboleth (SAML) +weight: 1210 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/shibboleth/ +--- + +If your organization uses Shibboleth Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in to Rancher using their Shibboleth credentials. + +In this configuration, when Rancher users log in, they will be redirected to the Shibboleth IdP to enter their credentials. After authentication, they will be redirected back to the Rancher UI. + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then the authenticated user will be able to access resources in Rancher that their groups have permissions for. + +> The instructions in this section assume that you understand how Rancher, Shibboleth, and OpenLDAP work together. For a more detailed explanation of how it works, refer to [this page.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions.md) + +This section covers the following topics: + +- [Setting up Shibboleth in Rancher](#setting-up-shibboleth-in-rancher) + - [Shibboleth Prerequisites](#shibboleth-prerequisites) + - [Configure Shibboleth in Rancher](#configure-shibboleth-in-rancher) + - [SAML Provider Caveats](#saml-provider-caveats) +- [Setting up OpenLDAP in Rancher](#setting-up-openldap-in-rancher) + - [OpenLDAP Prerequisites](#openldap-prerequisites) + - [Configure OpenLDAP in Rancher](#configure-openldap-in-rancher) + - [Troubleshooting](#troubleshooting) + +# Setting up Shibboleth in Rancher + +### Shibboleth Prerequisites +> +>- You must have a Shibboleth IdP Server configured. +>- Following are the Rancher Service Provider URLs needed for configuration: +Metadata URL: `https:///v1-saml/shibboleth/saml/metadata` +Assertion Consumer Service (ACS) URL: `https:///v1-saml/shibboleth/saml/acs` +>- Export a `metadata.xml` file from your IdP Server. 
For more information, see the [Shibboleth documentation.](https://wiki.shibboleth.net/confluence/display/SP3/Home) + +### Configure Shibboleth in Rancher +If your organization uses Shibboleth for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +1. From the **Global** view, select **Security > Authentication** from the main menu. + +1. Select **Shibboleth**. + +1. Complete the **Configure Shibboleth Account** form. Shibboleth IdP lets you specify what data store you want to use. You can either add a database or use an existing ldap server. For example, if you select your Active Directory (AD) server, the examples below describe how you can map AD attributes to fields within Rancher. + + 1. **Display Name Field**: Enter the AD attribute that contains the display name of users (example: `displayName`). + + 1. **User Name Field**: Enter the AD attribute that contains the user name/given name (example: `givenName`). + + 1. **UID Field**: Enter an AD attribute that is unique to every user (example: `sAMAccountName`, `distinguishedName`). + + 1. **Groups Field**: Make entries for managing group memberships (example: `memberOf`). + + 1. **Rancher API Host**: Enter the URL for your Rancher Server. + + 1. **Private Key** and **Certificate**: This is a key-certificate pair to create a secure shell between Rancher and your IdP. + + You can generate one using an openssl command. For example: + + ``` + openssl req -x509 -newkey rsa:2048 -keyout myservice.key -out myservice.cert -days 365 -nodes -subj "/CN=myservice.example.com" + ``` + 1. **IDP-metadata**: The `metadata.xml` file that you exported from your IdP server. + + +1. After you complete the **Configure Shibboleth Account** form, click **Authenticate with Shibboleth**, which is at the bottom of the page. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Shibboleth IdP to validate your Rancher Shibboleth configuration. + + >**Note:** You may have to disable your popup blocker to see the IdP login page. + +**Result:** Rancher is configured to work with Shibboleth. Your users can now sign into Rancher using their Shibboleth logins. + +### SAML Provider Caveats + +If you configure Shibboleth without OpenLDAP, the following caveats apply due to the fact that SAML Protocol does not support search or lookup for users or groups. + +- There is no validation on users or groups when assigning permissions to them in Rancher. +- When adding users, the exact user IDs (i.e. UID Field) must be entered correctly. As you type the user ID, there will be no search for other user IDs that may match. +- When adding groups, you must select the group from the drop-down that is next to the text box. Rancher assumes that any input from the text box is a user. +- The group drop-down shows only the groups that you are a member of. You will not be able to add groups that you are not a member of. + +To enable searching for groups when assigning permissions in Rancher, you will need to configure a back end for the SAML provider that supports groups, such as OpenLDAP. + +# Setting up OpenLDAP in Rancher + +If you also configure OpenLDAP as the back end to Shibboleth, it will return a SAML assertion to Rancher with user attributes that include groups. Then authenticated users will be able to access resources in Rancher that their groups have permissions for. 
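Before filling in the prerequisites described in the next section, it can be useful to verify the bind (service) account credentials and search base outside of Rancher. A minimal sketch using `ldapsearch`, with purely hypothetical host, bind DN, and search base values:

```
# Hypothetical values -- substitute your own LDAP host, bind DN, and search base.
ldapsearch -x -H ldaps://ldap.example.com:636 \
  -D "cn=rancher-bind,ou=service-accounts,dc=example,dc=com" -W \
  -b "ou=users,dc=example,dc=com" \
  "(objectClass=inetOrgPerson)" dn
```

If this returns the expected user entries, the same host, bind DN, password, and search base can be entered in the Rancher OpenLDAP form.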
+ +### OpenLDAP Prerequisites + +Rancher must be configured with a LDAP bind account (aka service account) to search and retrieve LDAP entries pertaining to users and groups that should have access. It is recommended to not use an administrator account or personal account for this purpose and instead create a dedicated account in OpenLDAP with read-only access to users and groups under the configured search base (see below). + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +### Configure OpenLDAP in Rancher + +Configure the settings for the OpenLDAP server, groups and users. For help filling out each field, refer to the [configuration reference.](../reference-guides/configure-openldap/openldap-config-reference.md) Note that nested group membership is not available for Shibboleth. + +> Before you proceed with the configuration, please familiarise yourself with the concepts of [External Authentication Configuration and Principal Users](about-authentication.md#external-authentication-configuration-and-principal-users). + +1. Log into the Rancher UI using the initial local `admin` account. +2. From the **Global** view, navigate to **Security** > **Authentication** +3. Select **OpenLDAP**. The **Configure an OpenLDAP server** form will be displayed. + +# Troubleshooting + +If you are experiencing issues while testing the connection to the OpenLDAP server, first double-check the credentials entered for the service account as well as the search base configuration. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging](../faq/technical-items.md#how-can-i-enable-debug-logging) in this documentation. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md b/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md new file mode 100644 index 0000000000..eaea6f581a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/create-kubernetes-persistent-storage.md @@ -0,0 +1,78 @@ +--- +title: "Kubernetes Persistent Storage: Volumes and Storage Classes" +description: "Learn about the two ways with which you can create persistent storage in Kubernetes: persistent volumes and storage classes" +weight: 2031 +aliases: + - /rancher/v2.5/en/tasks/clusters/adding-storage/ + - /rancher/v2.5/en/cluster-admin/volumes-and-storage/persistent-volume-claims/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/ +--- +When deploying an application that needs to retain data, you'll need to create persistent storage. Persistent storage allows you to store application data external from the pod running your application. This storage practice allows you to maintain application data, even if the application's pod fails. + +The documents in this section assume that you understand the Kubernetes concepts of persistent volumes, persistent volume claims, and storage classes. 
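As a quick refresher, the objects referenced throughout this page look roughly as follows. This is a minimal, hypothetical sketch: the names, size, and provisioner are placeholders, not recommendations.

```
# Hypothetical StorageClass and PersistentVolumeClaim -- names and provisioner are placeholders.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-sc
provisioner: kubernetes.io/aws-ebs   # any provisioner supported by your cluster
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-pvc
spec:
  storageClassName: example-sc
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
```

In the existing-storage workflow described below, a `PersistentVolume` that points at the pre-provisioned storage is created first and the claim binds to it; in the dynamic workflow, the claim references a `StorageClass` as shown here.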
For more information, refer to the section on [how storage works.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +### Prerequisites + +To set up persistent storage, the `Manage Volumes` [role](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-role-reference) is required. + +If you are provisioning storage for a cluster hosted in the cloud, the storage and cluster hosts must have the same cloud provider. + +For provisioning new storage with Rancher, the cloud provider must be enabled. For details on enabling cloud providers, refer to [this page.](./set-up-cloud-providers.md) + +For attaching existing persistent storage to a cluster, the cloud provider does not need to be enabled. + +### Setting up Existing Storage + +The overall workflow for setting up existing storage is as follows: + +1. Set up your persistent storage. This may be storage in an infrastructure provider, or it could be your own storage. +2. Add a persistent volume (PV) that refers to the persistent storage. +3. Add a persistent volume claim (PVC) that refers to the PV. +4. Mount the PVC as a volume in your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md) + +### Dynamically Provisioning New Storage in Rancher + +The overall workflow for provisioning new storage is as follows: + +1. Add a StorageClass and configure it to use your storage provider. The StorageClass could refer to storage in an infrastructure provider, or it could refer to your own storage. +2. Add a persistent volume claim (PVC) that refers to the storage class. +3. Mount the PVC as a volume for your workload. + +For details and prerequisites, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) + +### Longhorn Storage + +[Longhorn](https://longhorn.io/) is a lightweight, reliable and easy-to-use distributed block storage system for Kubernetes. + +Longhorn is free, open source software. Originally developed by Rancher Labs, it is now being developed as a sandbox project of the Cloud Native Computing Foundation. It can be installed on any Kubernetes cluster with Helm, with kubectl, or with the Rancher UI. + +If you have a pool of block storage, Longhorn can help you provide persistent storage to your Kubernetes cluster without relying on cloud providers. For more information about Longhorn features, refer to the [documentation.](https://longhorn.io/docs/1.0.2/what-is-longhorn/) + +Rancher v2.5 simplified the process of installing Longhorn on a Rancher-managed cluster. 
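If you are not using the Rancher UI integration, the Helm route mentioned above looks roughly like the following sketch; confirm the chart repository URL and current instructions against the Longhorn documentation:

```
helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace
```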
For more information, see [this page.](../explanations/integrations-in-rancher/longhorn.md) + +### Provisioning Storage Examples + +We provide examples of how to provision storage with [NFS,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) [vSphere,](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) and [Amazon's EBS.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) + +### GlusterFS Volumes + +In clusters that store data on GlusterFS volumes, you may experience an issue where pods fail to mount volumes after restarting the `kubelet`. For details on preventing this from happening, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes.md) + +### iSCSI Volumes + +In [Rancher Launched Kubernetes clusters](launch-kubernetes-with-rancher.md) that store data on iSCSI volumes, you may experience an issue where kubelets fail to automatically connect with iSCSI volumes. For details on resolving this issue, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes.md) + +### hostPath Volumes +Before you create a hostPath volume, you need to set up an [extra_bind](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds/) in your cluster configuration. This will mount the path as a volume in your kubelets, which can then be used for hostPath volumes in your workloads. + +### Migrating vSphere Cloud Provider from In-tree to Out-of-tree + +Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. 
+ +For instructions on how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md) + +### Related Links + +- [Kubernetes Documentation: Storage](https://kubernetes.io/docs/concepts/storage/) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md new file mode 100644 index 0000000000..bb051c785d --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/custom-resource-configuration.md @@ -0,0 +1,9 @@ +--- +title: Custom Resource Configuration +weight: 5 +--- + +The following Custom Resource Definitions are used to configure logging: + +- [Flow and ClusterFlow](../explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) +- [Output and ClusterOutput](../explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md new file mode 100644 index 0000000000..0c02e3f03e --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-apps-across-clusters.md @@ -0,0 +1,17 @@ +--- +title: Deploying Applications across Clusters +weight: 12 +aliases: + - /rancher/v2.x/en/deploy-across-clusters/ +--- +### Fleet + +Rancher v2.5 introduced Fleet, a new way to deploy applications across clusters. + +Fleet is GitOps at scale. For more information, refer to the [Fleet section.](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) + +### Multi-cluster Apps + +In Rancher before v2.5, the multi-cluster apps feature was used to deploy applications across clusters. The multi-cluster apps feature is deprecated, but still available in Rancher v2.5. + +Refer to the documentation [here.](../how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md new file mode 100644 index 0000000000..529762c62f --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-manager.md @@ -0,0 +1,18 @@ +--- +title: Deploying Rancher Server +weight: 100 +aliases: + - /rancher/v2.x/en/quick-start-guide/deployment/ +--- + +Use one of the following guides to deploy and provision Rancher and a Kubernetes cluster in the provider of your choice. + +- [DigitalOcean](../getting-started/quick-start-guides/deploy-rancher-manager/digitalocean.md) (uses Terraform) +- [AWS](../getting-started/quick-start-guides/deploy-rancher-manager/aws.md) (uses Terraform) +- [Azure](../getting-started/quick-start-guides/deploy-rancher-manager/azure.md) (uses Terraform) +- [GCP](../getting-started/quick-start-guides/deploy-rancher-manager/gcp.md) (uses Terraform) +- [Vagrant](../getting-started/quick-start-guides/deploy-rancher-manager/vagrant.md) + +If you prefer, the following guide will take you through the same process in individual steps. 
Use this if you want to run Rancher in a different provider, on prem, or if you would just like to see how easy it is. + +- [Manual Install](../getting-started/quick-start-guides/deploy-rancher-manager/helm-cli.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md new file mode 100644 index 0000000000..e1b76ebb39 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/deploy-rancher-workloads.md @@ -0,0 +1,11 @@ +--- +title: Deploying Workloads +weight: 200 +aliases: + - /rancher/v2.x/en/quick-start-guide/workload/ +--- + +These guides walk you through the deployment of an application, including how to expose the application for use outside of the cluster. + +- [Workload with Ingress](../getting-started/quick-start-guides/deploy-workloads/workload-ingress.md) +- [Workload with NodePort](../getting-started/quick-start-guides/deploy-workloads/nodeports.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/downstream-cluster-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md b/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md new file mode 100644 index 0000000000..d72cb51f7b --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/enable-experimental-features.md @@ -0,0 +1,182 @@ +--- +title: Enabling Experimental Features +weight: 17 +aliases: + - /rancher/v2.5/en/installation/options/feature-flags/ + - /rancher/v2.5/en/admin-settings/feature-flags/ + - /rancher/v2.x/en/installation/resources/feature-flags/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher includes some features that are experimental and disabled by default. You might want to enable these features, for example, if you decide that the benefits of using an [unsupported storage type](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) outweighs the risk of using an untested feature. Feature flags were introduced to allow you to try these features that are not enabled by default. + +The features can be enabled in three ways: + +- [Enable features when starting Rancher.](#enabling-features-when-starting-rancher) When installing Rancher with a CLI, you can use a feature flag to enable a feature by default. +- [Enable features from the Rancher UI](#enabling-features-with-the-rancher-ui) by going to the **Settings** page. +- [Enable features with the Rancher API](#enabling-features-with-the-rancher-api) after installing Rancher. + +Each feature has two values: + +- A default value, which can be configured with a flag or environment variable from the command line +- A set value, which can be configured with the Rancher API or UI + +If no value has been set, Rancher uses the default value. + +Because the API sets the actual value and the command line sets the default value, that means that if you enable or disable a feature with the API or UI, it will override any value set with the command line. 
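On the command line, the default values are expressed as a comma-separated list of `<feature-name>=<boolean>` pairs. For instance, using flags documented later on this page:

```
# Passed through the CATTLE_FEATURES environment variable (Helm/Kubernetes installs)
# or the --features argument (Docker installs); the flag names here are examples only.
CATTLE_FEATURES="unsupported-storage-drivers=true,istio-virtual-service-ui=true"
```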
+ +For example, if you install Rancher, then set a feature flag to true with the Rancher API, then upgrade Rancher with a command that sets the feature flag to false, the default value will still be false, but the feature will still be enabled because it was set with the Rancher API. If you then deleted the set value (true) with the Rancher API, setting it to NULL, the default value (false) would take effect. + +> **Note:** There are some feature flags that may require a restart of the Rancher server container. These features that require a restart are marked in the table of these docs and in the UI. + +The following is a list of the feature flags available in Rancher: + +- `fleet`: Rancher comes with [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md) preinstalled in v2.5+. +- `istio-virtual-service-ui`: This feature enables a [UI to create, read, update, and delete Istio virtual services and destination rules](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features.md), which are traffic management features of Istio. +- `unsupported-storage-drivers`: This feature [allows unsupported storage drivers.](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers.md) In other words, it enables types for storage providers and provisioners that are not enabled by default. + +The below table shows the availability and default value for feature flags in Rancher: + +| Feature Flag Name | Default Value | Status | Available as of | Rancher Restart Required? | +| ----------------------------- | ------------- | ------------ | --------------- |---| +| `dashboard` | `true` | Experimental | v2.4.0 | x | +| `dashboard` | `true` | GA* and no longer a feature flag | v2.5.0 | x | +| `istio-virtual-service-ui` | `false` | Experimental | v2.3.0 | | +| `istio-virtual-service-ui` | `true` | GA* | v2.3.2 | | +| `proxy` | `false` | Experimental | v2.4.0 | | +| `proxy` | N/A | Discontinued | v2.5.0 | | +| `unsupported-storage-drivers` | `false` | Experimental | v2.3.0 | | +| `fleet` | `true` | GA* | v2.5.0 | | + +\* Generally Available. This feature is included in Rancher and it is not experimental. + +# Enabling Features when Starting Rancher + +When you install Rancher, enable the feature you want with a feature flag. The command is different depending on whether you are installing Rancher on a single node or if you are doing a Kubernetes Installation of Rancher. + +### Enabling Features for Kubernetes Installs + +> **Note:** Values set from the Rancher API will override the value passed in through the command line. + +When installing Rancher with a Helm chart, use the `--set` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +helm install rancher-latest/rancher \ + --name rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + +Note: If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +### Rendering the Helm Chart for Air Gap Installations + +For an air gap installation of Rancher, you need to add a Helm chart repository and render a Helm template before installing Rancher with Helm. 
For details, refer to the [air gap installation documentation.](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md) + +Here is an example of a command for passing in the feature flag names when rendering the Helm template. In the below example, two features are enabled by passing the feature flag names in a comma separated list. + +The Helm 3 command is as follows: + + + + +``` +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + + + + +``` +helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + + + + +The Helm 2 command is as follows: + +``` +helm template rancher ./rancher-.tgz --output-dir . \ + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts + --set 'extraEnv[0].name=CATTLE_FEATURES' + --set 'extraEnv[0].value==true,=true' +``` + +### Enabling Features for Docker Installs + +When installing Rancher with Docker, use the `--features` option. In the below example, two features are enabled by passing the feature flag names in a comma separated list: + +``` +docker run -d -p 80:80 -p 443:443 \ + --restart=unless-stopped \ + rancher/rancher:rancher-latest \ + --features==true,=true +``` + + +# Enabling Features with the Rancher UI + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To enable a feature, go to the disabled feature you want to enable and click **⋮ > Activate.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher UI + +1. Go to the **Global** view and click **Settings.** +1. Click the **Feature Flags** tab. You will see a list of experimental features. +1. To disable a feature, go to the enabled feature you want to disable and click **⋮ > Deactivate.** + +**Result:** The feature is disabled. + +# Enabling Features with the Rancher API + +1. Go to `/v3/features`. +1. In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **True.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is enabled. + +### Disabling Features with the Rancher API + +1. Go to `/v3/features`. +1. 
In the `data` section, you will see an array containing all of the features that can be turned on with feature flags. The name of the feature is in the `id` field. Click the name of the feature you want to enable. +1. In the upper left corner of the screen, under **Operations,** click **Edit.** +1. In the **Value** drop-down menu, click **False.** +1. Click **Show Request.** +1. Click **Send Request.** +1. Click **Close.** + +**Result:** The feature is disabled. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/fleet-gitops-at-scale.md b/versioned_docs/version-2.5/pages-for-subheaders/fleet-gitops-at-scale.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/fleet-gitops-at-scale.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md new file mode 100644 index 0000000000..5effc4568e --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/gke-cluster-configuration.md @@ -0,0 +1,456 @@ +--- +title: GKE Cluster Configuration Reference +shortTitle: GKE Cluster Configuration +weight: 3 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +# Changes in v2.5.8 + +- We now support private GKE clusters. Note: This advanced setup can require more steps during the cluster provisioning process. For details, see [this section.](../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md) +- [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc) are now supported. +- We now support more configuration options for Rancher managed GKE clusters: + - Project + - Network policy + - Network policy config + - Node pools and node configuration options: + - More image types are available for the nodes + - The maximum number of pods per node can be configured + - Node pools can be added while configuring the GKE cluster +- When provisioning a GKE cluster, you can now use reusable cloud credentials instead of using a service account token directly to create the cluster. + +# Cluster Location + +| Value | Description | +|--------|--------------| +| Location Type | Zonal or Regional. With GKE, you can create a cluster tailored to the availability requirements of your workload and your budget. By default, a cluster's nodes run in a single compute zone. When multiple zones are selected, the cluster's nodes will span multiple compute zones, while the controlplane is located in a single zone. Regional clusters increase the availability of the controlplane as well. For help choosing the type of cluster availability, refer to [these docs.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#choosing_a_regional_or_zonal_control_plane) | +| Zone | Each region in Compute engine contains a number of zones. 
For more information about available regions and zones, refer to [these docs.](https://cloud.google.com/compute/docs/regions-zones#available) | +| Additional Zones | For zonal clusters, you can select additional zones to create a [multi-zone cluster.](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#multi-zonal_clusters) | +| Region | For [regional clusters,](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#regional_clusters) you can select a region. For more information about available regions and zones, refer to [this section](https://cloud.google.com/compute/docs/regions-zones#available). The first part of each zone name is the name of the region. | + +# Cluster Options + +### Kubernetes Version + +_Mutable: yes_ + +For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) + +### Container Address Range + +_Mutable: no_ + +The IP address range for pods in the cluster. Must be a valid CIDR range, e.g. 10.42.0.0/16. If not specified, a random range is automatically chosen from 10.0.0.0/8 and will exclude ranges already allocated to VMs, other clusters, or routes. Automatically chosen ranges may conflict with reserved IP addresses, dynamic routes, or routes within VPCs peering with the cluster. + +### Network + +_Mutable: no_ + +The Compute Engine Network that the cluster connects to. Routes and firewalls will be created using this network. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC networks that are shared to your project will appear here. will be available to select in this field. For more information, refer to [this page](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets). + +### Node Subnet / Subnet + +_Mutable: no_ + +The Compute Engine subnetwork that the cluster connects to. This subnetwork must belong to the network specified in the **Network** field. Select an existing subnetwork, or select "Auto Create Subnetwork" to have one automatically created. If not using an existing network, **Subnetwork Name** is required to generate one. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC subnets that are shared to your project will appear here. If using a Shared VPC network, you cannot select "Auto Create Subnetwork". For more information, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) + +### Subnetwork Name + +_Mutable: no_ + +Automatically create a subnetwork with the provided name. Required if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. For more information on subnetworks, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) + +### Ip Aliases + +_Mutable: no_ + +Enable [alias IPs](https://cloud.google.com/vpc/docs/alias-ip). This enables VPC-native traffic routing. Required if using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc). + +### Network Policy + +_Mutable: yes_ + +Enable network policy enforcement on the cluster. 
A network policy defines the level of communication that can occur between pods and services in the cluster. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy) + +### Node Ipv4 CIDR Block + +_Mutable: no_ + +The IP address range of the instance IPs in this cluster. Can be set if "Auto Create Subnetwork" is selected for **Node Subnet** or **Subnet**. Must be a valid CIDR range, e.g. 10.96.0.0/14. For more information on how to determine the IP address range, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) + +### Cluster Secondary Range Name + +_Mutable: no_ + +The name of an existing secondary range for Pod IP addresses. If selected, **Cluster Pod Address Range** will automatically be populated. Required if using a Shared VPC network. + +### Cluster Pod Address Range + +_Mutable: no_ + +The IP address range assigned to pods in the cluster. Must be a valid CIDR range, e.g. 10.96.0.0/11. If not provided, will be created automatically. Must be provided if using a Shared VPC network. For more information on how to determine the IP address range for your pods, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_pods) + +### Services Secondary Range Name + +_Mutable: no_ + +The name of an existing secondary range for service IP addresses. If selected, **Service Address Range** will be automatically populated. Required if using a Shared VPC network. + +### Service Address Range + +_Mutable: no_ + +The address range assigned to the services in the cluster. Must be a valid CIDR range, e.g. 10.94.0.0/18. If not provided, will be created automatically. Must be provided if using a Shared VPC network. For more information on how to determine the IP address range for your services, refer to [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_svcs) + +### Private Cluster + +_Mutable: no_ + +> Warning: private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide](../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md). + +Assign nodes only internal IP addresses. Private cluster nodes cannot access the public internet unless additional networking steps are taken in GCP. + +### Enable Private Endpoint + +> Warning: private clusters require additional planning and configuration outside of Rancher. Refer to the [private cluster guide](../reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md). + +_Mutable: no_ + +Locks down external access to the control plane endpoint. Only available if **Private Cluster** is also selected. If selected, and if Rancher does not have direct access to the Virtual Private Cloud network the cluster is running in, Rancher will provide a registration command to run on the cluster to enable Rancher to connect to it. + +### Master IPV4 CIDR Block + +_Mutable: no_ + +The IP range for the control plane VPC. + +### Master Authorized Network + +_Mutable: yes_ + +Enable control plane authorized networks to block untrusted non-GCP source IPs from accessing the Kubernetes master through HTTPS. 
If selected, additional authorized networks may be added. If the cluster is created with a public endpoint, this option is useful for locking down access to the public endpoint to only certain networks, such as the network where your Rancher service is running. If the cluster only has a private endpoint, this setting is required. + +# Additional Options + +### Cluster Addons + +Additional Kubernetes cluster components. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.AddonsConfig) + +#### Horizontal Pod Autoscaling + +_Mutable: yes_ + +The Horizontal Pod Autoscaler changes the shape of your Kubernetes workload by automatically increasing or decreasing the number of Pods in response to the workload's CPU or memory consumption, or in response to custom metrics reported from within Kubernetes or external metrics from sources outside of your cluster. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler) + +#### HTTP (L7) Load Balancing + +_Mutable: yes_ + +HTTP (L7) Load Balancing distributes HTTP and HTTPS traffic to backends hosted on GKE. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer) + +#### Network Policy Config (master only) + +_Mutable: yes_ + +Configuration for NetworkPolicy. This only tracks whether the addon is enabled or not on the master, it does not track whether network policy is enabled for the nodes. + +### Cluster Features (Alpha Features) + +_Mutable: no_ + +Turns on all Kubernetes alpha API groups and features for the cluster. When enabled, the cluster cannot be upgraded and will be deleted automatically after 30 days. Alpha clusters are not recommended for production use as they are not covered by the GKE SLA. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/alpha-clusters) + +### Logging Service + +_Mutable: yes_ + +The logging service the cluster uses to write logs. Use either [Cloud Logging](https://cloud.google.com/logging) or no logging service in which case no logs are exported from the cluster. + +### Monitoring Service + +_Mutable: yes_ + +The monitoring service the cluster uses to write metrics. Use either [Cloud Monitoring](https://cloud.google.com/monitoring) or monitoring service in which case no metrics are exported from the cluster. + + +### Maintenance Window + +_Mutable: yes_ + +Set the start time for a 4 hour maintenance window. The time is specified in the UTC time zone using the HH:MM format. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) + +# Node Pools + +In this section, enter details describing the configuration of each node in the node pool. + +### Kubernetes Version + +_Mutable: yes_ + +The Kubernetes version for each node in the node pool. For more information on GKE Kubernetes versions, refer to [these docs.](https://cloud.google.com/kubernetes-engine/versioning) + +### Image Type + +_Mutable: yes_ + +The node operating system image. 
For more information for the node image options that GKE offers for each OS, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/node-images#available_node_images) + +> Note: the default option is "Container-Optimized OS with Docker". The read-only filesystem on GCP's Container-Optimized OS is not compatible with the [legacy logging](../../version-2.0-2.4/pages-for-subheaders/cluster-logging.md) implementation in Rancher. If you need to use the legacy logging feature, select "Ubuntu with Docker" or "Ubuntu with Containerd". The [logging feature as of v2.5](logging.md) is compatible with the Container-Optimized OS image. + +> Note: if selecting "Windows Long Term Service Channel" or "Windows Semi-Annual Channel" for the node pool image type, you must also add at least one Container-Optimized OS or Ubuntu node pool. + +### Machine Type + +_Mutable: no_ + +The virtualized hardware resources available to node instances. For more information on Google Cloud machine types, refer to [this page.](https://cloud.google.com/compute/docs/machine-types#machine_types) + +### Root Disk Type + +_Mutable: no_ + +Standard persistent disks are backed by standard hard disk drives (HDD), while SSD persistent disks are backed by solid state drives (SSD). For more information, refer to [this section.](https://cloud.google.com/compute/docs/disks) + +### Local SSD Disks + +_Mutable: no_ + +Configure each node's local SSD disk storage in GB. Local SSDs are physically attached to the server that hosts your VM instance. Local SSDs have higher throughput and lower latency than standard persistent disks or SSD persistent disks. The data that you store on a local SSD persists only until the instance is stopped or deleted. For more information, see [this section.](https://cloud.google.com/compute/docs/disks#localssds) + +### Preemptible nodes (beta) + +_Mutable: no_ + +Preemptible nodes, also called preemptible VMs, are Compute Engine VM instances that last a maximum of 24 hours in general, and provide no availability guarantees. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms) + +### Taints + +_Mutable: no_ + +When you apply a taint to a node, only Pods that tolerate the taint are allowed to run on the node. In a GKE cluster, you can apply a taint to a node pool, which applies the taint to all nodes in the pool. + +### Node Labels + +_Mutable: no_ + +You can apply labels to the node pool, which applies the labels to all nodes in the pool. + +Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +# Group Details + +In this section, enter details describing the node pool. + +### Name + +_Mutable: no_ + +Enter a name for the node pool. + +### Initial Node Count + +_Mutable: yes_ + +Integer for the starting number of nodes in the node pool. + +### Max Pod Per Node + +_Mutable: no_ + +GKE has a hard limit of 110 Pods per node. 
For more information on the Kubernetes limits, see [this section.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#dimension_limits) + +### Autoscaling + +_Mutable: yes_ + +Node pool autoscaling dynamically creates or deletes nodes based on the demands of your workload. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler) + +### Auto Repair + +_Mutable: yes_ + +GKE's node auto-repair feature helps you keep the nodes in your cluster in a healthy, running state. When enabled, GKE makes periodic checks on the health state of each node in your cluster. If a node fails consecutive health checks over an extended time period, GKE initiates a repair process for that node. For more information, see the section on [auto-repairing nodes.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair) + +### Auto Upgrade + +_Mutable: yes_ + +When enabled, the auto-upgrade feature keeps the nodes in your cluster up-to-date with the cluster control plane (master) version when your control plane is [updated on your behalf.](https://cloud.google.com/kubernetes-engine/upgrades#automatic_cp_upgrades) For more information about auto-upgrading nodes, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades) + +### Access Scopes + +_Mutable: no_ + +Access scopes are the legacy method of specifying permissions for your nodes. + +- **Allow default access:** The default access for new clusters is the [Compute Engine default service account.](https://cloud.google.com/compute/docs/access/service-accounts?hl=en_US#default_service_account) +- **Allow full access to all Cloud APIs:** Generally, you can just set the cloud-platform access scope to allow full access to all Cloud APIs, then grant the service account only relevant IAM roles. The combination of access scopes granted to the virtual machine instance and the IAM roles granted to the service account determines the amount of access the service account has for that instance. +- **Set access for each API:** Alternatively, you can choose to set specific scopes that permit access to the particular API methods that the service will call. + +For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + + +### Configuring the Refresh Interval + +The refresh interval can be configured through the setting "gke-refresh", which is an integer representing seconds. + +The default value is 300 seconds. + +The syncing interval can be changed by running `kubectl edit setting gke-refresh`. + +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for GCP APIs. + + + + +# Labels & Annotations + +Add Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to the cluster. + +Invalid labels can prevent upgrades or can prevent Rancher from starting. 
For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +# Kubernetes Options + +### Location Type +Zonal or Regional. With GKE, you can create a cluster tailored to the availability requirements of your workload and your budget. By default, a cluster's nodes run in a single compute zone. When multiple zones are selected, the cluster's nodes will span multiple compute zones, while the controlplane is located in a single zone. Regional clusters increase the availability of the controlplane as well. For help choosing the type of cluster availability, refer to [these docs.](https://cloud.google.com/kubernetes-engine/docs/best-practices/scalability#choosing_a_regional_or_zonal_control_plane) + +For [regional clusters,](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#regional_clusters) you can select a region. For more information about available regions and zones, refer to [this section](https://cloud.google.com/compute/docs/regions-zones#available). The first part of each zone name is the name of the region. + +The location type can't be changed after the cluster is created. + +### Zone +Each region in Compute engine contains a number of zones. + +For more information about available regions and zones, refer to [these docs.](https://cloud.google.com/compute/docs/regions-zones#available) + +### Additional Zones +For zonal clusters, you can select additional zones to create a [multi-zone cluster.](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#multi-zonal_clusters) + +### Kubernetes Version +Link to list of GKE kubernetes versions + +### Container Address Range + +The IP address range for pods in the cluster. Must be a valid CIDR range, e.g. 10.42.0.0/16. If not specified, a random range is automatically chosen from 10.0.0.0/8 and will exclude ranges already allocated to VMs, other clusters, or routes. Automatically chosen ranges may conflict with reserved IP addresses, dynamic routes, or routes within VPCs peering with the cluster. + +### Alpha Features + +Turns on all Kubernetes alpha API groups and features for the cluster. When enabled, the cluster cannot be upgraded and will be deleted automatically after 30 days. Alpha clusters are not recommended for production use as they are not covered by the GKE SLA. For more information, refer to [this page](https://cloud.google.com/kubernetes-engine/docs/concepts/alpha-clusters). + +### Legacy Authorization + +This option is deprecated and it is recommended to leave it disabled. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#leave_abac_disabled) +### Stackdriver Logging + +Enable logging with Google Cloud's Operations Suite, formerly called Stackdriver. For details, see the [documentation.](https://cloud.google.com/logging/docs/basic-concepts) +### Stackdriver Monitoring + +Enable monitoring with Google Cloud's Operations Suite, formerly called Stackdriver. 
For details, see the [documentation.](https://cloud.google.com/monitoring/docs/monitoring-overview) +### Kubernetes Dashboard + +Enable the [Kubernetes dashboard add-on.](https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards#kubernetes_dashboard) Starting with GKE v1.15, you will no longer be able to enable the Kubernetes Dashboard by using the add-on API. +### Http Load Balancing + +Set up [HTTP(S) load balancing.](https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer) To use Ingress, you must have the HTTP(S) Load Balancing add-on enabled. +### Horizontal Pod Autoscaling + +The Horizontal Pod Autoscaler changes the shape of your Kubernetes workload by automatically increasing or decreasing the number of Pods in response to the workload's CPU or memory consumption, or in response to custom metrics reported from within Kubernetes or external metrics from sources outside of your cluster. For more information, see the [documentation.](https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler) +### Maintenance Window + +Set the start time for a 4 hour maintenance window. The time is specified in the UTC time zone using the HH:MM format. For more information, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions) + +### Network + +The Compute Engine Network that the cluster connects to. Routes and firewalls will be created using this network. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC networks that are shared to your project will appear here. will be available to select in this field. For more information, refer to [this page](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets). + +### Node Subnet / Subnet + +The Compute Engine subnetwork that the cluster connects to. This subnetwork must belong to the network specified in the **Network** field. Select an existing subnetwork, or select "Auto Create Subnetwork" to have one automatically created. If not using an existing network, **Subnetwork Name** is required to generate one. If using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc), the VPC subnets that are shared to your project will appear here. If using a Shared VPC network, you cannot select "Auto Create Subnetwork". For more information, refer to [this page.](https://cloud.google.com/vpc/docs/vpc#vpc_networks_and_subnets) +### Ip Aliases + +Enable [alias IPs](https://cloud.google.com/vpc/docs/alias-ip). This enables VPC-native traffic routing. Required if using [Shared VPCs](https://cloud.google.com/vpc/docs/shared-vpc). + +### Pod address range + +When you create a VPC-native cluster, you specify a subnet in a VPC network. The cluster uses three unique subnet IP address ranges for nodes, pods, and services. For more information on IP address ranges, see [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) + +### Service address range + +When you create a VPC-native cluster, you specify a subnet in a VPC network. 
The cluster uses three unique subnet IP address ranges for nodes, pods, and services. For more information on IP address ranges, see [this section.](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing) +### Cluster Labels + +A [cluster label](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-managing-labels) is a key-value pair that helps you organize your Google Cloud clusters. You can attach a label to each resource, then filter the resources based on their labels. Information about labels is forwarded to the billing system, so you can break down your billing charges by label. + +## Node Options + +### Node Count +Integer for the starting number of nodes in the node pool. + +### Machine Type +For more information on Google Cloud machine types, refer to [this page.](https://cloud.google.com/compute/docs/machine-types#machine_types) + +### Image Type +Ubuntu or Container-Optimized OS images are available. + +For more information about GKE node image options, refer to [this page.](https://cloud.google.com/kubernetes-engine/docs/concepts/node-images#available_node_images) + +### Root Disk Type + +Standard persistent disks are backed by standard hard disk drives (HDD), while SSD persistent disks are backed by solid state drives (SSD). For more information, refer to [this section.](https://cloud.google.com/compute/docs/disks) + +### Root Disk Size +The size in GB of the [root disk.](https://cloud.google.com/compute/docs/disks) + +### Local SSD disks +Configure each node's local SSD disk storage in GB. + +Local SSDs are physically attached to the server that hosts your VM instance. Local SSDs have higher throughput and lower latency than standard persistent disks or SSD persistent disks. The data that you store on a local SSD persists only until the instance is stopped or deleted. For more information, see [this section.](https://cloud.google.com/compute/docs/disks#localssds) + +### Preemptible nodes (beta) + +Preemptible nodes, also called preemptible VMs, are Compute Engine VM instances that last a maximum of 24 hours in general, and provide no availability guarantees. For more information, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms) + +### Auto Upgrade + +> Note: Enabling the Auto Upgrade feature for Nodes is not recommended. + +When enabled, the auto-upgrade feature keeps the nodes in your cluster up-to-date with the cluster control plane (master) version when your control plane is [updated on your behalf.](https://cloud.google.com/kubernetes-engine/upgrades#automatic_cp_upgrades) For more information about auto-upgrading nodes, see [this page.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-upgrades) + +### Auto Repair + +GKE's node auto-repair feature helps you keep the nodes in your cluster in a healthy, running state. When enabled, GKE makes periodic checks on the health state of each node in your cluster. If a node fails consecutive health checks over an extended time period, GKE initiates a repair process for that node. 
For more information, see the section on [auto-repairing nodes.](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-repair) + +### Node Pool Autoscaling + +Enable node pool autoscaling based on cluster load. For more information, see the documentation on [adding a node pool with autoscaling.](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler#adding_a_node_pool_with_autoscaling) + +### Taints +When you apply a taint to a node, only Pods that tolerate the taint are allowed to run on the node. In a GKE cluster, you can apply a taint to a node pool, which applies the taint to all nodes in the pool. +### Node Labels +You can apply labels to the node pool, which applies the labels to all nodes in the pool. + +Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +## Security Options + +### Service Account + +Create a [Service Account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) with a JSON private key and provide the JSON here. See [Google Cloud docs](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) for more info about creating a service account. These IAM roles are required: Compute Viewer (`roles/compute.viewer`), (Project) Viewer (`roles/viewer`), Kubernetes Engine Admin (`roles/container.admin`), Service Account User (`roles/iam.serviceAccountUser`). More info on roles can be found [here.](https://cloud.google.com/kubernetes-engine/docs/how-to/iam-integration) + +### Access Scopes + +Access scopes are the legacy method of specifying permissions for your nodes. + +- **Allow default access:** The default access for new clusters is the [Compute Engine default service account.](https://cloud.google.com/compute/docs/access/service-accounts?hl=en_US#default_service_account) +- **Allow full access to all Cloud APIs:** Generally, you can just set the cloud-platform access scope to allow full access to all Cloud APIs, then grant the service account only relevant IAM roles. The combination of access scopes granted to the virtual machine instance and the IAM roles granted to the service account determines the amount of access the service account has for that instance. +- **Set access for each API:** Alternatively, you can choose to set specific scopes that permit access to the particular API methods that the service will call. 
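To satisfy the **Service Account** requirements above, the account, role bindings, and JSON key can be created with `gcloud` roughly as follows. This is a sketch with hypothetical project and account names; the role list comes from the requirements listed under **Service Account**.

```
# Hypothetical project and account names -- substitute your own.
gcloud iam service-accounts create rancher-gke --project my-gcp-project

# Repeat for each required role: roles/compute.viewer, roles/viewer,
# roles/container.admin, roles/iam.serviceAccountUser
gcloud projects add-iam-policy-binding my-gcp-project \
  --member "serviceAccount:rancher-gke@my-gcp-project.iam.gserviceaccount.com" \
  --role "roles/container.admin"

# Create the JSON private key that is pasted into Rancher
gcloud iam service-accounts keys create rancher-gke-key.json \
  --iam-account rancher-gke@my-gcp-project.iam.gserviceaccount.com
```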
+ +For more information, see the [section about enabling service accounts for a VM.](https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances) + + + diff --git a/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md new file mode 100644 index 0000000000..2ac2702e78 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/helm-charts-in-rancher.md @@ -0,0 +1,105 @@ +--- +title: Helm Charts in Rancher +weight: 11 +aliases: + - /rancher/v2.x/en/helm-charts/apps-marketplace + - /rancher/v2.5/en/catalog/ + - /rancher/v2.5/en/catalog/apps + - /rancher/v2.5/en/catalog/launching-apps + - /rancher/v2.x/en/helm-charts/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/launching-apps/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/adding-catalogs/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/globaldns/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/built-in/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/creating-apps/ + - /rancher/v2.x/en/helm-charts/apps-marketplace/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/tutorial/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/managing-apps/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/catalog-config/ + - /rancher/v2.x/en/helm-charts/legacy-catalogs/multi-cluster-apps/ +--- + +In this section, you'll learn how to manage Helm chart repositories and applications in Rancher. + +### Changes in Rancher v2.5 + +In Rancher v2.5, the Apps and Marketplace feature replaced the catalog system. + +In the cluster manager, Rancher uses a catalog system to import bundles of charts and then uses those charts to either deploy custom helm applications or Rancher's tools such as Monitoring or Istio. The catalog system is still available in the cluster manager in Rancher v2.5, but it is deprecated. + +Now in the Cluster Explorer, Rancher uses a similar but simplified version of the same system. Repositories can be added in the same way that catalogs were, but are specific to the current cluster. Rancher tools come as pre-loaded repositories which deploy as standalone helm charts. + +### Charts + +From the top-left menu select _"Apps & Marketplace"_ and you will be taken to the Charts page. + +The charts page contains all Rancher, Partner, and Custom Charts. + +* Rancher tools such as Logging or Monitoring are included under the Rancher label +* Partner charts reside under the Partners label +* Custom charts will show up under the name of the repository + +All three types are deployed and managed in the same way. + +> Apps managed by the Cluster Manager should continue to be managed only by the Cluster Manager, and apps managed with the Cluster Explorer must be managed only by the Cluster Explorer. + +### Repositories + +From the left sidebar select _"Repositories"_. + +These items represent helm repositories, and can be either traditional helm endpoints which have an index.yaml, or git repositories which will be cloned and can point to a specific branch. In order to use custom charts, simply add your repository here and they will become available in the Charts tab under the name of the repository. 
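
If you prefer to manage repositories declaratively, each entry on this page is backed by a cluster-scoped chart repo resource that you can also create with `kubectl`. The following is only a minimal sketch, assuming the `catalog.cattle.io/v1` `ClusterRepo` type that backs this screen; the names, URL, and branch below are placeholders, and the exact fields may vary between Rancher releases:

```yaml
# Hypothetical HTTP-based repository that serves an index.yaml
apiVersion: catalog.cattle.io/v1
kind: ClusterRepo
metadata:
  name: my-http-charts
spec:
  url: https://charts.example.com
---
# Hypothetical Git-based repository pinned to a specific branch
apiVersion: catalog.cattle.io/v1
kind: ClusterRepo
metadata:
  name: my-git-charts
spec:
  gitRepo: https://github.com/example/my-charts.git
  gitBranch: main
```

Applying either definition with `kubectl apply -f <file>` should make the repository's charts appear in the Charts tab, just as if it had been added through the UI.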
+
+To add a private CA for Helm Chart repositories:
+
+- **HTTP-based chart repositories**: You must add a base64-encoded copy of the CA certificate in DER format to the `spec.caBundle` field of the chart repo. You can generate the value with `openssl x509 -outform der -in ca.pem | base64 -w0`. Click **Edit YAML** for the chart repo and set the field as in the following example:
    + ``` + [...] + spec: + caBundle: + MIIFXzCCA0egAwIBAgIUWNy8WrvSkgNzV0zdWRP79j9cVcEwDQYJKoZIhvcNAQELBQAwPzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQKDAtNeU9yZywgSW5jLjENMAsGA1UEAwwEcm9vdDAeFw0yMTEyMTQwODMyMTdaFw0yNDEwMDMwODMyMT + ... + nDxZ/tNXt/WPJr/PgEB3hQdInDWYMg7vGO0Oz00G5kWg0sJ0ZTSoA10ZwdjIdGEeKlj1NlPyAqpQ+uDnmx6DW+zqfYtLnc/g6GuLLVPamraqN+gyU8CHwAWPNjZonFN9Vpg0PIk1I2zuOc4EHifoTAXSpnjfzfyAxCaZsnTptimlPFJJqAMj+FfDArGmr4= + [...] + ``` + +- **Git-based chart repositories**: It is not currently possible to add a private CA. For git-based chart repositories with a certificate signed by a private CA, you must disable TLS verification. Click **Edit YAML** for the chart repo, and add the key/value pair as follows: + ``` + [...] + spec: + insecureSkipTLSVerify: true + [...] + ``` + +> **Note:** Helm chart repositories with authentication +> +> As of Rancher v2.5.12, a new value `disableSameOriginCheck` has been added to the Repo.Spec. This allows users to bypass the same origin checks, sending the repository Authentication information as a Basic Auth Header with all API calls. This is not recommended but can be used as a temporary solution in cases of non-standard Helm chart repositories such as those that have redirects to a different origin URL. +> +> To use this feature for an existing Helm chart repository, click ⋮ > Edit YAML. On the `spec` portion of the YAML file, add `disableSameOriginCheck` and set it to `true`. +> +> ```yaml +[...] +spec: + disableSameOriginCheck: true +[...] +``` + +### Helm Compatibility + +The Cluster Explorer only supports Helm 3 compatible charts. + + +### Deployment and Upgrades + +From the _"Charts"_ tab select a Chart to install. Rancher and Partner charts may have extra configurations available through custom pages or questions.yaml files, but all chart installations can modify the values.yaml and other basic settings. Once you click install, a Helm operation job is deployed, and the console for the job is displayed. + +To view all recent changes, go to the _"Recent Operations"_ tab. From there you can view the call that was made, conditions, events, and logs. + +After installing a chart, you can find it in the _"Installed Apps"_ tab. In this section you can upgrade or delete the installation, and see further details. When choosing to upgrade, the form and values presented will be the same as installation. + +Most Rancher tools have additional pages located in the toolbar below the _"Apps & Marketplace"_ section to help manage and use the features. These pages include links to dashboards, forms to easily add Custom Resources, and additional information. + +> If you are upgrading your chart using _"Customize Helm options before upgrade"_ , please be aware that using the _"--force"_ option may result in errors if your chart has immutable fields. This is because some objects in Kubernetes cannot be changed once they are created. 
To ensure you do not get this error you can:
+  * use the default upgrade option (i.e., do not use the _"--force"_ option)
+  * uninstall the existing chart and install the upgraded chart
+  * delete the resources with immutable fields from the cluster before performing the _"--force"_ upgrade
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md b/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md
new file mode 100644
index 0000000000..07c40f8211
--- /dev/null
+++ b/versioned_docs/version-2.5/pages-for-subheaders/horizontal-pod-autoscaler.md
@@ -0,0 +1,31 @@
+---
+title: The Horizontal Pod Autoscaler
+description: Learn about the horizontal pod autoscaler (HPA). How to manage HPAs and how to test them with a service deployment
+weight: 3026
+aliases:
+  - /rancher/v2.5/en/k8s-in-rancher/horizontal-pod-autoscaler
+  - /rancher/v2.x/en/k8s-in-rancher/horitzontal-pod-autoscaler/
+---
+
+The [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) (HPA) is a Kubernetes feature that allows you to configure your cluster to automatically scale the services it's running up or down.
+
+Rancher provides some additional features to help manage HPAs, depending on the version of Rancher.
+
+You can create, manage, and delete HPAs using the Rancher UI, which supports only the `autoscaling/v2beta2` API.
+
+## Managing HPAs
+
+The way that you manage HPAs is different based on your version of the Kubernetes API:
+
+- **For Kubernetes API version autoscaling/v2beta1:** This version of the Kubernetes API lets you autoscale your pods based on the CPU and memory utilization of your application.
+- **For Kubernetes API version autoscaling/v2beta2:** This version of the Kubernetes API lets you autoscale your pods based on CPU and memory utilization, in addition to custom metrics.
+
+From the Rancher UI, you can configure the HPA to scale based on CPU and memory utilization. For more information, refer to [Managing HPAs with the Rancher UI](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md). To scale the HPA based on custom metrics, you still need to use `kubectl`. For more information, refer to [Configuring HPA to Scale Using Custom Metrics with Prometheus](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl.md#configuring-hpa-to-scale-using-custom-metrics-with-prometheus).
+
+Clusters created in Rancher v2.0.7 and higher automatically have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use HPA.
+
+## Testing HPAs with a Service Deployment
+
+You can see your HPA's current number of replicas by going to your project and clicking **Resources > HPA.** For more information, refer to [Get HPA Metrics and Status](../how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui.md).
+
+You can also use `kubectl` to get the status of HPAs that you test with your load testing tool. For more information, refer to [Testing HPAs with kubectl](k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/).
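
As a rough illustration of the `kubectl` workflow, the sketch below creates a CPU-based HPA for a hypothetical `hello-world` Deployment in a placeholder namespace and then watches it respond while a load test runs; adjust the names and thresholds for your own workload:

```
# Create an HPA that targets 50% average CPU utilization, scaling between 1 and 10 replicas
kubectl -n my-namespace autoscale deployment hello-world --cpu-percent=50 --min=1 --max=10

# Watch the HPA's current and desired replica counts while the load test runs
kubectl -n my-namespace get hpa hello-world --watch

# Inspect metrics, conditions, and scaling events in detail
kubectl -n my-namespace describe hpa hello-world
```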
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md new file mode 100644 index 0000000000..f91750d70e --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/infrastructure-setup.md @@ -0,0 +1,12 @@ +--- +title: Don't have infrastructure for your Kubernetes cluster? Try one of these tutorials. +shortTitle: Infrastructure Tutorials +weight: 5 +aliases: + - /rancher/v2.x/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ +--- + +To set up infrastructure for a high-availability K3s Kubernetes cluster with an external DB, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) + + +To set up infrastructure for a high-availability RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md b/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md new file mode 100644 index 0000000000..5b7dd2a027 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/install-cluster-autoscaler.md @@ -0,0 +1,27 @@ +--- +title: Cluster Autoscaler +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-admin/cluster-autoscaler/ +--- + +In this section, you'll learn how to install and use the [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +The cluster autoscaler is a tool that automatically adjusts the size of the Kubernetes cluster when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. + +To prevent your pod from being evicted, set a `priorityClassName: system-cluster-critical` property on your pod spec. + +Cluster Autoscaler is designed to run on Kubernetes master nodes. It can run in the `kube-system` namespace. Cluster Autoscaler doesn't scale down nodes with non-mirrored `kube-system` pods running on them. + +It's possible to run a customized deployment of Cluster Autoscaler on worker nodes, but extra care needs to be taken to ensure that Cluster Autoscaler remains up and running. + +# Cloud Providers + +Cluster Autoscaler provides support to distinct cloud providers. 
For more information, go to [cluster-autoscaler supported cloud providers.](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) + +### Setting up Cluster Autoscaler on Amazon Cloud Provider + +For details on running the cluster autoscaler on Amazon cloud provider, refer to [this page.](../how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/install-rancher-on-linux.md b/versioned_docs/version-2.5/pages-for-subheaders/install-rancher-on-linux.md new file mode 100644 index 0000000000..4e0e7f1471 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/install-rancher-on-linux.md @@ -0,0 +1,241 @@ +--- +title: Install/Upgrade Rancher with RancherD +weight: 3 +aliases: + - /rancher/v2.5/en/installation/install-rancher-on-linux + - /rancher/v2.x/en/installation/install-rancher-on-linux/ +--- + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +We are excited to introduce a new, simpler way to install Rancher called RancherD. + +RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. + +- [About RancherD Installs](#about-rancherd-installs) +- [Prerequisites](#prerequisites) +- [Part I: Installing Rancher](#part-i-installing-rancher) +- [Part II: High Availability](#part-ii-high-availability) +- [Upgrades](#upgrades) +- [Configuration](#configuration) +- [Uninstall](#uninstall) +- [RKE2 Documentation](#rke2-documentation) + +# About RancherD Installs + +When RancherD is launched on a host, it first installs an RKE2 Kubernetes cluster, then deploys Rancher on the cluster as a Kubernetes daemonset. + +In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster. + +Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded. + +In Part I of these instructions, you'll learn how to launch RancherD on a single node. The result of following the steps in Part I is a single-node [RKE2](https://docs.rke2.io/) Kubernetes cluster with the Rancher server installed. This cluster can easily become high availability later. If Rancher only needs to manage the local Kubernetes cluster, the installation is complete. + +Part II explains how to convert the single-node Rancher installation into a high-availability installation. If the Rancher server will manage downstream Kubernetes clusters, it is important to follow these steps. A discussion of recommended architecture for highly available Rancher deployments can be found in our [Best Practices Guide.](./rancher-server.md) + +# Prerequisites + +### Node Requirements + +RancherD must be launched on a Linux OS. At this time, only OSes that leverage systemd are supported. + +The Linux node needs to fulfill the [installation requirements](installation-requirements.md) for hardware and networking. Docker is not required for RancherD installs. + +To install RancherD on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, some [additional steps](installation-requirements.md#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required. 
+### Root Access + +Before running the installation commands, you will need to log in as root: + +``` +sudo -s +``` + +### Fixed Registration Address + +A fixed registration address is recommended for single-node installs and required for high-availability installs with RancherD. + +The fixed registration address is an endpoint that is used for two purposes: + +- To access the Kubernetes API. So you can, for example, modify your [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file to point to it instead of a specific node. +- To add new nodes to the Kubernetes cluster. To add nodes to the cluster later, you will run a command on the node that will specify the fixed registration address of the cluster. + +If you are installing Rancher on a single node, the fixed registration address makes it possible to add more nodes to the cluster so that you can convert the single-node install to a high-availability install without causing downtime to the cluster. If you don't set up this address when installing the single-node Kubernetes cluster, you would need to re-run the installation script with a fixed registration address in order to add new nodes to the cluster. + +The fixed registration can be the IP or hostname of any of the server nodes, but in many cases those may change over time as nodes are created and destroyed. Therefore, you should have a stable endpoint in front of the server nodes. + +This endpoint can be set up using any number of approaches, such as: + +* A layer 4 (TCP) load balancer +* Round-robin DNS +* Virtual or elastic IP addresses + +The following should be taken into consideration when configuring the load balancer or other endpoint: + +- The RancherD server process listens on port 9345 for new nodes to register. +- The Kubernetes API is served on port 6443, as normal. +- In RancherD installs, the Rancher UI is served on port 8443 by default. (This is different from Helm chart installs, where port 443 is used by default.) + +# Part I: Installing Rancher + +### 1. Set up Configurations + +To avoid certificate errors with the fixed registration address, you should launch the server with the `tls-san` parameter set. This parameter should refer to your fixed registration address. + +This option adds an additional hostname or IP as a Subject Alternative Name in the server's TLS cert, and it can be specified as a list if you would like to access the Kubernetes cluster via both the IP and the hostname. + +Create the RancherD config file at `/etc/rancher/rke2/config.yaml`: + +```yaml +token: my-shared-secret +tls-san: + - my-fixed-registration-address.com + - another-kubernetes-domain.com +``` + +The first server node establishes the secret token that other nodes would register with if they are added to the cluster. + +If you do not specify a pre-shared secret, RancherD will generate one and place it at `/var/lib/rancher/rke2/server/node-token`. + +To specify your own pre-shared secret as the token, set the `token` argument on startup. + +Installing Rancher this way will use Rancher-generated certificates. 
To use your own self-signed or trusted certificates, refer to the [configuration guide.](../reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference.md#certificates-for-the-rancher-server) + +For information on customizing the RancherD Helm chart values.yaml, refer to [this section.](../reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference.md#customizing-the-rancherd-helm-chart) + +### 2. Launch the first server node + +Run the RancherD installer: + +``` +curl -sfL https://get.rancher.io | sh - +``` + +The RancherD version can be specified using the `INSTALL_RANCHERD_VERSION` environment variable: + +``` +curl -sfL https://get.rancher.io | INSTALL_RANCHERD_VERSION=v2.5.4-rc6 sh - +``` + +Once installed, the `rancherd` binary will be on your PATH. You can check out its help text like this: + +``` +rancherd --help +NAME: + rancherd - Rancher Kubernetes Engine 2 +... +``` + +Next, launch RancherD: + +``` +systemctl enable rancherd-server.service +systemctl start rancherd-server.service +``` + +When RancherD launches, it installs an RKE2 Kubernetes cluster. Use the following command to see the logs of the Kubernetes cluster as it comes up: + +``` +journalctl -eu rancherd-server -f +``` + +### 3. Set up the kubeconfig file with kubectl + +Once the Kubernetes cluster is up, set up RancherD’s kubeconfig file and `kubectl`: + +``` +export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=$PATH:/var/lib/rancher/rke2/bin +``` + +### 4. Verify that Rancher is installed on the Kubernetes cluster + +Now, you can start issuing `kubectl` commands. Use the following commands to verify that Rancher is deployed as a daemonset on the cluster: + +``` +kubectl get daemonset rancher -n cattle-system +kubectl get pod -n cattle-system +``` + +If you watch the pods, you will see the following pods installed: + +- `helm-operation` pods in the `cattle-system` namespace +- a `rancher` pod and `rancher-webhook` pod in the `cattle-system` namespace +- a `fleet-agent`, `fleet-controller`, and `gitjob` pod in the `fleet-system` namespace +- a `rancher-operator` pod in the `rancher-operator-system` namespace + +### 5. Set the initial Rancher password + +Once the `rancher` pod is up and running, run the following: + +``` +rancherd reset-admin +``` + +This will give you the URL, username and password needed to log into Rancher. Follow that URL, plug in the credentials, and you’re up and running with Rancher! + +If Rancher will only manage the local Kubernetes cluster, the installation is complete. + +# Part II: High Availability + +If you plan to use the Rancher server to manage downstream Kubernetes clusters, Rancher needs to be highly available. In these steps, you will add more nodes to achieve a high-availability cluster. Since Rancher is running as a daemonset, it will automatically launch on the nodes you add. + +An odd number of nodes is required because the etcd cluster, which contains the cluster data, needs a majority of live nodes to avoid losing quorum. A loss of quorum could require the cluster to be restored from backup. Therefore, we recommend using three nodes. + +When following these steps, you should still be logged in as root. + +### 1. 
Configure the fixed registration address on a new node + +Additional server nodes are launched much like the first, except that you must specify the `server` and `token` parameters so that they can successfully connect to the initial server node. + +Here is an example of what the RancherD config file would look like for additional server nodes. By default, this config file is expected to be located at `/etc/rancher/rke2/config.yaml`. + +```yaml +server: https://my-fixed-registration-address.com:9345 +token: my-shared-secret +tls-san: + - my-fixed-registration-address.com + - another-kubernetes-domain.com +``` + +### 2. Launch an additional server node + +Run the installer on the new node: + +``` +curl -sfL https://get.rancher.io | sh - +``` + +This will download RancherD and install it as a systemd unit on your host. + + +Next, launch RancherD: + +``` +systemctl enable rancherd-server.service +systemctl start rancherd-server.service +``` + +### 3. Repeat + +Repeat steps one and two for another Linux node, bringing the number of nodes in the cluster to three. + +**Result:** Rancher is highly available and the installation is complete. + +# Upgrades + +For information on upgrades and rollbacks, refer to [this page.](../getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd.md) + +# Configuration + +For information on how to configure certificates, node taints, Rancher Helm chart options, or RancherD CLI options, refer to the [configuration reference.](../reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference.md) + +# Uninstall + +To uninstall RancherD from your system, run the command below. This will shut down the process, remove the RancherD binary, and clean up files used by RancherD. + +``` +rancherd-uninstall.sh +``` + +# RKE2 Documentation + +For more information on RKE2, the Kubernetes distribution used to provision the underlying cluster, refer to the documentation [here.](https://docs.rke2.io/) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md b/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md new file mode 100644 index 0000000000..7fca18f395 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md @@ -0,0 +1,311 @@ +--- +title: Install/Upgrade Rancher on a Kubernetes Cluster +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 2 +aliases: + - /rancher/v2.5/en/installation/k8s-install/ + - /rancher/v2.5/en/installation/k8s-install/helm-rancher + - /rancher/v2.5/en/installation/k8s-install/kubernetes-rke + - /rancher/v2.5/en/installation/ha-server-install + - /rancher/v2.5/en/installation/install-rancher-on-k8s/install + - /rancher/v2.x/en/installation/install-rancher-on-k8s/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. 
+ +- [Prerequisites](#prerequisites) +- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) + +# Prerequisites + +- [Kubernetes Cluster](#kubernetes-cluster) +- [CLI Tools](#cli-tools) +- [Ingress Controller (Only for Hosted Kubernetes)](#ingress-controller-for-hosted-kubernetes) + +### Kubernetes Cluster + +Set up the Rancher server's local Kubernetes cluster. + +Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. + +For help setting up a Kubernetes cluster, we provide these tutorials: + +- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher.md) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster.md) +- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher.md) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster.md) +- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.](../how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster.md) +- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md) +- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks.md) +- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke.md) + +### CLI Tools + +The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. 
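
As a quick sanity check before proceeding, you can confirm that both tools are on your `$PATH` and report suitable versions, for example:

```
# Confirm the kubectl client is installed
kubectl version --client

# Confirm the Helm version meets the requirements above (these docs assume Helm 3)
helm version
```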
+ +### Ingress Controller (For Hosted Kubernetes) + +To deploy Rancher v2.5 on a hosted Kubernetes cluster such as EKS, GKE, or AKS, you should deploy a compatible Ingress controller first to configure [SSL termination on Rancher.](#3-choose-your-ssl-configuration) + +For an example of how to deploy an ingress on EKS, refer to [this section.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#5-install-an-ingress) + +# Install the Rancher Helm Chart + +Rancher is installed using the Helm package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. + +With Helm, we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at https://helm.sh/. + +For systems without direct internet access, see [Air Gap: Kubernetes install](../getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha.md). + +To choose a Rancher version to install, refer to [Choosing a Rancher Version.](../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) + +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md) + +> **Note:** The installation instructions assume you are using Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 migration docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) This [section](../getting-started/installation-and-upgrade/resources/helm-version-requirements.md) provides a copy of the older installation instructions for Rancher installed on an RKE Kubernetes cluster with Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +To set up Rancher, + +1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) +2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) +3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) +4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) +5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) +6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) +7. [Save your options](#7-save-your-options) + +### 1. Add the Helm Chart Repository + +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher](../reference-guides/installation-references/helm-chart-options.md#helm-chart-repositories). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### 2. Create a Namespace for Rancher + +We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: + +``` +kubectl create namespace cattle-system +``` + +### 3. 
Choose your SSL Configuration
+
+The Rancher management server is designed to be secure by default and requires SSL/TLS configuration.
+
+> **Note:** If you want to terminate SSL/TLS externally, see [TLS termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination).
+
+There are three recommended options for the source of the certificate used for TLS termination at the Rancher server:
+
+- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate.
+- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt-issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet.
+- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the names `tls.crt` and `tls.key`. If you are using a private CA, you must also upload that CA certificate, because the private CA may not be trusted by your nodes. Rancher will take that CA certificate and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher.
+
+| Configuration | Helm Chart Option | Requires cert-manager |
+| ------------------------------ | ----------------------- | ------------------------------------- |
+| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#4-install-cert-manager) |
+| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#4-install-cert-manager) |
+| Certificates from Files | `ingress.tls.source=secret` | no |
+
+### 4. Install cert-manager
+
+> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination).
+
+This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt-issued certificates (`ingress.tls.source=letsEncrypt`).
+
+<details>
    + Click to Expand + +> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation](../getting-started/installation-and-upgrade/resources/upgrade-cert-manager.md/). + +These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). + +``` +# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart: +kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml + +# Add the Jetstack Helm repository +helm repo add jetstack https://charts.jetstack.io + +# Update your local Helm chart repository cache +helm repo update + +# Install the cert-manager Helm chart +helm install cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.5.1 +``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +
    + +### 5. Install Rancher with Helm and Your Chosen Certificate Option + +The exact command to install Rancher differs depending on the certificate configuration. + +However, irrespective of the certificate configuration, the name of the Rancher installation in the `cattle-system` namespace should always be `rancher`. + + + + +The default is for Rancher to generate a self-signed CA, and uses `cert-manager` to issue the certificate for access to the Rancher server interface. + +Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set `hostname` to the DNS record that resolves to your load balancer. +- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set replicas=3 +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. + +>**Note:** You need to have port 80 open as the HTTP-01 challenge can only be done on port 80. + +In the following command, + +- Set `hostname` to the public DNS record that resolves to your load balancer. +- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. +- Set `ingress.tls.source` to `letsEncrypt`. +- Set `letsEncrypt.email` to the email address used for communication about your certificate (for example, expiry notices). +- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set replicas=3 \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org \ + --set letsEncrypt.ingress.class=nginx +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + +In this option, Kubernetes secrets are created from your own certificates for Rancher to use. + +When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate, or the Ingress controller will fail to configure correctly. 
+ +Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. + +> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?](../faq/technical-items.md#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +- Set `hostname` as appropriate for your certificate, as described above. +- Set `replicas` to the number of replicas to use for the Rancher Deployment. This defaults to 3; if you have less than 3 nodes in your cluster you should reduce it accordingly. +- Set `ingress.tls.source` to `secret`. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set replicas=3 \ + --set ingress.tls.source=secret +``` + +If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set ingress.tls.source=secret \ + --set privateCA=true +``` + +Now that Rancher is deployed, see [Adding TLS Secrets](../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to publish the certificate files so Rancher and the Ingress controller can use them. + + + + +The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. + +- [HTTP Proxy](../reference-guides/installation-references/helm-chart-options.md#http-proxy) +- [Private Docker Image Registry](../reference-guides/installation-references/helm-chart-options.md#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) + +See the [Chart Options](../reference-guides/installation-references/helm-chart-options.md) for the full list of options. + + +### 6. Verify that the Rancher Server is Successfully Deployed + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. + +### 7. Save Your Options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it. You should have a functional Rancher server. + +In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. + +Doesn't work? 
Take a look at the [Troubleshooting](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting.md) Page + + +### Optional Next Steps + +Enable the Enterprise Cluster Manager. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md new file mode 100644 index 0000000000..876d82ee43 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-and-upgrade.md @@ -0,0 +1,114 @@ +--- +title: Installing/Upgrading Rancher +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 3 +aliases: + - /rancher/v2.5/en/installation/how-ha-works/ + - /rancher/v2.x/en/installation/ +--- + +This section provides an overview of the architecture options of installing Rancher, describing advantages of each option. + +# Terminology + +In this section, + +- **The Rancher server** manages and provisions Kubernetes clusters. You can interact with downstream Kubernetes clusters through the Rancher server's user interface. +- **RKE (Rancher Kubernetes Engine)** is a certified Kubernetes distribution and CLI/library which creates and manages a Kubernetes cluster. +- **K3s (Lightweight Kubernetes)** is also a fully compliant Kubernetes distribution. It is newer than RKE, easier to use, and more lightweight, with a binary size of less than 100 MB. +- **RKE2** is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. +- **RancherD** was an experimental tool for installing Rancher; a single binary that first launched an RKE2 Kubernetes cluster, then installed the Rancher server Helm chart on the cluster. It was available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +# Changes to Installation in Rancher v2.5 + +In Rancher v2.5, the Rancher management server can be installed on any Kubernetes cluster, including hosted clusters, such as Amazon EKS clusters. + +For Docker installations, a local Kubernetes cluster is installed in the single Docker container, and Rancher is installed on the local cluster. + +The `restrictedAdmin` Helm chart option was added. When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#restricted-admin) + +# Overview of Installation Options + +Rancher can be installed on these main architectures: + +### High-availability Kubernetes Install with the Helm CLI + +We recommend using Helm, a Kubernetes package manager, to install Rancher on multiple nodes on a dedicated Kubernetes cluster. For RKE clusters, three nodes are required to achieve a high-availability cluster. For K3s clusters, only two nodes are required. + +### High-availability Kubernetes Install with RancherD + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +RancherD is a single binary that first launches an RKE2 Kubernetes cluster, then installs the Rancher server Helm chart on the cluster. 
+ +In both the RancherD install and the Helm CLI install, Rancher is installed as a Helm chart on a Kubernetes cluster. + +Configuration and upgrading are also simplified with RancherD. When you upgrade the RancherD binary, both the Kubernetes cluster and the Rancher Helm chart are upgraded. + +### Automated Quickstart to Deploy Rancher on Amazon EKS + +Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS Kubernetes cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) + +### Single-node Kubernetes Install + +Rancher can be installed on a single-node Kubernetes cluster. In this case, the Rancher server doesn't have high availability, which is important for running Rancher in production. + +However, this option is useful if you want to save resources by using a single node in the short term, while preserving a high-availability migration path. In the future, you can add nodes to the cluster to get a high-availability Rancher server. + +### Docker Install + +For test and demonstration purposes, Rancher can be installed with Docker on a single node. + +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +### Other Options + +There are also separate instructions for installing Rancher in an air gap environment or behind an HTTP proxy: + +| Level of Internet Access | Kubernetes Installation - Strongly Recommended | Docker Installation | +| ---------------------------------- | ------------------------------ | ---------- | +| With direct access to the Internet | [Docs](install-upgrade-on-a-kubernetes-cluster.md) | [Docs](rancher-on-a-single-node-with-docker.md) | +| Behind an HTTP proxy | [Docs](rancher-behind-an-http-proxy.md) | These [docs,](rancher-on-a-single-node-with-docker.md) plus this [configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) | +| In an air gap environment | [Docs](air-gapped-helm-cli-install.md) | [Docs](air-gapped-helm-cli-install.md) | + +We recommend installing Rancher on a Kubernetes cluster, because in a multi-node cluster, the Rancher management server becomes highly available. This high-availability configuration helps maintain consistent access to the downstream Kubernetes clusters that Rancher will manage. + +For that reason, we recommend that for a production-grade architecture, you should set up a high-availability Kubernetes cluster, then install Rancher on it. After Rancher is installed, you can use Rancher to deploy and manage Kubernetes clusters. + +For testing or demonstration purposes, you can install Rancher in single Docker container. In this Docker install, you can use Rancher to set up Kubernetes clusters out-of-the-box. The Docker install allows you to explore the Rancher server functionality, but it is intended to be used for development and testing purposes only. + +Our [instructions for installing Rancher on Kubernetes](install-upgrade-on-a-kubernetes-cluster.md) describe how to first use K3s or RKE to create and manage a Kubernetes cluster, then install Rancher onto that cluster. 
+ +When the nodes in your Kubernetes cluster are running and fulfill the [node requirements,](installation-requirements.md) you will use Helm to deploy Rancher onto Kubernetes. Helm uses Rancher's Helm chart to install a replica of Rancher on each node in the Kubernetes cluster. We recommend using a load balancer to direct traffic to each replica of Rancher in the cluster. + +For a longer discussion of Rancher architecture, refer to the [architecture overview,](rancher-manager-architecture.md) [recommendations for production-grade architecture,](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) or our [best practices guide.](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) + +# Prerequisites +Before installing Rancher, make sure that your nodes fulfill all of the [installation requirements.](installation-requirements.md) + +# Architecture Tip + +For the best performance and greater security, we recommend a separate, dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +For more architecture recommendations, refer to [this page.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +### More Options for Installations on a Kubernetes Cluster + +Refer to the [Helm chart options](../reference-guides/installation-references/helm-chart-options.md) for details on installing Rancher on a Kubernetes cluster with other configurations, including: + +- With [API auditing to record all transactions](../reference-guides/installation-references/helm-chart-options.md#api-audit-log) +- With [TLS termination on a load balancer](../reference-guides/installation-references/helm-chart-options.md#external-tls-termination) +- With a [custom Ingress](../reference-guides/installation-references/helm-chart-options.md#customizing-your-ingress) + +In the Rancher installation instructions, we recommend using K3s or RKE to set up a Kubernetes cluster before installing Rancher on the cluster. Both K3s and RKE have many configuration options for customizing the Kubernetes cluster to suit your specific environment. 
For the full list of their capabilities, refer to their documentation: + +- [RKE configuration options](https://rancher.com/docs/rke/latest/en/config-options/) +- [K3s configuration options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) + +### More Options for Installations with Docker + +Refer to the [docs about options for Docker installs](rancher-on-a-single-node-with-docker.md) for details about other configurations including: + +- With [API auditing to record all transactions](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) +- With an [external load balancer](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md/) +- With a [persistent data store](../reference-guides/single-node-rancher-in-docker/advanced-options.md#persistent-data) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-references.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md b/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md new file mode 100644 index 0000000000..3500dabfdc --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/installation-requirements.md @@ -0,0 +1,214 @@ +--- +title: Installation Requirements +description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup +weight: 1 +aliases: + - /rancher/v2.x/en/installation/requirements/ +--- + +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. + +> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) which will run your apps and services. 
+ +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) + - [RKE Specific Requirements](#rke-specific-requirements) + - [K3s Specific Requirements](#k3s-specific-requirements) + - [RancherD Specific Requirements](#rancherd-specific-requirements) + - [RKE2 Specific Requirements](#rke2-specific-requirements) + - [Installing Docker](#installing-docker) +- [Hardware Requirements](#hardware-requirements) +- [CPU and Memory](#cpu-and-memory) + - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) + - [K3s Kubernetes](#k3s-kubernetes) + - [RancherD](#rancherd) + - [RKE2 Kubernetes](#rke2-kubernetes) + - [Docker](#docker) +- [Ingress](#ingress) + - [Ingress for RKE2](#ingress-for-rke2) + - [Ingress for EKS](#ingress-for-eks) +- [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) +- [RancherD on SELinux Enforcing CentOS 8 or RHEL 8 Nodes](#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) + +The Rancher UI works best in Firefox or Chrome. + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution. + +Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for RancherD or RKE2 Kubernetes installs. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +The `ntp` (Network Time Protocol) package should be installed. This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. + +Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19 and 1.20, firewalld must be turned off. + +> If you don't feel comfortable doing so you might check suggestions in the [respective issue](https://github.com/rancher/rancher/issues/28840). Some users were successful [creating a separate firewalld zone with a policy of ACCEPT for the Pod CIDR](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822). + +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).](../getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64.md) + +### RKE Specific Requirements + +For the container runtime, RKE should work with any modern Docker version. + +Note that the following sysctl setting must be applied: + +``` +net.bridge.bridge-nf-call-iptables=1 +``` + +### K3s Specific Requirements + +For the container runtime, K3s should work with any modern version of Docker or containerd. 
+ +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. + +If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps](https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. + +### RancherD Specific Requirements + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +At this time, only Linux OSes that leverage systemd are supported. + +To install RancherD on SELinux Enforcing CentOS 8 or RHEL 8 nodes, some [additional steps](#rancherd-on-selinux-enforcing-centos-8-or-rhel-8-nodes) are required. + +Docker is not required for RancherD installs. + +### RKE2 Specific Requirements + +_The RKE2 install is available as of v2.5.6._ + +For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +Docker is not required for RKE2 installs. + +The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md#5-configure-nginx-to-be-a-daemonset) + +### Installing Docker + +Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts](../getting-started/installation-and-upgrade/installation-requirements/install-docker.md) to install Docker with one command. + +Docker is not required for RancherD installs. + +# Hardware Requirements + +The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. + +# CPU and Memory + +Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. + +### RKE and Hosted Kubernetes + +These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. + +These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. 
+ +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | ---------- | ------------ | -------| ------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + +### K3s Kubernetes + +These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.](install-upgrade-on-a-kubernetes-cluster.md) + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | +| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + +### RancherD + +> **Note:** RancherD was an experimental feature available as part of Rancher v2.5.4 through v2.5.10 but is now deprecated and not available for recent releases. + +These CPU and memory requirements apply to each instance with RancherD installed. Minimum recommendations are outlined here. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 2 | 5 GB | +| Medium | Up to 15 | Up to 200 | 3 | 9 GB | + +### RKE2 Kubernetes + +These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 2 | 5 GB | +| Medium | Up to 15 | Up to 200 | 3 | 9 GB | + +### Docker + +These CPU and memory requirements apply to a host with a [single-node](rancher-on-a-single-node-with-docker.md) installation of Rancher. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + +# Ingress + +Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. + +The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. + +For RKE, K3s and RancherD installations, you don't have to install the Ingress manually because it is installed by default. + +For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the ingress. 
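One way to sanity-check the Ingress setup described above is to confirm that the ingress controller runs as a DaemonSet with a pod on every node. A hedged sketch using `kubectl`; the namespace and DaemonSet names vary by Kubernetes distribution, with `ingress-nginx` shown here as the namespace RKE uses by default:

```bash
# Look for ingress-related DaemonSets and compare DESIRED/READY counts to your node count
kubectl get daemonsets --all-namespaces | grep -i ingress

# Show which node each ingress controller pod is scheduled on (RKE default namespace shown)
kubectl get pods -n ingress-nginx -o wide
```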
+ +### Ingress for RKE2 + +Currently, RKE2 deploys nginx-ingress as a deployment by default, so you will need to deploy it as a DaemonSet by following [these steps.](../how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher.md#5-configure-nginx-to-be-a-daemonset) + +### Ingress for EKS +For an example of how to deploy an nginx-ingress-controller with a LoadBalancer service, refer to [this section.](../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks.md#5-install-an-ingress) + +# Disks + +Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +### Port Requirements + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements](../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. + +# RancherD on SELinux Enforcing CentOS 8 or RHEL 8 Nodes + +Before installing Rancher on SELinux Enforcing CentOS 8 nodes or RHEL 8 nodes, you must install `container-selinux` and `iptables`: + +``` +sudo yum install iptables +sudo yum install container-selinux +``` diff --git a/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/integrations-in-rancher.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/introduction.md b/versioned_docs/version-2.5/pages-for-subheaders/introduction.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/introduction.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md b/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md new file mode 100644 index 0000000000..8c8d4641d4 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/istio-setup-guide.md @@ -0,0 +1,32 @@ +--- +title: Setup Guide +weight: 2 +aliases: + - /rancher/v2.5/en/istio/setup + - /rancher/v2.5/en/istio/v2.5/setup/ + - /rancher/v2.x/en/istio/v2.5/setup/ +--- + +This section describes how to enable Istio and start using it in your projects. + +If you use Istio for traffic management, you will need to allow external traffic to the cluster. In that case, you will need to follow all of the steps below. 
+ +# Prerequisites + +This guide assumes you have already [installed Rancher,](installation-and-upgrade.md) and you have already [provisioned a separate Kubernetes cluster](kubernetes-clusters-in-rancher-setup.md) on which you will install Istio. + +The nodes in your cluster must meet the [CPU and memory requirements.](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) + +The workloads and services that you want to be controlled by Istio must meet [Istio's requirements.](https://istio.io/docs/setup/additional-setup/requirements/) + + +# Install + +> **Quick Setup** If you don't need external traffic to reach Istio, and you just want to set up Istio for monitoring and tracing traffic within the cluster, skip the steps for [setting up the Istio gateway](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) and [setting up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) + +1. [Enable Istio in the cluster.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster.md) +1. [Enable Istio in all the namespaces where you want to use it.](../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) +1. [Add deployments and services that have the Istio sidecar injected.](../how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar.md) +1. [Set up the Istio gateway. ](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway.md) +1. [Set up Istio's components for traffic management.](../how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management.md) +1. [Generate traffic and see Istio in action.](../how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/istio.md b/versioned_docs/version-2.5/pages-for-subheaders/istio.md new file mode 100644 index 0000000000..146db64d14 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/istio.md @@ -0,0 +1,128 @@ +--- +title: Istio +weight: 14 +aliases: + - /rancher/v2.5/en/dashboard/istio + - /rancher/v2.x/en/istio/ + - /rancher/v2.x/en/istio/v2.5/ +--- + +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, secure, control, and troubleshoot the traffic within a complex network of microservices. + +As a network of microservices changes and grows, the interactions between them can become increasingly difficult to manage and understand. In such a situation, it is useful to have a service mesh as a separate infrastructure layer. Istio's service mesh lets you manipulate traffic between microservices without changing the microservices directly. + +Our integration of Istio is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to a team of developers. Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +This core service mesh provides features that include but are not limited to the following: + +- **Traffic Management** such as ingress and egress routing, circuit breaking, mirroring. +- **Security** with resources to authenticate and authorize traffic and users, mTLS included. +- **Observability** of logs, metrics, and distributed traffic flows. 
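As an illustration of the traffic-management capability listed above, the sketch below splits traffic between two versions of a service with an Istio `VirtualService`. All names are placeholders, and the manifest assumes the target namespace already has Istio enabled and the sidecar injected into its workloads:

```bash
# Hypothetical canary split: 90% of requests go to reviews-v1, 10% to reviews-v2.
kubectl apply -n my-istio-enabled-namespace -f - <<'EOF'
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews-canary
spec:
  hosts:
    - reviews                # the Kubernetes service that clients call
  http:
    - route:
        - destination:
            host: reviews-v1 # separate per-version services, so no DestinationRule is needed
          weight: 90
        - destination:
            host: reviews-v2
          weight: 10
EOF
```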
+ +After [setting up istio](istio-setup-guide.md) you can leverage Istio's control plane functionality through the Cluster Explorer, `kubectl`, or `istioctl`. + +Istio needs to be set up by a `cluster-admin` before it can be used in a project. + +- [What's New in Rancher v2.5](#what-s-new-in-rancher-v2-5) +- [Tools Bundled with Istio](#tools-bundled-with-istio) +- [Prerequisites](#prerequisites) +- [Setup Guide](#setup-guide) +- [Remove Istio](#remove-istio) +- [Migrate from Previous Istio Version](#migrate-from-previous-istio-version) +- [Accessing Visualizations](#accessing-visualizations) +- [Architecture](#architecture) +- [Additional steps for installing Istio on an RKE2 cluster](#additional-steps-for-installing-istio-on-an-rke2-cluster) + +# What's New in Rancher v2.5 + +The overall architecture of Istio has been simplified. A single component, Istiod, has been created by combining Pilot, Citadel, Galley and the sidecar injector. Node Agent functionality has also been merged into istio-agent. + +Addons that were previously installed by Istio (cert-manager, Grafana, Jaeger, Kiali, Prometheus, Zipkin) will now need to be installed separately. Istio will support installation of integrations that are from the Istio Project and will maintain compatibility with those that are not. + +A Prometheus integration will still be available through an installation of [Rancher Monitoring](monitoring-and-alerting.md), or by installing your own Prometheus operator. Rancher's Istio chart will also install Kiali by default to ensure you can get a full picture of your microservices out of the box. + +Istio has migrated away from Helm as a way to install Istio and now provides installation through the istioctl binary or Istio Operator. To ensure the easiest interaction with Istio, Rancher's Istio will maintain a Helm chart that utilizes the istioctl binary to manage your Istio installation. + +This Helm chart will be available via the Apps and Marketplace in the UI. A user that has access to the Rancher Chart's catalog will need to set up Istio before it can be used in the project. + +# Tools Bundled with Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy Helm chart, including an overlay file option to allow complex customization. + +It also includes the following: + +### Kiali + +Kiali is a comprehensive visualization aid used for graphing traffic flow throughout the service mesh. It allows you to see how they are connected, including the traffic rates and latencies between them. + +You can check the health of the service mesh, or drill down to see the incoming and outgoing requests to a single component. + +### Jaeger + +_Bundled as of v2.5.4_ + +Our Istio installer includes a quick-start, all-in-one installation of [Jaeger,](https://www.jaegertracing.io/) a tool used for tracing distributed systems. + +Note that this is not a production-qualified deployment of Jaeger. This deployment uses an in-memory storage component, while a persistent storage component is recommended for production. 
For more information on which deployment strategy you may need, refer to the [Jaeger documentation.](https://www.jaegertracing.io/docs/latest/operator/#production-strategy) + +# Prerequisites + +Before enabling Istio, we recommend that you confirm that your Rancher worker nodes have enough [CPU and memory](../explanations/integrations-in-rancher/istio/cpu-and-memory-allocations.md) to run all of the components of Istio. + +If you are installing Istio on an RKE2 cluster, some additional steps are required. For details, see [this section.](#additional-steps-for-installing-istio-on-an-rke2-cluster) + +# Setup Guide + +Refer to the [setup guide](istio-setup-guide.md) for instructions on how to set up Istio and use it in a project. + +# Remove Istio + +To remove Istio components from a cluster, namespace, or workload, refer to the section on [uninstalling Istio.](../explanations/integrations-in-rancher/istio/disable-istio.md) + +# Migrate From Previous Istio Version + +There is no upgrade path for Istio versions less than 1.7.x. To successfully install Istio in the **Cluster Explorer**, you will need to disable your existing Istio in the **Cluster Manager**. + +If you have a significant number of additional Istio CRDs, you might consider manually migrating the CRDs that are supported in both versions of Istio. You can do this by running `kubectl get <resource> -n istio-system -o yaml`, saving the output YAML, and re-applying it in the new version. + +Another option is to manually uninstall Istio resources one at a time, but leave the resources that are supported in both versions of Istio and that will not be installed by the newest version. This method is more likely to result in issues installing the new version, but could be a good option depending on your situation. + +# Accessing Visualizations + +> By default, only cluster-admins have access to Kiali. For instructions on how to allow admin, edit, or view roles to access them, see [this section.](../explanations/integrations-in-rancher/istio/rbac-for-istio.md) + +After Istio is set up in a cluster, Grafana, Prometheus, and Kiali are available in the Rancher UI. + +To access the Grafana and Prometheus visualizations, from the **Cluster Explorer** navigate to the **Monitoring** app overview page, and click on **Grafana** or **Prometheus**. + +To access the Kiali visualization, from the **Cluster Explorer** navigate to the **Istio** app overview page, and click on **Kiali**. From here you can access the **Traffic Graph** tab or the **Traffic Metrics** tab to see network visualizations and metrics. + +By default, all namespaces are picked up by Prometheus, which makes data available for Kiali graphs. Refer to [selector/scrape config setup](../explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations.md) if you would like to use a different configuration for Prometheus data scraping. + +Your access to the visualizations depends on your role. Grafana and Prometheus are only available for `cluster-admin` roles. The Kiali UI is available only to `cluster-admin` by default, but `cluster-admin` can allow other roles to access it by editing the Istio values.yaml. + +# Architecture + +Istio installs a service mesh that uses [Envoy](https://www.envoyproxy.io/learn/service-mesh) sidecar proxies to intercept traffic to each workload.
These sidecars intercept and manage service-to-service communication, allowing fine-grained observation and control over traffic within the cluster. + +Only workloads that have the Istio sidecar injected can be tracked and controlled by Istio. + +When a namespace has Istio enabled, new workloads deployed in the namespace will automatically have the Istio sidecar. You need to manually enable Istio in preexisting workloads. + +For more information on the Istio sidecar, refer to the [Istio sidecar injection docs](https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/). For more information on Istio's architecture, refer to the [Istio Architecture docs](https://istio.io/latest/docs/ops/deployment/architecture/). + +### Multiple Ingresses + +By default, each Rancher-provisioned cluster has one NGINX ingress controller allowing traffic into the cluster. Istio also installs an ingress gateway by default into the `istio-system` namespace. The result is that your cluster will have two ingresses. + +![In an Istio-enabled cluster, you can have two ingresses: the default Nginx ingress, and the default Istio controller.](/img/istio-ingress.svg) + + Additional Istio Ingress gateways can be enabled via the [overlay file](./configuration-options.md#overlay-file). + +### Egress Support + +By default, the Egress gateway is disabled, but it can be enabled on install or upgrade through the values.yaml or via the [overlay file](./configuration-options.md#overlay-file). + +# Additional Steps for Installing Istio on an RKE2 Cluster + +To install Istio on an RKE2 cluster, follow the steps in [this section.](../explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster.md) diff --git a/content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/k8s-tutorials/_index.md rename to versioned_docs/version-2.5/pages-for-subheaders/kubernetes-cluster-setup.md diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md new file mode 100644 index 0000000000..c74ce91f37 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md @@ -0,0 +1,87 @@ +--- +title: Setting up Kubernetes Clusters in Rancher +description: Provisioning Kubernetes Clusters +weight: 7 +aliases: + - /rancher/v2.5/en/concepts/clusters/ + - /rancher/v2.5/en/concepts/clusters/cluster-providers/ + - /rancher/v2.5/en/tasks/clusters/ + - /rancher/v2.x/en/cluster-provisioning/ +--- + +Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. + +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture](rancher-manager-architecture.md) page.
+ +This section covers the following topics: + + + +- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) +- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) +- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) + - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) + - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) +- [Registering Existing Clusters](#registering-existing-clusters) + + + +### Cluster Management Capabilities by Cluster Type + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +# Setting up Clusters in a Hosted Kubernetes Provider + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +For more information, refer to the section on [hosted Kubernetes clusters.](set-up-clusters-from-hosted-kubernetes-providers.md) + +# Launching Kubernetes with Rancher + +Rancher uses the [Rancher Kubernetes Engine (RKE)](https://rancher.com/docs/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. + +In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. + +These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. + +If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. + +For more information, refer to the section on [RKE clusters.](launch-kubernetes-with-rancher.md) + +### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider + +Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This template defines the parameters used to launch nodes in your cloud providers. + +One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. + +The cloud providers available for creating a node template are decided based on the [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers) active in the Rancher UI. 
+ +For more information, refer to the section on [nodes hosted by an infrastructure provider](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,](use-existing-nodes.md) which creates a custom cluster. + +You can bring any nodes you want to Rancher and use them to create a cluster. + +These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. + +# Registering Existing Clusters + +The cluster registration feature replaces the feature to import clusters. + +Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. + +When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +For more information, see [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md new file mode 100644 index 0000000000..b4cd4b25b6 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-components.md @@ -0,0 +1,20 @@ +--- +title: Kubernetes Components +weight: 100 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/ +--- + +The commands and steps listed in this section apply to the core Kubernetes components on [Rancher Launched Kubernetes](launch-kubernetes-with-rancher.md) clusters. + +This section includes troubleshooting tips in the following categories: + +- [Troubleshooting etcd Nodes](../troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md) +- [Troubleshooting Controlplane Nodes](../troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md) +- [Troubleshooting nginx-proxy Nodes](../troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md) +- [Troubleshooting Worker Nodes and Generic Components](../troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md) + +# Kubernetes Component Diagram + +![Cluster diagram](/img/clusterdiagram.svg)
+Lines show the traffic flow between components. Colors are used purely as a visual aid. \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md new file mode 100644 index 0000000000..816dfd2eee --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/kubernetes-resources-setup.md @@ -0,0 +1,77 @@ +--- +title: Kubernetes Resources +weight: 18 +aliases: + - /rancher/v2.5/en/concepts/ + - /rancher/v2.5/en/tasks/ + - /rancher/v2.5/en/concepts/resources/ + - /rancher/v2.x/en/k8s-in-rancher/ +--- + +> The Cluster Explorer is a new feature in Rancher v2.5 that allows you to view and manipulate all of the custom resources and CRDs in a Kubernetes cluster from the Rancher UI. This section will be updated to reflect the way that Kubernetes resources are handled in Rancher v2.5. + +## Workloads + +Deploy applications to your cluster nodes using [workloads](workloads-and-pods.md), which are objects that contain pods that run your apps, along with metadata that sets rules for the deployment's behavior. Workloads can be deployed within the scope of the entire cluster or within a namespace. + +When deploying a workload, you can deploy from any image. There are a variety of [workload types](workloads-and-pods.md#workload-types) to choose from, which determine how your application should run. + +Following a workload deployment, you can continue working with it. You can: + +- [Upgrade](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) the workload to a newer version of the application it's running. +- [Roll back](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) a workload to a previous version, if an issue occurs during upgrade. +- [Add a sidecar](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar.md), which is a workload that supports a primary workload. + +## Load Balancing and Ingress + +### Load Balancers + +After you launch an application, it's only available within the cluster. It can't be reached externally. + +If you want your applications to be externally accessible, you must add a load balancer to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. + +Rancher supports two types of load balancers: + +- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) + +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). + +#### Ingress + +Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiple load balancers can be expensive. You can get around this issue by using an ingress. + +Ingress is a set of rules that act as a load balancer.
Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster program the load balancer to direct the request to the correct service based on service subdomains or path rules that you've configured. + +For more information, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). + +When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. + +## Service Discovery + +After you expose your cluster to external requests using a load balancer and/or ingress, it's only available by IP address. To create a resolveable hostname, you must create a service record, which is a record that maps an IP address, external hostname, DNS record alias, workload(s), or labelled pods to a specific hostname. + +For more information, see [Service Discovery](../how-to-guides/new-user-guides/kubernetes-resources-setup/create-services.md). + +## Pipelines + +After your project has been [configured to a version control provider](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md#1-configure-version-control-providers), you can add the repositories and start configuring a pipeline for each repository. + +For more information, see [Pipelines](./pipelines.md). + +## Applications + +Besides launching individual components of an application, you can use the Rancher catalog to start launching applications, which are Helm charts. + +For more information, see [Applications in a Project](./helm-charts-in-rancher.md). + +## Kubernetes Resources + +Within the context of a Rancher project or namespace, _resources_ are files and data that support operation of your pods. Within Rancher, certificates, registries, and secrets are all considered resources. However, Kubernetes classifies resources as different types of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/). Therefore, within a single project or namespace, individual resources must have unique names to avoid conflicts. Although resources are primarily used to carry sensitive information, they have other uses as well. + +Resources include: + +- [Certificates](../how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication.md): Files used to encrypt/decrypt data entering or leaving the cluster. +- [ConfigMaps](../how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps.md): Files that store general configuration information, such as a group of config files. +- [Secrets](../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md): Files that store sensitive data like passwords, tokens, or keys. +- [Registries](../how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries.md): Files that carry credentials used to authenticate with private registries. 
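These resources are normally created through the Rancher UI, but for reference, a minimal `kubectl` sketch of creating equivalent objects; the namespace, names, credentials, and file paths below are placeholders:

```bash
# A private registry credential, stored as a kubernetes.io/dockerconfigjson secret
kubectl -n my-namespace create secret docker-registry my-registry-creds \
  --docker-server=registry.example.com \
  --docker-username=my-user \
  --docker-password='my-password'

# A ConfigMap built from a directory of configuration files
kubectl -n my-namespace create configmap my-app-config --from-file=./config/

# A TLS certificate and key for encrypting HTTP traffic
kubectl -n my-namespace create secret tls my-tls-cert --cert=tls.crt --key=tls.key
```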
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md b/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md new file mode 100644 index 0000000000..c741178b68 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/launch-kubernetes-with-rancher.md @@ -0,0 +1,36 @@ +--- +title: Launching Kubernetes with Rancher +weight: 4 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/ +--- + +You can have Rancher launch a Kubernetes cluster using any nodes you want. When Rancher deploys Kubernetes onto these nodes, it uses [Rancher Kubernetes Engine](https://rancher.com/docs/rke/latest/en/) (RKE), which is Rancher's own lightweight Kubernetes installer. It can launch Kubernetes on any computers, including: + +- Bare-metal servers +- On-premise virtual machines +- Virtual machines hosted by an infrastructure provider + +Rancher can install Kubernetes on existing nodes, or it can dynamically provision nodes in an infrastructure provider and install Kubernetes on them. + +RKE clusters include clusters that Rancher launched on Windows nodes or other existing custom nodes, as well as clusters that Rancher launched with new nodes on Azure, Digital Ocean, EC2, or vSphere. + +### Requirements + +If you use RKE to set up a cluster, your nodes must meet the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) for nodes in downstream user clusters. + +### Launching Kubernetes on New Nodes in an Infrastructure Provider + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +For more information, refer to the section on [launching Kubernetes on new nodes.](use-new-nodes-in-an-infra-provider.md) + +### Launching Kubernetes on Existing Custom Nodes + +In this scenario, you want to install Kubernetes on bare-metal servers, on-prem virtual machines, or virtual machines that already exist in a cloud provider. With this option, you will run a Rancher agent Docker container on the machine. + +If you want to reuse a node from a previous custom cluster, [clean the node](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. 
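For orientation, an abbreviated, hedged sketch of the kind of cleanup the linked page describes. The authoritative list of directories, mounts, and network interfaces is in the node cleanup documentation, and the paths below are not exhaustive:

```bash
# Remove containers and volumes left over from the previous cluster
docker rm -f $(docker ps -qa)
docker volume rm $(docker volume ls -q)

# Remove state directories written by Kubernetes and Rancher components
# (mounts under /var/lib/kubelet may need to be unmounted first)
sudo rm -rf /etc/kubernetes /etc/cni /opt/cni /var/lib/etcd /var/lib/cni /var/lib/kubelet /var/lib/rancher

# Reboot so leftover network interfaces and mounts are cleared
sudo reboot
```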
+ +For more information, refer to the section on [custom nodes.](use-existing-nodes.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md b/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md new file mode 100644 index 0000000000..e79744f1c0 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/load-balancer-and-ingress-controller.md @@ -0,0 +1,64 @@ +--- +title: Set Up Load Balancer and Ingress Controller within Rancher +description: Learn how you can set up load balancers and ingress controllers to redirect service requests within Rancher, and learn about the limitations of load balancers +weight: 3040 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/load-balancers-and-ingress + - /rancher/v2.x/en/k8s-in-rancher/load-balancers-and-ingress/ +--- + +Within Rancher, you can set up load balancers and ingress controllers to redirect service requests. + +## Load Balancers + +After you launch an application, the app is only available within the cluster. It can't be reached from outside the cluster. + +If you want your applications to be externally accessible, you must add a load balancer or ingress to your cluster. Load balancers create a gateway for external connections to access your cluster, provided that the user knows the load balancer's IP address and the application's port number. + +Rancher supports two types of load balancers: + +- [Layer-4 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-4-load-balancer) +- [Layer-7 Load Balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#layer-7-load-balancer) + +For more information, see [load balancers](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md). + +### Load Balancer Limitations + +Load Balancers have a couple of limitations you should be aware of: + +- Load Balancers can only handle one IP address per service, which means if you run multiple services in your cluster, you must have a load balancer for each service. Running multiples load balancers can be expensive. + +- If you want to use a load balancer with a Hosted Kubernetes cluster (i.e., clusters hosted in GKE, EKS, or AKS), the load balancer must be running within that cloud provider's infrastructure. Please review the compatibility tables regarding support for load balancers based on how you've provisioned your clusters: + + + - [Support for Layer-4 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-4-load-balancing) + + - [Support for Layer-7 Load Balancing](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing.md#support-for-layer-7-load-balancing) + +## Ingress + +As mentioned in the limitations above, the disadvantages of using a load balancer are: + +- Load Balancers can only handle one IP address per service. +- If you run multiple services in your cluster, you must have a load balancer for each service. +- It can be expensive to have a load balancer for every service. 
+ +In contrast, when an ingress is used as the entrypoint into a cluster, the ingress can route traffic to multiple services with greater flexibility. It can map multiple HTTP requests to services without individual IP addresses for each service. + +Therefore, it is useful to have an ingress if you want multiple services to be exposed with the same IP address, the same Layer 7 protocol, or the same privileged node-ports: 80 and 443. + +Ingress works in conjunction with one or more ingress controllers to dynamically route service requests. When the ingress receives a request, the ingress controller(s) in your cluster direct the request to the correct service based on service subdomains or path rules that you've configured. + +Each Kubernetes Ingress resource corresponds roughly to a file in `/etc/nginx/sites-available/` containing a `server{}` configuration block, where requests for specific files and folders are configured. + +Your ingress, which creates a port of entry to your cluster similar to a load balancer, can reside within your cluster or externally. Ingress and ingress controllers residing in RKE-launched clusters are powered by [Nginx](https://www.nginx.com/). + +Ingress can provide other functionality as well, such as SSL termination, name-based virtual hosting, and more. + +>**Using Rancher in a High Availability Configuration?** +> +>Refrain from adding an Ingress to the `local` cluster. The Nginx Ingress Controller that Rancher uses acts as a global entry point for _all_ clusters managed by Rancher, including the `local` cluster. Therefore, when users try to access an application, your Rancher connection may drop due to the Nginx configuration being reloaded. We recommend working around this issue by deploying applications only in clusters that you launch using Rancher. + +- For more information on how to set up ingress in Rancher, see [Ingress](../how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses.md). +- For complete information about ingress and ingress controllers, see the [Kubernetes Ingress Documentation](https://kubernetes.io/docs/concepts/services-networking/ingress/) +- When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/logging.md b/versioned_docs/version-2.5/pages-for-subheaders/logging.md new file mode 100644 index 0000000000..f723bfc22d --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/logging.md @@ -0,0 +1,155 @@ +--- +title: Rancher Integration with Logging Services +shortTitle: Logging +description: Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster. +metaDescription: "Rancher integrates with popular logging services. Learn the requirements and benefits of integrating with logging services, and enable logging on your cluster." 
+weight: 15 +aliases: + - /rancher/v2.5/en/dashboard/logging + - /rancher/v2.5/en/logging/v2.5 + - /rancher/v2.5/en/cluster-admin/tools/logging + - /rancher/v2.x/en/logging/ + - /rancher/v2.x/en/logging/v2.5/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The [Banzai Cloud Logging operator](https://banzaicloud.com/docs/one-eye/logging-operator/) now powers Rancher's logging solution in place of the former, in-house solution. + +For an overview of the changes in v2.5, see [this section.](../explanations/integrations-in-rancher/logging/logging-architecture.md#changes-in-rancher-v25) For information about migrating from Logging V1, see [this page.](../explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging.md) + +- [Enabling Logging](#enabling-logging) +- [Uninstall Logging](#uninstall-logging) +- [Architecture](#architecture) +- [Role-based Access Control](#role-based-access-control) +- [Configuring the Logging Custom Resources](#configuring-the-logging-custom-resources) + - [Flows and ClusterFlows](#flows-and-clusterflows) + - [Outputs and ClusterOutputs](#outputs-and-clusteroutputs) +- [Configuring the Logging Helm Chart](#configuring-the-logging-helm-chart) + - [Windows Support](#windows-support) + - [Working with a Custom Docker Root Directory](#working-with-a-custom-docker-root-directory) + - [Working with Taints and Tolerations](#working-with-taints-and-tolerations) + - [Logging V2 with SELinux](#logging-v2-with-selinux) + - [Additional Logging Sources](#additional-logging-sources) +- [Troubleshooting](#troubleshooting) + +# Enabling Logging + +You can enable logging for a Rancher-managed cluster by going to the Apps page and installing the logging app. + +1. In the Rancher UI, go to the cluster where you want to install logging and click **Cluster Explorer**. +1. Click **Apps**. +1. Click the `rancher-logging` app. +1. Scroll to the bottom of the Helm chart README and click **Install**. + +**Result:** The logging app is deployed in the `cattle-logging-system` namespace. + +# Uninstall Logging + +1. From the **Cluster Explorer**, click **Apps & Marketplace**. +1. Click **Installed Apps**. +1. Go to the `cattle-logging-system` namespace and check the boxes for `rancher-logging` and `rancher-logging-crd`. +1. Click **Delete**. +1. Confirm **Delete**. + +**Result:** `rancher-logging` is uninstalled. + +# Architecture + +For more information about how the logging application works, see [this section.](../explanations/integrations-in-rancher/logging/logging-architecture.md) + + + +# Role-based Access Control + +Rancher logging has two roles, `logging-admin` and `logging-view`. For more information on how and when to use these roles, see [this page.](../explanations/integrations-in-rancher/logging/rbac-for-logging.md) + +# Configuring the Logging Custom Resources + +To manage `Flows`, `ClusterFlows`, `Outputs`, and `ClusterOutputs`, go to the **Cluster Explorer** in the Rancher UI. In the upper left corner, click **Cluster Explorer > Logging**.
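The same custom resources can also be inspected from the command line; a small sketch, assuming the logging app is installed so the Banzai Cloud CRDs are present in the cluster (names and namespaces below are placeholders):

```bash
# List the logging operator custom resources
kubectl get clusterflows,clusteroutputs -n cattle-logging-system
kubectl get flows,outputs --all-namespaces

# Inspect a single Flow in detail
kubectl -n my-namespace get flow my-flow -o yaml
```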
+ +### Flows and ClusterFlows + +For help with configuring `Flows` and `ClusterFlows`, see [this page.](../explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows.md) + +### Outputs and ClusterOutputs + +For help with configuring `Outputs` and `ClusterOutputs`, see [this page.](../explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs.md) + +# Configuring the Logging Helm Chart + +For a list of options that can be configured when the logging application is installed or upgraded, see [this page.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md) + +### Windows Support + + + + +As of Rancher v2.5.8, logging support for Windows clusters has been added and logs can be collected from Windows nodes. + +For details on how to enable or disable Windows node logging, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#enabledisable-windows-node-logging) + + + + + +Clusters with Windows workers support exporting logs from Linux nodes, but Windows node logs are currently unable to be exported. +Only Linux node logs are able to be exported. + +To allow the logging pods to be scheduled on Linux nodes, tolerations must be added to the pods. Refer to the [Working with Taints and Tolerations](../explanations/integrations-in-rancher/logging/taints-and-tolerations.md) section for details and an example. + + + + + +### Working with a Custom Docker Root Directory + +For details on using a custom Docker root directory, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#working-with-a-custom-docker-root-directory) + + +### Working with Taints and Tolerations + +For information on how to use taints and tolerations with the logging application, see [this page.](../explanations/integrations-in-rancher/logging/taints-and-tolerations.md) + + +### Logging V2 with SELinux + +_Available as of v2.5.8_ + +For information on enabling the logging application for SELinux-enabled nodes, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#enabling-the-logging-application-to-work-with-selinux) + +### Additional Logging Sources + +By default, Rancher collects logs for control plane components and node components for all cluster types. In some cases, additional logs can be collected. For details, see [this section.](../explanations/integrations-in-rancher/logging/logging-helm-chart-options.md#additional-logging-sources) + + +# Troubleshooting + +### The `cattle-logging` Namespace Being Recreated + +If your cluster previously deployed logging from the Cluster Manager UI, you may encounter an issue where its `cattle-logging` namespace is continually being recreated. + +The solution is to delete all `clusterloggings.management.cattle.io` and `projectloggings.management.cattle.io` custom resources from the cluster-specific namespace in the management cluster. +The existence of these custom resources causes Rancher to create the `cattle-logging` namespace in the downstream cluster if it does not exist. + +The cluster namespace matches the cluster ID, so we need to find the cluster ID for each cluster. + +1. In your web browser, navigate to your cluster(s) in either the Cluster Manager UI or the Cluster Explorer UI. +2. Copy the `<cluster-id>` portion from one of the URLs below. The `<cluster-id>` portion is the cluster namespace name.
+ +```bash +# Cluster Management UI +https://<rancher-url>/c/<cluster-id>/ + +# Cluster Explorer UI (Dashboard) +https://<rancher-url>/dashboard/c/<cluster-id>/ +``` + +Now that we have the `<cluster-id>` namespace, we can delete the CRs that cause `cattle-logging` to be continually recreated. +*Warning:* Ensure that logging, the version installed from the Cluster Manager UI, is not currently in use. + +```bash +kubectl delete clusterloggings.management.cattle.io -n <cluster-id> +kubectl delete projectloggings.management.cattle.io -n <cluster-id> +``` diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md new file mode 100644 index 0000000000..ed8ceebd4a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-clusters.md @@ -0,0 +1,42 @@ +--- +title: Cluster Administration +weight: 8 +aliases: + - /rancher/v2.x/en/cluster-admin/ +--- + +After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. + +This page covers the following topics: + +- [Switching between clusters](#switching-between-clusters) +- [Managing clusters in Rancher](#managing-clusters-in-rancher) +- [Configuring tools](#configuring-tools) + +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. + +## Switching between Clusters + +To switch between clusters, use the drop-down available in the navigation bar. + +Alternatively, you can switch between projects and clusters directly in the navigation bar. Open the **Global** view and select **Clusters** from the main menu. Then select the name of the cluster you want to open. + +## Managing Clusters in Rancher + +After clusters have been [provisioned into Rancher](kubernetes-clusters-in-rancher-setup.md), [cluster owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles) will need to manage these clusters. There are many different options for how to manage your cluster. + +import ClusterCapabilitiesTable from '../shared-files/_cluster-capabilities-table.md'; + + + +## Configuring Tools + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently.
Tools are divided into the following categories: + +- Alerts +- Notifiers +- Logging +- Monitoring +- Istio Service Mesh +- OPA Gatekeeper + diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-persistent-storage.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md new file mode 100644 index 0000000000..5db30c266e --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-project-resource-quotas.md @@ -0,0 +1,45 @@ +--- +title: Project Resource Quotas +weight: 2515 +aliases: + - /rancher/v2.5/en/cluster-admin/projects-and-namespaces/resource-quotas + - /rancher/v2.x/en/project-admin/resource-quotas/ +--- + +In situations where several teams share a cluster, one team may overconsume the resources available: CPU, memory, storage, services, Kubernetes objects like pods or secrets, and so on. To prevent this overconsumption, you can apply a _resource quota_, which is a Rancher feature that limits the resources available to a project or namespace. + +This page is a how-to guide for creating resource quotas in existing projects. + +Resource quotas can also be set when a new project is created. For details, refer to the section on [creating new projects.](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md#creating-projects) + +Resource quotas in Rancher include the same functionality as the [native version of Kubernetes](https://kubernetes.io/docs/concepts/policy/resource-quotas/). In Rancher, resource quotas have been extended so that you can apply them to projects. For details on how resource quotas work with projects in Rancher, refer to [this page.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas.md) + +### Applying Resource Quotas to Existing Projects + +Edit [resource quotas](./manage-project-resource-quotas.md) when: + +- You want to limit the resources that a project and its namespaces can use. +- You want to scale the resources available to a project up or down when a resource quota is already in effect. + +1. From the **Global** view, open the cluster containing the project to which you want to apply a resource quota. + +1. From the main menu, select **Projects/Namespaces**. + +1. Find the project that you want to add a resource quota to. From that project, select **⋮ > Edit**. + +1. Expand **Resource Quotas** and click **Add Quota**. Alternatively, you can edit existing quotas. + +1. Select a Resource Type. For more information on types, see the [quota type reference.](../how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types.md) + +1. Enter values for the **Project Limit** and the **Namespace Default Limit**. + + | Field | Description | + | ----------------------- | -------------------------------------------------------------------------------------------------------- | + | Project Limit | The overall resource limit for the project. | + | Namespace Default Limit | The default resource limit available for each namespace. This limit is propagated to each namespace in the project.
The combined limit of all project namespaces shouldn't exceed the project limit. | + +1. **Optional:** Add more quotas. + +1. Click **Create**. + +**Result:** The resource quota is applied to your project and namespaces. When you add more namespaces in the future, Rancher validates that the project can accommodate the namespace. If the project can't allocate the resources, you may still create namespaces, but they will be given a resource quota of 0. Subsequently, Rancher will not allow you to create any resources restricted by this quota. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md new file mode 100644 index 0000000000..33ef225f85 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-projects.md @@ -0,0 +1,44 @@ +--- +title: Project Administration +weight: 9 +aliases: + - /rancher/v2.5/en/project-admin/editing-projects/ + - /rancher/v2.x/en/project-admin/ +--- + +_Projects_ are objects introduced in Rancher that help organize namespaces in your Kubernetes cluster. You can use projects to create multi-tenant clusters, which allows a group of users to share the same underlying resources without interacting with each other's applications. + +In terms of hierarchy: + +- Clusters contain projects +- Projects contain namespaces + +Within Rancher, projects allow you to manage multiple namespaces as a single entity. In native Kubernetes, which does not include projects, features like role-based access rights or cluster resources are assigned to individual namespaces. In clusters where multiple namespaces require the same set of access rights, assigning these rights to each individual namespace can become tedious. Even though all namespaces require the same rights, there's no way to apply those rights to all of your namespaces in a single action. You'd have to repetitively assign these rights to each namespace! + +Rancher projects resolve this issue by allowing you to apply resources and access rights at the project level. Each namespace in the project then inherits these resources and policies, so you only have to assign them to the project once, rather than assigning them to each individual namespace. + +You can use projects to perform actions like: + +- [Assign users access to a group of namespaces](../how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects.md) +- Assign users [specific roles in a project](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). 
A role can be owner, member, read-only, or [custom](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles.md) +- [Set resource quotas](manage-project-resource-quotas.md) +- [Manage namespaces](../how-to-guides/advanced-user-guides/manage-projects/manage-namespaces.md) +- [Configure tools](../reference-guides/rancher-project-tools.md) +- [Set up pipelines for continuous integration and deployment](../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md) +- [Configure pod security policies](../how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies.md) + +### Authorization + +Non-administrative users are only authorized for project access after an [administrator](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owner or member](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) adds them to the project's **Members** tab. + +Whoever creates the project automatically becomes a [project owner](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles). + +## Switching between Projects + +To switch between projects, use the drop-down available in the navigation bar. Alternatively, you can switch between projects directly in the navigation bar. + +1. From the **Global** view, navigate to the project that you want to configure. + +1. Select **Projects/Namespaces** from the navigation bar. + +1. Select the link for the project that you want to open. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md b/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md new file mode 100644 index 0000000000..4271ab6bce --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/manage-role-based-access-control-rbac.md @@ -0,0 +1,29 @@ +--- +title: Role-Based Access Control (RBAC) +weight: 1120 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/users-permissions-roles/ + - /rancher/v2.x/en/admin-settings/rbac/ +--- + +Within Rancher, each person authenticates as a _user_, which is a login that grants you access to Rancher. As mentioned in [Authentication](about-authentication.md), users can either be local or external. + +After you configure external authentication, the users that display on the **Users** page changes. + +- If you are logged in as a local user, only local users display. + +- If you are logged in as an external user, both external and local users display. + +## Users and Roles + +Once the user logs in to Rancher, their _authorization_, or their access rights within the system, is determined by _global permissions_, and _cluster and project roles_. + +- [Global Permissions](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md): + + Define user authorization outside the scope of any particular cluster. 
+ +- [Cluster and Project Roles](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md): + + Define user authorization inside the specific cluster or project where they are assigned the role. + +Both global permissions and cluster and project roles are implemented on top of [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). Therefore, enforcement of permissions and roles is performed by Kubernetes. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md new file mode 100644 index 0000000000..3aa725064b --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-alerting-guides.md @@ -0,0 +1,13 @@ +--- +title: Monitoring Guides +shortTitle: Guides +weight: 4 +--- + +- [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) +- [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) +- [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) +- [Customizing Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) +- [Persistent Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) +- [Debugging high memory usage](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) +- [Migrating from Monitoring V1 to V2](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md new file mode 100644 index 0000000000..10747e33cf --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-and-alerting.md @@ -0,0 +1,110 @@ +--- +title: Monitoring and Alerting +shortTitle: Monitoring/Alerting +description: Prometheus lets you view metrics from your different Rancher and Kubernetes objects. Learn about the scope of monitoring and how to enable cluster monitoring +weight: 13 +aliases: + - /rancher/v2.x/en/monitoring-alerting/ + - /rancher/v2.x/en/monitoring-alerting/v2.5/ +--- + +Using the `rancher-monitoring` application, you can quickly deploy leading open-source monitoring and alerting solutions onto your cluster. + +- [Features](#features) +- [How Monitoring Works](#how-monitoring-works) +- [Default Components and Deployments](#default-components-and-deployments) +- [Role-based Access Control](#role-based-access-control) +- [Guides](#guides) +- [Windows Cluster Support](#windows-cluster-support) +- [Known Issues](#known-issues) + +### Features + +Prometheus lets you view metrics from your Rancher and Kubernetes objects. Using timestamps, Prometheus lets you query and view these metrics in easy-to-read graphs and visuals, either through the Rancher UI or Grafana, which is an analytics viewing platform deployed along with Prometheus. + +By viewing data that Prometheus scrapes from your cluster control plane, nodes, and deployments, you can stay on top of everything happening in your cluster. 
You can then use these analytics to better run your organization: stop system emergencies before they start, develop maintenance strategies, or restore crashed servers. + +The `rancher-monitoring` operator, introduced in Rancher v2.5, is powered by [Prometheus](https://prometheus.io/), [Grafana](https://grafana.com/grafana/), [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/), the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator), and the [Prometheus adapter.](https://github.com/DirectXMan12/k8s-prometheus-adapter) + +The monitoring application allows you to: + +- Monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments +- Define alerts based on metrics collected via Prometheus +- Create custom Grafana dashboards +- Configure alert-based notifications via Email, Slack, PagerDuty, etc. using Prometheus Alertmanager +- Defines precomputed, frequently needed or computationally expensive expressions as new time series based on metrics collected via Prometheus +- Expose collected metrics from Prometheus to the Kubernetes Custom Metrics API via Prometheus Adapter for use in HPA + +# How Monitoring Works + +For an explanation of how the monitoring components work together, see [this page.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +# Default Components and Deployments + +### Built-in Dashboards + +By default, the monitoring application deploys Grafana dashboards (curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project) onto a cluster. + +It also deploys an Alertmanager UI and a Prometheus UI. For more information about these tools, see [Built-in Dashboards.](../explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md) +### Default Metrics Exporters + +By default, Rancher Monitoring deploys exporters (such as [node-exporter](https://github.com/prometheus/node_exporter) and [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)). + +These default exporters automatically scrape metrics for CPU and memory from all components of your Kubernetes cluster, including your workloads. + +### Default Alerts + +The monitoring application deploys some alerts by default. 
To see the default alerts, go to the [Alertmanager UI](../explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards.md#alertmanager-ui) and click **Expand all groups.** + +### Components Exposed in the Rancher UI + +For a list of monitoring components exposed in the Rancher UI, along with common use cases for editing them, see [this section.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#components-exposed-in-the-rancher-ui) + +# Role-based Access Control + +For information on configuring access to monitoring, see [this page.](../explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring.md) + +# Guides + +- [Enable monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring.md) +- [Uninstall monitoring](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring.md) +- [Monitoring workloads](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads.md) +- [Customizing Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard.md) +- [Persistent Grafana dashboards](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard.md) +- [Debugging high memory usage](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) +- [Migrating from Monitoring V1 to V2](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring.md) + +# Configuration + +### Configuring Monitoring Resources in Rancher + +> The configuration reference assumes familiarity with how monitoring components work together. For more information, see [How Monitoring Works.](../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +- [ServiceMonitor and PodMonitor](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md) +- [Receiver](../reference-guides/monitoring-v2-configuration/receivers.md) +- [Route](../reference-guides/monitoring-v2-configuration/routes.md) +- [PrometheusRule](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules.md) +- [Prometheus](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) +- [Alertmanager](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Configuring Helm Chart Options + +For more information on `rancher-monitoring` chart options, including options to set resource limits and requests, see [this page.](../reference-guides/monitoring-v2-configuration/helm-chart-options.md) + +# Windows Cluster Support + +_Available as of v2.5.8_ + +When deployed onto an RKE1 Windows cluster, Monitoring V2 will now automatically deploy a [windows-exporter](https://github.com/prometheus-community/windows_exporter) DaemonSet and set up a ServiceMonitor to collect metrics from each of the deployed Pods. This will populate Prometheus with `windows_` metrics that are akin to the `node_` metrics exported by [node_exporter](https://github.com/prometheus/node_exporter) for Linux hosts. 
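If you want to spot-check that Windows metrics are actually being collected, one option is to query the Prometheus HTTP API for `windows_` series. The service and namespace names below are assumptions based on a default `rancher-monitoring` install and are illustrative only; adjust them for your cluster.

```bash
# Assumption: Prometheus is exposed by the rancher-monitoring-prometheus Service
# in the cattle-monitoring-system namespace (the default for rancher-monitoring).
kubectl -n cattle-monitoring-system port-forward svc/rancher-monitoring-prometheus 9090:9090 &

# Count how many windows_* series Prometheus currently holds.
curl -s http://localhost:9090/api/v1/query \
  --data-urlencode 'query=count({__name__=~"windows_.*"})'
```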
+ +To be able to fully deploy Monitoring V2 for Windows, all of your Windows hosts must have a minimum [wins](https://github.com/rancher/wins) version of v0.1.0. + +For more details on how to upgrade wins on existing Windows hosts, refer to the section on [Windows cluster support for Monitoring V2.](../explanations/integrations-in-rancher/monitoring-and-alerting/windows-support.md) + + + +# Known Issues + +There is a [known issue](https://github.com/rancher/rancher/issues/28787#issuecomment-693611821) that K3s clusters require more default memory. If you are enabling monitoring on a K3s cluster, we recommend setting `prometheus.prometheusSpec.resources.memory.limit` to 2500 Mi and `prometheus.prometheusSpec.resources.memory.request` to 1750 Mi. + +For tips on debugging high memory usage, see [this page.](../how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md new file mode 100644 index 0000000000..28253c73a7 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration-guides.md @@ -0,0 +1,52 @@ +--- +title: Configuration +weight: 5 +aliases: + - /rancher/v2.5/en/monitoring-alerting/configuration + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/ + - /rancher/v2.x/en/monitoring-alerting/v2.5/configuration/alertmanager/ +--- + +This page captures some of the most important options for configuring Monitoring V2 in the Rancher UI. + +For information on configuring custom scrape targets and rules for Prometheus, please refer to the upstream documentation for the [Prometheus Operator.](https://github.com/prometheus-operator/prometheus-operator) Some of the most important custom resources are explained in the Prometheus Operator [design documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md) The Prometheus Operator documentation can help also you set up RBAC, Thanos, or custom configuration. + +# Setting Resource Limits and Requests + +The resource requests and limits for the monitoring application can be configured when installing `rancher-monitoring`. For more information about the default limits, see [this page.](../reference-guides/monitoring-v2-configuration/helm-chart-options.md#configuring-resource-limits-and-requests) + +>**Note:** On an idle cluster, Monitoring V2 has significantly higher CPU usage (up to 70%) as compared to Monitoring V1. To improve performance and achieve similar results as in Monitoring V1, turn off the Prometheus adapter. + +# Prometheus Configuration + +It is usually not necessary to directly edit the Prometheus custom resource. + +Instead, to configure Prometheus to scrape custom metrics, you will only need to create a new ServiceMonitor or PodMonitor to configure Prometheus to scrape additional metrics. 
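For illustration, the sketch below shows roughly what a minimal ServiceMonitor could look like. The application name, namespace, labels, and port are placeholders rather than values from this page; adjust them to match the Service that exposes your metrics.

```bash
# Illustrative sketch only: a ServiceMonitor for a hypothetical app whose Service
# carries the label app=example-app and exposes /metrics on a port named "http".
kubectl apply -f - <<'EOF'
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app
  endpoints:
    - port: http
      path: /metrics
EOF
```

Once a ServiceMonitor like this exists, the Prometheus Operator translates it into scrape configuration, so no direct edit of the Prometheus custom resource is needed.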
+ + +### ServiceMonitor and PodMonitor Configuration + +For details, see [this page.](../reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md) + +### Advanced Prometheus Configuration + +For more information about directly editing the Prometheus custom resource, which may be helpful in advanced use cases, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus.md) + +# Alertmanager Configuration + +The Alertmanager custom resource usually doesn't need to be edited directly. For most common use cases, you can manage alerts by updating Routes and Receivers. + +Routes and receivers are part of the configuration of the alertmanager custom resource. In the Rancher UI, Routes and Receivers are not true custom resources, but pseudo-custom resources that the Prometheus Operator uses to synchronize your configuration with the Alertmanager custom resource. When routes and receivers are updated, the monitoring application will automatically update Alertmanager to reflect those changes. + +For some advanced use cases, you may want to configure alertmanager directly. For more information, refer to [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) + +### Receivers + +Receivers are used to set up notifications. For details on how to configure receivers, see [this page.](../reference-guides/monitoring-v2-configuration/receivers.md) +### Routes + +Routes filter notifications before they reach receivers. Each route needs to refer to a receiver that has already been configured. For details on how to configure routes, see [this page.](../reference-guides/monitoring-v2-configuration/routes.md) + +### Advanced + +For more information about directly editing the Alertmanager custom resource, which may be helpful in advanced use cases, see [this page.](../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/monitoring-v2-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/new-user-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/node-template-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/other-cloud-providers.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md b/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md new file mode 100644 index 0000000000..9c82aff2ba --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/other-installation-methods.md @@ -0,0 +1,22 @@ +--- +title: Other Installation Methods +weight: 3 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/ +--- + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +The Docker installation is for development and testing environments only. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +For Rancher v2.5+, the Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/other-troubleshooting-tips.md b/versioned_docs/version-2.5/pages-for-subheaders/other-troubleshooting-tips.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/other-troubleshooting-tips.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/pipelines.md b/versioned_docs/version-2.5/pages-for-subheaders/pipelines.md new file mode 100644 index 0000000000..4ebd22e8b1 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/pipelines.md @@ -0,0 +1,275 @@ +--- +title: Pipelines +weight: 10 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/pipelines + - /rancher/v2.x/en/pipelines/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> As of Rancher v2.5, Git-based deployment pipelines are now deprecated. We recommend handling pipelines with Rancher Continuous Delivery powered by [Fleet](../how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet.md), available in Cluster Explorer. +> +>**Notice:** Fleet does not replace Rancher pipelines; the distinction is that Rancher pipelines are now powered by Fleet. + +Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. + +Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. 
+- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +>**Note:** Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of and is not a replacement of enterprise-grade Jenkins or other CI tools your team uses. + +This section covers the following topics: + +- [Concepts](#concepts) +- [How Pipelines Work](#how-pipelines-work) +- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) +- [Setting up Pipelines](#setting-up-pipelines) + - [Configure version control providers](#1-configure-version-control-providers) + - [Configure repositories](#2-configure-repositories) + - [Configure the pipeline](#3-configure-the-pipeline) +- [Pipeline Configuration Reference](#pipeline-configuration-reference) +- [Running your Pipelines](#running-your-pipelines) +- [Triggering a Pipeline](#triggering-a-pipeline) + - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) + +# Concepts + +For an explanation of concepts and terminology used in this section, refer to [this page.](../reference-guides/pipelines/concepts.md) + +# How Pipelines Work + +After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. + +A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. + +Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories](../reference-guides/pipelines/example-repositories.md) to view some common pipeline deployments. + +When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: + + - **Jenkins:** + + The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. + + >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. + + - **Docker Registry:** + + Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. + + - **Minio:** + + Minio storage is used to store the logs for pipeline executions. + + >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components](../reference-guides/pipelines/configure-persistent-data.md). 
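As noted above, a pipeline can also be defined declaratively by committing a `.rancher-pipeline.yml` file to the repository. The sketch below is illustrative only, with a placeholder image and script; refer to the pipeline configuration reference for the full set of step types and options.

```bash
# Illustrative sketch of a minimal .rancher-pipeline.yml with a single build stage.
# The image and commands are placeholders for your own build tooling.
cat <<'EOF' > .rancher-pipeline.yml
stages:
  - name: Build and test
    steps:
      - runScriptConfig:
          image: golang:1.16
          shellScript: |-
            go build ./...
            go test ./...
EOF
```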
+ +# Roles-based Access Control for Pipelines + +If you can access a project, you can enable repositories to start building pipelines. + +Only [administrators](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md), [cluster owners or members](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#cluster-roles), or [project owners](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles.md#project-roles) can configure version control providers and manage global pipeline execution settings. + +Project members can only configure repositories and pipelines. + +# Setting up Pipelines + +To set up pipelines, you will need to do the following: + +1. [Configure version control providers](#1-configure-version-control-providers) +2. [Configure repositories](#2-configure-repositories) +3. [Configure the pipeline](#3-configure-the-pipeline) + +### 1. Configure Version Control Providers + +Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider: + +- GitHub +- GitLab +- Bitbucket + +Select your provider's tab below and follow the directions. + + + + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to setup an OAuth App in Github. + +1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. + +1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. + +1. Click **Authenticate**. + + + + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. + +1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. + +1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. + +1. Click **Authenticate**. + +>**Note:** +> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. +> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. + + + + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. Choose the **Use public Bitbucket Cloud** option. + +1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. + +1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. + +1. Click **Authenticate**. + + + + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Select **Tools > Pipelines** in the navigation bar. + +1. 
Choose the **Use private Bitbucket Server setup** option. + +1. Follow the directions displayed to **Setup a Bitbucket Server application**. + +1. Enter the host address of your Bitbucket server installation. + +1. Click **Authenticate**. + +>**Note:** +> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: +> +> 1. Setup Rancher server with a certificate from a trusted CA. +> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). +> + + + + +**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. + +### 2. Configure Repositories + +After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** + +1. Click on **Configure Repositories**. + +1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. + +1. For each repository that you want to set up a pipeline, click on **Enable**. + +1. When you're done enabling all your repositories, click on **Done**. + +**Results:** You have a list of repositories that you can start configuring pipelines for. + +### 3. Configure the Pipeline + +Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** + +1. Find the repository that you want to set up a pipeline for. + +1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.](../reference-guides/pipelines/pipeline-configuration.md) + + * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. + * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. 
If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. + +1. Select which `branch` to use from the list of branches. + +1. Optional: Set up notifications. + +1. Set up the trigger rules for the pipeline. + +1. Enter a **Timeout** for the pipeline. + +1. When all the stages and steps are configured, click **Done**. + +**Results:** Your pipeline is now configured and ready to be run. + + +# Pipeline Configuration Reference + +Refer to [this page](../reference-guides/pipelines/pipeline-configuration.md) for details on how to configure a pipeline to: + +- Run a script +- Build and publish images +- Publish catalog templates +- Deploy YAML +- Deploy a catalog app + +The configuration reference also covers how to configure: + +- Notifications +- Timeouts +- The rules that trigger a pipeline +- Environment variables +- Secrets + + +# Running your Pipelines + +Run your pipeline for the first time. From the project view in Rancher, go to **Resources > Pipelines.** Find your pipeline and select the vertical **⋮ > Run**. + +During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: + +- `docker-registry` +- `jenkins` +- `minio` + +This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. + +# Triggering a Pipeline + +When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. + +Available Events: + +* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. +* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. +* **Tag**: When a tag is created in the repository, the pipeline is triggered. + +> **Note:** This option doesn't exist for Rancher's [example repositories](../reference-guides/pipelines/example-repositories.md). + +### Modifying the Event Triggers for the Repository + +1. From the **Global** view, navigate to the project that you want to modify the event trigger for the pipeline. + +1. 1. Click **Resources > Pipelines.** + +1. Find the repository that you want to modify the event triggers. Select the vertical **⋮ > Setting**. + +1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. + +1. Click **Save**. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md b/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md new file mode 100644 index 0000000000..9fd931737a --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/provisioning-storage-examples.md @@ -0,0 +1,16 @@ +--- +title: Provisioning Storage Examples +weight: 3053 +aliases: + - /rancher/v2.5/en/tasks/clusters/adding-storage/provisioning-storage/ + - /rancher/v2.5/en/k8s-in-rancher/volumes-and-storage/examples/ + - /rancher/v2.x/en/cluster-admin/volumes-and-storage/examples/ +--- + +Rancher supports persistent storage with a variety of volume plugins. 
However, before you use any of these plugins to bind persistent storage to your workloads, you have to configure the storage itself, whether its a cloud-based solution from a service-provider or an on-prem solution that you manage yourself. + +For your convenience, Rancher offers documentation on how to configure some of the popular storage methods: + +- [NFS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage.md) +- [vSphere](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) +- [EBS](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md new file mode 100644 index 0000000000..45d0281d22 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/quick-start-guides.md @@ -0,0 +1,15 @@ +--- +title: Rancher Deployment Quick Start Guides +metaDescription: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +short title: Use this section to jump start your Rancher deployment and testing. It contains instructions for a simple Rancher setup and some common use cases. +weight: 2 +--- +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation](installation-and-upgrade.md). + +Howdy buckaroos! Use this section of the docs to jump start your deployment and testing of Rancher 2.x! It contains instructions for a simple Rancher setup and some common use cases. We plan on adding more content to this section in the future. + +We have Quick Start Guides for: + +- [Deploying Rancher Server](deploy-rancher-manager.md): Get started running Rancher using the method most convenient for you. + +- [Deploying Workloads](deploy-rancher-workloads.md): Deploy a simple [workload](https://kubernetes.io/docs/concepts/workloads/) and expose it, letting you access it from outside the cluster. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md new file mode 100644 index 0000000000..49dc0fc922 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-behind-an-http-proxy.md @@ -0,0 +1,16 @@ +--- +title: Installing Rancher behind an HTTP Proxy +weight: 4 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/behind-proxy/ +--- + +In a lot of enterprise environments, servers or VMs running on premise do not have direct Internet access, but must connect to external services through a HTTP(S) proxy for security reasons. This tutorial shows step by step how to set up a highly available Rancher installation in such an environment. + +Alternatively, it is also possible to set up Rancher completely air-gapped without any Internet access. This process is described in detail in the [Rancher docs](air-gapped-helm-cli-install.md). + +# Installation Outline + +1. 
[Set up infrastructure](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure.md) +2. [Set up a Kubernetes cluster](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes.md) +3. [Install Rancher](../getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md new file mode 100644 index 0000000000..89eeeeb980 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-managed-clusters.md @@ -0,0 +1,24 @@ +--- +title: Best Practices for Rancher Managed Clusters +shortTitle: Rancher Managed Clusters +weight: 2 +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-managed + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/ +--- + +### Logging + +Refer to [this guide](../reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md) for our recommendations for cluster-level logging and application logging. + +### Monitoring + +Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. Refer to this [guide](../reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md) for our recommendations. + +### Tips for Setting Up Containers + +Running well-built containers can greatly impact the overall performance and security of your environment. Refer to this [guide](../reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md) for tips. + +### Best Practices for Rancher Managed vSphere Clusters + +This [guide](../reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md) outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md new file mode 100644 index 0000000000..ae04cc5aea --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-manager-architecture.md @@ -0,0 +1,183 @@ +--- +title: Architecture +weight: 1 +aliases: + - /rancher/v2.x/en/overview/architecture/ +--- + +This section focuses on the Rancher server, its components, and how Rancher communicates with downstream Kubernetes clusters. + +For information on the different ways that Rancher can be installed, refer to the [overview of installation options.](installation-and-upgrade.md#overview-of-installation-options) + +For a list of main features of the Rancher API server, refer to the [overview section.](../getting-started/introduction/overview.md#features-of-the-rancher-api-server) + +For guidance about setting up the underlying infrastructure for the Rancher server, refer to the [architecture recommendations.](../reference-guides/rancher-manager-architecture/architecture-recommendations.md) + +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts](../reference-guides/kubernetes-concepts.md) page. 
+ +This section covers the following topics: + +- [Rancher server architecture](#rancher-server-architecture) +- [Communicating with downstream user clusters](#communicating-with-downstream-user-clusters) + - [The authentication proxy](#1-the-authentication-proxy) + - [Cluster controllers and cluster agents](#2-cluster-controllers-and-cluster-agents) + - [Node agents](#3-node-agents) + - [Authorized cluster endpoint](#4-authorized-cluster-endpoint) +- [Important files](#important-files) +- [Tools for provisioning Kubernetes clusters](#tools-for-provisioning-kubernetes-clusters) +- [Rancher server components and source code](#rancher-server-components-and-source-code) + +# Rancher Server Architecture + +The majority of Rancher 2.x software runs on the Rancher Server. Rancher Server includes all the software components used to manage the entire Rancher deployment. + +The figure below illustrates the high-level architecture of Rancher 2.x. The figure depicts a Rancher Server installation that manages two downstream Kubernetes clusters: one created by RKE and another created by Amazon EKS (Elastic Kubernetes Service). + +For the best performance and security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +The diagram below shows how users can manipulate both [Rancher-launched Kubernetes](launch-kubernetes-with-rancher.md) clusters and [hosted Kubernetes](set-up-clusters-from-hosted-kubernetes-providers.md) clusters through Rancher's authentication proxy: + +
<figcaption>Managing Kubernetes Clusters through Rancher's Authentication Proxy</figcaption>
    + +![Architecture](/img/rancher-architecture-rancher-api-server.svg) + +You can install Rancher on a single node, or on a high-availability Kubernetes cluster. + +A high-availability Kubernetes installation is recommended for production. + +A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: + +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +The Rancher server, regardless of the installation method, should always run on nodes that are separate from the downstream user clusters that it manages. If Rancher is installed on a high-availability Kubernetes cluster, it should run on a separate cluster from the cluster(s) it manages. + +# Communicating with Downstream User Clusters + +This section describes how Rancher provisions and manages the downstream user clusters that run your apps and services. + +The below diagram shows how the cluster controllers, cluster agents, and node agents allow Rancher to control downstream clusters. + +
<figcaption>Communicating with Downstream Clusters</figcaption>
    + +![Rancher Components](/img/rancher-architecture-cluster-controller.svg) + +The following descriptions correspond to the numbers in the diagram above: + +1. [The Authentication Proxy](#1-the-authentication-proxy) +2. [Cluster Controllers and Cluster Agents](#2-cluster-controllers-and-cluster-agents) +3. [Node Agents](#3-node-agents) +4. [Authorized Cluster Endpoint](#4-authorized-cluster-endpoint) + +### 1. The Authentication Proxy + +In this diagram, a user named Bob wants to see all pods running on a downstream user cluster called User Cluster 1. From within Rancher, he can run a `kubectl` command to see +the pods. Bob is authenticated through Rancher's authentication proxy. + +The authentication proxy forwards all Kubernetes API calls to downstream clusters. It integrates with authentication services like local authentication, Active Directory, and GitHub. On every Kubernetes API call, the authentication proxy authenticates the caller and sets the proper Kubernetes impersonation headers before forwarding the call to Kubernetes masters. + +Rancher communicates with Kubernetes clusters using a [service account,](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) which provides an identity for processes that run in a pod. + +By default, Rancher generates a [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) that contains credentials for proxying through the Rancher server to connect to the Kubernetes API server on a downstream user cluster. The kubeconfig file (`kube_config_cluster.yml`) contains full access to the cluster. + +### 2. Cluster Controllers and Cluster Agents + +Each downstream user cluster has a cluster agent, which opens a tunnel to the corresponding cluster controller within the Rancher server. + +There is one cluster controller and one cluster agent for each downstream cluster. Each cluster controller: + +- Watches for resource changes in the downstream cluster +- Brings the current state of the downstream cluster to the desired state +- Configures access control policies to clusters and projects +- Provisions clusters by calling the required Docker machine drivers and Kubernetes engines, such as RKE and GKE + +By default, to enable Rancher to communicate with a downstream cluster, the cluster controller connects to the cluster agent. If the cluster agent is not available, the cluster controller can connect to a [node agent](#3-node-agents) instead. + +The cluster agent, also called `cattle-cluster-agent`, is a component that runs in a downstream user cluster. It performs the following tasks: + +- Connects to the Kubernetes API of Rancher-launched Kubernetes clusters +- Manages workloads, pod creation and deployment within each cluster +- Applies the roles and bindings defined in each cluster's global policies +- Communicates between the cluster and Rancher server (through a tunnel to the cluster controller) about events, stats, node info, and health + +### 3. Node Agents + +If the cluster agent (also called `cattle-cluster-agent`) is not available, one of the node agents creates a tunnel to the cluster controller to communicate with Rancher. + +The `cattle-node-agent` is deployed using a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) resource to make sure it runs on every node in a Rancher-launched Kubernetes cluster. 
It is used to interact with the nodes when performing cluster operations. Examples of cluster operations include upgrading the Kubernetes version and creating or restoring etcd snapshots. + +### 4. Authorized Cluster Endpoint + +An authorized cluster endpoint allows users to connect to the Kubernetes API server of a downstream cluster without having to route their requests through the Rancher authentication proxy. + +> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE](launch-kubernetes-with-rancher.md) to provision the cluster. It is not available for registered clusters, or for clusters in a hosted Kubernetes provider, such as Amazon's EKS. + +There are two main reasons why a user might need the authorized cluster endpoint: + +- To access a downstream user cluster while Rancher is down +- To reduce latency in situations where the Rancher server and downstream cluster are separated by a long distance + +The `kube-api-auth` microservice is deployed to provide the user authentication functionality for the authorized cluster endpoint. When you access the user cluster using `kubectl`, the cluster's Kubernetes API server authenticates you by using the `kube-api-auth` service as a webhook. + +Like the authorized cluster endpoint, the `kube-api-auth` authentication service is also only available for Rancher-launched Kubernetes clusters. + +> **Example scenario:** Let's say that the Rancher server is located in the United States, and User Cluster 1 is located in Australia. A user, Alice, also lives in Australia. Alice can manipulate resources in User Cluster 1 by using the Rancher UI, but her requests will have to be sent from Australia to the Rancher server in the United States, then be proxied back to Australia, where the downstream user cluster is. The geographical distance may cause significant latency, which Alice can reduce by using the authorized cluster endpoint. + +With this endpoint enabled for the downstream cluster, Rancher generates an extra Kubernetes context in the kubeconfig file in order to connect directly to the cluster. This file has the credentials for `kubectl` and `helm`. + +You will need to use a context defined in this kubeconfig file to access the cluster if Rancher goes down. Therefore, we recommend exporting the kubeconfig file so that if Rancher goes down, you can still use the credentials in the file to access your cluster. For more information, refer to the section on accessing your cluster with [kubectl and the kubeconfig file.](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) + +# Important Files + +The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The Kubeconfig file for the cluster, this file contains credentials for full access to the cluster. You can use this file to authenticate with a Rancher-launched Kubernetes cluster if Rancher goes down. +- `rancher-cluster.rkestate`: The Kubernetes cluster state file. This file contains credentials for full access to the cluster. Note: This state file is only created when using RKE v0.2.0 or higher. + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. 
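For example, a quick way to verify access with the generated kubeconfig file is to point `kubectl` at it directly (file name as in the examples above):

```bash
# Use the cluster's kubeconfig directly, independent of any other contexts on your machine.
kubectl --kubeconfig kube_config_cluster.yml get nodes
```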
+ +For more information on connecting to a cluster without the Rancher authentication proxy and other configuration options, refer to the [kubeconfig file](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) documentation. + +# Tools for Provisioning Kubernetes Clusters + +The tools that Rancher uses to provision downstream user clusters depends on the type of cluster that is being provisioned. + +### Rancher Launched Kubernetes for Nodes Hosted in an Infrastructure Provider + +Rancher can dynamically provision nodes in a provider such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Rancher provisions this type of cluster using [RKE](https://github.com/rancher/rke) and [docker-machine.](https://github.com/rancher/machine) + +### Rancher Launched Kubernetes for Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing nodes, which creates a custom cluster. + +Rancher provisions this type of cluster using [RKE.](https://github.com/rancher/rke) + +### Hosted Kubernetes Providers + +When setting up this type of cluster, Kubernetes is installed by providers such as Google Kubernetes Engine, Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +Rancher provisions this type of cluster using [kontainer-engine.](https://github.com/rancher/kontainer-engine) + +### Registered Kubernetes Clusters + +In this type of cluster, Rancher connects to a Kubernetes cluster that has already been set up. Therefore, Rancher does not provision Kubernetes, but only sets up the Rancher agents to communicate with the cluster. + +# Rancher Server Components and Source Code + +This diagram shows each component that the Rancher server is composed of: + +![Rancher Components](/img/rancher-architecture-rancher-components.svg) + +The GitHub repositories for Rancher can be found at the following links: + +- [Main Rancher server repository](https://github.com/rancher/rancher) +- [Rancher UI](https://github.com/rancher/ui) +- [Rancher API UI](https://github.com/rancher/api-ui) +- [Norman,](https://github.com/rancher/norman) Rancher's API framework +- [Types](https://github.com/rancher/types) +- [Rancher CLI](https://github.com/rancher/cli) +- [Catalog applications](https://github.com/rancher/helm) + +This is a partial list of the most important Rancher repositories. For more details about Rancher source code, refer to the section on [contributing to Rancher.](../contribute-to-rancher.md#repositories) To see all libraries and projects used in Rancher, see the [`go.mod` file](https://github.com/rancher/rancher/blob/master/go.mod) in the `rancher/rancher` repository. 
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md new file mode 100644 index 0000000000..f6a63d3dc0 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-on-a-single-node-with-docker.md @@ -0,0 +1,179 @@ +--- +title: Installing Rancher on a Single Node Using Docker +description: For development and testing environments only, use a Docker install. Install Docker on a single Linux host, and deploy Rancher with a single Docker container. +weight: 2 +aliases: + - /rancher/v2.5/en/installation/single-node-install/ + - /rancher/v2.5/en/installation/single-node + - /rancher/v2.5/en/installation/other-installation-methods/single-node + - /rancher/v2.x/en/installation/requirements/installing-docker/ + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/ +--- + +Rancher can be installed by running a single Docker container. + +In this installation scenario, you'll install Docker on a single Linux host, and then deploy Rancher on your host using a single Docker container. + +> **Want to use an external load balancer?** +> See [Docker Install with an External Load Balancer](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer.md) instead. + +A Docker installation of Rancher is recommended only for development and testing purposes. The ability to migrate Rancher to a high-availability cluster depends on the Rancher version: + +The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster.md) + +### Privileged Access for Rancher v2.5+ + +When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. + +# Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.](installation-requirements.md) + +# 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements](installation-requirements.md) to launch your Rancher server. + +# 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +> **Do you want to...** +> +> - Use a proxy? See [HTTP Proxy Configuration](../reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md) +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate](../reference-guides/single-node-rancher-in-docker/advanced-options.md#custom-ca-certificate) +> - Complete an Air Gap Installation? See [Air Gap Install](./air-gapped-helm-cli-install.md) +> - Record all transactions with the Rancher API? 
See [API Auditing](../reference-guides/single-node-rancher-in-docker/advanced-options.md#api-audit-log) + +Choose from the following options: + +- [Option A: Default Rancher-generated Self-signed Certificate](#option-a-default-rancher-generated-self-signed-certificate) +- [Option B: Bring Your Own Certificate, Self-signed](#option-b-bring-your-own-certificate-self-signed) +- [Option C: Bring Your Own Certificate, Signed by a Recognized CA](#option-c-bring-your-own-certificate-signed-by-a-recognized-ca) +- [Option D: Let's Encrypt Certificate](#option-d-let-s-encrypt-certificate) + +### Option A: Default Rancher-generated Self-signed Certificate + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the minimum installation command below. + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:latest +``` + +### Option B: Bring Your Own Certificate, Self-signed +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> Create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/troubleshooting) + +After creating your certificate, run the Docker command below to install Rancher. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| ------------------- | --------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + --privileged \ + rancher/rancher:latest +``` + +### Option C: Bring Your Own Certificate, Signed by a Recognized CA + +In production environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisites:** +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. 
For an example, see [Certificate Troubleshooting.](installation/other-installation-methods/single-node-dockeinstallation/other-installation-methods/single-node-docker/troubleshooting) + +After obtaining your certificate, run the Docker command below. + +- Use the `-v` flag and provide the path to your certificates to mount them in your container. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. +- Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +| Placeholder | Description | +| ------------------- | ----------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +```bash +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + --privileged \ + rancher/rancher:latest \ + --no-cacerts +``` + +### Option D: Let's Encrypt Certificate + +> **Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +For production environments, you also have the option of using [Let's Encrypt](https://letsencrypt.org/) certificates. Let's Encrypt uses an http-01 challenge to verify that you have control over your domain. You can confirm that you control the domain by pointing the hostname that you want to use for Rancher access (for example, `rancher.mydomain.com`) to the IP of the machine it is running on. You can bind the hostname to the IP address by creating an A record in DNS. + +> **Prerequisites:** +> +> - Let's Encrypt is an Internet service. Therefore, this option cannot be used in an internal/air gapped network. +> - Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +> - Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +After you fulfill the prerequisites, you can install Rancher using a Let's Encrypt certificate by running the following command. + +| Placeholder | Description | +| ----------------- | ------------------- | +| `` | Your domain address | + +As of Rancher v2.5, privileged access is [required.](#privileged-access-for-rancher-v2-5) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher:latest \ + --acme-domain +``` + +## Advanced Options + +When installing Rancher on a single node with Docker, there are several advanced options that can be enabled: + +- Custom CA Certificate +- API Audit Log +- TLS Settings +- Air Gap +- Persistent Data +- Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +Refer to [this page](../reference-guides/single-node-rancher-in-docker/advanced-options.md) for details. 
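+Before moving on to troubleshooting, you can confirm that the server container came up. A minimal sketch, assuming Rancher was started with one of the `docker run` commands above:
+
+```bash
+# Find the Rancher server container started from the rancher/rancher image
+docker ps --filter ancestor=rancher/rancher:latest
+
+# Follow the logs while Rancher bootstraps its embedded Kubernetes cluster
+docker logs --follow <container-id>
+```
+
+Once the container is running and the logs settle, the UI should be reachable at `https://<server-ip>`.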
+ +## Troubleshooting + +Refer to [this page](../getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting.md) for frequently asked questions and troubleshooting tips. + +## What's Next? + +- **Recommended:** Review Single Node [Backup](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher.md) and [Restore](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher.md). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters](kubernetes-clusters-in-rancher-setup.md). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md new file mode 100644 index 0000000000..9f727040e1 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-security.md @@ -0,0 +1,92 @@ +--- +title: Security +weight: 20 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/ +--- + + + + + + + +
+**Security policy:** Rancher Labs supports responsible disclosure, and endeavours to resolve all issues in a reasonable time frame.
+
+**Reporting process:** Please submit possible security issues by emailing security-rancher@suse.com.
+
+**Announcements:** Subscribe to the Rancher announcements forum for release updates.
    + +Security is at the heart of all Rancher features. From integrating with all the popular authentication tools and services, to an enterprise grade [RBAC capability,](manage-role-based-access-control-rbac.md) Rancher makes your Kubernetes clusters even more secure. + +On this page, we provide security related documentation along with resources to help you secure your Rancher installation and your downstream Kubernetes clusters: + +- [Running a CIS security scan on a Kubernetes cluster](#running-a-cis-security-scan-on-a-kubernetes-cluster) +- [SELinux RPM](#selinux-rpm) +- [Guide to hardening Rancher installations](#rancher-hardening-guide) +- [The CIS Benchmark and self-assessment](#the-cis-benchmark-and-self-assessment) +- [Third-party penetration test reports](#third-party-penetration-test-reports) +- [Rancher Security Advisories and CVEs](#rancher-security-advisories-and-cves) +- [Kubernetes Security Best Practices](#kubernetes-security-best-practices) + +### Running a CIS Security Scan on a Kubernetes Cluster + +Rancher leverages [kube-bench](https://github.com/aquasecurity/kube-bench) to run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the [CIS](https://www.cisecurity.org/cis-benchmarks/) (Center for Internet Security) Kubernetes Benchmark. + +The CIS Kubernetes Benchmark is a reference document that can be used to establish a secure configuration baseline for Kubernetes. + +The Center for Internet Security (CIS) is a 501(c\)(3) non-profit organization, formed in October 2000, with a mission to "identify, develop, validate, promote, and sustain best practice solutions for cyber defense and build and lead communities to enable an environment of trust in cyberspace". + +CIS Benchmarks are best practices for the secure configuration of a target system. CIS Benchmarks are developed through the generous volunteer efforts of subject matter experts, technology vendors, public and private community members, and the CIS Benchmark Development team. + +The Benchmark provides recommendations of two types: Automated and Manual. We run tests related to only Automated recommendations. + +When Rancher runs a CIS security scan on a cluster, it generates a report showing the results of each test, including a summary with the number of passed, skipped and failed tests. The report also includes remediation steps for any failed tests. + +For details, refer to the section on [security scans](cis-scan-guides.md). + +### SELinux RPM + +[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. + +We provide two RPMs (Red Hat packages) that enable Rancher products to function properly on SELinux-enforcing hosts: `rancher-selinux` and `rke2-selinux`. For details, see [this page](selinux-rpm.md). + +### Rancher Hardening Guide + +The Rancher Hardening Guide is based on controls and best practices found in the CIS Kubernetes Benchmark from the Center for Internet Security. + +The hardening guides provide prescriptive guidance for hardening a production installation of Rancher. See Rancher's guides for [Self Assessment of the CIS Kubernetes Benchmark](#the-cis-benchmark-and-self-sssessment) for the full list of security controls. 
+ +> The hardening guides describe how to secure the nodes in your cluster, and it is recommended to follow a hardening guide before installing Kubernetes. + +Each version of the hardening guide is intended to be used with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher. + +### The CIS Benchmark and Self-Assessment + +The benchmark self-assessment is a companion to the Rancher security hardening guide. While the hardening guide shows you how to harden the cluster, the benchmark guide is meant to help you evaluate the level of security of the hardened cluster. + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. The original benchmark documents can be downloaded from the [CIS website](https://www.cisecurity.org/benchmark/kubernetes/). + +Each version of Rancher's self-assessment guide corresponds to specific versions of the hardening guide, Rancher, Kubernetes, and the CIS Benchmark. + +### Third-party Penetration Test Reports + +Rancher periodically hires third parties to perform security audits and penetration tests of the Rancher 2.x software stack. The environments under test follow the Rancher provided hardening guides at the time of the testing. Results are posted when the third party has also verified fixes classified MEDIUM or above. + +Results: + +- [Cure53 Pen Test - July 2019](https://releases.rancher.com/documents/security/pen-tests/2019/RAN-01-cure53-report.final.pdf) +- [Untamed Theory Pen Test - March 2019](https://releases.rancher.com/documents/security/pen-tests/2019/UntamedTheory-Rancher_SecurityAssessment-20190712_v5.pdf) + +### Rancher Security Advisories and CVEs + +Rancher is committed to informing the community of security issues in our products. For the list of CVEs (Common Vulnerabilities and Exposures) for issues we have resolved, refer to [this page.](../reference-guides/rancher-security/security-advisories-and-cves.md) + +### Kubernetes Security Best Practices + +For recommendations on securing your Kubernetes cluster, refer to the [Kubernetes Security Best Practices](../reference-guides/rancher-security/kubernetes-security-best-practices.md) guide. 
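+The CIS scan described above is driven from the Rancher UI, but the underlying scanner can also be run by hand if you want to spot-check a single node. A minimal sketch, assuming the `kube-bench` binary is installed on that node and that you pick the benchmark targets appropriate for your Kubernetes version:
+
+```bash
+# Run the worker-node checks from the CIS Kubernetes Benchmark
+kube-bench run --targets node
+
+# Run the control-plane and etcd checks on a server node
+kube-bench run --targets master,etcd
+```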
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server-configuration.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md new file mode 100644 index 0000000000..3ff8dfc7d2 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-server.md @@ -0,0 +1,22 @@ +--- +title: Best Practices for the Rancher Server +shortTitle: Rancher Server +weight: 1 +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-server + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/ +--- + +This guide contains our recommendations for running the Rancher server, and is intended to be used in situations in which Rancher manages downstream Kubernetes clusters. + +### Recommended Architecture and Infrastructure + +Refer to this [guide](../reference-guides/best-practices/rancher-server/tips-for-running-rancher.md) for our general advice for setting up the Rancher server on a high-availability Kubernetes cluster. + +### Deployment Strategies + +This [guide](../reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md) is designed to help you choose whether a regional deployment strategy or a hub-and-spoke deployment strategy is better for a Rancher server that manages downstream Kubernetes clusters. + +### Installing Rancher in a vSphere Environment + +This [guide](../reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md) outlines a reference architecture for installing Rancher in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/rancher-v2.5-hardening-guides.md b/versioned_docs/version-2.5/pages-for-subheaders/rancher-v2.5-hardening-guides.md new file mode 100644 index 0000000000..dc4466fc6b --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/rancher-v2.5-hardening-guides.md @@ -0,0 +1,57 @@ +--- +title: Self-Assessment and Hardening Guides for Rancher v2.5 +shortTitle: Rancher v2.5 Guides +weight: 1 +--- + +Rancher v2.5 introduced the capability to deploy Rancher on any Kubernetes cluster. For that reason, we now provide separate security hardening guides for Rancher deployments on each of Rancher's Kubernetes distributions. + +- [Rancher Kubernetes Distributions](#rancher-kubernetes-distributions) +- [Hardening Guides and Benchmark Versions](#hardening-guides-and-benchmark-versions) + - [RKE Guides](#rke-guides) + - [RKE2 Guides](#rke2-guides) + - [K3s Guides](#k3s) +- [Rancher with SELinux](#rancher-with-selinux) + +# Rancher Kubernetes Distributions + +Rancher has the following Kubernetes distributions: + +- [**RKE,**](https://rancher.com/docs/rke/latest/en/) Rancher Kubernetes Engine, is a CNCF-certified Kubernetes distribution that runs entirely within Docker containers. +- [**K3s,**](https://rancher.com/docs/k3s/latest/en/) is a fully conformant, lightweight Kubernetes distribution. It is easy to install, with half the memory of upstream Kubernetes, all in a binary of less than 100 MB. 
+- [**RKE2**](https://docs.rke2.io/) is a fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. + +To harden a Kubernetes cluster outside of Rancher's distributions, refer to your Kubernetes provider docs. + +# Hardening Guides and Benchmark Versions + +These guides have been tested along with the Rancher v2.5 release. Each self-assessment guide is accompanied with a hardening guide and tested on a specific Kubernetes version and CIS benchmark version. If a CIS benchmark has not been validated for your Kubernetes version, you can choose to use the existing guides until a newer version is added. + +### RKE Guides + +Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides +---|---|---|--- +Kubernetes v1.15+ | CIS v1.5 | [Link](../reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md) | [Link](../reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md) +Kubernetes v1.18+ | CIS v1.6 | [Link](../reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark.md) | [Link](../reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark.md) + +### RKE2 Guides + +Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guides +---|---|---|--- +Kubernetes v1.18 | CIS v1.5 | [Link](https://docs.rke2.io/security/cis_self_assessment15/) | [Link](https://docs.rke2.io/security/hardening_guide/) +Kubernetes v1.20 | CIS v1.6 | [Link](https://docs.rke2.io/security/cis_self_assessment16/) | [Link](https://docs.rke2.io/security/hardening_guide/) + +### K3s Guides + +Kubernetes Version | CIS Benchmark Version | Self Assessment Guide | Hardening Guide +---|---|---|--- +Kubernetes v1.17, v1.18, & v1.19 | CIS v1.5 | [Link](https://rancher.com/docs/k3s/latest/en/security/self_assessment/) | [Link](https://rancher.com/docs/k3s/latest/en/security/hardening_guide/) + + +# Rancher with SELinux + +_Available as of v2.5.8_ + +[Security-Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux) is a security enhancement to Linux. After being historically used by government agencies, SELinux is now industry standard and is enabled by default on CentOS 7 and 8. + +To use Rancher with SELinux, we recommend installing the `rancher-selinux` RPM according to the instructions on [this page.](selinux-rpm.md#installing-the-rancher-selinux-rpm) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/resources.md b/versioned_docs/version-2.5/pages-for-subheaders/resources.md new file mode 100644 index 0000000000..6dba2a5194 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/resources.md @@ -0,0 +1,29 @@ +--- +title: Resources +weight: 5 +aliases: + - /rancher/v2.5/en/installation/options + - /rancher/v2.x/en/installation/resources/ +--- + +### Docker Installations + +The [single-node Docker installation](rancher-on-a-single-node-with-docker.md) is for Rancher users that are wanting to test out Rancher. 
Instead of running on a Kubernetes cluster using Helm, you install the Rancher server component on a single node using a `docker run` command. + +Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +### Air Gapped Installations + +Follow [these steps](air-gapped-helm-cli-install.md) to install the Rancher server in an air gapped environment. + +An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Advanced Options + +When installing Rancher, there are several advanced options that can be enabled during installation. Within each install guide, these options are presented. Learn more about these options: + +- [Custom CA Certificate](../getting-started/installation-and-upgrade/resources/custom-ca-root-certificates.md) +- [API Audit Log](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) +- [TLS Settings](../reference-guides/installation-references/tls-settings.md) +- [etcd configuration](../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs.md) +- [Local System Charts for Air Gap Installations](../getting-started/installation-and-upgrade/resources/local-system-charts.md) | v2.3.0 | diff --git a/content/rancher/v2.5/en/security/selinux/_index.md b/versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md similarity index 100% rename from content/rancher/v2.5/en/security/selinux/_index.md rename to versioned_docs/version-2.5/pages-for-subheaders/selinux-rpm.md diff --git a/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md new file mode 100644 index 0000000000..be4a4e7d5d --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/set-up-cloud-providers.md @@ -0,0 +1,47 @@ +--- +title: Setting up Cloud Providers +weight: 2300 +aliases: + - /rancher/v2.5/en/concepts/clusters/cloud-providers/ + - /rancher/v2.5/en/cluster-provisioning/rke-clusters/options/cloud-providers + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/ +--- +A _cloud provider_ is a module in Kubernetes that provides an interface for managing nodes, load balancers, and networking routes. + +When a cloud provider is set up in Rancher, the Rancher server can automatically provision new nodes, load balancers or persistent storage devices when launching Kubernetes definitions, if the cloud provider you're using supports such automation. + +Your cluster will not provision correctly if you configure a cloud provider cluster of nodes that do not meet the prerequisites. + +By default, the **Cloud Provider** option is set to `None`. 
+ +The following cloud providers can be enabled: + +* Amazon +* Azure +* GCE (Google Compute Engine) +* vSphere + +### Setting up the Amazon Cloud Provider + +For details on enabling the Amazon cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon.md) + +### Setting up the Azure Cloud Provider + +For details on enabling the Azure cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure.md) + +### Setting up the GCE Cloud Provider + +For details on enabling the Google Compute Engine cloud provider, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md) + +### Setting up the vSphere Cloud Provider + +For details on enabling the vSphere cloud provider, refer to [this page.](vsphere-cloud-provider.md) + +### Setting up a Custom Cloud Provider + +The `Custom` cloud provider is available if you want to configure any Kubernetes cloud provider. + +For the custom cloud provider option, you can refer to the [RKE docs](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/) on how to edit the yaml file for your specific cloud provider. There are specific cloud providers that have more detailed configuration: + +* [vSphere](https://rancher.com/docs/rke/latest/en/config-options/cloud-providercluster-provisioning/rke-clusters/cloud-providers/vsphere/) +* [OpenStack](https://rancher.com/docs/rke/latest/en/config-options/cloud-providers/openstack/) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md b/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md new file mode 100644 index 0000000000..ce472758e9 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers.md @@ -0,0 +1,32 @@ +--- +title: Setting up Clusters from Hosted Kubernetes Providers +weight: 3 +aliases: + - /rancher/v2.x/en/cluster-provisioning/hosted-kubernetes-clusters/ +--- + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +In this use case, Rancher sends a request to a hosted provider using the provider's API. The provider then provisions and hosts the cluster for you. When the cluster finishes building, you can manage it from the Rancher UI along with clusters you've provisioned that are hosted on-prem or in an infrastructure provider. 
+ +Rancher supports the following Kubernetes providers: + +- [Google GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/) +- [Amazon EKS (Amazon Elastic Container Service for Kubernetes)](https://aws.amazon.com/eks/) +- [Microsoft AKS (Azure Kubernetes Service)](https://azure.microsoft.com/en-us/services/kubernetes-service/) +- [Alibaba ACK (Alibaba Cloud Container Service for Kubernetes)](https://www.alibabacloud.com/product/kubernetes) +- [Tencent TKE (Tencent Kubernetes Engine)](https://intl.cloud.tencent.com/product/tke) +- [Huawei CCE (Huawei Cloud Container Engine)](https://www.huaweicloud.com/en-us/product/cce.html) + +## Hosted Kubernetes Provider Authentication + +When using Rancher to create a cluster hosted by a provider, you are prompted for authentication information. This information is required to access the provider's API. For more information on how to obtain this information, see the following procedures: + +- [Creating a GKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke.md) +- [Creating an EKS Cluster](amazon-eks-permissions.md) +- [Creating an AKS Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks.md) +- [Creating an ACK Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba.md) +- [Creating a TKE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent.md) +- [Creating a CCE Cluster](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md b/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/single-node-rancher-in-docker.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md b/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md new file mode 100644 index 0000000000..8542a0f0a9 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-existing-nodes.md @@ -0,0 +1,129 @@ +--- +title: Launching Kubernetes on Existing Custom Nodes +description: To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements +metaDescription: "To create a cluster with custom nodes, you’ll need to access servers in your cluster and provision them according to Rancher requirements" +weight: 2225 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-custom/ + - /rancher/v2.5/en/cluster-provisioning/custom-clusters/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/ +--- + +When you create a custom cluster, Rancher uses RKE (the Rancher Kubernetes Engine) to create a Kubernetes cluster in on-prem bare-metal servers, on-prem virtual machines, or in any node hosted by an infrastructure provider. 
+ +To use this option you'll need access to servers you intend to use in your Kubernetes cluster. Provision each server according to the [requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md), which includes some hardware specifications and Docker. After you install Docker on each server, you willl also run the command provided in the Rancher UI on each server to turn each one into a Kubernetes node. + +This section describes how to set up a custom cluster. + +# Creating a Cluster with Custom Nodes + +>**Want to use Windows hosts as Kubernetes workers?** +> +>See [Configuring Custom Clusters for Windows](use-windows-clusters.md) before you start. + + + +- [1. Provision a Linux Host](#1-provision-a-linux-host) +- [2. Create the Custom Cluster](#2-create-the-custom-cluster) +- [3. Amazon Only: Tag Resources](#3-amazon-only-tag-resources) + + + +### 1. Provision a Linux Host + +Begin creation of a custom cluster by provisioning a Linux host. Your host can be: + +- A cloud-host virtual machine (VM) +- An on-prem VM +- A bare-metal server + +If you want to reuse a node from a previous custom cluster, [clean the node](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) before using it in a cluster again. If you reuse a node that hasn't been cleaned, cluster provisioning may fail. + +Provision the host according to the [installation requirements](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md) and the [checklist for production-ready clusters.](checklist-for-production-ready-clusters.md) + +### 2. Create the Custom Cluster + +Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present. + +1. From the **Clusters** page, click **Add Cluster**. + +2. Choose **Custom**. + +3. Enter a **Cluster Name**. + +4. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. + +5. Use **Cluster Options** to choose the version of Kubernetes, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options.** + + >**Using Windows nodes as Kubernetes workers?** + > + >- See [Enable the Windows Support Option](use-windows-clusters.md). + >- The only Network Provider available for clusters with Windows support is Flannel. +6. Click **Next**. + +7. From **Node Role**, choose the roles that you want filled by a cluster node. You must provision at least one node for each role: `etcd`, `worker`, and `control plane`. All three roles are required for a custom cluster to finish provisioning. For more information on roles, see [this section.](../reference-guides/kubernetes-concepts.md#roles-for-nodes-in-kubernetes-clusters) + + >**Notes:** + > + >- Using Windows nodes as Kubernetes workers? See [this section](use-windows-clusters.md). + >- Bare-Metal Server Reminder: If you plan on dedicating bare-metal servers to each role, you must provision a bare-metal server for each role (i.e. provision multiple bare-metal servers). + +8. 
**Optional**: Click **[Show advanced options](admin-settings/agent-options/)** to specify IP address(es) to use when registering the node, override the hostname of the node, or to add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) or [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to the node. + +9. Copy the command displayed on screen to your clipboard. + +10. Log in to your Linux host using your preferred shell, such as PuTTy or a remote Terminal connection. Run the command copied to your clipboard. + + >**Note:** Repeat steps 7-10 if you want to dedicate specific hosts to specific node roles. Repeat the steps as many times as needed. + +11. When you finish running the command(s) on your Linux host(s), click **Done**. + +**Result:** + +Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active.** + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +### 3. Amazon Only: Tag Resources + +If you have configured your cluster to use Amazon as **Cloud Provider**, tag your AWS resources with a cluster ID. + +[Amazon Documentation: Tagging Your Amazon EC2 Resources](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) + +>**Note:** You can use Amazon EC2 instances without configuring a cloud provider in Kubernetes. You only have to configure the cloud provider if you want to use specific Kubernetes cloud provider functionality. For more information, see [Kubernetes Cloud Providers](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/) + + +The following resources need to be tagged with a `ClusterID`: + +- **Nodes**: All hosts added in Rancher. +- **Subnet**: The subnet used for your cluster +- **Security Group**: The security group used for your cluster. + + >**Note:** Do not tag multiple security groups. Tagging multiple groups generates an error when creating Elastic Load Balancer. + +The tag that should be used is: + +``` +Key=kubernetes.io/cluster/, Value=owned +``` + +`` can be any string you choose. However, the same string must be used on every resource you tag. Setting the tag value to `owned` informs the cluster that all resources tagged with the `` are owned and managed by this cluster. + +If you share resources between clusters, you can change the tag to: + +``` +Key=kubernetes.io/cluster/CLUSTERID, Value=shared +``` + +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. 
+- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md b/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md new file mode 100644 index 0000000000..99716f8553 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-new-nodes-in-an-infra-provider.md @@ -0,0 +1,136 @@ +--- +title: Launching Kubernetes on New Nodes in an Infrastructure Provider +weight: 2205 +aliases: + - /rancher/v2.5/en/concepts/global-configuration/node-templates/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ +--- + +Using Rancher, you can create pools of nodes based on a [node template](use-new-nodes-in-an-infra-provider.md#node-templates). This node template defines the parameters you want to use to launch nodes in your infrastructure providers or cloud providers. + +One benefit of installing Kubernetes on node pools hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically create another node to join the cluster to ensure that the count of the node pool is as expected. + +The available cloud providers to create a node template are decided based on active [node drivers](use-new-nodes-in-an-infra-provider.md#node-drivers). + +This section covers the following topics: + +- [Node templates](#node-templates) + - [Node labels](#node-labels) + - [Node taints](#node-taints) + - [Administrator control of node templates](#administrator-control-of-node-templates) +- [Node pools](#node-pools) + - [Node pool taints](#node-pool-taints) + - [About node auto-replace](#about-node-auto-replace) + - [Enabling node auto-replace](#enabling-node-auto-replace) + - [Disabling node auto-replace](#disabling-node-auto-replace) +- [Cloud credentials](#cloud-credentials) +- [Node drivers](#node-drivers) + +# Node Templates + +A node template is the saved configuration for the parameters to use when provisioning nodes in a specific cloud provider. These nodes can be launched from the UI. Rancher uses [Docker Machine](https://docs.docker.com/machine/) to provision these nodes. The available cloud providers to create node templates are based on the active node drivers in Rancher. + +After you create a node template in Rancher, it's saved so that you can use this template again to create node pools. Node templates are bound to your login. After you add a template, you can remove them from your user profile. + +### Node Labels + +You can add [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) on each node template, so that any nodes created from the node template will automatically have these labels on them. + +Invalid labels can prevent upgrades or can prevent Rancher from starting. 
For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) + +### Node Taints + +You can add [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on each node template, so that any nodes created from the node template will automatically have these taints on them. + +Taints can be added at both the node template and the node pool. If there is no conflict between taints with the same key and effect, all taints will be added to the nodes. If there are taints with the same key but a different effect, the taints from the node pool will override the taints from the node template. + +### Administrator Control of Node Templates + +Administrators can control and maintain all node templates within Rancher. When a node template owner is no longer using Rancher, the node templates they created can be managed by administrators so the cluster can continue to be updated and maintained. + +To access all node templates, an administrator will need to do the following: + +1. In the Rancher UI, click the user profile icon in the upper right corner. +1. Click **Node Templates.** + +**Result:** All node templates are listed and grouped by owner. The templates can be edited or cloned by clicking the **⋮.** + +# Node Pools + +Using Rancher, you can create pools of nodes based on a [node template](#node-templates). + +A node template defines the configuration of a node, such as the operating system to use, the number of CPUs, and the amount of memory. + +The benefit of using a node pool is that if a node is destroyed or deleted, you can increase the number of live nodes to compensate for the node that was lost. The node pool helps you ensure that the node count of the pool is as expected. + +Each node pool must have one or more node roles assigned. + +Each node role (i.e. etcd, control plane, and worker) should be assigned to a distinct node pool. Although it is possible to assign multiple node roles to a node pool, this should not be done for production clusters. + +The recommended setup is to have: + +- a node pool with the etcd node role and a count of three +- a node pool with the control plane node role and a count of at least two +- a node pool with the worker node role and a count of at least two + +### Node Pool Taints + +If you haven't defined [taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) on your node template, you can add taints for each node pool. The benefit of adding taints at the node pool level rather than on the node template is that you can swap out node templates without worrying about whether the taint is carried on the node template. + +Each taint is automatically added to every node created in the node pool. Therefore, if you add taints to a node pool that already has nodes, the taints won't apply to the existing nodes, but any new node added to the pool will get them. + +When there are taints on both the node pool and the node template, if there is no conflict between taints with the same key and effect, all taints will be added to the nodes. If there are taints with the same key but a different effect, the taints from the node pool will override the taints from the node template.
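+As a worked example of the precedence rule above, using hypothetical taint values: if the node template defines `workload=backend:NoSchedule` and the node pool defines `workload=backend:NoExecute`, new nodes receive only the pool's `NoExecute` taint, because the keys match but the effects differ. You can verify what actually landed on a node with `kubectl`:
+
+```bash
+# Show the taints applied to a node created from the pool
+kubectl get node <node-name> -o jsonpath='{.spec.taints}'
+```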
+ +### About Node Auto-replace + +If a node is in a node pool, Rancher can automatically replace unreachable nodes. Rancher will use the existing node template for the given node pool to recreate the node if it becomes inactive for a specified number of minutes. + +> **Important:** Self-healing node pools are designed to help you replace worker nodes for stateless applications. It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications. + +Node auto-replace works on top of the Kubernetes node controller. The node controller periodically checks the status of all the nodes (configurable via the `--node-monitor-period` flag of the `kube-controller`). When a node is unreachable, the node controller will taint that node. When this occurs, Rancher will begin its deletion countdown. You can configure the amount of time Rancher waits to delete the node. If the taint is not removed before the deletion countdown ends, Rancher will proceed to delete the node object. Rancher will then provision a node in accordance with the set quantity of the node pool. + +### Enabling Node Auto-replace + +When you create the node pool, you can specify the amount of time in minutes that Rancher will wait to replace an unresponsive node. + +1. In the form for creating a cluster, go to the **Node Pools** section. +1. Go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Fill out the rest of the form for creating a cluster. + +**Result:** Node auto-replace is enabled for the node pool. + +You can also enable node auto-replace after the cluster is created with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** +1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter the number of minutes that Rancher should wait for a node to respond before replacing the node. +1. Click **Save.** + +**Result:** Node auto-replace is enabled for the node pool. + +### Disabling Node Auto-replace + +You can disable node auto-replace from the Rancher UI with the following steps: + +1. From the Global view, click the Clusters tab. +1. Go to the cluster where you want to enable node auto-replace, click the vertical ⋮ **(…)**, and click **Edit.** +1. In the **Node Pools** section, go to the node pool where you want to enable node auto-replace. In the **Recreate Unreachable After** field, enter 0. +1. Click **Save.** + +**Result:** Node auto-replace is disabled for the node pool. + +# Cloud Credentials + +Node templates can use cloud credentials to store credentials for launching nodes in your cloud provider, which has some benefits: + +- Credentials are stored as a Kubernetes secret, which is not only more secure, but it also allows you to edit a node template without having to enter your credentials every time. + +- After the cloud credential is created, it can be re-used to create additional node templates. + +- Multiple node templates can share the same cloud credential to create node pools. 
If your key is compromised or expired, the cloud credential can be updated in a single place, which allows all node templates that are using it to be updated at once. + +After cloud credentials are created, the user can start [managing the cloud credentials that they created](../reference-guides/user-settings/manage-cloud-credentials.md). + +# Node Drivers + +If you don't find the node driver that you want to use, you can see if it is available in Rancher's built-in [node drivers and activate it](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#activating-deactivating-node-drivers), or you can [add your own custom node driver](../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md#adding-custom-node-drivers). diff --git a/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md b/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md new file mode 100644 index 0000000000..b175cdf949 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/use-windows-clusters.md @@ -0,0 +1,286 @@ +--- +title: Launching Kubernetes on Windows Clusters +weight: 2240 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +When provisioning a [custom cluster](use-existing-nodes.md) using Rancher, Rancher uses RKE (the Rancher Kubernetes Engine) to install Kubernetes on your existing nodes. + +In a Windows cluster provisioned with Rancher, the cluster must contain both Linux and Windows nodes. The Kubernetes controlplane can only run on Linux nodes, and the Windows nodes can only have the worker role. Windows nodes can only be used for deploying workloads. + +Some other requirements for Windows clusters include: + +- You can only add Windows nodes to a cluster if Windows support is enabled when the cluster is created. Windows support cannot be enabled for existing clusters. +- Kubernetes 1.15+ is required. +- The Flannel network provider must be used. +- Windows nodes must have 50 GB of disk space. + +For the full list of requirements, see [this section.](#requirements-for-windows-clusters) + +For a summary of Kubernetes features supported in Windows, see the Kubernetes documentation on [supported functionality and limitations for using Kubernetes with Windows](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#supported-functionality-and-limitations) or the [guide for scheduling Windows containers in Kubernetes](https://kubernetes.io/docs/setup/production-environment/windows/user-guide-windows-containers/). + +This guide covers the following topics: + + + +- [Requirements](#requirements-for-windows-clusters) +- [Tutorial: How to Create a Cluster with Windows Support](#tutorial-how-to-create-a-cluster-with-windows-support) +- [Configuration for Storage Classes in Azure](#configuration-for-storage-classes-in-azure) + + +# Requirements for Windows Clusters + +The general node requirements for networking, operating systems, and Docker are the same as the node requirements for a [Rancher installation](installation-requirements.md). 
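+Since the next section calls out specific Docker Engine versions for Windows worker nodes, it can be worth confirming the Docker version a prospective node is actually running. A minimal sketch, run in a shell on the node itself:
+
+```bash
+# Print the version of the Docker engine running on this host
+docker version --format '{{.Server.Version}}'
+```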
+ +### OS and Docker Requirements + + + + +Our support for Windows Server and Windows containers match the Microsoft official lifecycle for LTSC (Long-Term Servicing Channel) and SAC (Semi-Annual Channel). + +For the support lifecycle dates for Windows Server, see the [Microsoft Documentation.](https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info) + + + + +In order to add Windows worker nodes to a cluster, the node must be running one of the following Windows Server versions and the corresponding version of Docker Engine - Enterprise Edition (EE): + +- Nodes with Windows Server core version 1809 should use Docker EE-basic 18.09 or Docker EE-basic 19.03. +- Nodes with Windows Server core version 1903 should use Docker EE-basic 19.03. + +> **Notes:** +> +> - If you are using AWS, Rancher recommends _Microsoft Windows Server 2019 Base with Containers_ as the Amazon Machine Image (AMI). +> - If you are using GCE, Rancher recommends _Windows Server 2019 Datacenter for Containers_ as the OS image. + + + + +### Kubernetes Version + +Kubernetes v1.15+ is required. + +### Node Requirements + +The hosts in the cluster need to have at least: + +- 2 core CPUs +- 5 GB memory +- 50 GB disk space + +Rancher will not provision the node if the node does not meet these requirements. + +### Networking Requirements + +Before provisioning a new cluster, be sure that you have already installed Rancher on a device that accepts inbound network traffic. This is required in order for the cluster nodes to communicate with Rancher. If you have not already installed Rancher, please refer to the [installation documentation](installation-and-upgrade.md) before proceeding with this guide. + +Rancher only supports Windows using Flannel as the network provider. + +There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode. + +For **Host Gateway (L2bridge)** networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM. + +For **VXLAN (Overlay)** networking, the [KB4489899](https://support.microsoft.com/en-us/help/4489899) hotfix must be installed. Most cloud-hosted VMs already have this hotfix. + +If you are configuring DHCP options sets for an AWS virtual private cloud, note that in the `domain-name` option field, only one domain name can be specified. 
According to the DHCP options [documentation:](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_DHCP_Options.html)
+
+> Some Linux operating systems accept multiple domain names separated by spaces. However, other Linux operating systems and Windows treat the value as a single domain, which results in unexpected behavior. If your DHCP options set is associated with a VPC that has instances with multiple operating systems, specify only one domain name.
+
+### Rancher on vSphere with ESXi 6.7u2 and above
+
+If you are using Rancher on VMware vSphere with ESXi 6.7u2 or later with Red Hat Enterprise Linux 8.3, CentOS 8.3, or SUSE Enterprise Linux 15 SP2 or later, it is necessary to disable the `vmxnet3` virtual network adapter hardware offloading feature. Failure to do so will cause all network connections between pods on different cluster nodes to fail with timeout errors. All connections from Windows pods to critical services running on Linux nodes, such as CoreDNS, will fail as well. It is also possible that external connections may fail. This issue is the result of Linux distributions enabling the hardware offloading feature in `vmxnet3` and a bug in the `vmxnet3` hardware offloading feature that causes packets for guest overlay traffic to be discarded. To address this issue, disable the `vmxnet3` hardware offloading feature. This setting does not survive a reboot, so it must be disabled on every boot. The recommended course of action is to create a systemd unit file at `/etc/systemd/system/disable_hw_offloading.service`, which disables the `vmxnet3` hardware offloading feature on boot. A sample systemd unit file which disables the `vmxnet3` hardware offloading feature is as follows. Note that `<interface>` must be replaced with the host's `vmxnet3` network interface, e.g., `ens192`:
+
+```
+[Unit]
+Description=Disable vmxnet3 hardware offloading feature
+
+[Service]
+Type=oneshot
+ExecStart=ethtool -K <interface> tx-udp_tnl-segmentation off
+ExecStart=ethtool -K <interface> tx-udp_tnl-csum-segmentation off
+StandardOutput=journal
+
+[Install]
+WantedBy=multi-user.target
+```
+Then set the appropriate permissions on the systemd unit file:
+```
+chmod 0644 /etc/systemd/system/disable_hw_offloading.service
+```
+Finally, enable the systemd service:
+```
+systemctl enable disable_hw_offloading.service
+```
+
+### Architecture Requirements
+
+The Kubernetes cluster management nodes (`etcd` and `controlplane`) must be run on Linux nodes.
+
+The `worker` nodes, where your workloads are deployed, will typically be Windows nodes, but there must be at least one `worker` node that runs on Linux in order to run the Rancher cluster agent, DNS, metrics server, and Ingress related containers.
+
+Clusters won't begin provisioning until all three node roles (worker, etcd and controlplane) are present.
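+
+Once the cluster is active, a quick way to confirm that this layout is in place is to list the nodes with `kubectl`. This is a minimal sketch; it assumes your kubeconfig already points at the downstream cluster, and that the `kubernetes.io/os` label is set by the kubelet (Kubernetes v1.14+):
+
+```
+# Shows the ROLES column (derived from the node-role.kubernetes.io/* labels)
+# plus an OS column for each node.
+kubectl get nodes -L kubernetes.io/os
+```
+
+You should see at least one Linux node with the controlplane and etcd roles, one Linux worker, and one Windows worker before scheduling Windows workloads.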
+
+We recommend the minimum three-node architecture listed in the table below, but you can always add additional Linux and Windows workers to scale up your cluster for redundancy:
+
+
+
+| Node   | Operating System                                     | Kubernetes Cluster Role(s)  | Purpose                                                                              |
+| ------ | ---------------------------------------------------- | --------------------------- | ------------------------------------------------------------------------------------ |
+| Node 1 | Linux (Ubuntu Server 18.04 recommended)              | Control plane, etcd, worker | Manage the Kubernetes cluster                                                        |
+| Node 2 | Linux (Ubuntu Server 18.04 recommended)              | Worker                      | Support the Rancher Cluster agent, Metrics server, DNS, and Ingress for the cluster  |
+| Node 3 | Windows (Windows Server core version 1809 or above)  | Worker                      | Run your Windows containers                                                          |
+
+### Container Requirements
+
+Windows requires that containers be built on the same Windows Server version that they are deployed on. Therefore, containers must be built on Windows Server core version 1809 or above. If you have existing containers built for an earlier Windows Server core version, they must be re-built on Windows Server core version 1809 or above.
+
+### Cloud Provider Specific Requirements
+
+If you set a Kubernetes cloud provider in your cluster, some additional steps are required. You might want to set a cloud provider if you want to leverage a cloud provider's capabilities, for example, to automatically provision storage, load balancers, or other infrastructure for your cluster. Refer to [this page](./set-up-cloud-providers.md) for details on how to configure a cloud provider for a cluster of nodes that meet the prerequisites.
+
+If you are using the GCE (Google Compute Engine) cloud provider, you must do the following:
+
+- Enable the GCE cloud provider in the `cluster.yml` by following [these steps.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine.md)
+- When provisioning the cluster in Rancher, choose **Custom cloud provider** as the cloud provider in the Rancher UI.
+
+# Tutorial: How to Create a Cluster with Windows Support
+
+This tutorial describes how to create a Rancher-provisioned cluster with the three nodes in the [recommended architecture.](#architecture-requirements)
+
+When you provision a cluster with Rancher on existing nodes, you will add nodes to the cluster by installing the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) on each one. When you create or edit your cluster from the Rancher UI, you will see a **Customize Node Run Command** that you can run on each server to add it to your cluster.
+
+To set up a cluster with support for Windows nodes and containers, you will need to complete the tasks below.
+
+
+
+1. [Provision Hosts](#1-provision-hosts)
+1. [Create the Cluster on Existing Nodes](#2-create-the-cluster-on-existing-nodes)
+1. [Add Nodes to the Cluster](#3-add-nodes-to-the-cluster)
+1. [Optional: Configuration for Azure Files](#4-optional-configuration-for-azure-files)
+
+
+# 1. Provision Hosts
+
+To begin provisioning a cluster on existing nodes with Windows support, prepare your hosts.
+
+Your hosts can be:
+
+- Cloud-hosted VMs
+- VMs from virtualization clusters
+- Bare-metal servers
+
+You will provision three nodes:
+
+- One Linux node, which manages the Kubernetes control plane and stores your `etcd`
+- A second Linux node, which will be another worker node
+- The Windows node, which will run your Windows containers as a worker node
+
+| Node   | Operating System                                              |
+| ------ | ------------------------------------------------------------- |
+| Node 1 | Linux (Ubuntu Server 18.04 recommended)                       |
+| Node 2 | Linux (Ubuntu Server 18.04 recommended)                       |
+| Node 3 | Windows (Windows Server core version 1809 or above required)  |
+
+If your nodes are hosted by a **Cloud Provider** and you want automation support such as load balancers or persistent storage devices, your nodes have additional configuration requirements. For details, see [Selecting Cloud Providers.](set-up-cloud-providers.md)
+
+# 2. Create the Cluster on Existing Nodes
+
+The instructions for creating a Windows cluster on existing nodes are very similar to the general [instructions for creating a custom cluster](use-existing-nodes.md) with some Windows-specific requirements.
+
+1. From the **Global** view, click on the **Clusters** tab and click **Add Cluster**.
+1. Click **From existing nodes (Custom)**.
+1. Enter a name for your cluster in the **Cluster Name** text box.
+1. In the **Kubernetes Version** dropdown menu, select v1.15 or above.
+1. In the **Network Provider** field, select **Flannel.**
+1. In the **Windows Support** section, click **Enable.**
+1. Optional: After you enable Windows support, you will be able to choose the Flannel backend. There are two network options: [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) and [**VXLAN (Overlay)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). The default option is **VXLAN (Overlay)** mode.
+1. Click **Next**.
+
+> **Important:** For Host Gateway (L2bridge) networking, it's best to use the same Layer 2 network for all nodes. Otherwise, you need to configure the route rules for them. For details, refer to the [documentation on configuring cloud-hosted VM routes.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#cloud-hosted-vm-routes-configuration) You will also need to [disable private IP address checks](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway.md#disabling-private-ip-address-checks) if you are using Amazon EC2, Google GCE, or Azure VM.
+
+# 3. Add Nodes to the Cluster
+
+This section describes how to register your Linux and Windows nodes to your cluster. You will run a command on each node, which will install the Rancher agent and allow Rancher to manage each node.
+
+### Add Linux Master Node
+
+In this section, we fill out a form on the Rancher UI to get a custom command to install the Rancher agent on the Linux master node. Then we will copy the command and run it on our Linux master node to register the node in the cluster.
+
+The first node in your cluster should be a Linux host that has both the **Control Plane** and **etcd** roles.
At a minimum, both of these roles must be enabled for this node, and this node must be added to your cluster before you can add Windows hosts.
+
+1. In the **Node Operating System** section, click **Linux**.
+1. In the **Node Role** section, choose at least **etcd** and **Control Plane**. We recommend selecting all three.
+1. Optional: If you click **Show advanced options,** you can customize the settings for the [Rancher agent](../reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md) and [node labels.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+1. Copy the command displayed on the screen to your clipboard.
+1. SSH into your Linux host and run the command that you copied to your clipboard.
+1. When you are finished provisioning your Linux node(s), select **Done**.
+
+**Result:**
+
+Your cluster is created and assigned a state of **Provisioning.** Rancher is standing up your cluster.
+
+You can access your cluster after its state is updated to **Active.**
+
+**Active** clusters are assigned two Projects:
+
+- `Default`, containing the `default` namespace
+- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces
+
+
+It may take a few minutes for the node to be registered in your cluster.
+
+### Add Linux Worker Node
+
+In this section, we run a command to register the Linux worker node to the cluster.
+
+After the initial provisioning of your cluster, your cluster only has a single Linux host. Next, we add another Linux `worker` host, which will be used to support the _Rancher cluster agent_, _Metrics server_, _DNS_ and _Ingress_ for your cluster.
+
+1. From the **Global** view, click **Clusters.**
+1. Go to the cluster that you created and click **⋮ > Edit.**
+1. Scroll down to **Node Operating System**. Choose **Linux**.
+1. In the **Customize Node Run Command** section, go to the **Node Options** and select the **Worker** role.
+1. Copy the command displayed on screen to your clipboard.
+1. Log in to your Linux host using a remote Terminal connection. Run the command copied to your clipboard.
+1. From **Rancher**, click **Save**.
+
+**Result:** The **Worker** role is installed on your Linux host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster.
+
+> **Note:** Taints on Linux Worker Nodes
+>
+> For each Linux worker node added to the cluster, the following taint will be added. This taint ensures that any workloads added to the cluster are automatically scheduled to the Windows worker nodes. If you want to schedule workloads specifically onto a Linux worker node, you will need to add tolerations to those workloads.
+
+> | Taint Key      | Taint Value | Taint Effect |
+> | -------------- | ----------- | ------------ |
+> | `cattle.io/os` | `linux`     | `NoSchedule` |
+
+### Add a Windows Worker Node
+
+In this section, we run a command to register the Windows worker node to the cluster.
+
+You can add Windows hosts to the cluster by editing the cluster and choosing the **Windows** option.
+
+1. From the **Global** view, click **Clusters.**
+1. Go to the cluster that you created and click **⋮ > Edit.**
+1. Scroll down to **Node Operating System**. Choose **Windows**. Note: You will see that the **worker** role is the only available role.
+1. Copy the command displayed on screen to your clipboard.
+1.
Log in to your Windows host using your preferred tool, such as [Microsoft Remote Desktop](https://docs.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/remote-desktop-clients). Run the command copied to your clipboard in the **Command Prompt (CMD)**. +1. From Rancher, click **Save**. +1. Optional: Repeat these instructions if you want to add more Windows nodes to your cluster. + +**Result:** The **Worker** role is installed on your Windows host, and the node registers with Rancher. It may take a few minutes for the node to be registered in your cluster. You now have a Windows Kubernetes cluster. + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through the Rancher server. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# Configuration for Storage Classes in Azure + +If you are using Azure VMs for your nodes, you can use [Azure files](https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv) as a StorageClass for the cluster. For details, refer to [this section.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md b/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md new file mode 100644 index 0000000000..254e80e3e7 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/user-settings.md @@ -0,0 +1,19 @@ +--- +title: User Settings +weight: 23 +aliases: + - /rancher/v2.5/en/tasks/user-settings/ + - /rancher/v2.x/en/user-settings/ +--- + +Within Rancher, each user has a number of settings associated with their login: personal preferences, API keys, etc. You can configure these settings by choosing from the **User Settings** menu. You can open this menu by clicking your avatar, located within the main menu. + +![User Settings Menu](/img/user-settings.png) + +The available user settings are: + +- [API & Keys](../reference-guides/user-settings/api-keys.md): If you want to interact with Rancher programmatically, you need an API key. Follow the directions in this section to obtain a key. 
+- [Cloud Credentials](../reference-guides/user-settings/manage-cloud-credentials.md): Manage cloud credentials [used by node templates](use-new-nodes-in-an-infra-provider.md#node-templates) to [provision nodes for clusters](launch-kubernetes-with-rancher.md). +- [Node Templates](../reference-guides/user-settings/manage-node-templates.md): Manage templates [used by Rancher to provision nodes for clusters](launch-kubernetes-with-rancher.md). +- [Preferences](../reference-guides/user-settings/user-preferences.md): Sets superficial preferences for the Rancher UI. +- Log Out: Ends your user session. diff --git a/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md b/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md new file mode 100644 index 0000000000..b14b140fd7 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/vsphere-cloud-provider.md @@ -0,0 +1,18 @@ +--- +title: Setting up the vSphere Cloud Provider +weight: 4 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/ +--- + +In this section, you'll learn how to set up a vSphere cloud provider for a Rancher managed RKE Kubernetes cluster in vSphere. + +# In-tree Cloud Provider + +To use the in-tree vSphere cloud provider, you will need to use an RKE configuration option. For details, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere.md) + +# Out-of-tree Cloud Provider + +_Available as of v2.5+_ + +To set up the out-of-tree vSphere cloud provider, you will need to install Helm charts from the Rancher marketplace. For details, refer to [this page.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere.md) diff --git a/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md new file mode 100644 index 0000000000..2d98aa5087 --- /dev/null +++ b/versioned_docs/version-2.5/pages-for-subheaders/vsphere.md @@ -0,0 +1,61 @@ +--- +title: Creating a vSphere Cluster +shortTitle: vSphere +description: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +metaDescription: Use Rancher to create a vSphere cluster. It may consist of groups of VMs with distinct properties which allow for fine-grained control over the sizing of nodes. +weight: 2225 +aliases: + - /rancher/v2.5/en/tasks/clusters/creating-a-cluster/create-cluster-vsphere/ + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/ +--- + +By using Rancher with vSphere, you can bring cloud operations on-premises. + +Rancher can provision nodes in vSphere and install Kubernetes on them. When creating a Kubernetes cluster in vSphere, Rancher first provisions the specified number of virtual machines by communicating with the vCenter API. Then it installs Kubernetes on top of them. + +A vSphere cluster may consist of multiple groups of VMs with distinct properties, such as the amount of memory or the number of vCPUs. This grouping allows for fine-grained control over the sizing of nodes for each Kubernetes role. 
+
+- [vSphere Enhancements in Rancher v2.3](#vsphere-enhancements-in-rancher-v2-3)
+- [Creating a vSphere Cluster](#creating-a-vsphere-cluster)
+- [Provisioning Storage](#provisioning-storage)
+- [Enabling the vSphere Cloud Provider](#enabling-the-vsphere-cloud-provider)
+
+# vSphere Enhancements in Rancher v2.3
+
+The vSphere node templates have been updated, allowing you to bring cloud operations on-premises with the following enhancements:
+
+### Self-healing Node Pools
+
+One of the biggest advantages of provisioning vSphere nodes with Rancher is that it allows you to take advantage of Rancher's self-healing node pools, also called the [node auto-replace feature,](use-new-nodes-in-an-infra-provider.md#about-node-auto-replace) in your on-premises clusters. Self-healing node pools are designed to help you replace worker nodes for stateless applications. When Rancher provisions nodes from a node template, Rancher can automatically replace unreachable nodes.
+
+> **Important:** It is not recommended to enable node auto-replace on a node pool of master nodes or nodes with persistent volumes attached, because VMs are treated ephemerally. When a node in a node pool loses connectivity with the cluster, its persistent volumes are destroyed, resulting in data loss for stateful applications.
+
+### Dynamically Populated Options for Instances and Scheduling
+
+Node templates for vSphere have been updated so that when you create a node template with your vSphere credentials, the template is automatically populated with the same options for provisioning VMs that you have access to in the vSphere console.
+
+For the fields to be populated, your setup needs to fulfill the [prerequisites.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md)
+
+### More Supported Operating Systems
+
+You can provision VMs with any operating system that supports `cloud-init`. Only YAML format is supported for the [cloud config.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html)
+
+### Video Walkthrough of v2.3.3 Node Template Features
+
+In this YouTube video, we demonstrate how to set up a node template with the new features designed to help you bring cloud operations to on-premises clusters: [vSphere node template features walkthrough](https://www.youtube.com/watch?v=dPIwg6x1AlU).
+
+# Creating a vSphere Cluster
+
+In [this section,](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere.md) you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in vSphere.
+
+# Provisioning Storage
+
+For an example of how to provision storage in vSphere using Rancher, refer to [this section.](../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage.md) In order to dynamically provision storage in vSphere, the vSphere provider must be [enabled.](vsphere-cloud-provider.md)
+
+# Enabling the vSphere Cloud Provider
+
+When a cloud provider is set up in Rancher, the Rancher server can automatically provision new infrastructure for the cluster, including new nodes or persistent storage devices.
+
+For details, refer to the section on [enabling the vSphere cloud provider.](vsphere-cloud-provider.md)
\ No newline at end of file
diff --git a/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md b/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md
new file mode 100644
index 0000000000..f51184afeb
--- /dev/null
+++ b/versioned_docs/version-2.5/pages-for-subheaders/workloads-and-pods.md
@@ -0,0 +1,84 @@
+---
+title: "Kubernetes Workloads and Pods"
+description: "Learn about the two constructs with which you can build any complex containerized application in Kubernetes: Kubernetes workloads and pods"
+weight: 3025
+aliases:
+  - /rancher/v2.5/en/concepts/workloads/
+  - /rancher/v2.5/en/tasks/workloads/
+  - /rancher/v2.5/en/k8s-in-rancher/workloads
+  - /rancher/v2.x/en/k8s-in-rancher/workloads/
+---
+
+You can build any complex containerized application in Kubernetes using two basic constructs: pods and workloads. Once you build an application, you can expose it for access either within the same cluster or on the Internet using a third construct: services.
+
+### Pods
+
+[_Pods_](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) are one or more containers that share network namespaces and storage volumes. Most pods have only one container. Therefore when we discuss _pods_, the term is often synonymous with _containers_. You scale pods the same way you scale containers—by having multiple instances of the same pod that implement a service. Usually pods get scaled and managed by the workload.
+
+### Workloads
+
+_Workloads_ are objects that set deployment rules for pods. Based on these rules, Kubernetes performs the deployment and updates the workload with the current state of the application.
+Workloads let you define the rules for application scheduling, scaling, and upgrades.
+
+#### Workload Types
+
+Kubernetes divides workloads into different types. The most popular types supported by Kubernetes are:
+
+- [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
+
+  _Deployments_ are best used for stateless applications (i.e., when you don't have to maintain the workload's state). Pods managed by deployment workloads are treated as independent and disposable. If a pod encounters disruption, Kubernetes removes it and then recreates it. An example application would be an Nginx web server.
+
+- [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
+
+  _StatefulSets_, in contrast to deployments, are best used when your application needs to maintain its identity and store data. An example would be Zookeeper—an application that requires a database for storage.
+
+- [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/)
+
+  _DaemonSets_ ensure that every node in the cluster runs a copy of the pod. For use cases where you're collecting logs or monitoring node performance, this daemon-like workload works best.
+
+- [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/)
+
+  _Jobs_ launch one or more pods and ensure that a specified number of them successfully terminate. Jobs are best used to run a finite task to completion as opposed to managing an ongoing desired application state.
+
+- [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/)
+
+  _CronJobs_ are similar to jobs. CronJobs, however, run to completion on a cron-based schedule.
+
+### Services
+
+In many use cases, a workload has to be either:
+
+- Accessed by other workloads in the cluster.
+- Exposed to the outside world.
+
+You can achieve these goals by creating a _Service_. Services are mapped to the underlying workload's pods using a [selector/label approach (view the code samples)](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#service-and-replicationcontroller). Rancher UI simplifies this mapping process by automatically creating a service along with the workload, using the service port and type that you select.
+
+#### Service Types
+
+There are several types of services available in Rancher. The descriptions below are sourced from the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types).
+
+- **ClusterIP**
+
+  >Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. This is the default `ServiceType`.
+
+- **NodePort**
+
+  >Exposes the service on each Node’s IP at a static port (the `NodePort`). A `ClusterIP` service, to which the `NodePort` service will route, is automatically created. You’ll be able to contact the `NodePort` service, from outside the cluster, by requesting `<NodeIP>:<NodePort>`.
+
+- **LoadBalancer**
+
+  >Exposes the service externally using a cloud provider’s load balancer. `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created.
+
+## Workload Options
+
+This section of the documentation contains instructions for deploying workloads and using workload options.
+ +- [Deploy Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads.md) +- [Upgrade Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads.md) +- [Rollback Workloads](../how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads.md) + +## Related Links + +### External Links + +- [Services](https://kubernetes.io/docs/concepts/services-networking/service/) diff --git a/content/rancher/v2.5/en/_index.md b/versioned_docs/version-2.5/rancher-manager.md similarity index 100% rename from content/rancher/v2.5/en/_index.md rename to versioned_docs/version-2.5/rancher-manager.md diff --git a/versioned_docs/version-2.5/reference-guides.md b/versioned_docs/version-2.5/reference-guides.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/rancher/v2.5/en/api/api-tokens/_index.md b/versioned_docs/version-2.5/reference-guides/about-the-api/api-tokens.md similarity index 100% rename from content/rancher/v2.5/en/api/api-tokens/_index.md rename to versioned_docs/version-2.5/reference-guides/about-the-api/api-tokens.md diff --git a/content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md b/versioned_docs/version-2.5/reference-guides/amazon-eks-permissions/minimum-eks-permissions.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md rename to versioned_docs/version-2.5/reference-guides/amazon-eks-permissions/minimum-eks-permissions.md diff --git a/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/backup-configuration.md b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/backup-configuration.md new file mode 100644 index 0000000000..006df3337d --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/backup-configuration.md @@ -0,0 +1,186 @@ +--- +title: Backup Configuration +shortTitle: Backup +weight: 1 +aliases: + - /rancher/v2.5/en/backups/v2.5/configuration/backup-config + - /rancher/v2.x/en/backups/v2.5/configuration/backup-config/ +--- + +The Backup Create page lets you configure a schedule, enable encryption and specify the storage location for your backups. + +![](/img/backup_restore/backup/backup.png) + +- [Schedule](#schedule) +- [Encryption](#encryption) +- [Storage Location](#storage-location) + - [S3](#s3) + - [Example S3 Storage Configuration](#example-s3-storage-configuration) + - [Example MinIO Configuration](#example-minio-configuration) + - [Example credentialSecret](#example-credentialsecret) + - [IAM Permissions for EC2 Nodes to Access S3](#iam-permissions-for-ec2-nodes-to-access-s3) +- [Examples](#examples) + + +# Schedule + +Select the first option to perform a one-time backup, or select the second option to schedule recurring backups. Selecting **Recurring Backups** lets you configure following two fields: + +- **Schedule**: This field accepts + - Standard [cron expressions](https://en.wikipedia.org/wiki/Cron), such as `"0 * * * *"` + - Descriptors, such as `"@midnight"` or `"@every 1h30m"` +- **Retention Count**: This value specifies how many backup files must be retained. 
If files exceed the given retentionCount, the oldest files will be deleted. The default value is 10.
+
+![](/img/backup_restore/backup/schedule.png)
+
+| YAML Directive Name | Description |
+| ---------------- | ---------------- |
+| `schedule` | Provide the cron string for scheduling recurring backups. |
+| `retentionCount` | Provide the number of backup files to be retained. |
+
+# Encryption
+
+The `rancher-backup` operator gathers resources by making calls to the kube-apiserver. Objects returned by the apiserver are decrypted, so even if [encryption at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) is enabled, the objects gathered by the backup will be in plaintext.
+
+To avoid storing them in plaintext, you can use the same encryptionConfig file that was used for at-rest encryption to encrypt certain resources in your backup.
+
+> **Important:** You must save the encryptionConfig file, because it won’t be saved by the rancher-backup operator.
+The same encryptionConfig file needs to be used when performing a restore.
+
+The operator consumes this encryptionConfig as a Kubernetes Secret, and the Secret must be in the operator’s namespace. Rancher installs the `rancher-backup` operator in the `cattle-resources-system` namespace, so create this encryptionConfig secret in that namespace.
+
+For the `EncryptionConfiguration`, you can use the [sample file provided in the Kubernetes documentation.](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration)
+
+To create the Secret, the encryption configuration file must be named `encryption-provider-config.yaml`, and the `--from-file` flag must be used to create this secret.
+
+Save the `EncryptionConfiguration` in a file called `encryption-provider-config.yaml` and run this command:
+
+```
+kubectl create secret generic encryptionconfig \
+  --from-file=./encryption-provider-config.yaml \
+  -n cattle-resources-system
+```
+
+This will ensure that the secret contains a key named `encryption-provider-config.yaml`, and the operator will use this key to get the encryption configuration.
+
+The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key.
+
+![](/img/backup_restore/backup/encryption.png)
+
+In the example command above, the name `encryptionconfig` can be changed to anything.
+
+
+| YAML Directive Name | Description |
+| ---------------- | ---------------- |
+| `encryptionConfigSecretName` | Provide the name of the Secret from the `cattle-resources-system` namespace that contains the encryption config file. |
+
+# Storage Location
+
+![](/img/backup_restore/backup/storageLocation.png)
+
+If the StorageLocation is specified in the Backup, the operator will retrieve the backup location from that particular S3 bucket. If not specified, the operator will try to find this file in the default operator-level S3 store, and in the operator-level PVC store. The default storage location is configured during the deployment of the `rancher-backup` operator.
+
+Selecting the first option stores this backup in the storage location configured while installing the rancher-backup chart. The second option lets you configure a different S3-compatible storage provider for storing the backup.
+
+### S3
+
+The S3 storage location contains the following configuration fields:
+
+1.
**Credential Secret** (optional): If you need to use AWS access keys and secret keys to access the S3 bucket, create a secret with your credentials under the keys `accessKey` and `secretKey`. It can be in any namespace. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) The Credential Secret dropdown lists the secrets in all namespaces.
+1. **Bucket Name**: The name of the S3 bucket where backup files will be stored.
+1. **Region** (optional): The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. This field isn't needed for configuring MinIO.
+1. **Folder** (optional): The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported.
+1. **Endpoint**: The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket.
+1. **Endpoint CA** (optional): This should be the Base64-encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-storage-configuration)
+1. **Skip TLS Verifications** (optional): Set to true if you are not using TLS.
+
+
+| YAML Directive Name | Description | Required |
+| ---------------- | ---------------- | ------------ |
+| `credentialSecretName` | If you need to use AWS access keys and secret keys to access the S3 bucket, create a secret with your credentials under the keys `accessKey` and `secretKey`. It can be in any namespace as long as you provide that namespace in `credentialSecretNamespace`. An example secret is [here.](#example-credentialsecret) This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | |
+| `credentialSecretNamespace` | The namespace of the secret containing the credentials to access S3. This directive is unnecessary if the nodes running your operator are in EC2 and set up with IAM permissions that allow them to access S3, as described in [this section.](#iam-permissions-for-ec2-nodes-to-access-s3) | |
+| `bucketName` | The name of the S3 bucket where backup files will be stored. | ✓ |
+| `folder` | The name of the folder in the S3 bucket where backup files will be stored. Nested folders (e.g., `rancher/cluster1`) are not supported. | |
+| `region` | The AWS [region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | ✓ |
+| `endpoint` | The [endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) that is used to access S3 in the region of your bucket. | ✓ |
+| `endpointCA` | This should be the Base64-encoded CA cert. For an example, refer to the [example S3 compatible configuration.](#example-s3-storage-configuration) | |
+| `insecureTLSSkipVerify` | Set to true if you are not using TLS.
| | + +### Example S3 Storage Configuration + +```yaml +s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com +``` + +### Example MinIO Configuration + +```yaml +s3: + credentialSecretName: minio-creds + bucketName: rancherbackups + endpoint: minio.35.202.130.254.sslip.io + endpointCA: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURHakNDQWdLZ0F3SUJBZ0lKQUtpWFZpNEpBb0J5TUEwR0NTcUdTSWIzRFFFQkN3VUFNQkl4RURBT0JnTlYKQkFNTUIzUmxjM1F0WTJFd0hoY05NakF3T0RNd01UZ3lOVFE1V2hjTk1qQXhNREk1TVRneU5UUTVXakFTTVJBdwpEZ1lEVlFRRERBZDBaWE4wTFdOaE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCjA4dnV3Q2Y0SEhtR2Q2azVNTmozRW5NOG00T2RpS3czSGszd1NlOUlXQkwyVzY5WDZxenBhN2I2M3U2L05mMnkKSnZWNDVqeXplRFB6bFJycjlpbEpWaVZ1NFNqWlFjdG9jWmFCaVNsL0xDbEFDdkFaUlYvKzN0TFVTZSs1ZDY0QQpWcUhDQlZObU5xM3E3aVY0TE1aSVpRc3N6K0FxaU1Sd0pOMVVKQTZ6V0tUc2Yzc3ByQ0J2dWxJWmZsVXVETVAyCnRCTCt6cXZEc0pDdWlhNEEvU2JNT29tVmM2WnNtTGkwMjdub3dGRld3MnRpSkM5d0xMRE14NnJoVHQ4a3VvVHYKQXJpUjB4WktiRU45L1Uzb011eUVKbHZyck9YS2ZuUDUwbk8ycGNaQnZCb3pUTStYZnRvQ1d5UnhKUmI5cFNTRApKQjlmUEFtLzNZcFpMMGRKY2sxR1h3SURBUUFCbzNNd2NUQWRCZ05WSFE0RUZnUVU5NHU4WXlMdmE2MTJnT1pyCm44QnlFQ2NucVFjd1FnWURWUjBqQkRzd09ZQVU5NHU4WXlMdmE2MTJnT1pybjhCeUVDY25xUWVoRnFRVU1CSXgKRURBT0JnTlZCQU1NQjNSbGMzUXRZMkdDQ1FDb2wxWXVDUUtBY2pBTUJnTlZIUk1FQlRBREFRSC9NQTBHQ1NxRwpTSWIzRFFFQkN3VUFBNElCQVFER1JRZ1RtdzdVNXRQRHA5Q2psOXlLRW9Vd2pYWWM2UlAwdm1GSHpubXJ3dUVLCjFrTkVJNzhBTUw1MEpuS29CY0ljVDNEeGQ3TGdIbTNCRE5mVVh2anArNnZqaXhJYXR2UWhsSFNVaWIyZjJsSTkKVEMxNzVyNCtROFkzelc1RlFXSDdLK08vY3pJTGh5ei93aHRDUlFkQ29lS1dXZkFiby8wd0VSejZzNkhkVFJzNwpHcWlGNWZtWGp6S0lOcTBjMHRyZ0xtalNKd1hwSnU0ZnNGOEcyZUh4b2pOKzdJQ1FuSkg5cGRIRVpUQUtOL2ppCnIvem04RlZtd1kvdTBndEZneWVQY1ZWbXBqRm03Y0ZOSkc4Y2ZYd0QzcEFwVjhVOGNocTZGeFBHTkVvWFZnclMKY1VRMklaU0RJd1FFY3FvSzFKSGdCUWw2RXBaUVpWMW1DRklrdFBwSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t +``` +### Example credentialSecret + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: creds +type: Opaque +data: + accessKey: + secretKey: +``` + +### IAM Permissions for EC2 Nodes to Access S3 + +There are two ways to set up the `rancher-backup` operator to use S3 as the backup storage location. + +One way is to configure the `credentialSecretName` in the Backup custom resource, which refers to AWS credentials that have access to S3. + +If the cluster nodes are in Amazon EC2, the S3 access can also be set up by assigning IAM permissions to the EC2 nodes so that they can access S3. + +To allow a node to access S3, follow the instructions in the [AWS documentation](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-instance-access-s3-bucket/) to create an IAM role for EC2. When you add a custom policy to the role, add the following permissions, and replace the `Resource` with your bucket name: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::rancher-backups" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:PutObjectAcl" + ], + "Resource": [ + "arn:aws:s3:::rancher-backups/*" + ] + } + ] +} +``` + +After the role is created, and you have attached the corresponding instance profile to your EC2 instance(s), the `credentialSecretName` directive can be left empty in the Backup custom resource. 
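+
+As a rough sketch of that last step, the instance profile can be created and attached with the AWS CLI. The profile name, role name, and instance ID below are hypothetical placeholders; substitute your own values:
+
+```
+# Create an instance profile and add the IAM role created above to it.
+aws iam create-instance-profile --instance-profile-name rancher-backup-s3
+aws iam add-role-to-instance-profile \
+  --instance-profile-name rancher-backup-s3 \
+  --role-name rancher-backup-s3-role
+
+# Attach the instance profile to the EC2 instance(s) running the operator.
+aws ec2 associate-iam-instance-profile \
+  --instance-id i-0123456789abcdef0 \
+  --iam-instance-profile Name=rancher-backup-s3
+```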
+ +# Examples + +For example Backup custom resources, refer to [this page.](examples.md#backup) diff --git a/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/examples.md b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/examples.md new file mode 100644 index 0000000000..ef3370c454 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/examples.md @@ -0,0 +1,301 @@ +--- +title: Examples +weight: 5 +aliases: + - /rancher/v2.5/en/backups/v2.5/examples + - /rancher/v2.x/en/backups/v2.5/examples/ +--- + +This section contains examples of Backup and Restore custom resources. + +The default backup storage location is configured when the `rancher-backup` operator is installed or upgraded. + +Encrypted backups can only be restored if the Restore custom resource uses the same encryption configuration secret that was used to create the backup. + +- [Backup](#backup) + - [Backup in the default location with encryption](#backup-in-the-default-location-with-encryption) + - [Recurring backup in the default location](#recurring-backup-in-the-default-location) + - [Encrypted recurring backup in the default location](#encrypted-recurring-backup-in-the-default-location) + - [Encrypted backup in Minio](#encrypted-backup-in-minio) + - [Backup in S3 using AWS credential secret](#backup-in-s3-using-aws-credential-secret) + - [Recurring backup in S3 using AWS credential secret](#recurring-backup-in-s3-using-aws-credential-secret) + - [Backup from EC2 nodes with IAM permission to access S3](#backup-from-ec2-nodes-with-iam-permission-to-access-s3) +- [Restore](#restore) + - [Restore using the default backup file location](#restore-using-the-default-backup-file-location) + - [Restore for Rancher migration](#restore-for-rancher-migration) + - [Restore from encrypted backup](#restore-from-encrypted-backup) + - [Restore an encrypted backup from Minio](#restore-an-encrypted-backup-from-minio) + - [Restore from backup using an AWS credential secret to access S3](#restore-from-backup-using-an-aws-credential-secret-to-access-s3) + - [Restore from EC2 nodes with IAM permissions to access S3](#restore-from-ec2-nodes-with-iam-permissions-to-access-s3) +- [Example Credential Secret for Storing Backups in S3](#example-credential-secret-for-storing-backups-in-s3) +- [Example EncryptionConfiguration](#example-encryptionconfiguration) + +# Backup + +This section contains example Backup custom resources. 
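+
+Each manifest below can be saved to a file and applied with `kubectl`; the filename here is a placeholder, and the commands assume the `rancher-backup` operator and its CRDs are already installed:
+
+```
+kubectl apply -f backup.yaml
+
+# List Backup resources and check their status; the operator records the
+# generated backup filename in the resource status once the backup completes.
+kubectl get backups.resources.cattle.io
+```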
+ +### Backup in the Default Location with Encryption + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-location-encrypted-backup +spec: + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Recurring Backup in the Default Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-location-recurring-backup +spec: + resourceSetName: rancher-resource-set + schedule: "@every 1h" + retentionCount: 10 +``` + +### Encrypted Recurring Backup in the Default Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: default-enc-recurring-backup +spec: + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 3 +``` + +### Encrypted Backup in Minio + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: minio-backup +spec: + storageLocation: + s3: + credentialSecretName: minio-creds + credentialSecretNamespace: default + bucketName: rancherbackups + endpoint: minio.sslip.io + endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Backup in S3 Using AWS Credential Secret + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-backup +spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +### Recurring Backup in S3 Using AWS Credential Secret + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-recurring-backup +spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 10 +``` + +### Backup from EC2 Nodes with IAM Permission to Access S3 + +This example shows that the AWS credential secret does not have to be provided to create a backup if the nodes running `rancher-backup` have [these permissions for access to S3.](backup-configuration.md#iam-permissions-for-ec2-nodes-to-access-s3) + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Backup +metadata: + name: s3-iam-backup +spec: + storageLocation: + s3: + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig +``` + +# Restore + +This section contains example Restore custom resources. 
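+
+As with the Backup examples, a Restore manifest is applied with `kubectl`, and restore progress is easiest to follow in the operator logs. This is a minimal sketch, assuming the operator was installed as the `rancher-backup` deployment in the `cattle-resources-system` namespace:
+
+```
+kubectl apply -f restore.yaml
+
+# Watch the Restore resource and follow the operator logs while it runs.
+kubectl get restores.resources.cattle.io
+kubectl logs -n cattle-resources-system deployment/rancher-backup -f
+```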
+ +### Restore Using the Default Backup File Location + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-default +spec: + backupFilename: default-location-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-29-54-07-00.tar.gz +# encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore for Rancher Migration +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-migration +spec: + backupFilename: backup-b0450532-cee1-4aa1-a881-f5f48a007b1c-2020-09-15T07-27-09Z.tar.gz + prune: false + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com +``` + +### Restore from Encrypted Backup + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-encrypted +spec: + backupFilename: default-test-s3-def-backup-c583d8f2-6daf-4648-8ead-ed826c591471-2020-08-24T20-47-05Z.tar.gz + encryptionConfigSecretName: encryptionconfig +``` + +### Restore an Encrypted Backup from Minio + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-minio +spec: + backupFilename: default-minio-backup-demo-aa5c04b7-4dba-4c48-9ac4-ab7916812eaa-2020-08-30T13-18-17-07-00.tar.gz + storageLocation: + s3: + credentialSecretName: minio-creds + credentialSecretNamespace: default + bucketName: rancherbackups + endpoint: minio.sslip.io + endpointCA: LS0tLS1CRUdJTi3VUFNQkl5UUT.....pbEpWaVzNkRS0tLS0t + encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore from Backup Using an AWS Credential Secret to Access S3 + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-s3-demo +spec: + backupFilename: test-s3-recurring-backup-752ecd87-d958-4d20-8350-072f8d090045-2020-09-26T12-49-34-07-00.tar.gz.enc + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + encryptionConfigSecretName: test-encryptionconfig +``` + +### Restore from EC2 Nodes with IAM Permissions to Access S3 + +This example shows that the AWS credential secret does not have to be provided to restore from backup if the nodes running `rancher-backup` have [these permissions for access to S3.](backup-configuration.md#iam-permissions-for-ec2-nodes-to-access-s3) + +```yaml +apiVersion: resources.cattle.io/v1 +kind: Restore +metadata: + name: restore-s3-demo +spec: + backupFilename: default-test-s3-recurring-backup-84bf8dd8-0ef3-4240-8ad1-fc7ec308e216-2020-08-24T10#52#44-07#00.tar.gz + storageLocation: + s3: + bucketName: rajashree-backup-test + folder: ecm1 + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + encryptionConfigSecretName: test-encryptionconfig +``` + +# Example Credential Secret for Storing Backups in S3 + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: creds +type: Opaque +data: + accessKey: + secretKey: +``` + +# Example EncryptionConfiguration + +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: EncryptionConfiguration +resources: + - resources: + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - secretbox: + keys: + - name: key1 + secret: 
YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= +``` diff --git a/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/restore-configuration.md b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/restore-configuration.md new file mode 100644 index 0000000000..d39d508c59 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/restore-configuration.md @@ -0,0 +1,90 @@ +--- +title: Restore Configuration +shortTitle: Restore +weight: 2 +aliases: + - /rancher/v2.5/en/backups/v2.5/configuration/restore-config + - /rancher/v2.x/en/backups/v2.5/configuration/restore-config/ +--- + +The Restore Create page lets you provide details of the backup to restore from + +![](/img/backup_restore/restore/restore.png) + +- [Backup Source](#backup-source) + - [An Existing Backup Config](#an-existing-backup-config) + - [The default storage target](#the-default-storage-target) + - [An S3-compatible object store](#an-s3-compatible-object-store) +- [Encryption](#encryption) +- [Prune during restore](#prune-during-restore) +- [Getting the Backup Filename from S3](#getting-the-backup-filename-from-s3) + +# Backup Source +Provide details of the backup file and its storage location, which the operator will then use to perform the restore. Select from the following options to provide these details + + + + +### An existing backup config + +Selecting this option will populate the **Target Backup** dropdown with the Backups available in this cluster. Select the Backup from the dropdown, and that will fill out the **Backup Filename** field for you, and will also pass the backup source information from the selected Backup to the operator. + +![](/img/backup_restore/restore/existing.png) + +If the Backup custom resource does not exist in the cluster, you need to get the exact filename and provide the backup source details with the default storage target or an S3-compatible object store. + + +### The default storage target + +Select this option if you are restoring from a backup file that exists in the default storage location configured at the operator-level. The operator-level configuration is the storage location that was configured when the `rancher-backup` operator was installed or upgraded. Provide the exact filename in the **Backup Filename** field. + +![](/img/backup_restore/restore/default.png) + +### An S3-compatible object store + +Select this option if no default storage location is configured at the operator-level, OR if the backup file exists in a different S3 bucket than the one configured as the default storage location. Provide the exact filename in the **Backup Filename** field. Refer to [this section](#getting-the-backup-filename-from-s3) for exact steps on getting the backup filename from s3. Fill in all the details for the S3 compatible object store. Its fields are exactly same as ones for the `backup.StorageLocation` configuration in the [Backup custom resource.](backup-configuration.md#storage-location) + +![](/img/backup_restore/restore/s3store.png) + +# Encryption + +If the backup was created with encryption enabled, its file will have `.enc` suffix. Choosing such a Backup, or providing a backup filename with `.enc` suffix will display another dropdown named **Encryption Config Secret**. + +![](/img/backup_restore/restore/encryption.png) + +The Secret selected from this dropdown must have the same contents as the one used for the Backup custom resource while performing the backup. 
If the encryption configuration doesn't match, the restore will fail.
+
+The `Encryption Config Secret` dropdown will filter out and list only those Secrets that have this exact key.
+
+| YAML Directive Name | Description |
+| ---------------- | ---------------- |
+| `encryptionConfigSecretName` | Provide the name of the Secret from the `cattle-resources-system` namespace that contains the encryption config file. |
+
+> **Important**
+This field should only be set if the backup was created with encryption enabled. Providing the incorrect encryption config will cause the restore to fail.
+
+# Prune During Restore
+
+* **Prune**: In order to fully restore Rancher from a backup, and to go back to the exact state it was in when the backup was performed, we need to delete any additional resources that were created by Rancher after the backup was taken. The operator does so if the **Prune** flag is enabled. Prune is enabled by default and it is recommended to keep it enabled.
+* **Delete Timeout**: This is the amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again.
+
+| YAML Directive Name | Description |
+| ---------------- | ---------------- |
+| `prune` | Delete the resources managed by Rancher that are not present in the backup (Recommended). |
+| `deleteTimeoutSeconds` | Amount of time the operator will wait while deleting a resource before editing the resource to remove finalizers and attempt deletion again. |
+
+# Getting the Backup Filename from S3
+
+This is the name of the backup file that the `rancher-backup` operator will use to perform the restore.
+
+To obtain this file name from S3, go to your S3 bucket (and folder, if one was specified while performing the backup).
+
+Copy the filename and use it in your Restore custom resource. Assuming the name of your backup file is `backupfile`:
+
+- If your bucket name is `s3bucket` and no folder was specified, then the `backupFilename` to use will be `backupfile`.
+- If your bucket name is `s3bucket` and the base folder is `s3folder`, the `backupFilename` to use is still only `backupfile`.
+- If there is a subfolder inside `s3folder` called `s3sub` that has your backup file, then the `backupFilename` to use is `s3sub/backupfile`.
+
+| YAML Directive Name | Description |
+| ---------------- | ---------------- |
+| `backupFilename` | This is the name of the backup file that the `rancher-backup` operator will use to perform the restore. |
diff --git a/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/storage-configuration.md b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/storage-configuration.md
new file mode 100644
index 0000000000..df3aeac5b8
--- /dev/null
+++ b/versioned_docs/version-2.5/reference-guides/backup-restore-configuration/storage-configuration.md
@@ -0,0 +1,62 @@
+---
+title: Backup Storage Location Configuration
+shortTitle: Storage
+weight: 3
+aliases:
+  - /rancher/v2.5/en/backups/v2.5/configuration/storage-config
+  - /rancher/v2.x/en/backups/v2.5/configuration/storage-config/
+---
+
+Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible object store.
+
+Only one storage location can be configured at the operator level.
+ +- [Storage Location Configuration](#storage-location-configuration) + - [No Default Storage Location](#no-default-storage-location) + - [S3-compatible Object Store](#s3-compatible-object-store) + - [Use an existing StorageClass](#existing-storageclass) + - [Use an existing PersistentVolume](#existing-persistent-volume) +- [Encryption](#encryption) +- [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) + +# Storage Location Configuration + +### No Default Storage Location + +You can choose to not have any operator-level storage location configured. If you select this option, you must configure an S3-compatible object store as the storage location for each individual backup. + +### S3-compatible Object Store + +| Parameter | Description | +| -------------- | -------------- | +| Credential Secret | Choose the credentials for S3 from your secrets in Rancher. [Example](./examples.md#example-credential-secret-for-storing-backups-in-s3). | +| Bucket Name | Enter the name of the [S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html) where the backups will be stored. Default: `rancherbackups`. | +| Region | The [AWS region](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) where the S3 bucket is located. | +| Folder | The [folder in the S3 bucket](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-folders.html) where the backups will be stored. | +| Endpoint | The [S3 endpoint](https://docs.aws.amazon.com/general/latest/gr/s3.html) For example, `s3.us-west-2.amazonaws.com`. | +| Endpoint CA | The CA cert used to for the S3 endpoint. Default: base64 encoded CA cert | +| insecureTLSSkipVerify | Set to true if you are not using TLS. | + +### Existing StorageClass + +Installing the `rancher-backup` chart by selecting the StorageClass option will create a Persistent Volume Claim (PVC), and Kubernetes will in turn dynamically provision a Persistent Volume (PV) where all the backups will be saved by default. + +For information about creating storage classes refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage.md) + +> **Important** +It is highly recommended to use a StorageClass with a reclaim policy of "Retain". Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. +If no such StorageClass is available, after the PV is provisioned, make sure to edit its reclaim policy and set it to "Retain" before storing backups in it. + +### Existing Persistent Volume + +Select an existing Persistent Volume (PV) that will be used to store your backups. For information about creating PersistentVolumes in Rancher, refer to [this section.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage.md#2-add-a-persistent-volume-that-refers-to-the-persistent-storage) + +> **Important** +It is highly recommended to use a Persistent Volume with a reclaim policy of "Retain". 
Otherwise if the PVC created by the `rancher-backup` chart gets deleted (either during app upgrade, or accidentally), the PV will get deleted too, which means all backups saved in it will get deleted. + + +# Example values.yaml for the rancher-backup Helm Chart + +The documented `values.yaml` file that can be used to configure `rancher-backup` operator when the Helm CLI is used can be found in the [backup-restore-operator repository.](https://github.com/rancher/backup-restore-operator/blob/release/v1.0/charts/rancher-backup/values.yaml) + +For more information about `values.yaml` files and configuring Helm charts during installation, refer to the [Helm documentation.](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing) diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md similarity index 100% rename from content/rancher/v2.5/en/best-practices/rancher-managed/logging/_index.md rename to versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/logging-best-practices.md diff --git a/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md new file mode 100644 index 0000000000..d934db0404 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices.md @@ -0,0 +1,123 @@ +--- +title: Monitoring Best Practices +weight: 2 +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/monitoring + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/monitoring/ +--- + +Configuring sensible monitoring and alerting rules is vital for running any production workloads securely and reliably. This is not different when using Kubernetes and Rancher. Fortunately the integrated monitoring and alerting functionality makes this whole process a lot easier. + +The [Rancher monitoring documentation](../../../pages-for-subheaders/monitoring-and-alerting.md) describes how you can set up a complete Prometheus and Grafana stack. Out of the box this will scrape monitoring data from all system and Kubernetes components in your cluster and provide sensible dashboards and alerts for them to get started. But for a reliable setup, you also need to monitor your own workloads and adapt Prometheus and Grafana to your own specific use cases and cluster sizes. This document aims to give you best practices for this. + +- [What to Monitor](#what-to-monitor) +- [Configuring Prometheus Resource Usage](#configuring-prometheus-resource-usage) +- [Scraping Custom Workloads](#scraping-custom-workloads) +- [Monitoring in a (Micro)Service Architecture](#monitoring-in-a-micro-service-architecture) +- [Real User Monitoring](#real-user-monitoring) +- [Security Monitoring](#security-monitoring) +- [Setting up Alerts](#setting-up-alerts) + +# What to Monitor + +Kubernetes itself, as well as applications running inside of it, form a distributed system where different components interact with each other. For the whole system and each individual component, you have to ensure performance, availability, reliability and scalability. 
A good resource with more details and information is Google's free [Site Reliability Engineering Book](https://landing.google.com/sre/sre-book/), especially the chapter about [Monitoring distributed systems](https://landing.google.com/sre/sre-book/chapters/monitoring-distributed-systems/). + +# Configuring Prometheus Resource Usage + +When installing the integrated monitoring stack, Rancher allows to configure several settings that are dependent on the size of your cluster and the workloads running in it. This chapter covers these in more detail. + +### Storage and Data Retention + +The amount of storage needed for Prometheus directly correlates to the amount of time series and labels that you store and the data retention you have configured. It is important to note that Prometheus is not meant to be used as a long-term metrics storage. Data retention time is usually only a couple of days and not weeks or months. The reason for this is that Prometheus does not perform any aggregation on its stored metrics. This is great because aggregation can dilute data, but it also means that the needed storage grows linearly over time without retention. + +One way to calculate the necessary storage is to look at the average size of a storage chunk in Prometheus with this query + +``` +rate(prometheus_tsdb_compaction_chunk_size_bytes_sum[1h]) / rate(prometheus_tsdb_compaction_chunk_samples_sum[1h]) +``` + +Next, find out your data ingestion rate per second: + +``` +rate(prometheus_tsdb_head_samples_appended_total[1h]) +``` + +and then multiply this with the retention time, adding a few percentage points as buffer: + +``` +average chunk size in bytes * ingestion rate per second * retention time in seconds * 1.1 = necessary storage in bytes +``` + +You can find more information about how to calculate the necessary storage in this [blog post](https://www.robustperception.io/how-much-disk-space-do-prometheus-blocks-use). + +You can read more about the Prometheus storage concept in the [Prometheus documentation](https://prometheus.io/docs/prometheus/latest/storage). + +### CPU and Memory Requests and Limits + +In larger Kubernetes clusters Prometheus can consume quite a bit of memory. The amount of memory Prometheus needs directly correlates to the amount of time series and amount of labels it stores and the scrape interval in which these are filled. + +You can find more information about how to calculate the necessary memory in this [blog post](https://www.robustperception.io/how-much-ram-does-prometheus-2-x-need-for-cardinality-and-ingestion). + +The amount of necessary CPUs correlate with the amount of queries you are performing. + +### Federation and Long-term Storage + +Prometheus is not meant to store metrics for a long amount of time, but should only be used for short term storage. + +In order to store some, or all metrics for a long time, you can leverage Prometheus' [remote read/write](https://prometheus.io/docs/prometheus/latest/storage/#remote-storage-integrations) capabilities to connect it to storage systems like [Thanos](https://thanos.io/), [InfluxDB](https://www.influxdata.com/), [M3DB](https://www.m3db.io/), or others. 
You can find an example setup in this [blog post](https://rancher.com/blog/2020/prometheus-metric-federation). + +# Scraping Custom Workloads + +While the integrated Rancher Monitoring already scrapes system metrics from a cluster's nodes and system components, the custom workloads that you deploy on Kubernetes should also be scraped for data. For that, you can configure Prometheus to make an HTTP request to an endpoint of your applications at a certain interval. These endpoints should then return their metrics in a Prometheus format. + +In general, you want to scrape data from all the workloads running in your cluster so that you can use them for alerts or debugging issues. Often, you recognize that you need some data only when you actually need the metrics during an incident. It is good if that data has already been scraped and stored. Since Prometheus is only meant to be short-term metrics storage, scraping and keeping lots of data is usually not that expensive. If you are using a long-term storage solution with Prometheus, you can then still decide which data you actually persist and keep there. + +### About Prometheus Exporters + +A lot of third-party workloads like databases, queues, or web servers either already support exposing metrics in a Prometheus format, or there are so-called exporters available that translate between the tool's metrics and the format that Prometheus understands. Usually you can add these exporters as additional sidecar containers to the workload's Pods. A lot of Helm charts already include options to deploy the correct exporter. Additionally, you can find a curated list of exporters by Sysdig on [promcat.io](https://promcat.io/) and on [ExporterHub](https://exporterhub.io/). + +### Prometheus Support in Programming Languages and Frameworks + +To get your own custom application metrics into Prometheus, you have to collect and expose these metrics directly from your application's code. Fortunately, there are already libraries and integrations available to help with this for most popular programming languages and frameworks. One example is the Prometheus support in the [Spring Framework](https://docs.spring.io/spring-metrics/docs/current/public/prometheus). + +### ServiceMonitors and PodMonitors + +Once all your workloads expose metrics in a Prometheus format, you have to configure Prometheus to scrape them. Under the hood, Rancher uses the [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator). This makes it easy to add additional scraping targets with ServiceMonitors and PodMonitors. A lot of Helm charts already include an option to create these monitors directly. You can also find more information in the Rancher documentation. + +### Prometheus Push Gateway + +There are some workloads that are traditionally hard for Prometheus to scrape. Examples are short-lived workloads like Jobs and CronJobs, or applications that do not allow sharing data between individually handled incoming requests, like PHP applications. + +To still get metrics for these use cases, you can set up [prometheus-pushgateways](https://github.com/prometheus/pushgateway). The CronJob or PHP application would push metric updates to the pushgateway. The pushgateway aggregates and exposes them through an HTTP endpoint, which can then be scraped by Prometheus. + +### Prometheus Blackbox Monitor + +Sometimes it is useful to monitor workloads from the outside. For this, you can use the [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter), which allows probing any kind of endpoint over HTTP, HTTPS, DNS, TCP and ICMP. + +# Monitoring in a (Micro)Service Architecture + +If you have a (micro)service architecture where multiple individual workloads within your cluster are communicating with each other, it is really important to have detailed metrics and traces about this traffic so that you can understand how these workloads communicate and where a problem or bottleneck may be. + +Of course, you can monitor all this internal traffic in all your workloads and expose these metrics to Prometheus, but this can quickly become quite labor intensive. Service meshes like Istio, which can be installed with [a click](../../../pages-for-subheaders/istio.md) in Rancher, can do this automatically and provide rich telemetry about the traffic between all services. + +# Real User Monitoring + +Monitoring the availability and performance of all your internal workloads is vitally important for running stable, reliable and fast applications. But these metrics only show you part of the picture. To get a complete view, it is also necessary to know how your end users actually perceive your applications. For this, you can look into various [real user monitoring solutions](https://en.wikipedia.org/wiki/Real_user_monitoring). + +# Security Monitoring + +In addition to monitoring workloads to detect performance, availability or scalability problems, the cluster and the workloads running in it should also be monitored for potential security problems. A good starting point is to frequently run and alert on [CIS Scans](../../../pages-for-subheaders/cis-scans.md), which check whether the cluster is configured according to security best practices. + +For the workloads, you can have a look at Kubernetes and container security solutions like [Falco](https://falco.org/), [Aqua Kubernetes Security](https://www.aquasec.com/solutions/kubernetes-container-security/), or [Sysdig](https://sysdig.com/). + +# Setting up Alerts + +Getting all the metrics into a monitoring system and visualizing them in dashboards is great, but you also want to be proactively alerted if something goes wrong. + +The integrated Rancher monitoring already configures a sensible set of alerts that make sense in any Kubernetes cluster. You should extend these to cover your specific workloads and use cases. + +When setting up alerts, configure them for all the workloads that are critical to the availability of your applications. But also make sure that they are not too noisy. Ideally, every alert you receive should be about a problem that needs your attention and needs to be fixed. If you have alerts that are firing all the time but are not that critical, there is a danger that you start ignoring your alerts altogether and then miss the really important ones. Less may be more here. Focus on the most important metrics first, for example, alerting if your application is offline. Fix the problems that start to pop up, and then start to create more detailed alerts.
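+
+As a minimal sketch of such an availability alert: the `PrometheusRule` below assumes a workload that is already scraped under the (hypothetical) job label `demo-app`, and that the monitoring stack picks up rules from the `cattle-monitoring-system` namespace; adjust both to match your installation.
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: PrometheusRule
+metadata:
+  name: demo-app-alerts
+  # Assumed namespace of the integrated monitoring stack
+  namespace: cattle-monitoring-system
+spec:
+  groups:
+    - name: demo-app.availability
+      rules:
+        - alert: DemoAppDown
+          # Fires when every scraped instance of the demo-app job reports down for 5 minutes
+          expr: sum(up{job="demo-app"}) == 0
+          for: 5m
+          labels:
+            severity: critical
+          annotations:
+            summary: demo-app has been unreachable for 5 minutes
+```
+
+Keeping the expression and threshold this simple makes the alert easy to reason about; more detailed rules can follow once the basics are stable.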
+ +If an alert starts firing, but there is nothing you can do about it at the moment, it's also fine to silence the alert for a certain amount of time, so that you can look at it later. + +You can find more information on how to set up alerts and notification channels in the [Rancher Documentation](../../../pages-for-subheaders/monitoring-and-alerting.md). diff --git a/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md new file mode 100644 index 0000000000..9ab497223e --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere.md @@ -0,0 +1,62 @@ +--- +title: Best Practices for Rancher Managed vSphere Clusters +shortTitle: Rancher Managed Clusters in vSphere +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-managed/managed-vsphere + - /rancher/v2.x/en/best-practices/v2.5/rancher-managed/managed-vsphere/ +--- + +This guide outlines a reference architecture for provisioning downstream Rancher clusters in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. + +- [1. VM Considerations](#1-vm-considerations) +- [2. Network Considerations](#2-network-considerations) +- [3. Storage Considerations](#3-storage-considerations) +- [4. Backups and Disaster Recovery](#4-backups-and-disaster-recovery) + +
    + +![Solution Overview](/img/solution_overview.drawio.svg) + +# 1. VM Considerations + +### Leverage VM Templates to Construct the Environment + +To facilitate consistency across the deployed Virtual Machines across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customisation options. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across ESXi Hosts + +Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Downstream Cluster Nodes Across Datastores + +Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. + +### Configure VM's as Appropriate for Kubernetes + +It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. + +# 2. Network Considerations + +### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes + +Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. + +### Consistent IP Addressing for VM's + +Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +# 3. Storage Considerations + +### Leverage SSD Drives for ETCD Nodes + +ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. + +# 4. Backups and Disaster Recovery + +### Perform Regular Downstream Cluster Backups + +Kubernetes uses etcd to store all its data - from configuration, state and metadata. Backing this up is crucial in the event of disaster recovery. + +### Back up Downstream Node VMs + +Incorporate the Rancher downstream node VM's within a standard VM backup policy. 
\ No newline at end of file diff --git a/content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md similarity index 100% rename from content/rancher/v2.5/en/best-practices/rancher-managed/containers/_index.md rename to versioned_docs/version-2.5/reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers.md diff --git a/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md new file mode 100644 index 0000000000..3eef461d74 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere.md @@ -0,0 +1,94 @@ +--- +title: Installing Rancher in a vSphere Environment +shortTitle: On-Premises Rancher in vSphere +weight: 3 +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-server/rancher-in-vsphere + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/rancher-in-vsphere/ +--- + +This guide outlines a reference architecture for installing Rancher on an RKE Kubernetes cluster in a vSphere environment, in addition to standard vSphere best practices as documented by VMware. + +- [1. Load Balancer Considerations](#1-load-balancer-considerations) +- [2. VM Considerations](#2-vm-considerations) +- [3. Network Considerations](#3-network-considerations) +- [4. Storage Considerations](#4-storage-considerations) +- [5. Backups and Disaster Recovery](#5-backups-and-disaster-recovery) + +
    + +![Solution Overview](/img/rancher-on-prem-vsphere.svg) + +# 1. Load Balancer Considerations + +A load balancer is required to direct traffic to the Rancher workloads residing on the RKE nodes. + +### Leverage Fault Tolerance and High Availability + +Leverage the use of an external (hardware or software) load balancer that has inherit high-availability functionality (F5, NSX-T, Keepalived, etc). + +### Back Up Load Balancer Configuration + +In the event of a Disaster Recovery activity, availability of the Load balancer configuration will expedite the recovery process. + +### Configure Health Checks + +Configure the Load balancer to automatically mark nodes as unavailable if a health check is failed. For example, NGINX can facilitate this with: + +`max_fails=3 fail_timeout=5s` + +### Leverage an External Load Balancer + +Avoid implementing a software load balancer within the management cluster. + +### Secure Access to Rancher + +Configure appropriate Firewall / ACL rules to only expose access to Rancher + +# 2. VM Considerations + +### Size the VM's According to Rancher Documentation + +https://rancher.com/docs/rancher/v2.5/en/installation/requirements/ + +### Leverage VM Templates to Construct the Environment + +To facilitate the consistency of Virtual Machines deployed across the environment, consider the use of "Golden Images" in the form of VM templates. Packer can be used to accomplish this, adding greater customization options. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across ESXi Hosts + +Doing so will ensure node VM's are spread across multiple ESXi hosts - preventing a single point of failure at the host level. + +### Leverage DRS Anti-Affinity Rules (Where Possible) to Separate Rancher Cluster Nodes Across Datastores + +Doing so will ensure node VM's are spread across multiple datastores - preventing a single point of failure at the datastore level. + +### Configure VM's as Appropriate for Kubernetes + +It’s important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double-checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node. + +# 3. Network Considerations + +### Leverage Low Latency, High Bandwidth Connectivity Between ETCD Nodes + +Deploy etcd members within a single data center where possible to avoid latency overheads and reduce the likelihood of network partitioning. For most setups, 1Gb connections will suffice. For large clusters, 10Gb connections can reduce the time taken to restore from backup. + +### Consistent IP Addressing for VM's + +Each node used should have a static IP configured. In the case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +# 4. Storage Considerations + +### Leverage SSD Drives for ETCD Nodes + +ETCD is very sensitive to write latency. Therefore, leverage SSD disks where possible. + +# 5. Backups and Disaster Recovery + +### Perform Regular Management Cluster Backups + +Rancher stores its data in the ETCD datastore of the Kubernetes cluster it resides on. Like with any Kubernetes cluster, perform frequent, tested backups of this cluster. + +### Back up Rancher Cluster Node VMs + +Incorporate the Rancher management node VM's within a standard VM backup policy. 
diff --git a/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md new file mode 100644 index 0000000000..a88e581c8e --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/rancher-deployment-strategy.md @@ -0,0 +1,48 @@ +--- +title: Rancher Deployment Strategy +weight: 100 +aliases: + - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-strategies + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-strategies/ +--- + +There are two recommended deployment strategies for a Rancher server that manages downstream Kubernetes clusters. Each one has its own pros and cons. Read more about which one would fit best for your use case: + +* [Hub and Spoke](#hub-and-spoke-strategy) +* [Regional](#regional-strategy) + +# Hub & Spoke Strategy +--- + +In this deployment scenario, there is a single Rancher control plane managing Kubernetes clusters across the globe. The control plane would be run on a high-availability Kubernetes cluster, and there would be impact due to latencies. + +![](/img/bpg/hub-and-spoke.png) + +### Pros + +* Environments could have nodes and network connectivity across regions. +* Single control plane interface to view/see all regions and environments. +* Kubernetes does not require Rancher to operate and can tolerate losing connectivity to the Rancher control plane. + +### Cons + +* Subject to network latencies. +* If the control plane goes out, global provisioning of new services is unavailable until it is restored. However, each Kubernetes cluster can continue to be managed individually. + +# Regional Strategy +--- +In the regional deployment model a control plane is deployed in close proximity to the compute nodes. + +![](/img/bpg/regional.png) + +### Pros + +* Rancher functionality in regions stay operational if a control plane in another region goes down. +* Network latency is greatly reduced, improving the performance of functionality in Rancher. +* Upgrades of the Rancher control plane can be done independently per region. + +### Cons + +* Overhead of managing multiple Rancher installations. +* Visibility across global Kubernetes clusters requires multiple interfaces/panes of glass. +* Deploying multi-cluster apps in Rancher requires repeating the process for each Rancher server. diff --git a/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md new file mode 100644 index 0000000000..17a5b65aad --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/best-practices/rancher-server/tips-for-running-rancher.md @@ -0,0 +1,40 @@ +--- +title: Tips for Running Rancher +weight: 100 +aliases: + - /rancher/v2.5/en/best-practices/deployment-types + - /rancher/v2.5/en/best-practices/v2.5/rancher-server/deployment-types + - /rancher/v2.x/en/best-practices/v2.5/rancher-server/deployment-types/ +--- + +This guide is geared toward use cases where Rancher is used to manage downstream Kubernetes clusters. The high-availability setup is intended to prevent losing access to downstream clusters if the Rancher server is not available. 
+ +A high-availability Kubernetes installation, defined as an installation of Rancher on a Kubernetes cluster with at least three nodes, should be used in any production installation of Rancher, as well as any installation deemed "important." Multiple Rancher instances running on multiple nodes ensure high availability that cannot be accomplished with a single node environment. + +If you are installing Rancher in a vSphere environment, refer to the best practices documented [here.](on-premises-rancher-in-vsphere.md) + +When you set up your high-availability Rancher installation, consider the following: + +### Run Rancher on a Separate Cluster +Don't run other workloads or microservices in the Kubernetes cluster that Rancher is installed on. + +### Make sure nodes are configured correctly for Kubernetes ### +It's important to follow K8s and etcd best practices when deploying your nodes, including disabling swap, double checking you have full network connectivity between all machines in the cluster, using unique hostnames, MAC addresses, and product_uuids for every node, checking that all correct ports are opened, and deploying with ssd backed etcd. More details can be found in the [kubernetes docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) and [etcd's performance op guide](https://etcd.io/docs/v3.4/op-guide/performance/). + +### When using RKE: Back up the Statefile +RKE keeps record of the cluster state in a file called `cluster.rkestate`. This file is important for the recovery of a cluster and/or the continued maintenance of the cluster through RKE. Because this file contains certificate material, we strongly recommend encrypting this file before backing up. After each run of `rke up` you should backup the state file. + +### Run All Nodes in the Cluster in the Same Datacenter +For best performance, run all three of your nodes in the same geographic datacenter. If you are running nodes in the cloud, such as AWS, run each node in a separate Availability Zone. For example, launch node 1 in us-west-2a, node 2 in us-west-2b, and node 3 in us-west-2c. + +### Development and Production Environments Should be Similar +It's strongly recommended to have a "staging" or "pre-production" environment of the Kubernetes cluster that Rancher runs on. This environment should mirror your production environment as closely as possible in terms of software and hardware configuration. + +### Monitor Your Clusters to Plan Capacity +The Rancher server's Kubernetes cluster should run within the [system and hardware requirements](../../../pages-for-subheaders/installation-requirements.md) as closely as possible. The more you deviate from the system and hardware requirements, the more risk you take. + +However, metrics-driven capacity planning analysis should be the ultimate guidance for scaling Rancher, because the published requirements take into account a variety of workload types. + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution, and Grafana, which lets you visualize the metrics from Prometheus. 
+ +After you [enable monitoring](../../../pages-for-subheaders/monitoring-and-alerting.md) in the cluster, you can set up [a notification channel](../../../pages-for-subheaders/monitoring-and-alerting.md) and alerts to let you know if your cluster is approaching its capacity. You can also use the Prometheus and Grafana monitoring framework to establish a baseline for key metrics as you scale. \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/cli-with-rancher/kubectl-utility.md b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/kubectl-utility.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/kubectl-utility.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cli-with-rancher/rancher-cli.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md new file mode 100644 index 0000000000..ab62161e40 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2.md @@ -0,0 +1,47 @@ +--- +title: EC2 Node Template Configuration +weight: 1 +aliases: + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ +--- + +For more details about EC2, nodes, refer to the official documentation for the [EC2 Management Console](https://aws.amazon.com/ec2). +### Region + +In the **Region** field, select the same region that you used when creating your cloud credentials. + +### Cloud Credentials + +Your AWS account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) + +See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. + +See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. 
+ +See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach an IAM + +See our three example JSON policies: + +- [Example IAM Policy](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy) +- [Example IAM Policy with PassRole](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider](../../../../pages-for-subheaders/set-up-cloud-providers.md) or want to pass an IAM Profile to an instance) +- [Example IAM Policy to allow encrypted EBS volumes](../../../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster.md#example-iam-policy-to-allow-encrypted-ebs-volumes) policy to an user. + +### Authenticate & Configure Nodes + +Choose an availability zone and network settings for your cluster. + +### Security Group + +Choose the default security group or configure a security group. + +Please refer to [Amazon EC2 security group when using Node Driver](../../../../getting-started/installation-and-upgrade/installation-requirements/port-requirements.md#rancher-aws-ec2-security-group) to see what rules are created in the `rancher-nodes` Security Group. + +### Instance Options + +Configure the instances that will be created. Make sure you configure the correct **SSH User** for the configured AMI. It is possible that a selected region does not support the default instance type. In this scenario you must select an instance type that does exist, otherwise an error will occur stating the requested configuration is not supported. + +If you need to pass an **IAM Instance Profile Name** (not ARN), for example, when you want to use a [Kubernetes Cloud Provider](../../../../pages-for-subheaders/set-up-cloud-providers.md), you will need an additional permission in your policy. See [Example IAM policy with PassRole](#example-iam-policy-with-passrole) for an example policy. + +### Engine Options + +In the **Engine Options** section of the node template, you can configure the Docker daemon. You may want to specify the docker version or a Docker registry mirror. 
diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/_index.md rename to versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure.md diff --git a/content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md similarity index 100% rename from content/rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/_index.md rename to versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean.md diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md new file mode 100644 index 0000000000..b70a29b98d --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere.md @@ -0,0 +1,95 @@ +--- +title: VSphere Node Template Configuration +weight: 2 +aliases: + - /rancher/v2.5/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/node-template-reference + - /rancher/v2.5/en/cluster-provisionin/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/enabling-uuids + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/ +--- + +The following node template configuration reference applies to Rancher v2.3.3+. + +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Networks](#networks) +- [Node tags and custom attributes](#node-tags-and-custom-attributes) +- [cloud-init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | +|:----------------------|:--------:|:-----| +| Cloud Credentials | * | Your vSphere account access information, stored in a [cloud credential.](../../../user-settings/manage-cloud-credentials.md) | + +Your cloud credential has these fields: + +| Credential Field | Description | +|-----------------|--------------| +| vCenter or ESXi Server | Enter the vCenter or ESXi hostname/IP. ESXi is the virtualization platform where you create and run virtual machines and virtual appliances. vCenter Server is the service through which you manage multiple hosts connected in a network and pool host resources. | +| Port | Optional: configure configure the port of the vCenter or ESXi server. | +| Username and password | Enter your vSphere login username and password. | + +# Scheduling + +Choose what hypervisor the virtual machine will be scheduled to. + +The fields in the **Scheduling** section should auto-populate with the data center and other scheduling options that are available to you in vSphere. 
+ +| Field | Required | Explanation | +|---------|---------------|-----------| +| Data Center | * | Choose the name/path of the data center where the VM will be scheduled. | +| Resource Pool | | Name of the resource pool to schedule the VMs in. Resource pools can be used to partition available CPU and memory resources of a standalone host or cluster, and they can also be nested. Leave blank for standalone ESXi. If not specified, the default resource pool is used. | +| Data Store | * | If you have a data store cluster, you can toggle the **Data Store** field. This lets you select a data store cluster where your VM will be scheduled to. If the field is not toggled, you can select an individual disk. | +| Folder | | Name of a folder in the datacenter to create the VMs in. Must already exist. The VM folders in this dropdown menu directly correspond to your VM folders in vSphere. The folder name should be prefaced with `vm/` in your vSphere config file. | +| Host | | The IP of the host system to schedule VMs in. Leave this field blank for a standalone ESXi or for a cluster with DRS (Distributed Resource Scheduler). If specified, the host system's pool will be used and the **Resource Pool** parameter will be ignored. | + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. + +| Parameter | Required | Description | +|:----------------|:--------:|:-----------| +| CPUs | * | Number of vCPUS to assign to VMs. | +| Memory | * | Amount of memory to assign to VMs. | +| Disk | * | Size of the disk (in MB) to attach to the VMs. | +| Creation method | * | The method for setting up an operating system on the node. The operating system can be installed from an ISO or from a VM template. Depending on the creation method, you will also have to specify a VM template, content library, existing VM, or ISO. For more information on creation methods, refer to [About VM Creation Methods.](#about-vm-creation-methods) | +| Cloud Init | | URL of a `cloud-config.yml` file or URL to provision VMs with. This file allows further customization of the operating system, such as network configuration, DNS servers, or system daemons. The operating system must support `cloud-init`. | +| Networks | | Name(s) of the network to attach the VM to. | +| Configuration Parameters used for guestinfo | | Additional configuration parameters for the VMs. These correspond to the [Advanced Settings](https://kb.vmware.com/s/article/1016098) in the vSphere console. Example use cases include providing RancherOS [guestinfo]({{}}/os/v1.x/en/installation/cloud/vmware-esxi/#vmware-guestinfo) parameters or enabling disk UUIDs for the VMs (`disk.EnableUUID=TRUE`). | + + +### About VM Creation Methods + +In the **Creation method** field, configure the method used to provision VMs in vSphere. Available options include creating VMs that boot from a RancherOS ISO or creating VMs by cloning from an existing virtual machine or [VM template](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-F7BF0E6B-7C4F-4E46-8BBF-76229AEA7220.html). 
+ +The existing VM or template may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [NoCloud datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html). + +Choose the way that the VM will be created: + +- **Deploy from template: Data Center:** Choose a VM template that exists in the data center that you selected. +- **Deploy from template: Content Library:** First, select the [Content Library](https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vm_admin.doc/GUID-254B2CE8-20A8-43F0-90E8-3F6776C2C896.html) that contains your template, then select the template from the populated list **Library templates.** +- **Clone an existing virtual machine:** In the **Virtual machine** field, choose an existing VM that the new VM will be cloned from. +- **Install from boot2docker ISO:** Ensure that the **OS ISO URL** field contains the URL of a VMware ISO release for RancherOS (`rancheros-vmware.iso`). Note that this URL must be accessible from the nodes running your Rancher server installation. + +# Networks + +The node template now allows a VM to be provisioned with multiple networks. In the **Networks** field, you can now click **Add Network** to add any networks available to you in vSphere. + +# Node Tags and Custom Attributes + +Tags allow you to attach metadata to objects in the vSphere inventory to make it easier to sort and search for these objects. + +For tags, all your vSphere tags will show up as options to select from in your node template. + +In the custom attributes, Rancher will let you select all the custom attributes you have already set up in vSphere. The custom attributes are keys and you can enter values for each one. + +> **Note:** Custom attributes are a legacy feature that will eventually be removed from vSphere. + +# cloud-init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users, authorizing SSH keys or setting up the network. + +To make use of cloud-init initialization, create a cloud config file using valid YAML syntax and paste the file content in the the **Cloud Init** field. Refer to the [cloud-init documentation.](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. + +Note that cloud-init is not supported when using the ISO creation method. \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md new file mode 100644 index 0000000000..93f0b7fffa --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration.md @@ -0,0 +1,424 @@ +--- +title: EKS Cluster Configuration Reference +shortTitle: EKS Cluster Configuration +weight: 2 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. 
+ +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.](../../user-settings/manage-cloud-credentials.md) | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Secrets Encryption + + + +Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) + +### API Server Endpoint Access + + + +Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Private-only API Endpoints + +If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. + +There are two ways to avoid this extra manual step: +- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. +- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). + +### Public Access Endpoints + + + +Optionally limit access to the public endpoint via explicit CIDR blocks. + +If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
+ +One of the following is required to enable private access: +- Rancher's IP must be part of an allowed CIDR block +- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group + +For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Subnet + + + +| Option | Description | +| ------- | ------------ | +| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | +| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Logging + + + +Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. + +Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. + +For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) + +### Managed Node Groups + + + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. + +For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) + +#### Bring your own launch template + +A launch template ID and version can be provided in order to easily configure the EC2 instances in a node group. If a launch template is provided, then none of the settings below will be configurable in Rancher. 
Therefore, using a launch template would require that all the necessary and desired settings from the list below would need to be specified in the launch template. Also note that if a launch template ID and version is provided, then only the template version can be updated. Using a new template ID would require creating a new managed node group. + +| Option | Description | Required/Optional | +| ------ | ----------- | ----------------- | +| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | Required | +| Image ID | Specify a custom AMI for the nodes. Custom AMIs used with EKS must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) | Optional | +| Node Volume Size | The launch template must specify an EBS volume with the desired size | Required | +| SSH Key | A key to be added to the instances to provide SSH access to the nodes | Optional | +| User Data | Cloud init script in [MIME multi-part format](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data) | Optional | +| Instance Resource Tags | Tag each EC2 instance in the node group | Optional | + +#### Rancher-managed launch templates + +If you do not specify a launch template, then you will be able to configure the above options in the Rancher UI and all of them can be updated after creation. In order to take advantage of all of these options, Rancher will create and manage a launch template for you. Each cluster in Rancher will have one Rancher-managed launch template and each managed node group that does not have a specified launch template will have one version of the managed launch template. The name of this launch template will have the prefix "rancher-managed-lt-" followed by the display name of the cluster. In addition, the Rancher-managed launch template will be tagged with the key "rancher-managed-template" and value "do-not-modify-or-delete" to help identify it as Rancher-managed. It is important that this launch template and its versions not be modified, deleted, or used with any other clusters or managed node groups. Doing so could result in your node groups being "degraded" and needing to be destroyed and recreated. + +#### Custom AMIs + +If you specify a custom AMI, whether in a launch template or in Rancher, then the image must be [configured properly](https://aws.amazon.com/premiumsupport/knowledge-center/eks-custom-linux-ami/) and you must provide user data to [bootstrap the node](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami). This is considered an advanced use case and understanding the requirements is imperative. + +If you specify a launch template that does not contain a custom AMI, then Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version and selected region. You can also select a [GPU enabled instance](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html#gpu-ami) for workloads that would benefit from it. 
+ +>**Note** +>The GPU enabled instance setting in Rancher is ignored if a custom AMI is provided, either in the dropdown or in a launch template. + +#### Spot instances + +Spot instances are now [supported by EKS](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html#managed-node-group-capacity-types-spot). If a launch template is specified, Amazon recommends that the template not provide an instance type. Instead, Amazon recommends providing multiple instance types. If the "Request Spot Instances" checkbox is enabled for a node group, then you will have the opportunity to provide multiple instance types. + +>**Note** +>Any selection you made in the instance type dropdown will be ignored in this situation and you must specify at least one instance type to the "Spot Instance Types" section. Furthermore, a launch template used with EKS cannot request spot instances. Requesting spot instances must be part of the EKS configuration. + +#### Node Group Settings + +The following settings are also configurable. All of these except for the "Node Group Name" are editable after the node group is created. + +| Option | Description | +| ------- | ------------ | +| Node Group Name | The name of the node group. | +| Desired ASG Size | The desired number of instances. | +| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Labels | Kubernetes labels applied to the nodes in the managed node group. Note: Invalid labels can prevent upgrades or can prevent Rancher from starting. For details on label syntax requirements, see the [Kubernetes documentation.](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) | +| Tags | These are tags for the managed node group and do not propagate to any of the associated resources. | + + + + +### Changes in Rancher v2.5 + +More EKS options can be configured when you create an EKS cluster in Rancher, including the following: + +- Managed node groups +- Desired size, minimum size, maximum size (requires the Cluster Autoscaler to be installed) +- Control plane logging +- Secrets encryption with KMS + +The following capabilities have been added for configuring EKS clusters in Rancher: + +- GPU support +- Exclusively use managed nodegroups that come with the most up-to-date AMIs +- Add new nodes +- Upgrade nodes +- Add and remove node groups +- Disable and enable private access +- Add restrictions to public access +- Use your cloud credentials to create the EKS cluster instead of passing in your access key and secret key + +Due to the way that the cluster data is synced with EKS, if the cluster is modified from another source, such as in the EKS console, and in Rancher within five minutes, it could cause some changes to be overwritten. For information about how the sync works and how to configure it, refer to [this section](#syncing). + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. 
+ +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Cloud Credentials | Select the cloud credentials that you created for your IAM policy. For more information on creating cloud credentials in Rancher, refer to [this page.](../../user-settings/manage-cloud-credentials.md) | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Secrets Encryption + + + +Optional: To encrypt secrets, select or enter a key created in [AWS Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html) + +### API Server Endpoint Access + + + +Configuring Public/Private API access is an advanced use case. For details, refer to the EKS cluster endpoint access control [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Private-only API Endpoints + +If you enable private and disable public API endpoint access when creating a cluster, then there is an extra step you must take in order for Rancher to connect to the cluster successfully. In this case, a pop-up will be displayed with a command that you will run on the cluster to register it with Rancher. Once the cluster is provisioned, you can run the displayed command anywhere you can connect to the cluster's Kubernetes API. + +There are two ways to avoid this extra manual step: +- You can create the cluster with both private and public API endpoint access on cluster creation. You can disable public access after the cluster is created and in an active state and Rancher will continue to communicate with the EKS cluster. +- You can ensure that Rancher shares a subnet with the EKS cluster. Then security groups can be used to enable Rancher to communicate with the cluster's API endpoint. In this case, the command to register the cluster is not needed, and Rancher will be able to communicate with your cluster. For more information on configuring security groups, refer to the [security groups documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). + +### Public Access Endpoints + + + +Optionally limit access to the public endpoint via explicit CIDR blocks. + +If you limit access to specific CIDR blocks, then it is recommended that you also enable the private access to avoid losing network communication to the cluster. 
+ +One of the following is required to enable private access: +- Rancher's IP must be part of an allowed CIDR block +- Private access should be enabled, and Rancher must share a subnet with the cluster and have network access to the cluster, which can be configured with a security group + +For more information about public and private access to the cluster endpoint, refer to the [Amazon EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) + +### Subnet + + + +| Option | Description | +| ------- | ------------ | +| Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC with 3 public subnets. | +| Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your Control Plane and nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html). | + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Logging + + + +Configure control plane logs to send to Amazon CloudWatch. You are charged the standard CloudWatch Logs data ingestion and storage costs for any logs sent to CloudWatch Logs from your clusters. + +Each log type corresponds to a component of the Kubernetes control plane. To learn more about these components, see [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) in the Kubernetes documentation. + +For more information on EKS control plane logging, refer to the official [documentation.](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) + +### Managed Node Groups + + + +Amazon EKS managed node groups automate the provisioning and lifecycle management of nodes (Amazon EC2 instances) for Amazon EKS Kubernetes clusters. + +For more information about how node groups work and how they are configured, refer to the [EKS documentation.](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) + +Amazon will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the Kubernetes version. You can configure whether the AMI has GPU enabled. 
+ +| Option | Description | +| ------- | ------------ | +| Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. | +| Maximum ASG Size | The maximum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | +| Minimum ASG Size | The minimum number of instances. This setting won't take effect until the [Cluster Autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html) is installed. | + + + + +### Account Access + + + +Complete each drop-down and field using the information obtained for your IAM policy. + +| Setting | Description | +| ---------- | -------------------------------------------------------------------------------------------------------------------- | +| Region | From the drop-down choose the geographical region in which to build your cluster. | +| Access Key | Enter the access key that you created for your IAM policy. | +| Secret Key | Enter the secret key that you created for your IAM policy. | + +### Service Role + + + +Choose a [service role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). + +Service Role | Description +-------------|--------------------------- +Standard: Rancher generated service role | If you choose this role, Rancher automatically adds a service role for use with the cluster. +Custom: Choose from your existing service roles | If you choose this role, Rancher lets you choose from service roles that you're already created within AWS. For more information on creating a custom service role in AWS, see the [Amazon documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#create-service-linked-role). + +### Public IP for Worker Nodes + + + +Your selection for this option determines what options are available for **VPC & Subnet**. + +Option | Description +-------|------------ +Yes | When your cluster nodes are provisioned, they're assigned a both a private and public IP address. +No: Private IPs only | When your cluster nodes are provisioned, they're assigned only a private IP address.

    If you choose this option, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. + +### VPC & Subnet + + + +The available options depend on the [public IP for worker nodes.](#public-ip-for-worker-nodes) + +Option | Description + -------|------------ + Standard: Rancher generated VPC and Subnet | While provisioning your cluster, Rancher generates a new VPC and Subnet. + Custom: Choose from your existing VPC and Subnets | While provisioning your cluster, Rancher configures your nodes to use a VPC and Subnet that you've already [created in AWS](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html). If you choose this option, complete the remaining steps below. + + For more information, refer to the AWS documentation for [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html). Follow one of the sets of instructions below based on your selection from the previous step. + +- [What Is Amazon VPC?](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) +- [VPCs and Subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + + +If you choose to assign a public IP address to your cluster's worker nodes, you have the option of choosing between a VPC that's automatically generated by Rancher (i.e., **Standard: Rancher generated VPC and Subnet**), or a VPC that you've already created with AWS (i.e., **Custom: Choose from your existing VPC and Subnets**). Choose the option that best fits your use case. + +
    + Click to expand + + If you're using **Custom: Choose from your existing VPC and Subnets**: + + (If you're using **Standard**, skip to the [instance options](#select-instance-options-2-4).) + + 1. Make sure **Custom: Choose from your existing VPC and Subnets** is selected. + + 1. From the drop-down that displays, choose a VPC. + + 1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + + 1. Click **Next: Select Security Group**. +
    + +If your worker nodes have Private IPs only, you must also choose a **VPC & Subnet** that allow your instances to access the internet. This access is required so that your worker nodes can connect to the Kubernetes control plane. + +
    + Click to expand + +Follow the steps below. + +>**Tip:** When using only private IP addresses, you can provide your nodes internet access by creating a VPC constructed with two subnets, a private set and a public set. The private set should have its route tables configured to point toward a NAT in the public set. For more information on routing traffic from private subnets, please see the [official AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html). + +1. From the drop-down that displays, choose a VPC. + +1. Click **Next: Select Subnets**. Then choose one of the **Subnets** that displays. + +
    + +### Security Group + + + +Amazon Documentation: + +- [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) +- [Security Groups for Your VPC](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +- [Create a Security Group](https://docs.aws.amazon.com/vpc/latest/userguide/getting-started-ipv4.html#getting-started-create-security-group) + +### Instance Options + + + +The instance type and size of your worker nodes affect how many IP addresses each worker node will have available. See this [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) for more information. + +Option | Description +-------|------------ +Instance Type | Choose the [hardware specs](https://aws.amazon.com/ec2/instance-types/) for the instance you're provisioning. +Custom AMI Override | If you want to use a custom [Amazon Machine Image](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html#creating-an-ami) (AMI), specify it here. By default, Rancher will use the [EKS-optimized AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html) for the EKS version that you chose. +Desired ASG Size | The number of instances that your cluster will provision. +User Data | Custom commands can be passed to perform automated configuration tasks. **WARNING: Modifying this may cause your nodes to be unable to join the cluster.** _Note: Available as of v2.2.0_ + +
    +
    + +### Configuring the Refresh Interval + + + + +The `eks-refresh-cron` setting is deprecated. It has been migrated to the `eks-refresh` setting, which is an integer representing seconds. + +The default value is 300 seconds. + +The syncing interval can be changed by running `kubectl edit setting eks-refresh`. + +If the `eks-refresh-cron` setting was previously set, the migration will happen automatically. + +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. + + + + +It is possible to change the refresh interval through the setting `eks-refresh-cron`. This setting accepts values in the Cron format. The default is `*/5 * * * *`. + +The shorter the refresh window, the less likely any race conditions will occur, but it does increase the likelihood of encountering request limits that may be in place for AWS APIs. + + + diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md similarity index 100% rename from content/rancher/v2.5/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md rename to versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters.md diff --git a/content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference.md similarity index 100% rename from content/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/_index.md rename to versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference.md diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md new file mode 100644 index 0000000000..f2296cb054 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md @@ -0,0 +1,79 @@ +--- +title: RKE Cluster Configuration +weight: 1 +--- + +In [clusters launched by RKE](../../../pages-for-subheaders/launch-kubernetes-with-rancher.md), you can edit any of the remaining options that follow. + +- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) +- [Editing Clusters with YAML](#editing-clusters-with-yaml) +- [Updating ingress-nginx](#updating-ingress-nginx) + +# Configuration Options in the Rancher UI + +To edit your cluster, open the **Global** view, make sure the **Clusters** tab is selected, and then select **⋮ > Edit** for the cluster that you want to edit. + +Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. 
For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +### Kubernetes Version + +The version of Kubernetes installed on each cluster node. For more detail, see [Upgrading Kubernetes](../../../getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes.md). + +### Network Provider + +The container networking interface (CNI) that powers networking for your cluster.

    **Note:** You can only choose this option while provisioning your cluster. It cannot be edited later. + +### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +Before Rancher v2.5.8, project network isolation is only available if you are using the Canal network plugin for RKE. + +In v2.5.8+, project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### Nginx Ingress + +If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use Nginx ingress within the cluster. + +### Metrics Server Monitoring + +Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. + +### Pod Security Policy Support + +Enables [pod security policies](../../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies.md) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. + +### Docker version on nodes + +Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. + +### Docker Root Directory + +The directory on your cluster nodes where you've installed Docker. If you install Docker on your nodes to a non-default directory, update this path. + +### Default Pod Security Policy + +If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. + +### Cloud Provider + +If you're using a cloud provider to host cluster nodes launched by RKE, enable [this option](../../../pages-for-subheaders/set-up-cloud-providers.md) so that you can use the cloud provider's native features. If you want to store persistent data for your cloud-hosted cluster, this option is required. + +# Editing Clusters with YAML + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +- To edit an RKE config file directly from the Rancher UI, click **Edit as YAML**. +- To read from an existing RKE file, click **Read from File**. + +![image](/img/cluster-options-yaml.png) + +For an example of RKE config file syntax, see the [RKE documentation](https://rancher.com/docs/rke/latest/en/example-yamls/). + +For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.](https://rancher.com/docs/rke/latest/en/config-options/) + +# Updating ingress-nginx + +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. 
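+
+If you're not sure which strategy applies to your cluster, you can check it directly and, if needed, delete the controller pods. This is only a sketch that assumes the RKE defaults of an `nginx-ingress-controller` DaemonSet in the `ingress-nginx` namespace with the `app=ingress-nginx` label; the names in your cluster may differ:
+
+```bash
+# Show the current update strategy of the RKE-deployed ingress controller
+kubectl -n ingress-nginx get daemonset nginx-ingress-controller \
+  -o jsonpath='{.spec.updateStrategy.type}'
+
+# When the strategy is OnDelete, deleting the pods recreates them at the expected version
+kubectl -n ingress-nginx delete pods -l app=ingress-nginx
+```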
+ +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. \ No newline at end of file diff --git a/content/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/_index.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md similarity index 100% rename from content/rancher/v2.5/en/cluster-admin/editing-clusters/syncing/_index.md rename to versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters.md diff --git a/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md new file mode 100644 index 0000000000..ca2d507c4a --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options.md @@ -0,0 +1,58 @@ +--- +title: Rancher Agent Options +weight: 2500 +aliases: + - /rancher/v2.5/en/admin-settings/agent-options/ + - /rancher/v2.5/en/cluster-provisioning/custom-clusters/agent-options + - /rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/ +--- + +Rancher deploys an agent on each node to communicate with the node. This pages describes the options that can be passed to the agent. To use these options, you will need to [create a cluster with custom nodes](../../../../pages-for-subheaders/use-existing-nodes.md) and add the options to the generated `docker run` command when adding a node. + +For an overview of how Rancher communicates with downstream clusters using node agents, refer to the [architecture section.](../../../../pages-for-subheaders/rancher-manager-architecture.md#3-node-agents) + +## General options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--server` | `CATTLE_SERVER` | The configured Rancher `server-url` setting which the agent connects to | +| `--token` | `CATTLE_TOKEN` | Token that is needed to register the node in Rancher | +| `--ca-checksum` | `CATTLE_CA_CHECKSUM` | The SHA256 checksum of the configured Rancher `cacerts` setting to validate | +| `--node-name` | `CATTLE_NODE_NAME` | Override the hostname that is used to register the node (defaults to `hostname -s`) | +| `--label` | `CATTLE_NODE_LABEL` | Add node labels to the node. For multiple labels, pass additional `--label` options. (`--label key=value`) | +| `--taints` | `CATTLE_NODE_TAINTS` | Add node taints to the node. For multiple taints, pass additional `--taints` options. 
(`--taints key=value:effect`) | + +## Role options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--all-roles` | `ALL=true` | Apply all roles (`etcd`,`controlplane`,`worker`) to the node | +| `--etcd` | `ETCD=true` | Apply the role `etcd` to the node | +| `--controlplane` | `CONTROL=true` | Apply the role `controlplane` to the node | +| `--worker` | `WORKER=true` | Apply the role `worker` to the node | + +## IP address options + +| Parameter | Environment variable | Description | +| ---------- | -------------------- | ----------- | +| `--address` | `CATTLE_ADDRESS` | The IP address the node will be registered with (defaults to the IP used to reach `8.8.8.8`) | +| `--internal-address` | `CATTLE_INTERNAL_ADDRESS` | The IP address used for inter-host communication on a private network | + +### Dynamic IP address options + +For automation purposes, you can't have a specific IP address in a command as it has to be generic to be used for every node. For this, we have dynamic IP address options. They are used as a value to the existing IP address options. This is supported for `--address` and `--internal-address`. + +| Value | Example | Description | +| ---------- | -------------------- | ----------- | +| Interface name | `--address eth0` | The first configured IP address will be retrieved from the given interface | +| `ipify` | `--address ipify` | Value retrieved from `https://api.ipify.org` will be used | +| `awslocal` | `--address awslocal` | Value retrieved from `http://169.254.169.254/latest/meta-data/local-ipv4` will be used | +| `awspublic` | `--address awspublic` | Value retrieved from `http://169.254.169.254/latest/meta-data/public-ipv4` will be used | +| `doprivate` | `--address doprivate` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/private/0/ipv4/address` will be used | +| `dopublic` | `--address dopublic` | Value retrieved from `http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address` will be used | +| `azprivate` | `--address azprivate` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2017-08-01&format=text` will be used | +| `azpublic` | `--address azpublic` | Value retrieved from `http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text` will be used | +| `gceinternal` | `--address gceinternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip` will be used | +| `gceexternal` | `--address gceexternal` | Value retrieved from `http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip` will be used | +| `packetlocal` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/local-ipv4` will be used | +| `packetpublic` | `--address packetlocal` | Value retrieved from `https://metadata.packet.net/2009-04-04/meta-data/public-ipv4` will be used | diff --git 
a/versioned_docs/version-2.5/reference-guides/configure-openldap/openldap-config-reference.md b/versioned_docs/version-2.5/reference-guides/configure-openldap/openldap-config-reference.md new file mode 100644 index 0000000000..06b2d82c04 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/configure-openldap/openldap-config-reference.md @@ -0,0 +1,88 @@ +--- +title: OpenLDAP Configuration Reference +weight: 2 +aliases: + - /rancher/v2.x/en/admin-settings/authentication/openldap/openldap-config/ +--- + +This section is intended to be used as a reference when setting up an OpenLDAP authentication provider in Rancher. + +For further details on configuring OpenLDAP, refer to the [official documentation.](https://www.openldap.org/doc/) + +> Before you proceed with the configuration, please familiarize yourself with the concepts of [External Authentication Configuration and Principal Users](../../pages-for-subheaders/about-authentication.md#external-authentication-configuration-and-principal-users). + +- [Background: OpenLDAP Authentication Flow](#background-openldap-authentication-flow) +- [OpenLDAP server configuration](#openldap-server-configuration) +- [User/group schema configuration](#user-group-schema-configuration) + - [User schema configuration](#user-schema-configuration) + - [Group schema configuration](#group-schema-configuration) + +## Background: OpenLDAP Authentication Flow + +1. When a user attempts to login with his LDAP credentials, Rancher creates an initial bind to the LDAP server using a service account with permissions to search the directory and read user/group attributes. +2. Rancher then searches the directory for the user by using a search filter based on the provided username and configured attribute mappings. +3. Once the user has been found, he is authenticated with another LDAP bind request using the user's DN and provided password. +4. Once authentication succeeded, Rancher then resolves the group memberships both from the membership attribute in the user's object and by performing a group search based on the configured user mapping attribute. + +# OpenLDAP Server Configuration + +You will need to enter the address, port, and protocol to connect to your OpenLDAP server. `389` is the standard port for insecure traffic, `636` for TLS traffic. + +> **Using TLS?** +> +> If the certificate used by the OpenLDAP server is self-signed or not from a recognized certificate authority, make sure have at hand the CA certificate (concatenated with any intermediate certificates) in PEM format. You will have to paste in this certificate during the configuration so that Rancher is able to validate the certificate chain. + +If you are in doubt about the correct values to enter in the user/group Search Base configuration fields, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +
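+
+As a quick sanity check before filling in the form, you can verify the service account bind and the search bases from any machine with the OpenLDAP client tools installed. This is a minimal sketch; the hostname, DNs, and filter below are placeholder assumptions, not defaults:
+
+```bash
+# Bind as the service account and list a few entries under the intended User Search Base
+ldapsearch -x -H ldap://ldap.acme.com:389 \
+  -D "cn=rancher-bind,dc=acme,dc=com" -W \
+  -b "ou=people,dc=acme,dc=com" \
+  "(objectClass=inetOrgPerson)" dn uid
+```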
    OpenLDAP Server Parameters
    + +| Parameter | Description | +|:--|:--| +| Hostname | Specify the hostname or IP address of the OpenLDAP server | +| Port | Specify the port at which the OpenLDAP server is listening for connections. Unencrypted LDAP normally uses the standard port of 389, while LDAPS uses port 636.| +| TLS | Check this box to enable LDAP over SSL/TLS (commonly known as LDAPS). You will also need to paste in the CA certificate if the server uses a self-signed/enterprise-signed certificate. | +| Server Connection Timeout | The duration in number of seconds that Rancher waits before considering the server unreachable. | +| Service Account Distinguished Name | Enter the Distinguished Name (DN) of the user that should be used to bind, search and retrieve LDAP entries. | +| Service Account Password | The password for the service account. | +| User Search Base | Enter the Distinguished Name of the node in your directory tree from which to start searching for user objects. All users must be descendents of this base DN. For example: "ou=people,dc=acme,dc=com".| +| Group Search Base | If your groups live under a different node than the one configured under `User Search Base` you will need to provide the Distinguished Name here. Otherwise leave this field empty. For example: "ou=groups,dc=acme,dc=com".| + +# User/Group Schema Configuration + +If your OpenLDAP directory deviates from the standard OpenLDAP schema, you must complete the **Customize Schema** section to match it. + +Note that the attribute mappings configured in this section are used by Rancher to construct search filters and resolve group membership. It is therefore always recommended to verify that the configuration here matches the schema used in your OpenLDAP. + +If you are unfamiliar with the user/group schema used in the OpenLDAP server, consult your LDAP administrator or refer to the section [Identify Search Base and Schema using ldapsearch](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory.md#annex-identify-search-base-and-schema-using-ldapsearch) in the Active Directory authentication documentation. + +### User Schema Configuration + +The table below details the parameters for the user schema configuration. + +
    User Schema Configuration Parameters
    + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for user objects in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Username Attribute | The user attribute whose value is suitable as a display name. | +| Login Attribute | The attribute whose value matches the username part of credentials entered by your users when logging in to Rancher. This is typically `uid`. | +| User Member Attribute | The user attribute containing the Distinguished Name of groups a user is member of. Usually this is one of `memberOf` or `isMemberOf`. | +| Search Attribute | When a user enters text to add users or groups in the UI, Rancher queries the LDAP server and attempts to match users by the attributes provided in this setting. Multiple attributes can be specified by separating them with the pipe ("\|") symbol. | +| User Enabled Attribute | If the schema of your OpenLDAP server supports a user attribute whose value can be evaluated to determine if the account is disabled or locked, enter the name of that attribute. The default OpenLDAP schema does not support this and the field should usually be left empty. | +| Disabled Status Bitmask | This is the value for a disabled/locked user account. The parameter is ignored if `User Enabled Attribute` is empty. | + +### Group Schema Configuration + +The table below details the parameters for the group schema configuration. + +
    Group Schema Configuration Parameters
    + +| Parameter | Description | +|:--|:--| +| Object Class | The name of the object class used for group entries in your domain. If defined, only specify the name of the object class - *don't* include it in an LDAP wrapper such as &(objectClass=xxxx) | +| Name Attribute | The group attribute whose value is suitable for a display name. | +| Group Member User Attribute | The name of the **user attribute** whose format matches the group members in the `Group Member Mapping Attribute`. | +| Group Member Mapping Attribute | The name of the group attribute containing the members of a group. | +| Search Attribute | Attribute used to construct search filters when adding groups to clusters or projects in the UI. See description of user schema `Search Attribute`. | +| Group DN Attribute | The name of the group attribute whose format matches the values in the user's group membership attribute. See `User Member Attribute`. | +| Nested Group Membership | This settings defines whether Rancher should resolve nested group memberships. Use only if your organization makes use of these nested memberships (ie. you have groups that contain other groups as members). This option is disabled if you are using Shibboleth. | \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md b/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/installation-references/feature-flags.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/installation-references/helm-chart-options.md b/versioned_docs/version-2.5/reference-guides/installation-references/helm-chart-options.md new file mode 100644 index 0000000000..abce71e516 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/installation-references/helm-chart-options.md @@ -0,0 +1,260 @@ +--- +title: Rancher Helm Chart Options +weight: 1 +aliases: + - /rancher/v2.5/en/installation/options/ + - /rancher/v2.5/en/installation/options/chart-options/ + - /rancher/v2.5/en/installation/options/helm2/helm-rancher/chart-options/ + - /rancher/v2.5/en/installation/resources/chart-options + - /rancher/v2.x/en/installation/install-rancher-on-k8s/chart-options/ +--- + +This page is a configuration reference for the Rancher Helm chart. 
+ +For help choosing a Helm chart version, refer to [this page.](../../getting-started/installation-and-upgrade/resources/choose-a-rancher-version.md) + +For information on enabling experimental features, refer to [this page.](../../pages-for-subheaders/enable-experimental-features.md) + +- [Common Options](#common-options) +- [Advanced Options](#advanced-options) +- [API Audit Log](#api-audit-log) +- [Setting Extra Environment Variables](#setting-extra-environment-variables) +- [TLS Settings](#tls-settings) +- [Customizing your Ingress](#customizing-your-ingress) +- [HTTP Proxy](#http-proxy) +- [Additional Trusted CAs](#additional-trusted-cas) +- [Private Registry and Air Gap Installs](#private-registry-and-air-gap-installs) +- [External TLS Termination](#external-tls-termination) + +### Common Options + +| Option | Default Value | Description | +| ------------------------- | ------------- | ---------------------------------------------------------------------------------- | +| `hostname` | " " | `string` - the Fully Qualified Domain Name for your Rancher Server | +| `ingress.tls.source` | "rancher" | `string` - Where to get the cert for the ingress. - "rancher, letsEncrypt, secret" | +| `letsEncrypt.email` | " " | `string` - Your email address | +| `letsEncrypt.environment` | "production" | `string` - Valid options: "staging, production" | +| `privateCA` | false | `bool` - Set to true if your cert is signed by a private CA | + +
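+
+The common options above are typically passed to Helm at install or upgrade time, either with `--set` flags or a values file. A minimal sketch, assuming the chart repository has been added as `rancher-latest` and using the `cattle-system` namespace; the hostname and email are placeholders:
+
+```bash
+helm upgrade --install rancher rancher-latest/rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.example.com \
+  --set ingress.tls.source=letsEncrypt \
+  --set letsEncrypt.email=admin@example.com \
+  --set letsEncrypt.environment=production
+```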
    + +### Advanced Options + +| Option | Default Value | Description | +| ------------------------------ | ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| `additionalTrustedCAs` | false | `bool` - See [Additional Trusted CAs](#additional-trusted-cas) | +| `addLocal` | "true" | `string` - Have Rancher detect and import the "local" Rancher server cluster. _Note: This option is no longer available in v2.5.0. Consider using the `restrictedAdmin` option to prevent users from modifying the local cluster._ | +| `antiAffinity` | "preferred" | `string` - AntiAffinity rule for Rancher pods - "preferred, required" | +| `auditLog.destination` | "sidecar" | `string` - Stream to sidecar container console or hostPath volume - "sidecar, hostPath" | +| `auditLog.hostPath` | "/var/log/rancher/audit" | `string` - log file destination on host (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.level` | 0 | `int` - set the [API Audit Log](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) level. 0 is off. [0-3] | +| `auditLog.maxAge` | 1 | `int` - maximum number of days to retain old audit log files (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxBackup` | 1 | `int` - maximum number of audit log files to retain (only applies when `auditLog.destination` is set to `hostPath`) | +| `auditLog.maxSize` | 100 | `int` - maximum size in megabytes of the audit log file before it gets rotated (only applies when `auditLog.destination` is set to `hostPath`) | +| `busyboxImage` | "busybox" | `string` - Image location for busybox image used to collect audit logs | +| `certmanager.version` | "" | `string` - set cert-manager compatibility | +| `debug` | false | `bool` - set debug flag on rancher server | +| `extraEnv` | [] | `list` - set additional environment variables for Rancher | +| `imagePullSecrets` | [] | `list` - list of names of Secret resource containing private registry credentials | +| `ingress.configurationSnippet` | "" | `string` - Add additional Nginx configuration. Can be used for proxy configuration. | +| `ingress.extraAnnotations` | {} | `map` - additional annotations to customize the ingress | +| `ingress.enabled` | true | When set to false, Helm will not install a Rancher ingress. Set the option to false to deploy your own ingress. _Available as of v2.5.6_ | +| `letsEncrypt.ingress.class` | "" | `string` - optional ingress class for the cert-manager acmesolver ingress that responds to the Let's Encrypt ACME challenges. Options: traefik, nginx. 
| | +| `noProxy` | "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local,cattle-system.svc" | `string` - comma separated list of hostnames or ip address not to use the proxy | | +| `proxy` | "" | `string` - HTTP[S] proxy server for Rancher | +| `rancherImage` | "rancher/rancher" | `string` - rancher image source | +| `rancherImagePullPolicy` | "IfNotPresent" | `string` - Override imagePullPolicy for rancher server images - "Always", "Never", "IfNotPresent" | +| `rancherImageTag` | same as chart version | `string` - rancher/rancher image tag | +| `replicas` | 3 | `int` - Number of replicas of Rancher pods | +| `resources` | {} | `map` - rancher pod resource requests & limits | +| `restrictedAdmin` | `false` | _Available in Rancher v2.5_ `bool` - When this option is set to true, the initial Rancher user has restricted access to the local Kubernetes cluster to prevent privilege escalation. For more information, see the section about the [restricted-admin role.](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions.md#restricted-admin) | +| `systemDefaultRegistry` | "" | `string` - private registry to be used for all system Docker images, e.g., http://registry.example.com/ | +| `tls` | "ingress" | `string` - See [External TLS Termination](#external-tls-termination) for details. - "ingress, external" | +| `useBundledSystemChart` | `false` | `bool` - select to use the system-charts packaged with Rancher server. This option is used for air gapped installations. | + + + +### API Audit Log + +Enabling the [API Audit Log](installation/api-auditing/). + +You can collect this log as you would any container log. Enable [logging](../../pages-for-subheaders/logging.md) for the `System` Project on the Rancher server cluster. + +```plain +--set auditLog.level=1 +``` + +By default enabling Audit Logging will create a sidecar container in the Rancher pod. This container (`rancher-audit-log`) will stream the log to `stdout`. You can collect this log as you would any container log. When using the sidecar as the audit log destination, the `hostPath`, `maxAge`, `maxBackups`, and `maxSize` options do not apply. It's advised to use your OS or Docker daemon's log rotation features to control disk space use. Enable [logging](../../pages-for-subheaders/logging.md) for the Rancher server cluster or System Project. + +Set the `auditLog.destination` to `hostPath` to forward logs to volume shared with the host system instead of streaming to a sidecar container. When setting the destination to `hostPath` you may want to adjust the other auditLog parameters for log rotation. + +### Setting Extra Environment Variables + +You can set extra environment variables for Rancher server using `extraEnv`. This list uses the same `name` and `value` keys as the container manifest definitions. Remember to quote the values. + +```plain +--set 'extraEnv[0].name=CATTLE_TLS_MIN_VERSION' +--set 'extraEnv[0].value=1.0' +``` + +### TLS Settings + +When you install Rancher inside of a Kubernetes cluster, TLS is offloaded at the cluster's ingress controller. The possible TLS settings depend on the used ingress controller. + +See [TLS settings](tls-settings.md) for more information and options. + +### Import `local` Cluster + +By default Rancher server will detect and import the `local` cluster it's running on. 
User with access to the `local` cluster will essentially have "root" access to all the clusters managed by Rancher server. + +> **Important:** If you turn addLocal off, most Rancher v2.5 features won't work, including the EKS provisioner. + +If this is a concern in your environment you can set this option to "false" on your initial install. + +This option is only effective on the initial Rancher install. See [Issue 16522](https://github.com/rancher/rancher/issues/16522) for more information. + +```plain +--set addLocal="false" +``` + +### Customizing your Ingress + +To customize or use a different ingress with Rancher server you can set your own Ingress annotations. + +Example on setting a custom certificate issuer: + +```plain +--set ingress.extraAnnotations.'cert-manager\.io/cluster-issuer'=issuer-name +``` + +Example on setting a static proxy header with `ingress.configurationSnippet`. This value is parsed like a template so variables can be used. + +```plain +--set ingress.configurationSnippet='more_set_input_headers X-Forwarded-Host {{ .Values.hostname }};' +``` + +### HTTP Proxy + +Rancher requires internet access for some functionality (helm charts). Use `proxy` to set your proxy server. + +Add your IP exceptions to the `noProxy` list. Make sure you add the Pod cluster IP range (default: `10.42.0.0/16`), Service cluster IP range (default: `10.43.0.0/16`), the internal cluster domains (default: `.svc,.cluster.local`) and any worker cluster `controlplane` nodes. Rancher supports CIDR notation ranges in this list. + +```plain +--set proxy="http://:@:/" +--set noProxy="127.0.0.0/8\,10.0.0.0/8\,172.16.0.0/12\,192.168.0.0/16\,.svc\,.cluster.local" +``` + +### Additional Trusted CAs + +If you have private registries, catalogs or a proxy that intercepts certificates, you may need to add additional trusted CAs to Rancher. + +```plain +--set additionalTrustedCAs=true +``` + +Once the Rancher deployment is created, copy your CA certs in pem format into a file named `ca-additional.pem` and use `kubectl` to create the `tls-ca-additional` secret in the `cattle-system` namespace. + +```plain +kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./ca-additional.pem +``` + +### Private Registry and Air Gap Installs + +For details on installing Rancher with a private registry, see [Air Gapped Helm CLI Install](../../pages-for-subheaders/air-gapped-helm-cli-install.md). + +# External TLS Termination + +We recommend configuring your load balancer as a Layer 4 balancer, forwarding plain 80/tcp and 443/tcp to the Rancher Management cluster nodes. The Ingress Controller on the cluster will redirect http traffic on port 80 to https on port 443. + +You may terminate the SSL/TLS on a L7 load balancer external to the Rancher cluster (ingress). Use the `--set tls=external` option and point your load balancer at port http 80 on all of the Rancher cluster nodes. This will expose the Rancher interface on http port 80. Be aware that clients that are allowed to connect directly to the Rancher cluster will not be encrypted. If you choose to do this we recommend that you restrict direct access at the network level to just your load balancer. + +> **Note:** If you are using a Private CA signed certificate, add `--set privateCA=true` and see [Adding TLS Secrets - Using a Private CA Signed Certificate](../../getting-started/installation-and-upgrade/resources/add-tls-secrets.md) to add the CA cert for Rancher. 
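+
+For reference, a minimal sketch of the chart options involved when terminating TLS externally; the hostname is a placeholder, and `privateCA=true` is only needed when the certificate presented by your load balancer is signed by a private CA:
+
+```bash
+helm upgrade --install rancher rancher-latest/rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.example.com \
+  --set tls=external \
+  --set privateCA=true
+```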
+ +Your load balancer must support long lived websocket connections and will need to insert proxy headers so Rancher can route links correctly. + +### Configuring Ingress for External TLS when Using NGINX v0.25 + +In NGINX v0.25, the behavior of NGINX has [changed](https://github.com/kubernetes/ingress-nginx/blob/master/Changelog.md#0220) regarding forwarding headers and external TLS termination. Therefore, in the scenario that you are using external TLS termination configuration with NGINX v0.25, you must edit the `cluster.yml` to enable the `use-forwarded-headers` option for ingress: + +```yaml +ingress: + provider: nginx + options: + use-forwarded-headers: 'true' +``` + +### Required Headers + +- `Host` +- `X-Forwarded-Proto` +- `X-Forwarded-Port` +- `X-Forwarded-For` + +### Recommended Timeouts + +- Read Timeout: `1800 seconds` +- Write Timeout: `1800 seconds` +- Connect Timeout: `30 seconds` + +### Health Checks + +Rancher will respond `200` to health checks on the `/healthz` endpoint. + +### Example NGINX config + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `IP_NODE1`, `IP_NODE2` and `IP_NODE3` with the IP addresses of the nodes in your cluster. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server IP_NODE_1:80; + server IP_NODE_2:80; + server IP_NODE_3:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
+ proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` diff --git a/content/rancher/v2.5/en/installation/resources/tls-settings/_index.md b/versioned_docs/version-2.5/reference-guides/installation-references/tls-settings.md similarity index 100% rename from content/rancher/v2.5/en/installation/resources/tls-settings/_index.md rename to versioned_docs/version-2.5/reference-guides/installation-references/tls-settings.md diff --git a/versioned_docs/version-2.5/reference-guides/kubernetes-concepts.md b/versioned_docs/version-2.5/reference-guides/kubernetes-concepts.md new file mode 100644 index 0000000000..0741a45f73 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/kubernetes-concepts.md @@ -0,0 +1,74 @@ +--- +title: Kubernetes Concepts +weight: 4 +aliases: + - /rancher/v2.x/en/overview/concepts/ +--- + +This page explains concepts related to Kubernetes that are important for understanding how Rancher works. The descriptions below provide a simplified interview of Kubernetes components. For more details, refer to the [official documentation on Kubernetes components.](https://kubernetes.io/docs/concepts/overview/components/) + +This section covers the following topics: + +- [About Docker](#about-docker) +- [About Kubernetes](#about-kubernetes) +- [What is a Kubernetes Cluster?](#what-is-a-kubernetes-cluster) +- [Roles for Nodes in Kubernetes Clusters](#roles-for-nodes-in-kubernetes-clusters) + - [etcd Nodes](#etcd-nodes) + - [Controlplane Nodes](#controlplane-nodes) + - [Worker Nodes](#worker-nodes) +- [About Helm](#about-helm) + +# About Docker + +Docker is the container packaging and runtime standard. Developers build container images from Dockerfiles and distribute container images from Docker registries. [Docker Hub](https://hub.docker.com) is the most popular public registry. Many organizations also set up private Docker registries. Docker is primarily used to manage containers on individual nodes. + +>**Note:** Although Rancher 1.6 supported Docker Swarm clustering technology, it is no longer supported in Rancher 2.x due to the success of Kubernetes. + +# About Kubernetes + +Kubernetes is the container cluster management standard. YAML files specify containers and other resources that form an application. Kubernetes performs functions such as scheduling, scaling, service discovery, health check, secret management, and configuration management. + +# What is a Kubernetes Cluster? + +A cluster is a group of computers that work together as a single system. + +A _Kubernetes Cluster_ is a cluster that uses the [Kubernetes container-orchestration system](https://kubernetes.io/) to deploy, maintain, and scale Docker containers, allowing your organization to automate application operations. + +# Roles for Nodes in Kubernetes Clusters + +Each computing resource in a Kubernetes cluster is called a _node_. Nodes can be either bare-metal servers or virtual machines. Kubernetes classifies nodes into three types: _etcd_ nodes, _control plane_ nodes, and _worker_ nodes. + +A Kubernetes cluster consists of at least one etcd, controlplane, and worker node. + +### etcd Nodes + +Rancher uses etcd as a data store in both single node and high-availability installations. In Kubernetes, etcd is also a role for nodes that store the cluster state. 
+ +The state of a Kubernetes cluster is maintained in [etcd.](https://kubernetes.io/docs/concepts/overview/components/#etcd) The etcd nodes run the etcd database. + +The etcd database component is a distributed key-value store used as Kubernetes storage for all cluster data, such as cluster coordination and state management. It is recommended to run etcd on multiple nodes so that there's always a backup available for failover. + +Although you can run etcd on just one node, etcd requires a majority of nodes, a quorum, to agree on updates to the cluster state. The cluster should always contain enough healthy etcd nodes to form a quorum. For a cluster with n members, a quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for a quorum. + +Three etcd nodes is generally sufficient for smaller clusters and five etcd nodes for large clusters. + +### Controlplane Nodes + +Controlplane nodes run the Kubernetes API server, scheduler, and controller manager. These nodes take care of routine tasks to ensure that your cluster maintains your configuration. Because all cluster data is stored on your etcd nodes, control plane nodes are stateless. You can run control plane on a single node, although three or more nodes are recommended for redundancy. Additionally, a single node can share the control plane and etcd roles. + +### Worker Nodes + +Each [worker node](https://kubernetes.io/docs/concepts/architecture/nodes/) runs the following: + +- **Kubelets:** An agent that monitors the state of the node, ensuring your containers are healthy. +- **Workloads:** The containers and pods that hold your apps, as well as other types of deployments. + +Worker nodes also run storage and networking drivers, and ingress controllers when required. You create as many worker nodes as necessary to run your [workloads](../pages-for-subheaders/workloads-and-pods.md). + +# About Helm + +For high-availability installations of Rancher, Helm is the tool used to install Rancher on a Kubernetes cluster. + +Helm is the package management tool of choice for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm we can create configurable deployments instead of just using static files. For more information about creating your own catalog of deployments, check out the docs at [https://helm.sh/](https://helm.sh). 
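As a small illustration of that templating, a Deployment manifest inside a chart can reference values supplied at install or upgrade time. The chart and value names below are hypothetical:

```yaml
# templates/deployment.yaml in a hypothetical chart
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-web
spec:
  replicas: {{ .Values.replicaCount }}   # set in values.yaml or with --set at install time
  selector:
    matchLabels:
      app: {{ .Release.Name }}-web
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}-web
    spec:
      containers:
        - name: web
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
```

Installing the same chart with different values, for example `helm install my-app ./my-chart --set replicaCount=3`, renders a different deployment from the same template.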
+ +For more information on service accounts and cluster role binding, refer to the [Kubernetes documentation.](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md new file mode 100644 index 0000000000..f62dfb976b --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/examples.md @@ -0,0 +1,24 @@ +--- +title: Examples +weight: 400 +--- + +### ServiceMonitor + +An example ServiceMonitor custom resource can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + +### PodMonitor + +An example PodMonitor can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/example-app-pod-monitor.yaml) An example Prometheus resource that refers to it can be found [here.](https://github.com/prometheus-operator/prometheus-operator/blob/master/example/user-guides/getting-started/prometheus-pod-monitor.yaml) + +### PrometheusRule + +For users who are familiar with Prometheus, a PrometheusRule contains the alerting and recording rules that you would normally place in a [Prometheus rule file](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/). + +For a more fine-grained application of PrometheusRules within your cluster, the ruleSelector field on a Prometheus resource allows you to select which PrometheusRules should be loaded onto Prometheus based on the labels attached to the PrometheusRules resources. + +An example PrometheusRule is on [this page.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md) + +### Alertmanager Config + +For an example configuration, refer to [this section.](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md#example-alertmanager-config) \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/helm-chart-options.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/helm-chart-options.md new file mode 100644 index 0000000000..22f0efb84f --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/helm-chart-options.md @@ -0,0 +1,77 @@ +--- +title: Helm Chart Options +weight: 8 +--- + +- [Configuring Resource Limits and Requests](#configuring-resource-limits-and-requests) +- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) +- [Additional Scrape Configurations](#additional-scrape-configurations) +- [Configuring Applications Packaged within Monitoring V2](#configuring-applications-packaged-within-monitoring-v2) +- [Increase the Replicas of Alertmanager](#increase-the-replicas-of-alertmanager) +- [Configuring the Namespace for a Persistent Grafana Dashboard](#configuring-the-namespace-for-a-persistent-grafana-dashboard) + + +# Configuring Resource Limits and Requests + +The resource requests and limits can be configured when installing `rancher-monitoring`. 
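For example, the limits and requests for individual components can be overridden through Helm values when deploying or upgrading the chart. The sketch below uses the value layout of the upstream kube-prometheus-stack chart that `rancher-monitoring` is based on, so verify the exact keys against the chart's values.yaml:

```yaml
prometheus:
  prometheusSpec:
    resources:
      requests:
        cpu: 750m
        memory: 1750Mi
      limits:
        cpu: 1000m
        memory: 2500Mi
alertmanager:
  alertmanagerSpec:
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:
        cpu: 1000m
        memory: 500Mi
```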
+ +The default values are in the [values.yaml](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/values.yaml) in the `rancher-monitoring` Helm chart. + +The default values in the table below are the minimum required resource limits and requests. + +| Resource Name | Memory Limit | CPU Limit | Memory Request | CPU Request | +| ------------- | ------------ | ----------- | ---------------- | ------------------ | +| alertmanager | 500Mi | 1000m | 100Mi | 100m | +| grafana | 200Mi | 200m | 100Mi | 100m | +| kube-state-metrics subchart | 200Mi | 100m | 130Mi | 100m | +| prometheus-node-exporter subchart | 50Mi | 200m | 30Mi | 100m | +| prometheusOperator | 500Mi | 200m | 100Mi | 100m | +| prometheus | 2500Mi | 1000m | 1750Mi | 750m | +| **Total** | **3950Mi** | **2700m** | **2210Mi** | **1250m** | + +At least 50Gi storage is recommended. + + +# Trusted CA for Notifiers + +If you need to add a trusted CA to your notifier, follow these steps: + +1. Create the `cattle-monitoring-system` namespace. +1. Add your trusted CA secret to the `cattle-monitoring-system` namespace. +1. Deploy or upgrade the `rancher-monitoring` Helm chart. In the chart options, reference the secret in **Alerting > Additional Secrets.** + +**Result:** The default Alertmanager custom resource will have access to your trusted CA. + + +# Additional Scrape Configurations + +If the scrape configuration you want cannot be specified via a ServiceMonitor or PodMonitor at the moment, you can provide an `additionalScrapeConfigSecret` on deploying or upgrading `rancher-monitoring`. + +A [scrape_config section](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) specifies a set of targets and parameters describing how to scrape them. In the general case, one scrape configuration specifies a single job. + +An example of where this might be used is with Istio. For more information, see [this section.](https://rancher.com/docs/rancher/v2.5/en/istio/configuration-reference/selectors-and-scrape) + + +# Configuring Applications Packaged within Monitoring v2 + +We deploy kube-state-metrics and node-exporter with monitoring v2. Node exporter are deployed as DaemonSets. In the monitoring v2 helm chart, in the values.yaml, each of the things are deployed as sub charts. + +We also deploy grafana which is not managed by prometheus. + +If you look at what the helm chart is doing like in kube-state-metrics, there are plenty more values that you can set that aren’t exposed in the top level chart. + +But in the top level chart you can add values that override values that exist in the sub chart. + +### Increase the Replicas of Alertmanager + +As part of the chart deployment options, you can opt to increase the number of replicas of the Alertmanager deployed onto your cluster. The replicas can all be managed using the same underlying Alertmanager Config Secret. 
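For example, when the chart follows the upstream kube-prometheus-stack value layout, the replica count can be raised with a value such as the following (a sketch; confirm the key against the chart's values.yaml):

```yaml
alertmanager:
  alertmanagerSpec:
    replicas: 3   # all replicas load the same Alertmanager Config Secret
```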
For more information on the Alertmanager Config Secret, refer to [this section](../../how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager.md#multiple-alertmanager-replicas) + +### Configuring the Namespace for a Persistent Grafana Dashboard + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: + +``` +grafana.sidecar.dashboards.searchNamespace=ALL +``` + +Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/receivers.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/receivers.md new file mode 100644 index 0000000000..fccdea7c91 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/receivers.md @@ -0,0 +1,401 @@ +--- +title: Receiver Configuration +shortTitle: Receivers +weight: 1 +aliases: + - /rancher/v2.5/en/monitoring-alerting/configuration/alertmanager + - rancher/v2.5/en/monitoring-alerting/legacy/notifiers/ + - /rancher/v2.5/en/cluster-admin/tools/notifiers + - /rancher/v2.5/en/cluster-admin/tools/alerts + - /rancher/v2.5/en/monitoring-alerting/configuration/alertmanager +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus. + +> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md#3-how-alertmanager-works) + +- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui) +- [Receiver Configuration](#receiver-configuration) + - [Slack](#slack) + - [Email](#email) + - [PagerDuty](#pagerduty) + - [Opsgenie](#opsgenie) + - [Webhook](#webhook) + - [Custom](#custom) + - [Teams](#teams) + - [SMS](#sms) +- [Route Configuration](#route-configuration) + - [Receiver](#receiver) + - [Grouping](#grouping) + - [Matching](#matching) +- [Configuring Multiple Receivers](#configuring-multiple-receivers) +- [Example Alertmanager Config](examples.md#example-alertmanager-config) +- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts) +- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) + +# Creating Receivers in the Rancher UI +_Available as of v2.5.4_ + +> **Prerequisites:** +> +>- The monitoring application needs to be installed. +>- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise you will only be able to make changes based on modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to what kinds of Alertmanager Configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement. 
+ +To create notification receivers in the Rancher UI, + +1. Click **Cluster Explorer > Monitoring** and click **Receiver.** +2. Enter a name for the receiver. +3. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. +4. Click **Create.** + +**Result:** Alerts can be configured to send notifications to the receiver(s). + +# Receiver Configuration + +The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) + +### Native vs. Non-native Receivers + +By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI. + +For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI. + +Currently the Rancher Alerting Drivers app provides access to the following integrations: +- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver +- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver + +### Changes in Rancher v2.5.8 + +Rancher v2.5.8 added Microsoft Teams and SMS as configurable receivers in the Rancher UI. + +### Changes in Rancher v2.5.4 + +Rancher v2.5.4 introduced the capability to configure receivers by filling out forms in the Rancher UI. + + + + +The following types of receivers can be configured in the Rancher UI: + +- Slack +- Email +- PagerDuty +- Opsgenie +- Webhook +- Custom +- Teams +- SMS + +The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. + +# Slack + +| Field | Type | Description | +|------|--------------|------| +| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | +| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | +| Proxy URL | String | Proxy for the webhook notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +# Email + +| Field | Type | Description | +|------|--------------|------| +| Default Recipient Address | String | The email address that will receive notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). 
| + +SMTP options: + +| Field | Type | Description | +|------|--------------|------| +| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | +| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | +| Use TLS | Bool | Use TLS for encryption. | +| Username | String | Enter a username to authenticate with the SMTP server. | +| Password | String | Enter a password to authenticate with the SMTP server. | + +# PagerDuty + +| Field | Type | Description | +|------|------|-------| +| Integration Type | String | `Events API v2` or `Prometheus`. | +| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | +| Proxy URL | String | Proxy for the PagerDuty notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +# Opsgenie + +| Field | Description | +|------|-------------| +| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | +| Proxy URL | Proxy for the Opsgenie notifications. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +Opsgenie Responders: + +| Field | Type | Description | +|-------|------|--------| +| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | +| Send To | String | Id, Name, or Username of the Opsgenie recipient. | + +# Webhook + +| Field | Description | +|-------|--------------| +| URL | Webhook URL for the app of your choice. | +| Proxy URL | Proxy for the webhook notification. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + + + +# Custom + +The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. + +# Teams + +### Enabling the Teams Receiver for Rancher Managed Clusters + +The Teams receiver is not a native receiver and must be enabled before it can be used. You can enable the Teams receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the Teams option selected. + +1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Cluster Explorer**. +1. Click **Apps**. +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab +1. Select the **Teams** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the Teams Receiver + +The Teams receiver can be configured by updating its ConfigMap. For example, the following is a minimal Teams receiver configuration. + +```yaml +[Microsoft Teams] +teams-instance-1: https://your-teams-webhook-url +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). 
+ +Use the example below as the URL where: + +- `ns-1` is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1 +``` + + + +# SMS + +### Enabling the SMS Receiver for Rancher Managed Clusters + +The SMS receiver is not a native receiver and must be enabled before it can be used. You can enable the SMS receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the SMS option selected. + +1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Cluster Explorer**. +1. Click **Apps**. +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab +1. Select the **SMS** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the SMS Receiver + +The SMS receiver can be configured by updating its ConfigMap. For example, the following is a minimal SMS receiver configuration. + +```yaml +providers: + telegram: + token: 'your-token-from-telegram' + +receivers: +- name: 'telegram-receiver-1' + provider: 'telegram' + to: + - '123456789' +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). + +Use the example below as the name and URL, where: + +- the name assigned to the receiver, e.g. `telegram-receiver-1`, must match the name in the `receivers.name` field in the ConfigMap, e.g. `telegram-receiver-1` +- `ns-1` in the URL is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +name: telegram-receiver-1 +url http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert +``` + + + + + + +The following types of receivers can be configured in the Rancher UI: + +- Slack +- Email +- PagerDuty +- Opsgenie +- Webhook +- Custom + +The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. + +### Slack {#slack-254-257} + +| Field | Type | Description | +|------|--------------|------| +| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | +| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | +| Proxy URL | String | Proxy for the webhook notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +### Email {#email-254-257} + +| Field | Type | Description | +|------|--------------|------| +| Default Recipient Address | String | The email address that will receive notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +SMTP options: + +| Field | Type | Description | +|------|--------------|------| +| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | +| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. 
| +| Use TLS | Bool | Use TLS for encryption. | +| Username | String | Enter a username to authenticate with the SMTP server. | +| Password | String | Enter a password to authenticate with the SMTP server. | + +### PagerDuty {#pagerduty-254-257} + +| Field | Type | Description | +|------|------|-------| +| Integration Type | String | `Events API v2` or `Prometheus`. | +| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | +| Proxy URL | String | Proxy for the PagerDuty notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +### Opsgenie {#opsgenie-254-257} + +| Field | Description | +|------|-------------| +| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | +| Proxy URL | Proxy for the Opsgenie notifications. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +Opsgenie Responders: + +| Field | Type | Description | +|-------|------|--------| +| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | +| Send To | String | Id, Name, or Username of the Opsgenie recipient. | + +### Webhook {#webhook-1} + +| Field | Description | +|-------|--------------| +| URL | Webhook URL for the app of your choice. | +| Proxy URL | Proxy for the webhook notification. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +### Custom {#custom-254-257} + +The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. + + + +The Alertmanager must be configured in YAML, as shown in these [examples.](#example-alertmanager-configs) + + + + +# Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +It is also possible to send alerts to multiple notification systems. One way is to configure the Receiver using custom YAML, in which case you can add the configuration for multiple notification systems, as long as you are sure that both systems should receive the same messages. + +You can also set up multiple receivers by using the `continue` option for a route, so that the alerts sent to a receiver continue being evaluated in the next level of the routing tree, which could contain another receiver. + + +# Example Alertmanager Configs + +### Slack +To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: + +```yaml +route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 3h + receiver: 'slack-notifications' +receivers: +- name: 'slack-notifications' + slack_configs: + - send_resolved: true + text: '{{ template "slack.rancher.text" . 
}}' + api_url: +templates: +- /etc/alertmanager/config/*.tmpl +``` + +### PagerDuty +To set up notifications via PagerDuty, use the example below from the [PagerDuty documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) as a guideline. This example sets up a route that captures alerts for a database service and sends them to a receiver linked to a service that will directly notify the DBAs in PagerDuty, while all other alerts will be directed to a default receiver with a different PagerDuty integration key. + +The following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret. The `service_key` should be updated to use your PagerDuty integration key and can be found as per the "Integrating with Global Event Routing" section of the PagerDuty documentation. For the full list of configuration options, refer to the [Prometheus documentation](https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config). + +```yaml +route: + group_by: [cluster] + receiver: 'pagerduty-notifications' + group_interval: 5m + routes: + - match: + service: database + receiver: 'database-notifcations' + +receivers: +- name: 'pagerduty-notifications' + pagerduty_configs: + - service_key: 'primary-integration-key' + +- name: 'database-notifcations' + pagerduty_configs: + - service_key: 'database-integration-key' +``` + +# Example Route Config for CIS Scan Alerts + +While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. + +For example, the following example route configuration could be used with a Slack receiver named `test-cis`: + +```yaml +spec: + receiver: test-cis + group_by: +# - string + group_wait: 30s + group_interval: 30s + repeat_interval: 30s + match: + job: rancher-cis-scan +# key: string + match_re: + {} +# key: string +``` + +For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.](../../pages-for-subheaders/cis-scan-guides.md#enabling-alerting-for-rancher-cis-benchmark) + + +# Trusted CA for Notifiers + +If you need to add a trusted CA to your notifier, follow the steps in [this section.](helm-chart-options.md#trusted-ca-for-notifiers) \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/routes.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/routes.md new file mode 100644 index 0000000000..b7ea8f1058 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/routes.md @@ -0,0 +1,79 @@ +--- +title: Route Configuration +shortTitle: Routes +weight: 5 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The route configuration is the section of the Alertmanager custom resource that controls how the alerts fired by Prometheus are grouped and filtered before they reach the receiver. + +When a Route is changed, the Prometheus Operator regenerates the Alertmanager custom resource to reflect the changes. + +For more information about configuring routes, refer to the [official Alertmanager documentation.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) + +> This section assumes familiarity with how monitoring components work together. 
For more information, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +- [Route Restrictions](#route-restrictions) +- [Route Configuration](#route-configuration) + - [Receiver](#receiver) + - [Grouping](#grouping) + - [Matching](#matching) + +# Route Restrictions + +Alertmanager proxies alerts for Prometheus based on its receivers and a routing tree that filters alerts to certain receivers based on labels. + +Alerting drivers proxy alerts for Alertmanager to non-native receivers, such as Microsoft Teams and SMS. + +In the Rancher UI for configuring routes and receivers, you can configure routing trees with one root and then a depth of one more level, for a tree with a depth of two. But if you use a `continue` route when configuring Alertmanager directly, you can make the tree deeper. + +Each receiver is for one or more notification providers. So if you know that every alert for Slack should also go to PagerDuty, you can configure both in the same receiver. + +# Route Configuration + +### Note on Labels and Annotations + +Labels should be used for identifying information that can affect the routing of notifications. Identifying information about the alert could consist of a container name, or the name of the team that should be notified. + +Annotations should be used for information that does not affect who receives the alert, such as a runbook url or error message. + + + + +### Receiver +The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. + +### Grouping + +| Field | Default | Description | +|-------|--------------|---------| +| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | +| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | +| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | +| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | + +### Matching + +The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: + +```yaml +match: + [ : , ... ] +``` + +The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: + +```yaml +match_re: + [ : , ... 
] +``` + + + + +The Alertmanager must be configured in YAML, as shown in this [example.](examples.md#alertmanager-config) + + + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md new file mode 100644 index 0000000000..e237469615 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors.md @@ -0,0 +1,31 @@ +--- +title: ServiceMonitor and PodMonitor Configuration +shortTitle: ServiceMonitors and PodMonitors +weight: 7 +--- + +ServiceMonitors and PodMonitors are both pseudo-CRDs that map the scrape configuration of the Prometheus custom resource. + +These configuration objects declaratively specify the endpoints that Prometheus will scrape metrics from. + +ServiceMonitors are more commonly used than PodMonitors, and we recommend them for most use cases. + +> This section assumes familiarity with how monitoring components work together. For more information, see [this section.](../../explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works.md) + +### ServiceMonitors + +This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how groups of Kubernetes services should be monitored. + +When a ServiceMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the ServiceMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the ServiceMonitor. + +Any Services in your cluster that match the labels located within the ServiceMonitor `selector` field will be monitored based on the `endpoints` specified on the ServiceMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) provided by Prometheus Operator. + +For more information about how ServiceMonitors work, refer to the [Prometheus Operator documentation.](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) + +### PodMonitors + +This pseudo-CRD maps to a section of the Prometheus custom resource configuration. It declaratively specifies how group of pods should be monitored. + +When a PodMonitor is created, the Prometheus Operator updates the Prometheus scrape configuration to include the PodMonitor configuration. Then Prometheus begins scraping metrics from the endpoint defined in the PodMonitor. + +Any Pods in your cluster that match the labels located within the PodMonitor `selector` field will be monitored based on the `podMetricsEndpoints` specified on the PodMonitor. For more information on what fields can be specified, please look at the [spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#podmonitorspec) provided by Prometheus Operator. 
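As an illustration only, a minimal PodMonitor could look like the following. The names, labels, namespace, and port are placeholders; the Prometheus Operator documentation linked above remains the authoritative reference:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: example-app        # placeholder name
  namespace: default       # placeholder namespace
spec:
  selector:
    matchLabels:
      app: example-app     # pods carrying this label are scraped
  namespaceSelector:
    matchNames:
      - default            # namespace(s) to search for matching pods
  podMetricsEndpoints:
    - port: metrics        # must match a named port on the pod
      interval: 30s
```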
diff --git a/content/rancher/v2.5/en/pipelines/concepts/_index.md b/versioned_docs/version-2.5/reference-guides/pipelines/concepts.md similarity index 100% rename from content/rancher/v2.5/en/pipelines/concepts/_index.md rename to versioned_docs/version-2.5/reference-guides/pipelines/concepts.md diff --git a/versioned_docs/version-2.5/reference-guides/pipelines/configure-persistent-data.md b/versioned_docs/version-2.5/reference-guides/pipelines/configure-persistent-data.md new file mode 100644 index 0000000000..1552218591 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/pipelines/configure-persistent-data.md @@ -0,0 +1,92 @@ +--- +title: Configuring Persistent Data for Pipeline Components +weight: 600 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/pipelines/storage + - /rancher/v2.x/en/pipelines/storage/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.](../../how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage.md) + +>**Prerequisites (for both parts A and B):** +> +>[Persistent volumes](../../pages-for-subheaders/create-kubernetes-persistent-storage.md) must be available for the cluster. + +### A. Configuring Persistent Data for Docker Registry + +1. From the project that you're configuring a pipeline for, and click **Resources > Workloads.** + +1. Find the `docker-registry` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. + +1. Click **Upgrade**. + +### B. Configuring Persistent Data for Minio + +1. From the project view, click **Resources > Workloads.** Find the `minio` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. 
Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. + +1. Click **Upgrade**. + +**Result:** Persistent storage is configured for your pipeline components. diff --git a/versioned_docs/version-2.5/reference-guides/pipelines/example-repositories.md b/versioned_docs/version-2.5/reference-guides/pipelines/example-repositories.md new file mode 100644 index 0000000000..032c18aa1c --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/pipelines/example-repositories.md @@ -0,0 +1,78 @@ +--- +title: Example Repositories +weight: 500 +aliases: + - /rancher/v2.5/en/tools/pipelines/quick-start-guide/ + - /rancher/v2.5/en/k8s-in-rancher/pipelines/example-repos + - /rancher/v2.x/en/pipelines/example-repos/ +--- + +Rancher ships with several example repositories that you can use to familiarize yourself with pipelines. We recommend configuring and testing the example repository that most resembles your environment before using pipelines with your own repositories in a production environment. Use this example repository as a sandbox for repo configuration, build demonstration, etc. Rancher includes example repositories for: + +- Go +- Maven +- php + +> **Note:** The example repositories are only available if you have not [configured a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md). + +To start using these example repositories, + +1. [Enable the example repositories](#1-enable-the-example-repositories) +2. [View the example pipeline](#2-view-the-example-pipeline) +3. [Run the example pipeline](#3-run-the-example-pipeline) + +### 1. Enable the Example Repositories + +By default, the example pipeline repositories are disabled. Enable one (or more) to test out the pipeline feature and see how it works. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** + +1. Click **Configure Repositories**. + + **Step Result:** A list of example repositories displays. + + >**Note:** Example repositories only display if you haven't fetched your own repos. + +1. Click **Enable** for one of the example repos (e.g., `https://github.com/rancher/pipeline-example-go.git`). Then click **Done**. + +**Results:** + +- The example repository is enabled to work with a pipeline is available in the **Pipeline** tab. 
+ +- The following workloads are deployed to a new namespace: + + - `docker-registry` + - `jenkins` + - `minio` + +### 2. View the Example Pipeline + +After enabling an example repository, review the pipeline to see how it is set up. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** + +1. Find the example repository, select the vertical **⋮**. There are two ways to view the pipeline: + * **Rancher UI**: Click on **Edit Config** to view the stages and steps of the pipeline. + * **YAML**: Click on View/Edit YAML to view the `./rancher-pipeline.yml` file. + +### 3. Run the Example Pipeline + +After enabling an example repository, run the pipeline to see how it works. + +1. From the **Global** view, navigate to the project that you want to test out pipelines. + +1. Click **Resources > Pipelines.** + +1. Find the example repository, select the vertical **⋮ > Run**. + + >**Note:** When you run a pipeline the first time, it takes a few minutes to pull relevant images and provision necessary pipeline components. + +**Result:** The pipeline runs. You can see the results in the logs. + +### What's Next? + +For detailed information about setting up your own pipeline for your repository, [configure a version control provider](../../how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines.md), enable a repository and finally configure your pipeline. diff --git a/versioned_docs/version-2.5/reference-guides/pipelines/example-yaml.md b/versioned_docs/version-2.5/reference-guides/pipelines/example-yaml.md new file mode 100644 index 0000000000..cbb1ee77bc --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/pipelines/example-yaml.md @@ -0,0 +1,76 @@ +--- +title: Example YAML File +weight: 501 +aliases: + - /rancher/v2.5/en/tools/pipelines/reference/ + - /rancher/v2.5/en/k8s-in-rancher/pipelines/example + - /rancher/v2.x/en/pipelines/example/ +--- + +Pipelines can be configured either through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. + +In the [pipeline configuration reference](pipeline-configuration.md), we provide examples of how to configure each feature using the Rancher UI or using YAML configuration. + +Below is a full example `rancher-pipeline.yml` for those who want to jump right in. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${ALIAS_ENV} + # Set environment variables in container for the step + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 + # Set environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . 
+ tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com + - name: Deploy some workloads + steps: + - applyYamlConfig: + path: ./deployment.yaml +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +# timeout in minutes +timeout: 30 +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` diff --git a/versioned_docs/version-2.5/reference-guides/pipelines/pipeline-configuration.md b/versioned_docs/version-2.5/reference-guides/pipelines/pipeline-configuration.md new file mode 100644 index 0000000000..20a0f35e2b --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/pipelines/pipeline-configuration.md @@ -0,0 +1,648 @@ +--- +title: Pipeline Configuration Reference +weight: 1 +aliases: + - /rancher/v2.5/en/k8s-in-rancher/pipelines/config + - /rancher/v2.x/en/pipelines/config/ +--- + +In this section, you'll learn how to configure pipelines. + +- [Step Types](#step-types) +- [Step Type: Run Script](#step-type-run-script) +- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) +- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) +- [Step Type: Deploy YAML](#step-type-deploy-yaml) +- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) +- [Notifications](#notifications) +- [Timeouts](#timeouts) +- [Triggers and Trigger Rules](#triggers-and-trigger-rules) +- [Environment Variables](#environment-variables) +- [Secrets](#secrets) +- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) +- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) + - [Executor Quota](#executor-quota) + - [Resource Quota for Executors](#resource-quota-for-executors) + - [Custom CA](#custom-ca) +- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) +- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) + +# Step Types + +Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. + +Step types include: + +- [Run Script](#step-type-run-script) +- [Build and Publish Images](#step-type-build-and-publish-images) +- [Publish Catalog Template](#step-type-publish-catalog-template) +- [Deploy YAML](#step-type-deploy-yaml) +- [Deploy Catalog App](#step-type-deploy-catalog-app) + + + +### Configuring Steps By UI + +If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. + +1. Add stages to your pipeline execution by clicking **Add Stage**. + + 1. Enter a **Name** for each stage of your pipeline. + 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. + +1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. + +### Configuring Steps by YAML + +For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. 
This is only a small example of how to have multiple stages with a singular step in each stage. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com +``` +# Step Type: Run Script + +The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configuring Script by UI + +1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. + +1. Click **Add**. + +### Configuring Script by YAML +```yaml +# example +stages: +- name: Build something + steps: + - runScriptConfig: + image: golang + shellScript: go build +``` +# Step Type: Build and Publish Images + +The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. + +The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. + +### Configuring Building and Publishing Images by UI +1. From the **Step Type** drop-down, choose **Build and Publish**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | + Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | + Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | + Build Context

    (**Show advanced options**)| By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). + +### Configuring Building and Publishing Images by YAML + +You can use specific arguments for Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include: + +Variable Name | Description +------------------------|------------------------------------------------------------ +PLUGIN_DRY_RUN | Disable docker push +PLUGIN_DEBUG | Docker daemon executes in debug mode +PLUGIN_MIRROR | Docker daemon registry mirror +PLUGIN_INSECURE | Docker daemon allows insecure registries +PLUGIN_BUILD_ARGS | Docker build args, a comma separated list + +
    + +```yaml +# This example shows an environment variable being used +# in the Publish Image step. This variable allows you to +# publish an image to an insecure registry: + +stages: +- name: Publish Image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + pushRemote: true + registry: example.com + env: + PLUGIN_INSECURE: "true" +``` + +# Step Type: Publish Catalog Template + +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a git hosted chart repository. It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. + +### Configuring Publishing a Catalog Template by UI + +1. From the **Step Type** drop-down, choose **Publish Catalog Template**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | + Catalog Template Name | The name of the template. For example, wordpress. | + Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | + Protocol | You can choose to publish via HTTP(S) or SSH protocol. | + Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | + Git URL | The Git URL of the chart repository that the template will be published to. | + Git Branch | The Git branch of the chart repository that the template will be published to. | + Author Name | The author name used in the commit message. | + Author Email | The author email used in the commit message. | + + +### Configuring Publishing a Catalog Template by YAML + +You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: + +* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. +* CatalogTemplate: The name of the template. +* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. +* GitUrl: The git URL of the chart repository that the template will be published to. +* GitBranch: The git branch of the chart repository that the template will be published to. +* GitAuthor: The author name used in the commit message. +* GitEmail: The author email used in the commit message. +* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. + +```yaml +# example +stages: +- name: Publish Wordpress Template + steps: + - publishCatalogConfig: + path: ./charts/wordpress/latest + catalogTemplate: wordpress + version: ${CICD_GIT_TAG} + gitUrl: git@github.com:myrepo/charts.git + gitBranch: master + gitAuthor: example-user + gitEmail: user@example.com + envFrom: + - sourceName: publish-keys + sourceKey: DEPLOY_KEY +``` + +# Step Type: Deploy YAML + +This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configure Deploying YAML by UI + +1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. + +1. Enter the **YAML Path**, which is the path to the manifest file in the source code. + +1. Click **Add**. + +### Configure Deploying YAML by YAML + +```yaml +# example +stages: +- name: Deploy + steps: + - applyYamlConfig: + path: ./deployment.yaml +``` + +# Step Type :Deploy Catalog App + +The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. + +### Configure Deploying Catalog App by UI + +1. From the **Step Type** drop-down, choose **Deploy Catalog App**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Catalog | The catalog from which the app template will be used. | + Template Name | The name of the app template. For example, wordpress. | + Template Version | The version of the app template you want to deploy. | + Namespace | The target namespace where you want to deploy the app. | + App Name | The name of the app you want to deploy. | + Answers | Key-value pairs of answers used to deploy the app. | + + +### Configure Deploying Catalog App by YAML + +You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: + +* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. +* Version: The version of the template you want to deploy. +* Answers: Key-value pairs of answers used to deploy the app. +* Name: The name of the app you want to deploy. +* TargetNamespace: The target namespace where you want to deploy the app. + +```yaml +# example +stages: +- name: Deploy App + steps: + - applyAppConfig: + catalogTemplate: cattle-global-data:library-mysql + version: 0.3.8 + answers: + persistence.enabled: "false" + name: testmysql + targetNamespace: test +``` + +# Timeouts + +By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. + +### Configuring Timeouts by UI + +Enter a new value in the **Timeout** field. 
+ +### Configuring Timeouts by YAML + +In the `timeout` section, enter the timeout value in minutes. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +# timeout in minutes +timeout: 30 +``` + +# Notifications + +You can enable notifications to any notifiers based on the build status of a pipeline. Before enabling notifications, Rancher recommends [setting up notifiers](../monitoring-v2-configuration/receivers.md) so it will be easy to add recipients immediately. + +### Configuring Notifications by UI + +1. Within the **Notification** section, turn on notifications by clicking **Enable**. + +1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. + +1. If you don't have any existing notifiers, Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions](monitoring-alerting/legacy/notifiers/) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. + + > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. + +1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. + +### Configuring Notifications by YAML + +In the `notification` section, you will provide the following information: + +* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. + * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. + * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. +* **Condition:** Select which conditions of when you want the notification to be sent. +* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. + +```yaml +# Example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` + +# Triggers and Trigger Rules + +After you configure a pipeline, you can trigger it using different methods: + +- **Manually:** + + After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. 
When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. + +- **Automatically:** + + When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. + + To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. + +Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: + +- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. + +- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. + +If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. + +Wildcard character (`*`) expansion is supported in `branch` conditions. + +This section covers the following topics: + +- [Configuring pipeline triggers](#configuring-pipeline-triggers) +- [Configuring stage triggers](#configuring-stage-triggers) +- [Configuring step triggers](#configuring-step-triggers) +- [Configuring triggers by YAML](#configuring-triggers-by-yaml) + +### Configuring Pipeline Triggers + +1. From the **Global** view, navigate to the project that you want to configure a pipeline trigger rule. + +1. Click **Resources > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. Click on **Show Advanced Options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. + + 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. + + 1. **Optional:** Add more branches that trigger a build. + +1. Click **Done.** + +### Configuring Stage Triggers + +1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. + +1. Click **Resources > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. + +1. Click **Show advanced options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the stage. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the stage and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the stage. | + | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + +### Configuring Step Triggers + +1. From the **Global** view, navigate to the project that you want to configure a stage trigger rule. + +1. Click **Resources > Pipelines.** + +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. + +1. 
Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. + +1. Click **Show advanced options**. + +1. In the **Trigger Rules** section, configure rules to run or skip the step. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the step and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the step. | + | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + + +### Configuring Triggers by YAML + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +``` + +# Environment Variables + +When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. + +### Configuring Environment Variables by UI + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** + +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. + +1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. + +1. Click **Show advanced options**. + +1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. + +1. Add your environment variable(s) into either the script or file. + +1. Click **Save**. + +### Configuring Environment Variables by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 +``` + +# Secrets + +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets](../../how-to-guides/new-user-guides/kubernetes-resources-setup/secrets.md). + +### Prerequisite +Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. +
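As a minimal sketch, such a secret could be created with `kubectl`; the namespace, secret name, and value below are placeholders and should match whatever your pipeline step later references:

```bash
# Placeholder values: replace the namespace with the one used by your
# pipeline build pods, and the key/value pair with your own credentials.
kubectl create secret generic my-secret \
  --namespace <pipeline-namespace> \
  --from-literal=secret-key=supersecret
```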
    + +>**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). + +### Configuring Secrets by UI + +1. From the **Global** view, navigate to the project that you want to configure pipelines. + +1. Click **Resources > Pipelines.** + +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. + +1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. + +1. Click **Show advanced options**. + +1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. + +1. Click **Save**. + +### Configuring Secrets by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${ALIAS_ENV} + # environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV +``` + +# Pipeline Variable Substitution Reference + +For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. + +Variable Name | Description +------------------------|------------------------------------------------------------ +`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). +`CICD_GIT_URL` | URL of the Git repository. +`CICD_GIT_COMMIT` | Git commit ID being executed. +`CICD_GIT_BRANCH` | Git branch of this event. +`CICD_GIT_REF` | Git reference specification of this event. +`CICD_GIT_TAG` | Git tag name, set on tag event. +`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). +`CICD_PIPELINE_ID` | Rancher ID for the pipeline. +`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. +`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. +`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. +`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.
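
For illustration, a fragment of a Kubernetes manifest consumed by a **Deploy YAML** step might reference these variables as shown below; the Deployment name and the choice of `${CICD_GIT_COMMIT}` as the image tag are placeholders, not a required convention:

```yaml
# Illustrative fragment of a manifest used by a Deploy YAML step.
# ${CICD_IMAGE} and ${CICD_GIT_COMMIT} are substituted at execution time.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: my-app
          image: ${CICD_IMAGE}:${CICD_GIT_COMMIT}
```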

    [Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml) + +# Global Pipeline Execution Settings + +After configuring a version control provider, there are several options that can be configured globally on how pipelines are executed in Rancher. These settings can be edited by selecting **Tools > Pipelines** in the navigation bar. + +- [Executor Quota](#executor-quota) +- [Resource Quota for Executors](#resource-quota-for-executors) +- [Custom CA](#custom-ca) + +### Executor Quota + +Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit. + +### Resource Quota for Executors + +Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, A build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every containers in the pod. + +Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**. + +To configure compute resources for pipeline-step containers: + +You can configure compute resources for pipeline-step containers in the `.rancher-pipeline.yml` file. + +In a step, you will provide the following information: + +* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step. +* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step. +* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step. +* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + cpuRequest: 100m + cpuLimit: 1 + memoryRequest:100Mi + memoryLimit: 1Gi +``` + +>**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default value by specifying compute resources in the same way. + +### Custom CA + +If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. + +1. Click **Edit cacerts**. + +1. Paste in the CA root certificates and click **Save cacerts**. + +**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. + +# Persistent Data for Pipeline Components + +The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. 
If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +For details on setting up persistent storage for pipelines, refer to [this page.](configure-persistent-data.md) + +# Example rancher-pipeline.yml + +An example pipeline configuration file is on [this page.](example-yaml.md) diff --git a/versioned_docs/version-2.5/reference-guides/rancher-cluster-tools.md b/versioned_docs/version-2.5/reference-guides/rancher-cluster-tools.md new file mode 100644 index 0000000000..2963ea7351 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-cluster-tools.md @@ -0,0 +1,62 @@ +--- +title: Tools for Logging, Monitoring, and Visibility +weight: 2033 +aliases: + - /rancher/v2.5/en/tools/notifiers-and-alerts/ + - /rancher/v2.x/en/cluster-admin/tools/ +--- + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + + + +- [Logging](#logging) +- [Monitoring and Alerts](#monitoring-and-alerts) +- [Istio](#istio) +- [OPA Gatekeeper](#opa-gatekeeper) +- [CIS Scans](#cis-scans) + + + + +# Logging + +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debugg and troubleshoot problems + +Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. + +For more information, refer to the logging documentation [here.](../pages-for-subheaders/logging.md) +# Monitoring and Alerts + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. + +After monitoring is enabled, you can set up alerts and notifiers that provide the mechanism to receive them. + +Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. + +Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. + +For more information, refer to the monitoring documentation [here.](../pages-for-subheaders/monitoring-and-alerting.md) + +# Istio + +[Istio](https://istio.io/) is an open-source tool that makes it easier for DevOps teams to observe, control, troubleshoot, and secure the traffic within a complex network of microservices. + +Rancher's integration with Istio was improved in Rancher v2.5. + +For more information, refer to the Istio documentation [here.](../pages-for-subheaders/istio.md) +# OPA Gatekeeper + +[OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper) is an open-source project that provides integration between OPA and Kubernetes to provide policy control via admission controller webhooks. 
For details on how to enable Gatekeeper in Rancher, refer to the [OPA Gatekeeper section.](../explanations/integrations-in-rancher/opa-gatekeeper.md) + +# CIS Scans + +Rancher can run a security scan to check whether Kubernetes is deployed according to security best practices as defined in the CIS Kubernetes Benchmark. + +For more information, refer to the CIS scan documentation [here.](../pages-for-subheaders/cis-scan-guides.md) \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/architecture-recommendations.md b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/architecture-recommendations.md new file mode 100644 index 0000000000..0f3b2d2a68 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/architecture-recommendations.md @@ -0,0 +1,113 @@ +--- +title: Architecture Recommendations +weight: 3 +aliases: + - /rancher/v2.x/en/overview/architecture-recommendations/ +--- + +Kubernetes cluster. If you are installing Rancher on a single node, the main architecture recommendation that applies to your installation is that the node running Rancher should be [separate from downstream clusters.](#separation-of-rancher-and-user-clusters) + +This section covers the following topics: + +- [Separation of Rancher and User Clusters](#separation-of-rancher-and-user-clusters) +- [Why HA is Better for Rancher in Production](#why-ha-is-better-for-rancher-in-production) +- [Recommended Load Balancer Configuration for Kubernetes Installations](#recommended-load-balancer-configuration-for-kubernetes-installations) +- [Environment for Kubernetes Installations](#environment-for-kubernetes-installations) +- [Recommended Node Roles for Kubernetes Installations](#recommended-node-roles-for-kubernetes-installations) +- [Architecture for an Authorized Cluster Endpoint](#architecture-for-an-authorized-cluster-endpoint) + +# Separation of Rancher and User Clusters + +A user cluster is a downstream Kubernetes cluster that runs your apps and services. + +If you have a Docker installation of Rancher, the node running the Rancher server should be separate from your downstream clusters. + +If Rancher is intended to manage downstream Kubernetes clusters, the Kubernetes cluster that the Rancher server runs on should also be separate from the downstream user clusters. + +![Separation of Rancher Server from User Clusters](/img/rancher-architecture-separation-of-rancher-server.svg) + +# Why HA is Better for Rancher in Production + +We recommend installing the Rancher server on a high-availability Kubernetes cluster, primarily because it protects the Rancher server data. In a high-availability installation, a load balancer serves as the single point of contact for clients, distributing network traffic across multiple servers in the cluster and helping to prevent any one server from becoming a point of failure. + +We don't recommend installing Rancher in a single Docker container, because if the node goes down, there is no copy of the cluster data available on other nodes and you could lose the data on your Rancher server. + +### K3s Kubernetes Cluster Installations + +One option for the underlying Kubernetes cluster is to use K3s Kubernetes. K3s is Rancher's CNCF certified Kubernetes distribution. It is easy to install and uses half the memory of Kubernetes, all in a binary of less than 100 MB. 
Another advantage of K3s is that it allows an external datastore to hold the cluster data, allowing the K3s server nodes to be treated as ephemeral. + +
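For reference, pointing a K3s server at an external datastore is done at install time; the sketch below is illustrative only, and the datastore endpoint and credentials are placeholders rather than part of this recommendation:

```bash
# Illustrative only: start a K3s server that keeps its cluster data in an
# external MySQL datastore, so the server node itself can be treated as ephemeral.
curl -sfL https://get.k3s.io | sh -s - server \
  --datastore-endpoint="mysql://username:password@tcp(hostname:3306)/kubernetes"
```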
    Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server
    +![Architecture of a K3s Kubernetes Cluster Running the Rancher Management Server](/img/k3s-server-storage.svg) + +### RKE Kubernetes Cluster Installations + +In an RKE installation, the cluster data is replicated on each of three etcd nodes in the cluster, providing redundancy and data duplication in case one of the nodes fails. + +
    Architecture of an RKE Kubernetes Cluster Running the Rancher Management Server
    +![Architecture of an RKE Kubernetes cluster running the Rancher management server](/img/rke-server-storage.svg) + +# Recommended Load Balancer Configuration for Kubernetes Installations + +We recommend the following configurations for the load balancer and Ingress controllers: + +* The DNS for Rancher should resolve to a Layer 4 load balancer (TCP) +* The Load Balancer should forward port TCP/80 and TCP/443 to all 3 nodes in the Kubernetes cluster. +* The Ingress controller will redirect HTTP to HTTPS and terminate SSL/TLS on port TCP/443. +* The Ingress controller will forward traffic to port TCP/80 on the pod in the Rancher deployment. + +
    Rancher installed on a Kubernetes cluster with layer 4 load balancer, depicting SSL termination at Ingress controllers
    +![Rancher HA](/img/ha/rancher2ha.svg) + +# Environment for Kubernetes Installations + +It is strongly recommended to install Rancher on a Kubernetes cluster on hosted infrastructure such as Amazon's EC2 or Google Compute Engine. + +For the best performance and greater security, we recommend a dedicated Kubernetes cluster for the Rancher management server. Running user workloads on this cluster is not advised. After deploying Rancher, you can [create or import clusters](../../pages-for-subheaders/kubernetes-clusters-in-rancher-setup.md) for running your workloads. + +# Recommended Node Roles for Kubernetes Installations + +The below recommendations apply when Rancher is installed on a K3s Kubernetes cluster or an RKE Kubernetes cluster. + +### K3s Cluster Roles + +In K3s clusters, there are two types of nodes: server nodes and agent nodes. Both servers and agents can have workloads scheduled on them. Server nodes run the Kubernetes master. + +For the cluster running the Rancher management server, we recommend using two server nodes. Agent nodes are not required. + +### RKE Cluster Roles + +If Rancher is installed on an RKE Kubernetes cluster, the cluster should have three nodes, and each node should have all three Kubernetes roles: etcd, controlplane, and worker. + +### Contrasting RKE Cluster Architecture for Rancher Server and for Downstream Kubernetes Clusters + +Our recommendation for RKE node roles on the Rancher server cluster contrasts with our recommendations for the downstream user clusters that run your apps and services. + +Rancher uses RKE as a library when provisioning downstream Kubernetes clusters. Note: The capability to provision downstream K3s clusters will be added in a future version of Rancher. + +For downstream Kubernetes clusters, we recommend that each node in a user cluster should have a single role for stability and scalability. + +![Kubernetes Roles for Nodes in Rancher Server Cluster vs. User Clusters](/img/rancher-architecture-node-roles.svg) + +RKE only requires at least one node with each role and does not require nodes to be restricted to one role. However, for the clusters that run your apps, we recommend separate roles for each node so that workloads on worker nodes don't interfere with the Kubernetes master or cluster data as your services scale. + +We recommend that downstream user clusters should have at least: + +- **Three nodes with only the etcd role** to maintain a quorum if one node is lost, making the state of your cluster highly available +- **Two nodes with only the controlplane role** to make the master component highly available +- **One or more nodes with only the worker role** to run the Kubernetes node components, as well as the workloads for your apps and services + +With that said, it is safe to use all three roles on three nodes when setting up the Rancher server because: + +* It allows one `etcd` node failure. +* It maintains multiple instances of the master components by having multiple `controlplane` nodes. +* No other workloads than Rancher itself should be created on this cluster. + +Because no additional workloads will be deployed on the Rancher server cluster, in most cases it is not necessary to use the same architecture that we recommend for the scalability and reliability of downstream clusters. 
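To make the earlier recommendation concrete — three nodes for the Rancher server cluster, each carrying all three roles — a minimal sketch of the `nodes` section of an RKE `cluster.yml` is shown below; the addresses and SSH user are placeholders:

```yaml
# Sketch: three nodes, each with all three Kubernetes roles, as recommended
# for the cluster that runs the Rancher management server.
nodes:
  - address: 192.168.1.1
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 192.168.1.2
    user: ubuntu
    role: [controlplane, worker, etcd]
  - address: 192.168.1.3
    user: ubuntu
    role: [controlplane, worker, etcd]
```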
+ +For more best practices for downstream clusters, refer to the [production checklist](../../pages-for-subheaders/checklist-for-production-ready-clusters.md) or our [best practices guide.](../../pages-for-subheaders/best-practices.md) + +# Architecture for an Authorized Cluster Endpoint + +If you are using an [authorized cluster endpoint,](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) we recommend creating an FQDN pointing to a load balancer which balances traffic across your nodes with the `controlplane` role. + +If you are using private CA signed certificates on the load balancer, you have to supply the CA certificate, which will be included in the generated kubeconfig file to validate the certificate chain. See the documentation on [kubeconfig files](../../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) and [API keys](../user-settings/api-keys.md#creating-an-api-key) for more information. diff --git a/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/rancher-server-and-components.md b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/rancher-server-and-components.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-manager-architecture/rancher-server-and-components.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/rancher-project-tools.md b/versioned_docs/version-2.5/reference-guides/rancher-project-tools.md new file mode 100644 index 0000000000..c58397fcce --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-project-tools.md @@ -0,0 +1,41 @@ +--- +title: Tools for Logging, Monitoring, and Visibility +weight: 2525 +aliases: + - /rancher/v2.x/en/project-admin/tools/ +--- + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + + +- [Notifiers and Alerts](#notifiers-and-alerts) +- [Logging](#logging) +- [Monitoring](#monitoring) + + + +## Notifiers and Alerts + +Notifiers and alerts are two features that work together to inform you of events in the Rancher system. Before they can be enabled, the monitoring application must be installed. + +Notifiers are services that inform you of alert events. You can configure notifiers to send alert notifications to staff best suited to take corrective action. Notifications can be sent with Slack, email, PagerDuty, WeChat, and webhooks. + +Alerts are rules that trigger those notifications. Before you can receive alerts, you must configure one or more notifier in Rancher. The scope for alerts can be set at either the cluster or project level. 
+ +## Logging + +Logging is helpful because it allows you to: + +- Capture and analyze the state of your cluster +- Look for trends in your environment +- Save your logs to a safe location outside of your cluster +- Stay informed of events like a container crashing, a pod eviction, or a node dying +- More easily debugg and troubleshoot problems + +Rancher can integrate with Elasticsearch, splunk, kafka, syslog, and fluentd. + +For details, refer to the [logging section.](../pages-for-subheaders/logging.md) + +## Monitoring + +Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with [Prometheus](https://prometheus.io/), a leading open-source monitoring solution. For details, refer to the [monitoring section.](../pages-for-subheaders/monitoring-and-alerting.md) diff --git a/content/rancher/v2.6/en/security/best-practices/_index.md b/versioned_docs/version-2.5/reference-guides/rancher-security/kubernetes-security-best-practices.md similarity index 100% rename from content/rancher/v2.6/en/security/best-practices/_index.md rename to versioned_docs/version-2.5/reference-guides/rancher-security/kubernetes-security-best-practices.md diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md new file mode 100644 index 0000000000..28c57fd70b --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.5-benchmark.md @@ -0,0 +1,724 @@ +--- +title: Hardening Guide with CIS 1.5 Benchmark +weight: 200 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.5-hardening-2.5/ +--- + +This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used for RKE clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + + Rancher Version | CIS Benchmark Version | Kubernetes Version +----------------|-----------------------|------------------ + Rancher v2.5 | Benchmark v1.5 | Kubernetes 1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.5.pdf) + +### Overview + +This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.5 with Kubernetes v1.15 or provisioning a RKE cluster with Kubernetes 1.15 to be used within Rancher v2.5. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5](self-assessment-guide-with-cis-v1.5-benchmark.md). 
+ +#### Known Issues + +- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.5 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. +- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.5 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. In addition the default service accounts should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +### Configure Kernel Runtime Parameters + +The following `sysctl` configuration is recommended for all nodes type in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`: + +``` +vm.overcommit_memory=1 +vm.panic_on_oom=0 +kernel.panic=10 +kernel.panic_on_oops=1 +kernel.keys.root_maxbytes=25000000 +``` + +Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings. + +### Configure `etcd` user and group +A user account and group for the **etcd** service is required to be setup before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time. + +#### create `etcd` user and group +To create the **etcd** group run the following console commands. + +The commands below use `52034` for **uid** and **gid** are for example purposes. Any valid unused **uid** or **gid** could also be used in lieu of `52034`. + +``` +groupadd --gid 52034 etcd +useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +``` + +Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user: + +``` yaml +services: + etcd: + gid: 52034 + uid: 52034 +``` + +#### Set `automountServiceAccountToken` to `false` for `default` service accounts +Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments. + +For each namespace including **default** and **kube-system** on a standard RKE install the **default** service account must include this value: + +``` +automountServiceAccountToken: false +``` + +Save the following yaml to a file called `account_update.yaml` + +``` yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: default +automountServiceAccountToken: false +``` + +Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. + +``` +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)" +done +``` + +### Ensure that all Namespaces have Network Policies defined + +Running different applications on the same Kubernetes cluster creates a risk of one +compromised application attacking a neighboring application. 
Network segmentation is +important to ensure that containers can communicate only with those they are supposed +to. A network policy is a specification of how selections of pods are allowed to +communicate with each other and other network endpoints. + +Network Policies are namespace scoped. When a network policy is introduced to a given +namespace, all traffic not allowed by the policy is denied. However, if there are no network +policies in a namespace all traffic will be allowed into and out of the pods in that +namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled. +This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement. +Additional information about CNI providers can be found +[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/) + +Once a CNI provider is enabled on a cluster a default network policy can be applied. For reference purposes a +**permissive** example is provide below. If you want to allow all traffic to all pods in a namespace +(even if policies are added that cause some pods to be treated as “isolated”), +you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as +`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/) +about network policies can be found on the Kubernetes site. + +> This `NetworkPolicy` is not recommended for production use + +``` yaml +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-allow-all +spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress +``` + +Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to +`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions. + +``` +#!/bin/bash -e + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + kubectl apply -f default-allow-all.yaml -n ${namespace} +done +``` +Execute this script to apply the `default-allow-all.yaml` the **permissive** `NetworkPolicy` to all namespaces. + +### Reference Hardened RKE `cluster.yml` configuration + +The reference `cluster.yml` is used by the RKE CLI that provides the configuration needed to achieve a hardened install +of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is +provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes + + +``` yaml +# If you intend to deploy Kubernetes in an air-gapped environment, +# please consult the documentation on how to configure custom RKE images. 
+kubernetes_version: "v1.15.9-rancher1-1" +enable_network_policy: true +default_pod_security_policy_template_id: "restricted" +# the nodes directive is required and will vary depending on your environment +# documentation for node configuration can be found here: +# https://rancher.com/docs/rke/latest/en/config-options/nodes +nodes: +services: + etcd: + uid: 52034 + gid: 52034 + kube-api: + pod_security_policy: true + secrets_encryption_config: + enabled: true + audit_log: + enabled: true + admission_configuration: + event_rate_limit: + enabled: true + kube-controller: + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + scheduler: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + kubelet: + generate_serving_certificate: true + extra_args: + feature-gates: "RotateKubeletServerCertificate=true" + protect-kernel-defaults: "true" + tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + extra_binds: [] + extra_env: [] + cluster_domain: "" + infra_container_image: "" + cluster_dns_server: "" + fail_swap_on: false + kubeproxy: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] +network: + plugin: "" + options: {} + mtu: 0 + node_selector: {} +authentication: + strategy: "" + sans: [] + webhook: null +addons: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: 
rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + +addons_include: [] +system_images: + etcd: "" + alpine: "" + nginx_proxy: "" + cert_downloader: "" + kubernetes_services_sidecar: "" + kubedns: "" + dnsmasq: "" + kubedns_sidecar: "" + kubedns_autoscaler: "" + coredns: "" + coredns_autoscaler: "" + kubernetes: "" + flannel: "" + flannel_cni: "" + calico_node: "" + calico_cni: "" + calico_controllers: "" + calico_ctl: "" + calico_flexvol: "" + canal_node: "" + canal_cni: "" + canal_flannel: "" + canal_flexvol: "" + weave_node: "" + weave_cni: "" + pod_infra_container: "" + ingress: "" + ingress_backend: "" + metrics_server: "" + windows_pod_infra_container: "" +ssh_key_path: "" +ssh_cert_path: "" +ssh_agent_auth: false +authorization: + mode: "" + options: {} +ignore_docker_version: false +private_registries: [] +ingress: + provider: "" + options: {} + node_selector: {} + extra_args: {} + dns_policy: "" + extra_envs: [] + extra_volumes: [] + extra_volume_mounts: [] +cluster_name: "" +prefix_path: "" +addon_job_timeout: 0 +bastion_host: + address: "" + port: "" + user: "" + ssh_key: "" + ssh_key_path: "" + ssh_cert: "" + ssh_cert_path: "" +monitoring: + provider: "" + options: {} + node_selector: {} +restore: + restore: false + snapshot_name: "" +dns: null +``` + +### Reference Hardened RKE Template configuration + +The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. +RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher +[documentaion](https://rancher.com/docs/rancher/v2.5/en/installation) for additional installation and RKE Template details. 
+ +``` yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 30 + addons: |- + --- + apiVersion: v1 + kind: Namespace + metadata: + name: ingress-nginx + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: ingress-nginx + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: ingress-nginx + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: Namespace + metadata: + name: cattle-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: default-psp-role + namespace: cattle-system + rules: + - apiGroups: + - extensions + resourceNames: + - default-psp + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: default-psp-rolebinding + namespace: cattle-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: default-psp-role + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:authenticated + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: tiller + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: tiller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin + subjects: + - kind: ServiceAccount + name: tiller + namespace: kube-system + ignore_docker_version: true + kubernetes_version: v1.15.9-rancher1-1 +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To 
specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + mtu: 0 + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 52034 + retention: 72h + snapshot: false + uid: 52034 + kube_api: + always_pull_images: false + audit_log: + enabled: true + event_rate_limit: + enabled: true + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + bind-address: 127.0.0.1 + address: 127.0.0.1 + feature-gates: RotateKubeletServerCertificate=true + profiling: 'false' + terminated-pod-gc-threshold: '1000' + kubelet: + extra_args: + anonymous-auth: 'false' + event-qps: '0' + feature-gates: RotateKubeletServerCertificate=true + make-iptables-util-chains: 'true' + protect-kernel-defaults: 'true' + streaming-connection-idle-timeout: 1800s + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + generate_serving_certificate: true + scheduler: + extra_args: + bind-address: 127.0.0.1 + address: 127.0.0.1 + profiling: 'false' + ssh_agent_auth: false +windows_prefered_cluster: false +``` + +### Hardened Reference Ubuntu 18.04 LTS **cloud-config**: + +The reference **cloud-config** is generally used in cloud infrastructure environments to allow for +configuration management of compute instances. The reference config configures Ubuntu operating system level settings +needed before installing kubernetes. 
+ +``` yaml +#cloud-config +packages: + - curl + - jq +runcmd: + - sysctl -w vm.overcommit_memory=1 + - sysctl -w kernel.panic=10 + - sysctl -w kernel.panic_on_oops=1 + - curl https://releases.rancher.com/install-docker/18.09.sh | sh + - usermod -aG docker ubuntu + - return=1; while [ $return != 0 ]; do sleep 2; docker ps; return=$?; done + - addgroup --gid 52034 etcd + - useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +write_files: + - path: /etc/sysctl.d/kubelet.conf + owner: root:root + permissions: "0644" + content: | + vm.overcommit_memory=1 + kernel.panic=10 + kernel.panic_on_oops=1 +``` diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark.md b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark.md new file mode 100644 index 0000000000..448babc1ce --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark.md @@ -0,0 +1,578 @@ +--- +title: Hardening Guide with CIS 1.6 Benchmark +weight: 100 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.6-hardening-2.5/ +--- + +This document provides prescriptive guidance for hardening a production installation of a RKE cluster to be used with Rancher v2.5.4. It outlines the configurations and controls required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +> This hardening guide describes how to secure the nodes in your cluster, and it is recommended to follow this guide before installing Kubernetes. + +This hardening guide is intended to be used for RKE clusters and associated with specific versions of the CIS Kubernetes Benchmark, Kubernetes, and Rancher: + + Rancher Version | CIS Benchmark Version | Kubernetes Version +----------------|-----------------------|------------------ + Rancher v2.5.4 | Benchmark 1.6 | Kubernetes v1.18 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_Hardening_Guide_CIS_1.6.pdf) + +### Overview + +This document provides prescriptive guidance for hardening a RKE cluster to be used for installing Rancher v2.5.4 with Kubernetes v1.18 or provisioning a RKE cluster with Kubernetes v1.18 to be used within Rancher v2.5.4. It outlines the configurations required to address Kubernetes benchmark controls from the Center for Information Security (CIS). + +For more detail about evaluating a hardened cluster against the official CIS benchmark, refer to the [CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4](self-assessment-guide-with-cis-v1.6-benchmark.md). + +#### Known Issues + +- Rancher **exec shell** and **view logs** for pods are **not** functional in a CIS 1.6 hardened setup when only public IP is provided when registering custom nodes. This functionality requires a private IP to be provided when registering the custom nodes. +- When setting the `default_pod_security_policy_template_id:` to `restricted` Rancher creates **RoleBindings** and **ClusterRoleBindings** on the default service accounts. The CIS 1.6 5.1.5 check requires the default service accounts have no roles or cluster roles bound to it apart from the defaults. 
+In addition, the default service accounts should be configured so that they do not provide a service account token and do not have any explicit rights assignments.
+
+When migrating Rancher from v2.4 to v2.5, note that addons were removed from the v2.5 hardening guide. As a result, namespaces such as `ingress-nginx` and `cattle-system` may not be created on the downstream clusters during migration, and pods may fail to run because of the missing namespaces.
+
+### Configure Kernel Runtime Parameters
+
+The following `sysctl` configuration is recommended for all node types in the cluster. Set the following parameters in `/etc/sysctl.d/90-kubelet.conf`:
+
+```ini
+vm.overcommit_memory=1
+vm.panic_on_oom=0
+kernel.panic=10
+kernel.panic_on_oops=1
+kernel.keys.root_maxbytes=25000000
+```
+
+Run `sysctl -p /etc/sysctl.d/90-kubelet.conf` to enable the settings.
+
+### Configure `etcd` user and group
+A user account and group for the **etcd** service are required to be set up before installing RKE. The **uid** and **gid** for the **etcd** user will be used in the RKE **config.yml** to set the proper permissions for files and directories during installation time.
+
+#### Create `etcd` user and group
+To create the **etcd** user and group, run the following console commands.
+
+The commands below use `52034` for the **uid** and **gid** as an example. Any valid unused **uid** or **gid** could also be used in lieu of `52034`.
+
+```bash
+groupadd --gid 52034 etcd
+useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd
+```
+
+Update the RKE **config.yml** with the **uid** and **gid** of the **etcd** user:
+
+```yaml
+services:
+  etcd:
+    gid: 52034
+    uid: 52034
+```
+
+#### Set `automountServiceAccountToken` to `false` for `default` service accounts
+Kubernetes provides a default service account which is used by cluster workloads where no specific service account is assigned to the pod. Where access to the Kubernetes API from a pod is required, a specific service account should be created for that pod, and rights granted to that service account. The default service account should be configured such that it does not provide a service account token and does not have any explicit rights assignments.
+
+For each namespace, including **default** and **kube-system** on a standard RKE install, the **default** service account must include this value:
+
+```yaml
+automountServiceAccountToken: false
+```
+
+Save the following yaml to a file called `account_update.yaml`:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: default
+automountServiceAccountToken: false
+```
+
+Create a bash script file called `account_update.sh`. Be sure to `chmod +x account_update.sh` so the script has execute permissions. Execute this script to patch the **default** service account in every namespace.
+
+```bash
+#!/bin/bash -e
+
+for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
+  kubectl patch serviceaccount default -n ${namespace} -p "$(cat account_update.yaml)"
+done
+```
+
+### Ensure that all Namespaces have Network Policies defined
+
+Running different applications on the same Kubernetes cluster creates a risk of one
+compromised application attacking a neighboring application. Network segmentation is
+important to ensure that containers can communicate only with those they are supposed
+to. A network policy is a specification of how selections of pods are allowed to
+communicate with each other and other network endpoints.
+
+Network Policies are namespace scoped. When a network policy is introduced to a given
+namespace, all traffic not allowed by the policy is denied.
+However, if there are no network
+policies in a namespace, all traffic will be allowed into and out of the pods in that
+namespace. To enforce network policies, a CNI (container network interface) plugin must be enabled.
+This guide uses [canal](https://github.com/projectcalico/canal) to provide the policy enforcement.
+Additional information about CNI providers can be found
+[here](https://rancher.com/blog/2019/2019-03-21-comparing-kubernetes-cni-providers-flannel-calico-canal-and-weave/).
+
+Once a CNI provider is enabled on a cluster, a default network policy can be applied. For reference purposes, a
+**permissive** example is provided below. If you want to allow all traffic to all pods in a namespace
+(even if policies are added that cause some pods to be treated as “isolated”),
+you can create a policy that explicitly allows all traffic in that namespace. Save the following `yaml` as
+`default-allow-all.yaml`. Additional [documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
+about network policies can be found on the Kubernetes site.
+
+> This `NetworkPolicy` is not recommended for production use.
+
+```yaml
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-allow-all
+spec:
+  podSelector: {}
+  ingress:
+  - {}
+  egress:
+  - {}
+  policyTypes:
+  - Ingress
+  - Egress
+```
+
+Create a bash script file called `apply_networkPolicy_to_all_ns.sh`. Be sure to
+`chmod +x apply_networkPolicy_to_all_ns.sh` so the script has execute permissions.
+
+```bash
+#!/bin/bash -e
+
+for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do
+  kubectl apply -f default-allow-all.yaml -n ${namespace}
+done
+```
+
+Execute this script to apply the **permissive** `default-allow-all.yaml` `NetworkPolicy` to all namespaces.
+
+### Reference Hardened RKE `cluster.yml` configuration
+
+The reference `cluster.yml` is used by the RKE CLI and provides the configuration needed to achieve a hardened install
+of Rancher Kubernetes Engine (RKE). Install [documentation](https://rancher.com/docs/rke/latest/en/installation/) is
+provided with additional details about the configuration items. This reference `cluster.yml` does not include the required **nodes** directive, which will vary depending on your environment. Documentation for node configuration can be found here: https://rancher.com/docs/rke/latest/en/config-options/nodes
+
+
+```yaml
+# If you intend to deploy Kubernetes in an air-gapped environment,
+# please consult the documentation on how to configure custom RKE images.
+# https://rancher.com/docs/rke/latest/en/installation/ + +# the nodes directive is required and will vary depending on your environment +# documentation for node configuration can be found here: +# https://rancher.com/docs/rke/latest/en/config-options/nodes +nodes: [] +services: + etcd: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + external_urls: [] + ca_cert: "" + cert: "" + key: "" + path: "" + uid: 52034 + gid: 52034 + snapshot: false + retention: "" + creation: "" + backup_config: null + kube-api: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + service_cluster_ip_range: "" + service_node_port_range: "" + pod_security_policy: true + always_pull_images: false + secrets_encryption_config: + enabled: true + custom_config: null + audit_log: + enabled: true + configuration: null + admission_configuration: null + event_rate_limit: + enabled: true + configuration: null + kube-controller: + image: "" + extra_args: + feature-gates: RotateKubeletServerCertificate=true + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + cluster_cidr: "" + service_cluster_ip_range: "" + scheduler: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + kubelet: + image: "" + extra_args: + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: "true" + tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] + cluster_domain: cluster.local + infra_container_image: "" + cluster_dns_server: "" + fail_swap_on: false + generate_serving_certificate: true + kubeproxy: + image: "" + extra_args: {} + extra_binds: [] + extra_env: [] + win_extra_args: {} + win_extra_binds: [] + win_extra_env: [] +network: + plugin: "" + options: {} + mtu: 0 + node_selector: {} + update_strategy: null +authentication: + strategy: "" + sans: [] + webhook: null +addons: | + apiVersion: policy/v1beta1 + kind: PodSecurityPolicy + metadata: + name: restricted + spec: + requiredDropCapabilities: + - NET_RAW + privileged: false + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - emptyDir + - secret + - persistentVolumeClaim + - downwardAPI + - configMap + - projected + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: psp:restricted + rules: + - apiGroups: + - extensions + resourceNames: + - restricted + resources: + - podsecuritypolicies + verbs: + - use + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: psp:restricted + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:restricted + subjects: + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:serviceaccounts + - apiGroup: rbac.authorization.k8s.io + kind: Group + name: 
system:authenticated + --- + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-allow-all + spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: default + automountServiceAccountToken: false +addons_include: [] +system_images: + etcd: "" + alpine: "" + nginx_proxy: "" + cert_downloader: "" + kubernetes_services_sidecar: "" + kubedns: "" + dnsmasq: "" + kubedns_sidecar: "" + kubedns_autoscaler: "" + coredns: "" + coredns_autoscaler: "" + nodelocal: "" + kubernetes: "" + flannel: "" + flannel_cni: "" + calico_node: "" + calico_cni: "" + calico_controllers: "" + calico_ctl: "" + calico_flexvol: "" + canal_node: "" + canal_cni: "" + canal_controllers: "" + canal_flannel: "" + canal_flexvol: "" + weave_node: "" + weave_cni: "" + pod_infra_container: "" + ingress: "" + ingress_backend: "" + metrics_server: "" + windows_pod_infra_container: "" +ssh_key_path: "" +ssh_cert_path: "" +ssh_agent_auth: false +authorization: + mode: "" + options: {} +ignore_docker_version: false +kubernetes_version: v1.18.12-rancher1-1 +private_registries: [] +ingress: + provider: "" + options: {} + node_selector: {} + extra_args: {} + dns_policy: "" + extra_envs: [] + extra_volumes: [] + extra_volume_mounts: [] + update_strategy: null + http_port: 0 + https_port: 0 + network_mode: "" +cluster_name: +cloud_provider: + name: "" +prefix_path: "" +win_prefix_path: "" +addon_job_timeout: 0 +bastion_host: + address: "" + port: "" + user: "" + ssh_key: "" + ssh_key_path: "" + ssh_cert: "" + ssh_cert_path: "" +monitoring: + provider: "" + options: {} + node_selector: {} + update_strategy: null + replicas: null +restore: + restore: false + snapshot_name: "" +dns: null +upgrade_strategy: + max_unavailable_worker: "" + max_unavailable_controlplane: "" + drain: null + node_drain_input: null +``` + +### Reference Hardened RKE Template configuration + +The reference RKE Template provides the configuration needed to achieve a hardened install of Kubenetes. +RKE Templates are used to provision Kubernetes and define Rancher settings. Follow the Rancher +[documentaion](https://rancher.com/docs/rancher/v2.5/en/installation) for additional installation and RKE Template details. 
+ +```yaml +# +# Cluster Config +# +default_pod_security_policy_template_id: restricted +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: + addon_job_timeout: 45 + ignore_docker_version: true + kubernetes_version: v1.18.12-rancher1-1 +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + mtu: 0 + plugin: canal + rotate_encryption_key: false +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: false + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: '5000' + heartbeat-interval: '500' + gid: 52034 + retention: 72h + snapshot: false + uid: 52034 + kube_api: + always_pull_images: false + audit_log: + enabled: true + event_rate_limit: + enabled: true + pod_security_policy: true + secrets_encryption_config: + enabled: true + service_node_port_range: 30000-32767 + kube_controller: + extra_args: + feature-gates: RotateKubeletServerCertificate=true + bind-address: 127.0.0.1 + address: 127.0.0.1 + kubelet: + extra_args: + feature-gates: RotateKubeletServerCertificate=true + protect-kernel-defaults: 'true' + tls-cipher-suites: >- + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + fail_swap_on: false + generate_serving_certificate: true + scheduler: + extra_args: + bind-address: 127.0.0.1 + address: 127.0.0.1 + ssh_agent_auth: false + upgrade_strategy: + max_unavailable_controlplane: '1' + max_unavailable_worker: 10% +windows_prefered_cluster: false +``` + +### Hardened Reference Ubuntu 20.04 LTS **cloud-config**: + +The reference **cloud-config** is generally used in cloud infrastructure environments to allow for +configuration management of compute instances. The reference config configures Ubuntu operating system level settings +needed before installing kubernetes. 
+ +```yaml +#cloud-config +apt: + sources: + docker.list: + source: deb [arch=amd64] http://download.docker.com/linux/ubuntu $RELEASE stable + keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 +system_info: + default_user: + groups: + - docker +write_files: +- path: "/etc/apt/preferences.d/docker" + owner: root:root + permissions: '0600' + content: | + Package: docker-ce + Pin: version 5:19* + Pin-Priority: 800 +- path: "/etc/sysctl.d/90-kubelet.conf" + owner: root:root + permissions: '0644' + content: | + vm.overcommit_memory=1 + vm.panic_on_oom=0 + kernel.panic=10 + kernel.panic_on_oops=1 + kernel.keys.root_maxbytes=25000000 +package_update: true +packages: +- docker-ce +- docker-ce-cli +- containerd.io +runcmd: +- sysctl -p /etc/sysctl.d/90-kubelet.conf +- groupadd --gid 52034 etcd +- useradd --comment "etcd service account" --uid 52034 --gid 52034 etcd +``` diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md new file mode 100644 index 0000000000..07859da7ca --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark.md @@ -0,0 +1,2267 @@ +--- +title: CIS 1.5 Benchmark - Self-Assessment Guide - Rancher v2.5 +weight: 201 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.5-benchmark-2.5/ +--- + +### CIS v1.5 Kubernetes Benchmark - Rancher v2.5 with Kubernetes v1.15 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.5_Benchmark_Assessment.pdf) + +#### Overview + +This document is a companion to the Rancher v2.5 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +---------------------------|----------|---------|------- +Hardening Guide with CIS 1.5 Benchmark | Rancher v2.5 | CIS v1.5| Kubernetes v1.15 + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.5. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. 
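+
+For example (a minimal sketch, not one of the official audit commands; the container name `kube-apiserver` is an assumption based on the names RKE gives its control plane containers), the arguments a service was started with can be listed directly from its Docker container:
+
+``` bash
+# Print the arguments the kube-apiserver container was started with, one per line
+docker inspect kube-apiserver --format '{{join .Args "\n"}}'
+
+# The same information via jq, in the style of the audit scripts used below
+docker inspect kube-apiserver | jq -r '.[0].Args[]'
+```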
+
+Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing.
+When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. The commands also make use of the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with a valid config) tools, which are required for testing and evaluating the test results.
+
+> NOTE: only scored tests are covered in this guide.
+
+### Controls
+
+---
+## 1 Master Node Security Configuration
+### 1.1 Master Node Configuration Files
+
+#### 1.1.1 Ensure that the API server pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
+
+#### 1.1.2 Ensure that the API server pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the API server. All configuration is passed in as arguments at container run time.
+
+#### 1.1.3 Ensure that the controller manager pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
+
+#### 1.1.4 Ensure that the controller manager pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time.
+
+#### 1.1.5 Ensure that the scheduler pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
+
+#### 1.1.6 Ensure that the scheduler pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time.
+
+#### 1.1.7 Ensure that the etcd pod specification file permissions are set to `644` or more restrictive (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
+
+#### 1.1.8 Ensure that the etcd pod specification file ownership is set to `root:root` (Scored)
+
+**Result:** Not Applicable
+
+**Remediation:**
+RKE doesn’t require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time.
+
+#### 1.1.11 Ensure that the etcd data directory permissions are set to `700` or more restrictive (Scored)
+
+**Result:** PASS
+
+**Remediation:**
+On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`,
+from the below command:
+
+``` bash
+ps -ef | grep etcd
+```
+
+Run the below command (based on the etcd data directory found above).
For example, + +``` bash +chmod 700 /var/lib/etcd +``` + +**Audit Script:** 1.1.11.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %a +``` + +**Audit Execution:** + +``` +./1.1.11.sh etcd +``` + +**Expected result**: + +``` +'700' is equal to '700' +``` + +#### 1.1.12 Ensure that the etcd data directory ownership is set to `etcd:etcd` (Scored) + +**Result:** PASS + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, +from the below command: + +``` bash +ps -ef | grep etcd +``` + +Run the below command (based on the etcd data directory found above). +For example, +``` bash +chown etcd:etcd /var/lib/etcd +``` + +**Audit Script:** 1.1.12.sh + +``` +#!/bin/bash -e + +etcd_bin=${1} + +test_dir=$(ps -ef | grep ${etcd_bin} | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%') + +docker inspect etcd | jq -r '.[].HostConfig.Binds[]' | grep "${test_dir}" | cut -d ":" -f 1 | xargs stat -c %U:%G +``` + +**Audit Execution:** + +``` +./1.1.12.sh etcd +``` + +**Expected result**: + +``` +'etcd:etcd' is present +``` + +#### 1.1.13 Ensure that the `admin.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.14 Ensure that the admin.conf file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE does not store the kubernetes default kubeconfig credentials file on the nodes. It’s presented to user where RKE is run. +We recommend that this `kube_config_cluster.yml` file be kept in secure store. + +#### 1.1.15 Ensure that the `scheduler.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.16 Ensure that the `scheduler.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the scheduler. All configuration is passed in as arguments at container run time. + +#### 1.1.17 Ensure that the `controller-manager.conf` file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.18 Ensure that the `controller-manager.conf` file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the controller manager. All configuration is passed in as arguments at container run time. + +#### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, + +``` bash +chown -R root:root /etc/kubernetes/ssl +``` + +**Audit:** + +``` +stat -c %U:%G /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'root:root' is present +``` + +#### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 644 /etc/kubernetes/ssl +``` + +**Audit Script:** check_files_permissions.sh + +``` +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit +``` + +**Audit Execution:** + +``` +./check_files_permissions.sh '/etc/kubernetes/ssl/*.pem' +``` + +**Expected result**: + +``` +'true' is present +``` + +#### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to `600` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, + +``` bash +chmod -R 600 /etc/kubernetes/ssl/certs/serverca +``` + +**Audit Script:** 1.1.21.sh + +``` +#!/bin/bash -e +check_dir=${1:-/etc/kubernetes/ssl} + +for file in $(find ${check_dir} -name "*key.pem"); do + file_permission=$(stat -c %a ${file}) + if [[ "${file_permission}" == "600" ]]; then + continue + else + echo "FAIL: ${file} ${file_permission}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./1.1.21.sh /etc/kubernetes/ssl +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 1.2 API Server + +#### 1.2.2 Ensure that the `--basic-auth-file` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--basic-auth-file' is not present +``` + +#### 1.2.3 Ensure that the `--token-auth-file` parameter is not set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--token-auth-file=` parameter. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--token-auth-file' is not present +``` + +#### 1.2.4 Ensure that the `--kubelet-https` argument is set to true (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--kubelet-https` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-https' is present OR '--kubelet-https' is not present +``` + +#### 1.2.5 Ensure that the `--kubelet-client-certificate` and `--kubelet-client-key` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +kubelet client certificate and key parameters as below. + +``` bash +--kubelet-client-certificate= +--kubelet-client-key= +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +#### 1.2.6 Ensure that the `--kubelet-certificate-authority` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +`/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--kubelet-certificate-authority' is present +``` + +#### 1.2.7 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. +One such example could be as below. + +``` bash +--authorization-mode=RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' not have 'AlwaysAllow' +``` + +#### 1.2.8 Ensure that the `--authorization-mode` argument includes `Node` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes `Node`. 
+ +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'Node' +``` + +#### 1.2.9 Ensure that the `--authorization-mode` argument includes `RBAC` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--authorization-mode` parameter to a value that includes RBAC, +for example: + +``` bash +--authorization-mode=Node,RBAC +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'Node,RBAC' has 'RBAC' +``` + +#### 1.2.11 Ensure that the admission control plugin `AlwaysAdmit` is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--enable-admission-plugins` parameter, or set it to a +value that does not include `AlwaysAdmit`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.14 Ensure that the admission control plugin `ServiceAccount` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and ensure that the `--disable-admission-plugins` parameter is set to a +value that does not include `ServiceAccount`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'ServiceAccount' OR '--enable-admission-plugins' is not present +``` + +#### 1.2.15 Ensure that the admission control plugin `NamespaceLifecycle` is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--disable-admission-plugins` parameter to +ensure it does not include `NamespaceLifecycle`. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--disable-admission-plugins' is present OR '--disable-admission-plugins' is not present +``` + +#### 1.2.16 Ensure that the admission control plugin `PodSecurityPolicy` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `PodSecurityPolicy`: + +``` bash +--enable-admission-plugins=...,PodSecurityPolicy,... +``` + +Then restart the API Server. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +#### 1.2.17 Ensure that the admission control plugin `NodeRestriction` is set (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--enable-admission-plugins` parameter to a +value that includes `NodeRestriction`. + +``` bash +--enable-admission-plugins=...,NodeRestriction,... +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +#### 1.2.18 Ensure that the `--insecure-bind-address` argument is not set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and remove the `--insecure-bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--insecure-bind-address' is not present +``` + +#### 1.2.19 Ensure that the `--insecure-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--insecure-port=0 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 1.2.20 Ensure that the `--secure-port` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and either remove the `--secure-port` parameter or +set it to a different **(non-zero)** desired port. + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +6443 is greater than 0 OR '--secure-port' is not present +``` + +#### 1.2.21 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. 
+ +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.2.22 Ensure that the `--audit-log-path` argument is set (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-path` parameter to a suitable path and +file where you would like audit logs to be written, for example: + +``` bash +--audit-log-path=/var/log/apiserver/audit.log +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--audit-log-path' is present +``` + +#### 1.2.23 Ensure that the `--audit-log-maxage` argument is set to `30` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxage` parameter to `30` or as an appropriate number of days: + +``` bash +--audit-log-maxage=30 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +30 is greater or equal to 30 +``` + +#### 1.2.24 Ensure that the `--audit-log-maxbackup` argument is set to `10` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxbackup` parameter to `10` or to an appropriate +value. + +``` bash +--audit-log-maxbackup=10 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +10 is greater or equal to 10 +``` + +#### 1.2.25 Ensure that the `--audit-log-maxsize` argument is set to `100` or as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--audit-log-maxsize` parameter to an appropriate size in **MB**. +For example, to set it as `100` **MB**: + +``` bash +--audit-log-maxsize=100 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +100 is greater or equal to 100 +``` + +#### 1.2.26 Ensure that the `--request-timeout` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +and set the below parameter as appropriate and if needed. +For example, + +``` bash +--request-timeout=300s +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--request-timeout' is not present OR '--request-timeout' is present +``` + +#### 1.2.27 Ensure that the `--service-account-lookup` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the below parameter. + +``` bash +--service-account-lookup=true +``` + +Alternatively, you can delete the `--service-account-lookup` parameter from this file so +that the default takes effect. 
+ +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +#### 1.2.28 Ensure that the `--service-account-key-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--service-account-key-file` parameter +to the public key file for service accounts: + +``` bash +`--service-account-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-key-file' is present +``` + +#### 1.2.29 Ensure that the `--etcd-certfile` and `--etcd-keyfile` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the **etcd** certificate and **key** file parameters. + +``` bash +`--etcd-certfile=` +`--etcd-keyfile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +#### 1.2.30 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the TLS certificate and private key file parameters. + +``` bash +`--tls-cert-file=` +`--tls-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +#### 1.2.31 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the client certificate authority file. + +``` bash +`--client-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 1.2.32 Ensure that the `--etcd-cafile` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the etcd certificate authority file parameter. + +``` bash +`--etcd-cafile=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--etcd-cafile' is present +``` + +#### 1.2.33 Ensure that the `--encryption-provider-config` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. 
+Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` +on the master node and set the `--encryption-provider-config` parameter to the path of that file: + +``` bash +--encryption-provider-config= +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected result**: + +``` +'--encryption-provider-config' is present +``` + +#### 1.2.34 Ensure that encryption providers are appropriately configured (Scored) + +**Result:** PASS + +**Remediation:** +Follow the Kubernetes documentation and configure a `EncryptionConfig` file. +In this file, choose **aescbc**, **kms** or **secretbox** as the encryption provider. + +**Audit Script:** 1.2.34.sh + +``` +#!/bin/bash -e + +check_file=${1} + +grep -q -E 'aescbc|kms|secretbox' ${check_file} +if [ $? -eq 0 ]; then + echo "--pass" + exit 0 +else + echo "fail: encryption provider found in ${check_file}" + exit 1 +fi +``` + +**Audit Execution:** + +``` +./1.2.34.sh /etc/kubernetes/ssl/encryption.yaml +``` + +**Expected result**: + +``` +'--pass' is present +``` + +### 1.3 Controller Manager + +#### 1.3.1 Ensure that the `--terminated-pod-gc-threshold` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, +for example: + +``` bash +--terminated-pod-gc-threshold=10 +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--terminated-pod-gc-threshold' is present +``` + +#### 1.3.2 Ensure that the `--profiling` argument is set to false (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the below parameter. + +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.3.3 Ensure that the `--use-service-account-credentials` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node to set the below parameter. + +``` bash +--use-service-account-credentials=true +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'true' is not equal to 'false' +``` + +#### 1.3.4 Ensure that the `--service-account-private-key-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--service-account-private-key-file` parameter +to the private key file for service accounts. 
+ +``` bash +`--service-account-private-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--service-account-private-key-file' is present +``` + +#### 1.3.5 Ensure that the `--root-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--root-ca-file` parameter to the certificate bundle file`. + +``` bash +`--root-ca-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--root-ca-file' is present +``` + +#### 1.3.6 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'RotateKubeletServerCertificate=true' is equal to 'RotateKubeletServerCertificate=true' +``` + +#### 1.3.7 Ensure that the `--bind-address argument` is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' argument is set to 127.0.0.1 +``` + +### 1.4 Scheduler + +#### 1.4.1 Ensure that the `--profiling` argument is set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file +on the master node and set the below parameter. + +``` bash +--profiling=false +``` + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 1.4.2 Ensure that the `--bind-address` argument is set to `127.0.0.1` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` +on the master node and ensure the correct value for the `--bind-address` parameter. + +**Audit:** + +``` +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected result**: + +``` +'--bind-address' argument is set to 127.0.0.1 +``` + +## 2 Etcd Node Configuration +### 2 Etcd Node Configuration Files + +#### 2.1 Ensure that the `--cert-file` and `--key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` +on the master node and set the below parameters. 
+ +``` bash +`--cert-file=` +`--key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--cert-file' is present AND '--key-file' is present +``` + +#### 2.2 Ensure that the `--client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--client-cert-auth="true" +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.3 Ensure that the `--auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--auto-tls` parameter or set it to `false`. + +``` bash + --auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +#### 2.4 Ensure that the `--peer-cert-file` and `--peer-key-file` arguments are set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the +master node and set the below parameters. + +``` bash +`--peer-client-file=` +`--peer-key-file=` +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-cert-file' is present AND '--peer-key-file' is present +``` + +#### 2.5 Ensure that the `--peer-client-cert-auth` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and set the below parameter. + +``` bash +--peer-client-cert-auth=true +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 2.6 Ensure that the `--peer-auto-tls` argument is not set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master +node and either remove the `--peer-auto-tls` parameter or set it to `false`. + +``` bash +--peer-auto-tls=false +``` + +**Audit:** + +``` +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected result**: + +``` +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +## 3 Control Plane Configuration +### 3.2 Logging + +#### 3.2.1 Ensure that a minimal audit policy is created (Scored) + +**Result:** PASS + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit Script:** 3.2.1.sh + +``` +#!/bin/bash -e + +api_server_bin=${1} + +/bin/ps -ef | /bin/grep ${api_server_bin} | /bin/grep -v ${0} | /bin/grep -v grep +``` + +**Audit Execution:** + +``` +./3.2.1.sh kube-apiserver +``` + +**Expected result**: + +``` +'--audit-policy-file' is present +``` + +## 4 Worker Node Security Configuration +### 4.1 Worker Node Configuration Files + +#### 4.1.1 Ensure that the kubelet service file permissions are set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. 
All configuration is passed in as arguments at container run time. + +#### 4.1.2 Ensure that the kubelet service file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +#### 4.1.3 Ensure that the proxy kubeconfig file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` + +#### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is present +``` + +#### 4.1.5 Ensure that the kubelet.conf file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, + +``` bash +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` + +#### 4.1.6 Ensure that the kubelet.conf file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, + +``` bash +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` + +#### 4.1.7 Ensure that the certificate authorities file permissions are set to `644` or more restrictive (Scored) + +**Result:** PASS + +**Remediation:** +Run the following command to modify the file permissions of the + +``` bash +`--client-ca-file chmod 644 ` +``` + +**Audit:** + +``` +stat -c %a /etc/kubernetes/ssl/kube-ca.pem +``` + +**Expected result**: + +``` +'644' is equal to '644' OR '640' is present OR '600' is present +``` + +#### 4.1.8 Ensure that the client certificate authorities file ownership is set to `root:root` (Scored) + +**Result:** PASS + +**Remediation:** +Run the following command to modify the ownership of the `--client-ca-file`. 
+ +``` bash +chown root:root +``` + +**Audit:** + +``` +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /etc/kubernetes/ssl/kube-ca.pem; fi' +``` + +**Expected result**: + +``` +'root:root' is equal to 'root:root' +``` + +#### 4.1.9 Ensure that the kubelet configuration file has permissions set to `644` or more restrictive (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +#### 4.1.10 Ensure that the kubelet configuration file ownership is set to `root:root` (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. + +### 4.2 Kubelet + +#### 4.2.1 Ensure that the `--anonymous-auth argument` is set to false (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `anonymous`: enabled to +`false`. +If using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--anonymous-auth=false +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'false' is equal to 'false' +``` + +#### 4.2.2 Ensure that the `--authorization-mode` argument is not set to `AlwaysAllow` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: `mode` to `Webhook`. If +using executable arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +--authorization-mode=Webhook +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'Webhook' not have 'AlwaysAllow' +``` + +#### 4.2.3 Ensure that the `--client-ca-file` argument is set as appropriate (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: `x509`: `clientCAFile` to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_AUTHZ_ARGS` variable. + +``` bash +`--client-ca-file=` +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--client-ca-file' is present +``` + +#### 4.2.4 Ensure that the `--read-only-port` argument is set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`. 
+If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--read-only-port=0 +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` + +#### 4.2.5 Ensure that the `--streaming-connection-idle-timeout` argument is not set to `0` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a +value other than `0`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--streaming-connection-idle-timeout=5m +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +#### 4.2.6 Ensure that the ```--protect-kernel-defaults``` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `protectKernelDefaults`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable. + +``` bash +--protect-kernel-defaults=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +#### 4.2.7 Ensure that the `--make-iptables-util-chains` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains`: `true`. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove the `--make-iptables-util-chains` argument from the +`KUBELET_SYSTEM_PODS_ARGS` variable. +Based on your system, restart the kubelet service. For example: + +```bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' OR '--make-iptables-util-chains' is not present +``` + +#### 4.2.10 Ensure that the `--tls-cert-file` and `--tls-private-key-file` arguments are set as appropriate (Scored) + +**Result:** Not Applicable + +**Remediation:** +RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. 
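+
+As a supplementary check (outside the benchmark audit itself), you can inspect the arguments that RKE passed to the kubelet at container run time and look for the serving-certificate flags. The sketch below assumes the RKE-managed kubelet container is named `kubelet`; adjust the name if your environment differs.
+
+``` bash
+# List the kubelet's run-time arguments, one per line, and filter for the TLS serving flags.
+docker inspect kubelet --format '{{range .Args}}{{println .}}{{end}}' | grep -E -- '--tls-(cert-file|private-key-file)'
+```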
+ +#### 4.2.11 Ensure that the `--rotate-certificates` argument is not set to `false` (Scored) + +**Result:** PASS + +**Remediation:** +If using a Kubelet config file, edit the file to add the line `rotateCertificates`: `true` or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +`/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and +remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` +variable. +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'--rotate-certificates' is present OR '--rotate-certificates' is not present +``` + +#### 4.2.12 Ensure that the `RotateKubeletServerCertificate` argument is set to `true` (Scored) + +**Result:** PASS + +**Remediation:** +Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` +on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable. + +``` bash +--feature-gates=RotateKubeletServerCertificate=true +``` + +Based on your system, restart the kubelet service. For example: + +``` bash +systemctl daemon-reload +systemctl restart kubelet.service +``` + +**Audit:** + +``` +/bin/ps -fC kubelet +``` + +**Audit Config:** + +``` +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected result**: + +``` +'true' is equal to 'true' +``` + +## 5 Kubernetes Policies +### 5.1 RBAC and Service Accounts + +#### 5.1.5 Ensure that default service accounts are not actively used. (Scored) + +**Result:** PASS + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value + +``` bash +automountServiceAccountToken: false +``` + +**Audit Script:** 5.1.5.sh + +``` +#!/bin/bash + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +accounts="$(kubectl --kubeconfig=${KUBECONFIG} get serviceaccounts -A -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true)) | "fail \(.metadata.name) \(.metadata.namespace)"')" + +if [[ "${accounts}" != "" ]]; then + echo "fail: automountServiceAccountToken not false for accounts: ${accounts}" + exit 1 +fi + +default_binding="$(kubectl get rolebindings,clusterrolebindings -A -o json | jq -r '.items[] | select(.subjects[].kind=="ServiceAccount" and .subjects[].name=="default" and .metadata.name=="default").metadata.uid' | wc -l)" + +if [[ "${default_binding}" -gt 0 ]]; then + echo "fail: default service accounts have non default bindings" + exit 1 +fi + +echo "--pass" +exit 0 +``` + +**Audit Execution:** + +``` +./5.1.5.sh +``` + +**Expected result**: + +``` +'--pass' is present +``` + +### 5.2 Pod Security Policies + +#### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostPID` field is omitted or set to `false`. 
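+
+A minimal, illustrative sketch of such a policy is shown below, applied with `kubectl`. The policy name `psp-no-hostpid-example` and the permissive rules are placeholders only; a production policy should follow the hardening guide and be considerably more restrictive.
+
+``` bash
+# Illustrative only: a PodSecurityPolicy that omits .spec.hostPID, which defaults to false.
+kubectl apply -f - <<'EOF'
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: psp-no-hostpid-example
+spec:
+  privileged: false
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  volumes:
+  - 'configMap'
+  - 'secret'
+  - 'emptyDir'
+  - 'persistentVolumeClaim'
+EOF
+```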
+ +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostIPC` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.hostNetwork` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +#### 5.2.5 Minimize the admission of containers with `allowPrivilegeEscalation` (Scored) + +**Result:** PASS + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +`.spec.allowPrivilegeEscalation` field is omitted or set to `false`. + +**Audit:** + +``` +kubectl --kubeconfig=/root/.kube/config get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected result**: + +``` +1 is greater than 0 +``` + +### 5.3 Network Policies and CNI + +#### 5.3.2 Ensure that all Namespaces have Network Policies defined (Scored) + +**Result:** PASS + +**Remediation:** +Follow the documentation and create `NetworkPolicy` objects as you need them. + +**Audit Script:** 5.3.2.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-"/root/.kube/config"} + +kubectl version > /dev/null +if [ $? -ne 0 ]; then + echo "fail: kubectl failed" + exit 1 +fi + +for namespace in $(kubectl get namespaces -A -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [ ${policy_count} -eq 0 ]; then + echo "fail: ${namespace}" + exit 1 + fi +done + +echo "pass" +``` + +**Audit Execution:** + +``` +./5.3.2.sh +``` + +**Expected result**: + +``` +'pass' is present +``` + +### 5.6 General Policies + +#### 5.6.4 The default namespace should not be used (Scored) + +**Result:** PASS + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + +**Audit Script:** 5.6.4.sh + +``` +#!/bin/bash -e + +export KUBECONFIG=${KUBECONFIG:-/root/.kube/config} + +kubectl version > /dev/null +if [[ $? 
-gt 0 ]]; then + echo "fail: kubectl failed" + exit 1 +fi + +default_resources=$(kubectl get all -o json | jq --compact-output '.items[] | select((.kind == "Service") and (.metadata.name == "kubernetes") and (.metadata.namespace == "default") | not)' | wc -l) + +echo "--count=${default_resources}" +``` + +**Audit Execution:** + +``` +./5.6.4.sh +``` + +**Expected result**: + +``` +'0' is equal to '0' +``` diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark.md b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark.md new file mode 100644 index 0000000000..f34e1ba798 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark.md @@ -0,0 +1,3319 @@ +--- +title: CIS 1.6 Benchmark - Self-Assessment Guide - Rancher v2.5.4 +weight: 101 +aliases: + - /rancher/v2.x/en/security/rancher-2.5/1.6-benchmark-2.5/ +--- + +### CIS 1.6 Kubernetes Benchmark - Rancher v2.5.4 with Kubernetes v1.18 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.5/Rancher_1.6_Benchmark_Assessment.pdf) + +#### Overview + +This document is a companion to the Rancher v2.5.4 security hardening guide. The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark, and Kubernetes: + +Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version +---------------------------|----------|---------|------- +Hardening Guide with CIS 1.6 Benchmark | Rancher v2.5.4 | CIS 1.6| Kubernetes v1.18 + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark don't apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher-created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark 1.6. You can download the benchmark after logging in to [CISecurity.org]( https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher Labs are provided for testing. +When performing the tests, you will need access to the Docker command line on the hosts of all three RKE roles. 
The commands also make use of the the [jq](https://stedolan.github.io/jq/) and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) (with valid config) tools to and are required in the testing and evaluation of test results. + +### Controls + +## 1.1 Etcd Node Configuration Files +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + + +**Audit:** + +```bash +stat -c %a /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 + +``` +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +A system service account is required for etcd data directory ownership. +Refer to Rancher's hardening guide for more details on how to configure this ownership. + + +**Audit:** + +```bash +stat -c %U:%G /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd + +``` +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown -R root:root /etc/kubernetes/pki/ + + +**Audit:** + +```bash +check_files_owner_in_dir.sh /node/etc/kubernetes/ssl +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +# This script is used to ensure the owner is set to root:root for +# the given directory and all the files in it +# +# inputs: +# $1 = /full/path/to/directory +# +# outputs: +# true/false + +INPUT_DIR=$1 + +if [[ "${INPUT_DIR}" == "" ]]; then + echo "false" + exit +fi + +if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then + echo "false" + exit +fi + +statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) +while read -r statInfoLine; do + f=$(echo ${statInfoLine} | cut -d' ' -f1) + p=$(echo ${statInfoLine} | cut -d' ' -f2) + + if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then + if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "root:root" ]]; then + echo "false" + exit + fi + fi +done <<< "${statInfoLines}" + + +echo "true" +exit + +``` +**Returned Value**: + +```console +true + +``` +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + + +**Audit:** + +```bash +check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` +**Returned Value**: + +```console +true + +``` +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 600 /etc/kubernetes/ssl/*key.pem + + +**Audit:** + +```bash +check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem 600 +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` +**Returned Value**: + +```console +true + +``` +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time. 
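+
+Because the configuration is passed as container arguments rather than a manifest file, one supplementary way to review it is to inspect the running container directly. The sketch below assumes the container is named `kube-apiserver`, which is the name RKE typically uses.
+
+```bash
+# Print the arguments RKE passed to the kube-apiserver container, one per line.
+docker inspect kube-apiserver --format '{{range .Args}}{{println .}}{{end}}'
+```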
+ + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-apiserver.yaml; fi' +``` + + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-apiserver.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-apiserver.yaml; fi' +``` + + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' +``` + + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-controller-manager.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-controller-manager.yaml; fi' +``` + + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/kube-scheduler.yaml; fi' +``` + + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/kube-scheduler.yaml; then stat -c %U:%G /etc/kubernetes/manifests/kube-scheduler.yaml; fi' +``` + + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c permissions=%a /etc/kubernetes/manifests/etcd.yaml; fi' +``` + + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. 
+ + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/manifests/etcd.yaml; then stat -c %U:%G /etc/kubernetes/manifests/etcd.yaml; fi' +``` + + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 + + +**Audit:** + +```bash +stat -c permissions=%a +``` + + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root + + +**Audit:** + +```bash +stat -c %U:%G +``` + + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi' +``` + + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi' +``` + + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e scheduler; then stat -c permissions=%a scheduler; fi' +``` + + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e scheduler; then stat -c %U:%G scheduler; fi' +``` + + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e controllermanager; then stat -c permissions=%a controllermanager; fi' +``` + + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. 
+ + +**Audit:** + +```bash +/bin/sh -c 'if test -e controllermanager; then stat -c %U:%G controllermanager; fi' +``` + + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--anonymous-auth=false + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--basic-auth-file=` parameter. 
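+
+In an RKE-provisioned cluster there is no static kube-apiserver manifest to edit; API server flags are driven by the `kube-api` service configuration (for example, `extra_args` in the cluster configuration file). A complementary config-side check might look like the sketch below; the `cluster.yml` path is an assumption and should be adjusted to your environment.
+
+```bash
+# Confirm that no basic-auth-file argument is configured for the kube-api service.
+grep -n 'basic-auth-file' cluster.yml || echo 'basic-auth-file is not configured'
+```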
+ + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--basic-auth-file' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--token-auth-file=` parameter. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --kubelet-https parameter. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is not present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console + 'Node,RBAC' not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'Node' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes RBAC, +for example: +--authorization-mode=Node,RBAC + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'RBAC' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console + 'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... 
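Before adding SecurityContextDeny, it can help to confirm which admission plugins the API server already enables, since the plugin is only recommended when PodSecurityPolicy is not in place. A minimal sketch, reusing the same process listing as the audit command below (assumes shell access to the node running kube-apiserver):

```bash
# List the currently enabled admission plugins, one per line, so it is easy to
# see whether PodSecurityPolicy (or SecurityContextDeny) is already configured.
/bin/ps -ef | grep kube-apiserver | grep -v grep \
  | grep -o 'enable-admission-plugins=[^ ]*' \
  | cut -d= -f2 | tr ',' '\n'
```

In the output captured in this report, PodSecurityPolicy already appears in the enabled plugin list, so SecurityContextDeny is not required here.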
+ + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the 
master node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) + +**Result:** pass + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes PodSecurityPolicy: +--enable-admission-plugins=...,PodSecurityPolicy,... +Then restart the API Server. 
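Enabling the PodSecurityPolicy plugin with no PodSecurityPolicy objects defined would block all pod creation, so it is worth verifying that policies exist before (or immediately after) restarting the API server. A minimal sketch, assuming shell access to the node and a kubeconfig with cluster-level read access:

```bash
# Confirm the plugin is active on the running API server...
/bin/ps -ef | grep kube-apiserver | grep -v grep \
  | grep -o 'enable-admission-plugins=[^ ]*'

# ...and that at least one PodSecurityPolicy object exists for workloads to be
# admitted against.
kubectl get podsecuritypolicies
```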
+ + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... 
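NodeRestriction only constrains requests made with node credentials, so it is typically enabled alongside the Node authorizer. A minimal sketch that checks both flags on the running API server, using the same process listing as the audit below:

```bash
# Verify that NodeRestriction is among the enabled admission plugins and that
# Node authorization is part of --authorization-mode; together these limit
# kubelets to modifying only their own Node and Pod objects.
PS_OUT=$(/bin/ps -ef | grep kube-apiserver | grep -v grep)
echo "$PS_OUT" | grep -o 'enable-admission-plugins=[^ ]*' | grep -q 'NodeRestriction' \
  && echo "NodeRestriction: enabled" || echo "NodeRestriction: missing"
echo "$PS_OUT" | grep -o 'authorization-mode=[^ ]*' | grep -q 'Node' \
  && echo "Node authorizer: enabled" || echo "Node authorizer: missing"
```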
+ + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --insecure-bind-address parameter. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--insecure-bind-address' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--insecure-port=0 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'0' is equal to '0' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +6443 is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.21 Ensure that the --profiling argument is set to false (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--profiling=false + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example: +--audit-log-path=/var/log/apiserver/audit.log + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: +--audit-log-maxage=30 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +30 is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. +--audit-log-maxbackup=10 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +10 is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB: +--audit-log-maxsize=100 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +100 is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, +--request-timeout=300s + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is not present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --service-account-key-file parameter +to the public key file for service accounts: +`--service-account-key-file=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate and key file parameters. +`--etcd-certfile=` +`--etcd-keyfile=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the TLS certificate and private key file parameters. +`--tls-cert-file=` +`--tls-private-key-file=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the client certificate authority file. +`--client-ca-file=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate authority file parameter. +`--etcd-cafile=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 1.2.34 Ensure that encryption providers are appropriately configured (Automated) + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +In this file, choose aescbc, kms or secretbox as the encryption provider. + + +**Audit:** + +```bash +check_encryption_provider_config.sh aescbc kms secretbox +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +# This script is used to check the encrption provider config is set to aesbc +# +# outputs: +# true/false + +# TODO: Figure out the file location from the kube-apiserver commandline args +ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml" + +if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then + echo "false" + exit +fi + +for provider in "$@" +do + if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then + echo "true" + exit + fi +done + +echo "false" +exit + +``` +**Returned Value**: + +```console + - aescbc: +true + +``` +### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM +_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM +_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM +_SHA384 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example: +--terminated-pod-gc-threshold=10 + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the below parameter. +--profiling=false + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node to set the below parameter. +--use-service-account-credentials=true + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'true' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --service-account-private-key-file parameter +to the private key file for service accounts. +`--service-account-private-key-file=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --root-ca-file parameter to the certificate bundle file`. +`--root-ca-file=` + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=0.0.0.0 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** notApplicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +Cluster provisioned by RKE handles certificate rotation directly through RKE. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and ensure the correct value for the --bind-address parameter + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' argument is set to 127.0.0.1 +``` + +**Returned Value**: + +```console +root 4788 4773 4 16:16 ? 
00:00:09 kube-controller-manager --configure-cloud-routes=false --cloud-provider= --service-cluster-ip-range=10.43.0.0/16 --v=2 --bind-address=127.0.0.1 --pod-eviction-timeout=5m0s --leader-elect=true --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --profiling=false --node-monitor-grace-period=40s --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --address=127.0.0.1 --allow-untagged-cloud=true --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --allocate-node-cidrs=true --enable-hostpath-provisioner=false --terminated-pod-gc-threshold=1000 --feature-gates=RotateKubeletServerCertificate=true --use-service-account-credentials=true + +``` +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=0.0.0.0 + +``` +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the master node and ensure the correct value for the --bind-address parameter + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'--bind-address' argument is set to 127.0.0.1 +``` + +**Returned Value**: + +```console +root 4947 4930 1 16:16 ? 00:00:02 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --leader-elect=true --profiling=false --v=2 --address=127.0.0.1 --bind-address=127.0.0.1 + +``` +## 2 Etcd Node Configuration Files +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. +`--cert-file=` +`--key-file=` + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--cert-file' is present AND '--key-file' is present +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure peer TLS encryption as appropriate +for your etcd cluster. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameters. +`--peer-client-file=` +`--peer-key-file=` + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-cert-file' is present AND '--peer-key-file' is present +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. +--peer-auto-tls=false + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 
00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameter. +`--trusted-ca-file=` + + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is present +``` + +**Returned Value**: + +```console +etcd 4318 4301 6 16:15 ? 
00:00:14 /usr/local/bin/etcd --listen-peer-urls=https://0.0.0.0:2380 --cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --advertise-client-urls=https://192.168.1.225:2379,https://192.168.1.225:4001 --election-timeout=5000 --data-dir=/var/lib/rancher/etcd/ --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225.pem --enable-v2=true --initial-cluster=etcd-cis-aio-0=https://192.168.1.225:2380 --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --client-cert-auth=true --heartbeat-interval=500 --initial-cluster-token=etcd-cluster-1 --name=etcd-cis-aio-0 --listen-client-urls=https://0.0.0.0:2379 --peer-key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem --peer-client-cert-auth=true --initial-advertise-peer-urls=https://192.168.1.225:2380 --initial-cluster-state=new --key-file=/etc/kubernetes/ssl/kube-etcd-192-168-1-225-key.pem +root 4366 4349 0 16:15 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=192.168.1.225:2379 --retention=72h --creation=12h +root 4643 4626 23 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User +root 14998 14985 0 16:19 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json + +``` +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + + +**Audit:** + +```bash + +``` + + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. + + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-policy-file' is present +``` + +**Returned Value**: + +```console +root 4643 4626 22 16:15 ? 00:00:46 kube-apiserver --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-cluster-ip-range=10.43.0.0/16 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --authorization-mode=Node,RBAC --audit-log-maxsize=100 --audit-log-format=json --requestheader-allowed-names=kube-apiserver-proxy-client --cloud-provider= --etcd-prefix=/registry --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --allow-privileged=true --service-account-lookup=true --admission-control-config-file=/etc/kubernetes/admission.yaml --audit-policy-file=/etc/kubernetes/audit-policy.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --service-node-port-range=30000-32767 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --storage-backend=etcd3 --anonymous-auth=false --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --advertise-address=192.168.1.225 --audit-log-maxage=30 --etcd-servers=https://192.168.1.225:2379 --runtime-config=policy/v1beta1/podsecuritypolicy=true --bind-address=0.0.0.0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --insecure-port=0 --requestheader-group-headers=X-Remote-Group --secure-port=6443 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-extra-headers-prefix=X-Remote-Extra- --profiling=false --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --requestheader-username-headers=X-Remote-User + +``` +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + +**Result:** warn + +**Remediation:** +Consider modification of the audit policy in use on the cluster to include these items, at a +minimum. + + +**Audit:** + +```bash + +``` + + +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' +``` + + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c %U:%G /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi' +``` + + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chmod 644 $proykubeconfig + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` + +**Returned Value**: + +```console +600 + +``` +### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. 
+For example, +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root + +``` +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +`--client-ca-file chmod 644 ` + + +**Audit:** + +```bash +check_cafile_permissions.sh +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + +``` +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +`chown root:root ` + + +**Audit:** + +```bash +check_cafile_ownership.sh +``` + +**Expected Result**: + +```console +'root:root' is not present +``` + +**Audit Script:** +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + +``` +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + +**Result:** notApplicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c permissions=%a /var/lib/kubelet/config.yaml; fi' +``` + + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + +**Result:** notApplicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. 
+ + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/kubelet/config.yaml; then stat -c %U:%G /var/lib/kubelet/config.yaml; fi' +``` + + +## 4.2 Kubelet +### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to +false. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--anonymous-auth=false +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD +root 5103 5086 7 16:16 ? 00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf + +``` +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + +**Result:** notApplicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set tlsCertFile to the location +of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +`--tls-cert-file=` +`--tls-private-key-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present AND '' is not present +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line rotateCertificates: true or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'--rotate-certificates' is not present OR '--rotate-certificates' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD +root 5103 5086 6 16:16 ? 
00:00:12 kubelet --resolv-conf=/etc/resolv.conf --read-only-port=0 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --feature-gates=RotateKubeletServerCertificate=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --make-iptables-util-chains=true --streaming-connection-idle-timeout=30m --cluster-dns=10.43.0.10 --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225-key.pem --address=0.0.0.0 --cni-bin-dir=/opt/cni/bin --anonymous-auth=false --protect-kernel-defaults=true --cloud-provider= --hostname-override=cis-aio-0 --fail-swap-on=false --cgroups-per-qos=True --authentication-token-webhook=true --event-qps=0 --v=2 --pod-infra-container-image=rancher/pause:3.1 --authorization-mode=Webhook --network-plugin=cni --cluster-domain=cluster.local --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --volume-plugin-dir=/var/lib/kubelet/volumeplugins --cni-conf-dir=/etc/cni/net.d --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-192-168-1-225.pem --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf + +``` +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + +**Result:** notApplicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE handles certificate rotation directly through RKE. + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set TLSCipherSuites: to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Expected Result**: + +```console +'' is not present +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + + +**Audit:** + +```bash + +``` + + +### 5.1.2 Minimize access to secrets (Manual) + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to secret objects in the cluster. + + +**Audit:** + +```bash + +``` + + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + + +**Audit:** + +```bash + +``` + + +### 5.1.4 Minimize access to create pods (Manual) + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + + +**Audit:** + +```bash + +``` + + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + + +**Audit:** + +```bash +check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` +**Returned Value**: + +```console +true + +``` +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + + +**Audit:** + +```bash + +``` + + +## 5.2 Pod Security Policies +### 5.2.1 Minimize the admission of privileged containers (Manual) + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that +the .spec.privileged field is omitted or set to false. + + +**Audit:** + +```bash + +``` + + +### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostPID field is omitted or set to false. 
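+
+The following snippet is illustrative only and is not part of the kube-bench output; it sketches a minimal PodSecurityPolicy that leaves `hostPID` set to `false`, with `restricted-psp` as a placeholder name:
+
+```bash
+# Illustrative sketch only: a minimal PSP whose .spec.hostPID is explicitly false.
+# The policy name "restricted-psp" is a placeholder, not something created by Rancher or kube-bench.
+kubectl apply -f - <<EOF
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: restricted-psp
+spec:
+  privileged: false
+  hostPID: false
+  runAsUser:
+    rule: RunAsAny
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+EOF
+```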
+ + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 + +``` +### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostIPC field is omitted or set to false. + + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 + +``` +### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostNetwork field is omitted or set to false. + + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 + +``` +### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.allowPrivilegeEscalation field is omitted or set to false. + + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 + +``` +### 5.2.6 Minimize the admission of root containers (Manual) + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of +UIDs not including 0. + + +**Audit:** + +```bash + +``` + + +### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + + +**Audit:** + +```bash + +``` + + +### 5.2.8 Minimize the admission of containers with added capabilities (Manual) + +**Result:** warn + +**Remediation:** +Ensure that allowedCapabilities is not present in PSPs for the cluster unless +it is set to an empty array. + + +**Audit:** + +```bash + +``` + + +### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) + +**Result:** warn + +**Remediation:** +Review the use of capabilites in applications runnning on your cluster. Where a namespace +contains applicaions which do not require any Linux capabities to operate consider adding +a PSP which forbids the admission of containers which do not drop all capabilities. 
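+
+As an illustrative sketch only (not produced by kube-bench), such a policy could require every capability to be dropped via `requiredDropCapabilities`; the name `drop-all-caps` below is a placeholder:
+
+```bash
+# Illustrative sketch only: a PSP that requires containers to drop all Linux capabilities.
+# "drop-all-caps" is a placeholder name.
+kubectl apply -f - <<EOF
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: drop-all-caps
+spec:
+  privileged: false
+  requiredDropCapabilities:
+    - ALL
+  runAsUser:
+    rule: RunAsAny
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+EOF
+```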
+ + +**Audit:** + +```bash + +``` + + +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual) + +**Result:** warn + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + + +**Audit:** + +```bash + +``` + + +### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) + +**Result:** pass + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + + +**Audit:** + +```bash +check_for_network_policies.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do + policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length') + if [[ ${policy_count} -eq 0 ]]; then + echo "false" + exit + fi +done + +echo "true" + +``` +**Returned Value**: + +```console +true + +``` +## 5.4 Secrets Management +### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) + +**Result:** warn + +**Remediation:** +if possible, rewrite application code to read secrets from mounted secret files, rather than +from environment variables. + + +**Audit:** + +```bash + +``` + + +### 5.4.2 Consider external secret storage (Manual) + +**Result:** warn + +**Remediation:** +Refer to the secrets management options offered by your cloud provider or a third-party +secrets management solution. + + +**Audit:** + +```bash + +``` + + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + + +**Audit:** + +```bash + +``` + + +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + + +**Audit:** + +```bash + +``` + + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) + +**Result:** warn + +**Remediation:** +Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you +would need to enable alpha features in the apiserver by passing "--feature- +gates=AllAlpha=true" argument. +Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS +parameter to "--feature-gates=AllAlpha=true" +KUBE_API_ARGS="--feature-gates=AllAlpha=true" +Based on your system, restart the kube-apiserver service. For example: +systemctl restart kube-apiserver.service +Use annotations to enable the docker/default seccomp profile in your pod definitions. An +example is as below: +apiVersion: v1 +kind: Pod +metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default +spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + + +**Audit:** + +```bash + +``` + + +### 5.7.3 Apply Security Context to Your Pods and Containers (Manual) + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply security contexts to your pods. 
For a +suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker +Containers. + + +**Audit:** + +```bash + +``` + + +### 5.7.4 The default namespace should not be used (Automated) + +**Result:** pass + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + + +**Audit:** + +```bash +check_for_default_ns.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Audit Script:** +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) +if [[ ${count} -gt 0 ]]; then + echo "false" + exit +fi + +echo "true" + + +``` +**Returned Value**: + +```console +true + +``` diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/security-advisories-and-cves.md b/versioned_docs/version-2.5/reference-guides/rancher-security/security-advisories-and-cves.md new file mode 100644 index 0000000000..7227445587 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/security-advisories-and-cves.md @@ -0,0 +1,29 @@ +--- +title: Security Advisories and CVEs +weight: 300 +--- + +Rancher is committed to informing the community of security issues in our products. Rancher will publish security advisories and CVEs (Common Vulnerabilities and Exposures) for issues we have resolved. New security advisories are also published in Rancher's GitHub [security page](https://github.com/rancher/rancher/security/advisories). + +| ID | Description | Date | Resolution | +|----|-------------|------|------------| +| [CVE-2022-21951](https://github.com/rancher/rancher/security/advisories/GHSA-vrph-m5jj-c46c) | This vulnerability only affects customers using [Weave](https://rancher.com/docs/rancher/v2.6/en/faq/networking/cni-providers/#weave) Container Network Interface (CNI) when configured through [RKE templates](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rke-templates/). A vulnerability was discovered in Rancher versions 2.5.0 up to and including 2.5.13, and 2.6.0 up to and including 2.6.4, where a user interface (UI) issue with RKE templates does not include a value for the Weave password when Weave is chosen as the CNI. If a cluster is created based on the mentioned template, and Weave is configured as the CNI, no password will be created for [network encryption](https://www.weave.works/docs/net/latest/tasks/manage/security-untrusted-networks/) in Weave; therefore, network traffic in the cluster will be sent unencrypted. 
| 24 May 2022 | [Rancher v2.6.5](https://github.com/rancher/rancher/releases/tag/v2.6.5) and [Rancher v2.5.14](https://github.com/rancher/rancher/releases/tag/v2.5.14) | +| [CVE-2021-36784](https://github.com/rancher/rancher/security/advisories/GHSA-jwvr-vv7p-gpwq) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 which allows users who have create or update permissions on [Global Roles](https://rancher.com/docs/rancher/v2.6/en/admin-settings/rbac/) to escalate their permissions, or those of another user, to admin-level permissions. Global Roles grant users Rancher-wide permissions, such as the ability to create clusters. In the identified versions of Rancher, when users are given permission to edit or create Global Roles, they are not restricted to only granting permissions which they already posses. This vulnerability affects customers who utilize non-admin users that are able to create or edit Global Roles. The most common use case for this scenario is the `restricted-admin` role. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2021-4200](https://github.com/rancher/rancher/security/advisories/GHSA-hx8w-ghh8-r4xf) | This vulnerability only affects customers using the `restricted-admin` role in Rancher. A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3 where the `global-data` role in `cattle-global-data` namespace grants write access to the Catalogs. Since each user with any level of catalog access was bound to the `global-data` role, this grants write access to templates (`CatalogTemplates`) and template versions (`CatalogTemplateVersions`) for any user with any level of catalog access. New users created in Rancher are by default assigned to the `user` role (standard user), which is not designed to grant write catalog access. This vulnerability effectively elevates the privilege of any user to write access for the catalog template and catalog template version resources. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [GHSA-wm2r-rp98-8pmh](https://github.com/rancher/rancher/security/advisories/GHSA-wm2r-rp98-8pmh) | This vulnerability only affects customers using [Fleet](https://rancher.com/docs/rancher/v2.6/en/deploy-across-clusters/fleet/) for continuous delivery with authenticated Git and/or Helm repositories. An issue was discovered in `go-getter` library in versions prior to [`v1.5.11`](https://github.com/hashicorp/go-getter/releases/tag/v1.5.11) that exposes SSH private keys in base64 format due to a failure in redacting such information from error messages. 
The vulnerable version of this library is used in Rancher through Fleet in versions of Fleet prior to [`v0.3.9`](https://github.com/rancher/fleet/releases/tag/v0.3.9). This issue affects Rancher versions 2.5.0 up to and including 2.5.12 and from 2.6.0 up to and including 2.6.3. The issue was found and reported by Dagan Henderson from Raft Engineering. | 14 Apr 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) and [Rancher v2.5.13](https://github.com/rancher/rancher/releases/tag/v2.5.13) | +| [CVE-2021-36778](https://github.com/rancher/rancher/security/advisories/GHSA-4fc7-hc63-7fjg) | A vulnerability was discovered in Rancher versions from 2.5.0 up to and including 2.5.11 and from 2.6.0 up to and including 2.6.2, where an insufficient check of the same-origin policy when downloading Helm charts from a configured private repository can lead to exposure of the repository credentials to a third-party provider. This issue only happens when the user configures access credentials to a private repository in Rancher inside `Apps & Marketplace > Repositories`. The issue was found and reported by Martin Andreas Ullrich. | 14 Apr 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3) and [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) | +| [GHSA-hwm2-4ph6-w6m5](https://github.com/rancher/rancher/security/advisories/GHSA-hwm2-4ph6-w6m5) | A vulnerability was discovered in versions of Rancher starting 2.0 up to and including 2.6.3. The `restricted` pod security policy (PSP) provided in Rancher deviated from the upstream `restricted` policy provided in Kubernetes on account of which Rancher's PSP had `runAsUser` set to `runAsAny`, while upstream had `runAsUser` set to `MustRunAsNonRoot`. This allowed containers to run as any user, including a privileged user (`root`), even when Rancher's `restricted` policy was enforced on a project or at the cluster level. | 31 Mar 2022 | [Rancher v2.6.4](https://github.com/rancher/rancher/releases/tag/v2.6.4) | +| [CVE-2021-36775](https://github.com/rancher/rancher/security/advisories/GHSA-28g7-896h-695v) | A vulnerability was discovered in Rancher versions up to and including 2.4.17, 2.5.11 and 2.6.2. After removing a `Project Role` associated with a group from the project, the bindings that granted access to cluster-scoped resources for those subjects were not deleted. This was due to an incomplete authorization logic check. A user who was a member of the affected group with authenticated access to Rancher could exploit this vulnerability to access resources they shouldn't have had access to. The exposure level would depend on the original permission level granted to the affected project role. This vulnerability only affected customers using group based authentication in Rancher. 
| 31 Mar 2022 | [Rancher v2.6.3](https://github.com/rancher/rancher/releases/tag/v2.6.3), [Rancher v2.5.12](https://github.com/rancher/rancher/releases/tag/v2.5.12) and [Rancher v2.4.18](https://github.com/rancher/rancher/releases/tag/v2.4.18) | +| [CVE-2021-36776](https://github.com/rancher/rancher/security/advisories/GHSA-gvh9-xgrq-r8hw) | A vulnerability was discovered in Rancher versions starting 2.5.0 up to and including 2.5.9, that allowed an authenticated user to impersonate any user on a cluster through an API proxy, without requiring knowledge of the impersonated user's credentials. This was due to the API proxy not dropping the impersonation header before sending the request to the Kubernetes API. A malicious user with authenticated access to Rancher could use this to impersonate another user with administrator access in Rancher, thereby gaining administrator level access to the cluster. | 31 Mar 2022 | [Rancher v2.6.0](https://github.com/rancher/rancher/releases/tag/v2.6.0) and [Rancher v2.5.10](https://github.com/rancher/rancher/releases/tag/v2.5.10) | +| [CVE-2021-25318](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25318) | A vulnerability was discovered in Rancher versions 2.0 through the aforementioned fixed versions, where users were granted access to resources regardless of the resource's API group. For example, Rancher should have allowed users access to `apps.catalog.cattle.io`, but instead incorrectly gave access to `apps.*`. Resources affected in the **Downstream clusters** and **Rancher management cluster** can be found [here](https://github.com/rancher/rancher/security/advisories/GHSA-f9xf-jq4j-vqw4). There is not a direct mitigation besides upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-31999](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-31999) | A vulnerability was discovered in Rancher 2.0.0 through the aforementioned patched versions, where a malicious Rancher user could craft an API request directed at the proxy for the Kubernetes API of a managed cluster to gain access to information they do not have access to. This is done by passing the "Impersonate-User" or "Impersonate-Group" header in the Connection header, which is then correctly removed by the proxy. At this point, instead of impersonating the user and their permissions, the request will act as if it was from the Rancher management server and incorrectly return the information. The vulnerability is limited to valid Rancher users with some level of permissions on the cluster. There is not a direct mitigation besides upgrading to the patched Rancher versions. 
| 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25320](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25320) | A vulnerability was discovered in Rancher 2.2.0 through the aforementioned patched versions, where cloud credentials weren't being properly validated through the Rancher API. Specifically through a proxy designed to communicate with cloud providers. Any Rancher user that was logged-in and aware of a cloud-credential ID that was valid for a given cloud provider, could call that cloud provider's API through the proxy API, and the cloud-credential would be attached. The exploit is limited to valid Rancher users. There is not a direct mitigation outside of upgrading to the patched Rancher versions. | 14 Jul 2021 | [Rancher v2.5.9](https://github.com/rancher/rancher/releases/tag/v2.5.9) and [Rancher v2.4.16](https://github.com/rancher/rancher/releases/tag/v2.4.16) | +| [CVE-2021-25313](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-25313) | A security vulnerability was discovered on all Rancher 2 versions. When accessing the Rancher API with a browser, the URL was not properly escaped, making it vulnerable to an XSS attack. Specially crafted URLs to these API endpoints could include JavaScript which would be embedded in the page and execute in a browser. There is no direct mitigation. Avoid clicking on untrusted links to your Rancher server. | 2 Mar 2021 | [Rancher v2.5.6](https://github.com/rancher/rancher/releases/tag/v2.5.6), [Rancher v2.4.14](https://github.com/rancher/rancher/releases/tag/v2.4.14), and [Rancher v2.3.11](https://github.com/rancher/rancher/releases/tag/v2.3.11) | +| [CVE-2019-14435](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14435) | This vulnerability allows authenticated users to potentially extract otherwise private data out of IPs reachable from system service containers used by Rancher. This can include but not only limited to services such as cloud provider metadata services. Although Rancher allow users to configure whitelisted domains for system service access, this flaw can still be exploited by a carefully crafted HTTP request. The issue was found and reported by Matt Belisle and Alex Stevenson at Workiva. | 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-14436](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-14436) | The vulnerability allows a member of a project that has access to edit role bindings to be able to assign themselves or others a cluster level role granting them administrator access to that cluster. The issue was found and reported by Michal Lipinski at Nokia. 
| 5 Aug 2019 | [Rancher v2.2.7](https://github.com/rancher/rancher/releases/tag/v2.2.7) and [Rancher v2.1.12](https://github.com/rancher/rancher/releases/tag/v2.1.12) | +| [CVE-2019-13209](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13209) | The vulnerability is known as a [Cross-Site Websocket Hijacking attack](https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html). This attack allows an exploiter to gain access to clusters managed by Rancher with the roles/permissions of a victim. It requires that a victim to be logged into a Rancher server and then access a third-party site hosted by the exploiter. Once that is accomplished, the exploiter is able to execute commands against the Kubernetes API with the permissions and identity of the victim. Reported by Matt Belisle and Alex Stevenson from Workiva. | 15 Jul 2019 | [Rancher v2.2.5](https://github.com/rancher/rancher/releases/tag/v2.2.5), [Rancher v2.1.11](https://github.com/rancher/rancher/releases/tag/v2.1.11) and [Rancher v2.0.16](https://github.com/rancher/rancher/releases/tag/v2.0.16) | +| [CVE-2019-12303](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12303) | Project owners can inject extra fluentd logging configurations that makes it possible to read files or execute arbitrary commands inside the fluentd container. Reported by Tyler Welton from Untamed Theory. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-12274](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12274) | Nodes using the built-in node drivers using a file path option allows the machine to read arbitrary files including sensitive ones from inside the Rancher server container. | 5 Jun 2019 | [Rancher v2.2.4](https://github.com/rancher/rancher/releases/tag/v2.2.4), [Rancher v2.1.10](https://github.com/rancher/rancher/releases/tag/v2.1.10) and [Rancher v2.0.15](https://github.com/rancher/rancher/releases/tag/v2.0.15) | +| [CVE-2019-11202](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11202) | The default admin, that is shipped with Rancher, will be re-created upon restart of Rancher despite being explicitly deleted. 
| 16 Apr 2019 | [Rancher v2.2.2](https://github.com/rancher/rancher/releases/tag/v2.2.2), [Rancher v2.1.9](https://github.com/rancher/rancher/releases/tag/v2.1.9) and [Rancher v2.0.14](https://github.com/rancher/rancher/releases/tag/v2.0.14) | +| [CVE-2019-6287](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-6287) | Project members continue to get access to namespaces from projects that they were removed from if they were added to more than one project. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) | +| [CVE-2018-20321](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-20321) | Any project member with access to the `default` namespace can mount the `netes-default` service account in a pod and then use that pod to execute administrative privileged commands against the Kubernetes cluster. | 29 Jan 2019 | [Rancher v2.1.6](https://github.com/rancher/rancher/releases/tag/v2.1.6) and [Rancher v2.0.11](https://github.com/rancher/rancher/releases/tag/v2.0.11) - Rolling back from these versions or greater have specific [instructions](../../getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks.md). | diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md b/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rancher-selinux.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md b/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md new file mode 100644 index 0000000000..fa42a3bae8 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/rancher-security/selinux-rpm/about-rke2-selinux.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md b/versioned_docs/version-2.5/reference-guides/rke1-template-example-yaml.md similarity index 100% rename from content/rancher/v2.5/en/admin-settings/rke-templates/example-yaml/_index.md rename to versioned_docs/version-2.5/reference-guides/rke1-template-example-yaml.md diff --git a/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/advanced-options.md b/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/advanced-options.md new file mode 100644 index 0000000000..b258c89743 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/advanced-options.md @@ -0,0 +1,117 @@ +--- +title: Advanced Options for Docker Installs +weight: 5 +aliases: + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/advanced/ +--- + +When installing Rancher, there are several [advanced options](../../pages-for-subheaders/resources.md) that can be enabled: + +- [Custom CA Certificate](#custom-ca-certificate) +- 
[API Audit Log](#api-audit-log)
+- [TLS Settings](#tls-settings)
+- [Air Gap](#air-gap)
+- [Persistent Data](#persistent-data)
+- [Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node](#running-rancher-rancher-and-rancher-rancher-agent-on-the-same-node)
+
+### Custom CA Certificate
+
+If you want Rancher to use a CA root certificate when validating services, start the Rancher container with the directory that contains the CA root certificate shared into the container.
+
+Use the command example below to start a Rancher container with your private CA certificates mounted.
+
+- The volume flag (`-v`) should specify the host directory containing the CA root certificates.
+- The environment variable flag (`-e`) sets `SSL_CERT_DIR`, which tells Rancher where the mounted CA root certificates directory is located inside the container.
+- Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`.
+- Mounting a host directory inside the container can be done using `-v host-source-directory:container-destination-directory` or `--volume host-source-directory:container-destination-directory`.
+
+The example below is based on having the CA root certificates in the `/host/certs` directory on the host and mounting this directory on `/container/certs` inside the Rancher container.
+
+As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
+
+```
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v /host/certs:/container/certs \
+  -e SSL_CERT_DIR="/container/certs" \
+  --privileged \
+  rancher/rancher:latest
+```
+
+### API Audit Log
+
+The API Audit Log records all the user and system transactions made through the Rancher server.
+
+The API Audit Log writes to `/var/log/auditlog` inside the Rancher container by default. Share that directory as a volume and set your `AUDIT_LEVEL` to enable the log.
+
+See [API Audit Log](../../getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log.md) for more information and options.
+
+As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
+
+```
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v /var/log/rancher/auditlog:/var/log/auditlog \
+  -e AUDIT_LEVEL=1 \
+  --privileged \
+  rancher/rancher:latest
+```
+
+### TLS Settings
+
+To set a different TLS configuration, you can use the `CATTLE_TLS_MIN_VERSION` and `CATTLE_TLS_CIPHERS` environment variables. For example, to configure TLS 1.0 as the minimum accepted TLS version:
+
+```
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -e CATTLE_TLS_MIN_VERSION="1.0" \
+  --privileged \
+  rancher/rancher:latest
+```
+
+As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5)
+
+See [TLS settings](../installation-references/tls-settings.md) for more information and options.
+
+### Air Gap
+
+If you are visiting this page to complete an air gap installation, you must prepend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`.
+ +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) + +### Running `rancher/rancher` and `rancher/rancher-agent` on the Same Node + +In the situation where you want to use a single node to run Rancher and to be able to add the same node to a cluster, you have to adjust the host ports mapped for the `rancher/rancher` container. + +If a node is added to a cluster, it deploys the nginx ingress controller which will use port 80 and 443. This will conflict with the default ports we advise to expose for the `rancher/rancher` container. + +Please note that this setup is not recommended for production use, but can be convenient for development/demo purposes. + +To change the host ports mapping, replace the following part `-p 80:80 -p 443:443` with `-p 8080:80 -p 8443:443`: + +``` +docker run -d --restart=unless-stopped \ + -p 8080:80 -p 8443:443 \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) diff --git a/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md b/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md new file mode 100644 index 0000000000..6d36af6c34 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/single-node-rancher-in-docker/http-proxy-configuration.md @@ -0,0 +1,46 @@ +--- +title: HTTP Proxy Configuration +weight: 251 +aliases: + - /rancher/v2.5/en/installation/proxy-configuration/ + - /rancher/v2.5/en/installation/single-node/proxy + - /rancher/v2.x/en/installation/other-installation-methods/single-node-docker/proxy/ +--- + +If you operate Rancher behind a proxy and you want to access services through the proxy (such as retrieving catalogs), you must provide Rancher information about your proxy. As Rancher is written in Go, it uses the common proxy environment variables as shown below. + +Make sure `NO_PROXY` contains the network addresses, network address ranges and domains that should be excluded from using the proxy. + +| Environment variable | Purpose | +| -------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| HTTP_PROXY | Proxy address to use when initiating HTTP connection(s) | +| HTTPS_PROXY | Proxy address to use when initiating HTTPS connection(s) | +| NO_PROXY | Network address(es), network address range(s) and domains to exclude from using the proxy when initiating connection(s) | + +> **Note** NO_PROXY must be in uppercase to use network range (CIDR) notation. + +## Docker Installation + +Passing environment variables to the Rancher container can be done using `-e KEY=VALUE` or `--env KEY=VALUE`. 
Required values for `NO_PROXY` in a [Docker Installation](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md) are: + +- `localhost` +- `127.0.0.1` +- `0.0.0.0` +- `10.0.0.0/8` +- `cattle-system.svc` +- `.svc` +- `.cluster.local` + +The example below is based on a proxy server accessible at `http://192.168.10.1:3128`, and excludes using the proxy when accessing the network range `192.168.10.0/24` and every hostname under the domain `example.com`. + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e HTTP_PROXY="http://192.168.10.1:3128" \ + -e HTTPS_PROXY="http://192.168.10.1:3128" \ + -e NO_PROXY="localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,cattle-system.svc,192.168.10.0/24,.svc,.cluster.local,example.com" \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.](../../pages-for-subheaders/rancher-on-a-single-node-with-docker.md#privileged-access-for-rancher-v2-5) diff --git a/versioned_docs/version-2.5/reference-guides/system-tools.md b/versioned_docs/version-2.5/reference-guides/system-tools.md new file mode 100644 index 0000000000..717e292c17 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/system-tools.md @@ -0,0 +1,118 @@ +--- +title: System Tools +weight: 22 +aliases: + - /rancher/v2.x/en/system-tools/ +--- + +System Tools is a utility for performing operational tasks on [Rancher Launched Kubernetes](../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters or [installations of Rancher on an RKE cluster](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). The tasks include: + +* Collect logging and system metrics from nodes. +* Remove Kubernetes resources created by Rancher. + +The following commands are available: + +| Command | Description +|---|--- +| [logs](#logs) | Collect Kubernetes cluster component logs from nodes. +| [stats](#stats) | Stream system metrics from nodes. +| [remove](#remove) | Remove Kubernetes resources created by Rancher. + +# Download System Tools + +You can download the latest version of System Tools from the [GitHub releases page](https://github.com/rancher/system-tools/releases/latest). Download the version of `system-tools` for the OS that you are using to interact with the cluster. + +Operating System | Filename +-----------------|----- +macOS | `system-tools_darwin-amd64` +Linux | `system-tools_linux-amd64` +Windows | `system-tools_windows-amd64.exe` + +After you download the tools, complete the following actions: + +1. Rename the file to `system-tools`. + +1. Give the file executable permissions by running the following command: + + > **Using Windows?** + The file is already an executable, so you can skip this step. + + ``` + chmod +x system-tools + ``` + +# Logs + +The `logs` subcommand collects log files of core Kubernetes cluster components from nodes in [Rancher-launched Kubernetes clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) or nodes on an [RKE Kubernetes cluster that Rancher is installed on](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). See [Troubleshooting](../troubleshooting.md) for a list of core Kubernetes cluster components.
+ +System Tools uses the provided kubeconfig file to deploy a DaemonSet that copies all the log files from the core Kubernetes cluster components and adds them to a single tar file (`cluster-logs.tar` by default). If you only want to collect logging from a single node, you can specify the node by using `--node NODENAME` or `-n NODENAME`. + +### Usage + +``` +./system-tools_darwin-amd64 logs --kubeconfig <KUBECONFIG_FILE> +``` + +The following are the options for the logs command: + +| Option | Description +| ------------------------------------------------------ | ------------------------------------------------------ +| `--kubeconfig <KUBECONFIG_FILE>, -c <KUBECONFIG_FILE>` | The cluster's kubeconfig file. +| `--output <FILENAME>, -o cluster-logs.tar` | Name of the created tarball containing the logs. If no output filename is defined, the option defaults to `cluster-logs.tar`. +| `--node <NODENAME>, -n node1` | Specify the nodes to collect the logs from. If no node is specified, logs from all nodes in the cluster will be collected. + +# Stats + +The `stats` subcommand displays system metrics from nodes in [Rancher-launched Kubernetes clusters](../pages-for-subheaders/launch-kubernetes-with-rancher.md) or nodes in an [RKE Kubernetes cluster that Rancher is installed on](../pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md). + +System Tools deploys a DaemonSet and runs a predefined command based on `sar` (System Activity Report) to show system metrics. + +### Usage + +``` +./system-tools_darwin-amd64 stats --kubeconfig <KUBECONFIG_FILE> +``` + +The following are the options for the stats command: + +| Option | Description +| ------------------------------------------------------ | ------------------------------ +| `--kubeconfig <KUBECONFIG_FILE>, -c <KUBECONFIG_FILE>` | The cluster's kubeconfig file. +| `--node <NODENAME>, -n node1` | Specify the nodes to display the system metrics from. If no node is specified, system metrics from all nodes in the cluster will be displayed. +| `--stats-command value, -s value` | The command to run to display the system metrics. If no command is defined, the option defaults to `/usr/bin/sar -u -r -F 1 1`. + +# Remove + +>**Warning:** This command will remove data from your etcd nodes. Make sure you have created a [backup of etcd](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) before executing the command. + +When you install Rancher on a Kubernetes cluster, it creates Kubernetes resources to run and to store configuration data. If you want to remove Rancher from your cluster, you can use the `remove` subcommand to remove the Kubernetes resources. When you use the `remove` subcommand, the following resources will be removed: + +- The Rancher deployment namespace (`cattle-system` by default). +- Any `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` that Rancher applied the `cattle.io/creator:norman` label to. Rancher applies this label to any resource that it creates. +- Labels, annotations, and finalizers. +- Rancher Deployment. +- Machines, clusters, projects, and user custom resource definitions (CRDs). +- All resources created under the `management.cattle.io` API Group. +- All CRDs created by Rancher v2.x. + +>**Using 2.0.8 or Earlier?** +> +>These versions of Rancher do not automatically delete the `serviceAccount`, `clusterRole`, and `clusterRoleBindings` resources after the job runs. You'll have to delete them yourself. + +### Usage + +When you run the command below, all the resources listed [above](#remove) will be removed from the cluster. + +>**Warning:** This command will remove data from your etcd nodes.
Make sure you have created a [backup of etcd](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher.md) before executing the command. + +``` +./system-tools remove --kubeconfig <KUBECONFIG_FILE> --namespace <NAMESPACE> +``` + +The following are the options for the `remove` command: + +| Option | Description +| ---------------------------------------------- | ------------ +| `--kubeconfig <KUBECONFIG_FILE>, -c <KUBECONFIG_FILE>` | The cluster's kubeconfig file. +| `--namespace <NAMESPACE>, -n cattle-system` | Rancher 2.x deployment namespace. If no namespace is defined, the option defaults to `cattle-system`. +| `--force` | Skips the interactive removal confirmation and removes the Rancher deployment without prompting. diff --git a/versioned_docs/version-2.5/reference-guides/user-settings/api-keys.md b/versioned_docs/version-2.5/reference-guides/user-settings/api-keys.md new file mode 100644 index 0000000000..e7edf99953 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/user-settings/api-keys.md @@ -0,0 +1,58 @@ +--- +title: API Keys +weight: 7005 +aliases: + - /rancher/v2.5/en/concepts/api-keys/ + - /rancher/v2.5/en/tasks/user-settings/api-keys/ + - /rancher/v2.x/en/user-settings/api-keys/ +--- + +## API Keys and User Authentication + +If you want to access your Rancher clusters, projects, or other objects using external applications, you can do so using the Rancher API. However, before your application can access the API, you must provide the app with a key used to authenticate with Rancher. You can obtain a key using the Rancher UI. + +An API key is also required for using the Rancher CLI. + +API Keys are composed of four components: + +- **Endpoint:** This is the IP address and path that other applications use to send requests to the Rancher API. +- **Access Key:** The token's username. +- **Secret Key:** The token's password. For applications that prompt you for two different strings for API authentication, you usually enter the two keys together. +- **Bearer Token:** The token username and password concatenated together. Use this string for applications that prompt you for one authentication string. + +## Creating an API Key + +1. Select **User Avatar** > **API & Keys** from the **User Settings** menu in the upper-right. + +2. Click **Add Key**. + +3. **Optional:** Enter a description for the API key and select an expiration period or a scope. We recommend setting an expiration date. + +   The API key won't be valid after expiration. Shorter expiration periods are more secure. + +   The expiration period is bound by `v3/settings/auth-token-max-ttl-minutes`. If it exceeds the max TTL, the API key will be created with the max TTL as the expiration period. + +   A scope will limit the API key so that it will only work against the Kubernetes API of the specified cluster. If the cluster is configured with an Authorized Cluster Endpoint, you will be able to use a scoped token directly against the cluster's API without proxying through the Rancher server. See [Authorized Cluster Endpoints](../../pages-for-subheaders/rancher-manager-architecture.md#4-authorized-cluster-endpoint) for more information. + +4. Click **Create**. + +   **Step Result:** Your API Key is created. Your API **Endpoint**, **Access Key**, **Secret Key**, and **Bearer Token** are displayed. + +   Use the **Bearer Token** to authenticate with the Rancher CLI; an example of calling the Rancher API with it is shown below. + +5. Copy the information displayed to a secure location. This information is only displayed once, so if you lose your key, you'll have to make a new one. + +## What's Next?
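As a quick example of putting a new key to use (a minimal sketch; the Rancher URL and the Bearer Token below are placeholders, and `curl` is assumed to be available), the **Bearer Token** can be sent in an `Authorization` header when calling the Rancher API:

```
# List the clusters this API key can see (replace the URL and token with your own)
curl -s \
  -H "Authorization: Bearer token-xxxxx:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
  https://rancher.example.com/v3/clusters
```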
+ +- Enter your API key information into the application that will send requests to the Rancher API. +- Learn more about the Rancher endpoints and parameters by selecting **View in API** for an object in the Rancher UI. +- API keys are used for API calls and the [Rancher CLI](../../pages-for-subheaders/cli-with-rancher.md). + +## Deleting API Keys + +If you need to revoke an API key, delete it. You should delete API keys: + +- That may have been compromised. +- That have expired. + +To delete an API key, select the stale key and click **Delete**. diff --git a/versioned_docs/version-2.5/reference-guides/user-settings/manage-cloud-credentials.md b/versioned_docs/version-2.5/reference-guides/user-settings/manage-cloud-credentials.md new file mode 100644 index 0000000000..36580ede38 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/user-settings/manage-cloud-credentials.md @@ -0,0 +1,51 @@ +--- +title: Managing Cloud Credentials +weight: 7011 +aliases: + - /rancher/v2.x/en/user-settings/cloud-credentials/ +--- + +When you create a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. + +Node templates can use cloud credentials to access the credential information required to provision nodes in the infrastructure providers. The same cloud credential can be used by multiple node templates. By using a cloud credential, you do not have to re-enter access keys for the same cloud provider. Cloud credentials are stored as Kubernetes secrets. + +Cloud credentials are only used by node templates if there are fields marked as `password`. The default `active` node drivers have their account access fields marked as `password`, but there may be some `inactive` node drivers which are not using them yet. These node drivers will not use cloud credentials. + +You can create cloud credentials in two contexts: + +- [During creation of a node template](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) for a cluster. +- In the **User Settings**. + +All cloud credentials are bound to the user profile of the user who created them. They **cannot** be shared across users. + +## Creating a Cloud Credential from User Settings + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1. Click **Add Cloud Credential**. +1. Enter a name for the cloud credential. +1. Select a **Cloud Credential Type** from the dropdown. The values in this dropdown are based on the `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) in Rancher. +1. Based on the selected cloud credential type, enter the required values to authenticate with the infrastructure provider. +1. Click **Create**. + +**Result:** The cloud credential is created and can immediately be used to [create node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates). + +## Updating a Cloud Credential + +When access credentials are changed or compromised, updating a cloud credential allows you to rotate those credentials while keeping the same node template. + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1.
Choose the cloud credential you want to edit and click **⋮ > Edit**. +1. Update the credential information and click **Save**. + +**Result:** The cloud credential is updated with the new access credentials. All existing node templates using this cloud credential will automatically use the updated information whenever [new nodes are added](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Deleting a Cloud Credential + +A cloud credential can only be deleted if no node templates are associated with it. If you are unable to delete a cloud credential, [delete any node templates](manage-node-templates.md#deleting-a-node-template) that are still associated with it. + +1. From your user settings, select **User Avatar > Cloud Credentials**. +1. You can either individually delete a cloud credential or bulk delete. + +   - To delete an individual cloud credential, choose the one you want to delete and click **⋮ > Delete**. +   - To bulk delete cloud credentials, select one or more cloud credentials from the list. Click **Delete**. +1. Confirm that you want to delete these cloud credentials. diff --git a/versioned_docs/version-2.5/reference-guides/user-settings/manage-node-templates.md b/versioned_docs/version-2.5/reference-guides/user-settings/manage-node-templates.md new file mode 100644 index 0000000000..4bdf6e81e4 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/user-settings/manage-node-templates.md @@ -0,0 +1,49 @@ +--- +title: Managing Node Templates +weight: 7010 +aliases: + - /rancher/v2.x/en/user-settings/node-templates/ +--- + +When you provision a cluster [hosted by an infrastructure provider](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md), [node templates](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#node-templates) are used to provision the cluster nodes. These templates use Docker Machine configuration options to define an operating system image and settings/parameters for the node. You can create node templates in two contexts: + +- While [provisioning a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). +- At any time, from your [user settings](#creating-a-node-template-from-user-settings). + +When you create a node template, it is bound to your user profile. Node templates cannot be shared among users. You can delete stale node templates that you no longer use from your user settings. + +## Creating a Node Template from User Settings + +1. From your user settings, select **User Avatar > Node Templates**. +1. Click **Add Template**. +1. Select one of the available cloud providers. Then follow the on-screen instructions to configure the template. + +**Result:** The template is configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Updating a Node Template + +1. From your user settings, select **User Avatar > Node Templates**. +1. Choose the node template that you want to edit and click **⋮ > Edit**. + +   > **Note:** As of v2.2.0, the default `active` [node drivers](../../how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers.md) and any node driver that has fields marked as `password` are required to use [cloud credentials](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md#cloud-credentials).
If you have upgraded to v2.2.0, existing node templates will continue to work with the previous account access information, but when you edit the node template, you will be required to create a cloud credential and the node template will start using it. + +1. Edit the required information and click **Save**. + +**Result:** The node template is updated. All node pools using this node template will automatically use the updated information when new nodes are added. + +## Cloning Node Templates + +When creating new node templates from your user settings, you can clone an existing template and quickly update its settings rather than creating a new one from scratch. Cloning templates saves you the hassle of re-entering access keys for the cloud provider. + +1. From your user settings, select **User Avatar > Node Templates**. +1. Find the template you want to clone. Then select **⋮ > Clone**. +1. Complete the rest of the form. + +**Result:** The template is cloned and configured. You can use the template later when you [provision a node pool cluster](../../pages-for-subheaders/use-new-nodes-in-an-infra-provider.md). + +## Deleting a Node Template + +When you no longer use a node template, you can delete it from your user settings. + +1. From your user settings, select **User Avatar > Node Templates**. +1. Select one or more template from the list. Then click **Delete**. Confirm the delete when prompted. diff --git a/versioned_docs/version-2.5/reference-guides/user-settings/user-preferences.md b/versioned_docs/version-2.5/reference-guides/user-settings/user-preferences.md new file mode 100644 index 0000000000..679376e608 --- /dev/null +++ b/versioned_docs/version-2.5/reference-guides/user-settings/user-preferences.md @@ -0,0 +1,71 @@ +--- +title: User Preferences +weight: 7012 +aliases: + - /rancher/v2.x/en/user-settings/preferences/ +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Each user can choose preferences to personalize their Rancher experience. To change preference settings, open the **User Settings** menu and then select **Preferences**. + +The preferences available will differ depending on whether the **User Settings** menu was accessed while on the Cluster Manager UI or the Cluster Explorer UI. + + + + +## Theme + +Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. + +## My Account + +This section displays the **Name** (your display name) and **Username** (your login) used for your session. To change your login's current password, click the **Change Password** button. + +## Table Row per Page + +On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. + + + + + +## Theme + +Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. + +## Login Landing Page + +Choose the default page to display after logging in. + +## Date Format + +Choose your preferred format to display dates. By default, dates are displayed in the form `Wed, Jun 9 2021`. + +## Time Format + +Choose your preferred format to display time. By default, the 12-hour format is used. 
+ +## Table Row per Page + +On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. + +## YAML Editor Key Mapping + +Choose the editor used when editing YAML configurations. When Emacs or Vim is chosen, the editor's shortcut commands can also be used. + +## Enable Developer Tools & Features + +Enables developer tools and features to be used. + +## Hide All Type Description Boxes + +Hides all description boxes. + +## Helm Charts + +When deploying applications from the "Apps & Marketplace", choose whether to show only released versions of the Helm chart or to include prerelease versions as well. + + + diff --git a/versioned_docs/version-2.5/security/security-scan/security-scan.md b/versioned_docs/version-2.5/security/security-scan/security-scan.md new file mode 100644 index 0000000000..28e59b6b0d --- /dev/null +++ b/versioned_docs/version-2.5/security/security-scan/security-scan.md @@ -0,0 +1,6 @@ +--- +title: Security Scans +weight: 299 +--- + +The documentation about CIS security scans has moved [here.](../../pages-for-subheaders/cis-scan-guides.md) diff --git a/versioned_docs/version-2.5/shared-files/_cluster-capabilities-table.md b/versioned_docs/version-2.5/shared-files/_cluster-capabilities-table.md new file mode 100644 index 0000000000..65dfefcabc --- /dev/null +++ b/versioned_docs/version-2.5/shared-files/_cluster-capabilities-table.md @@ -0,0 +1,61 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + +| Action | Rancher Launched Kubernetes Clusters | EKS and GKE Clusters1 | Other Hosted Kubernetes Clusters | Non-EKS or GKE Registered Clusters | +| --- | --- | ---| ---|----| +| [Using kubectl and a kubeconfig file to Access a Cluster](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Cluster Members](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) | ✓ | ✓ | ✓ | ✓ | +| [Editing and Upgrading Clusters](../pages-for-subheaders/cluster-configuration.md) | ✓ | ✓ | ✓ | ✓2 | +| [Managing Nodes](../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) | ✓ | ✓ | ✓ | ✓3 | +| [Managing Persistent Volumes and Storage Classes](../pages-for-subheaders/create-kubernetes-persistent-storage.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Projects, Namespaces and Workloads](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) | ✓ | ✓ | ✓ | ✓ | +| [Using App Catalogs](../pages-for-subheaders/helm-charts-in-rancher.md/) | ✓ | ✓ | ✓ | ✓ | +| Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio) | ✓ | ✓ | ✓ | ✓ | +| [Running Security Scans](../pages-for-subheaders/cis-scans.md) | ✓ | ✓ | ✓ | ✓ | +| [Use existing configuration to create additional clusters](../how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md)| ✓ | ✓ |✓ | | +| [Ability to rotate certificates](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md) | ✓ | ✓ | | | +| Ability to [backup](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) and [restore](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) Rancher-launched clusters | ✓ | ✓ | | ✓4 | +| [Cleaning Kubernetes components when clusters are no longer 
reachable from Rancher](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) | ✓ | | | | +| [Configuring Pod Security Policies](../how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md) | ✓ | ✓ | | | +| [Authorized Cluster Endpoint](cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | | + +1. Registered GKE and EKS clusters have the same options available as GKE and EKS clusters created from the Rancher UI. The difference is that when a registered cluster is deleted from the Rancher UI, [it is not destroyed.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md#additional-features-for-registered-eks-and-gke-clusters) + +2. Cluster configuration options can't be edited for registered clusters, except for [K3s and RKE2 clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) + +3. For registered cluster nodes, the Rancher UI exposes the ability to cordon, drain, and edit the node. + +4. For registered clusters using etcd as a control plane, snapshots must be taken manually outside of the Rancher UI to use for backup and recovery. + + + + +| Action | Rancher Launched Kubernetes Clusters | Hosted Kubernetes Clusters | Registered EKS Clusters | All Other Registered Clusters | +| --- | --- | ---| ---|----| +| [Using kubectl and a kubeconfig file to Access a Cluster](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Cluster Members](../how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters.md) | ✓ | ✓ | ✓ | ✓ | +| [Editing and Upgrading Clusters](../pages-for-subheaders/cluster-configuration.md) | ✓ | ✓ | ✓ | ✓1 | +| [Managing Nodes](../how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools.md) | ✓ | ✓ | ✓ | ✓2 | +| [Managing Persistent Volumes and Storage Classes](../pages-for-subheaders/create-kubernetes-persistent-storage.md) | ✓ | ✓ | ✓ | ✓ | +| [Managing Projects, Namespaces and Workloads](../how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces.md) | ✓ | ✓ | ✓ | ✓ | +| [Using App Catalogs](catalog/) | ✓ | ✓ | ✓ | ✓ | +| Configuring Tools (Alerts, Notifiers, Logging, Monitoring, Istio) | ✓ | ✓ | ✓ | ✓ | +| [Running Security Scans](security/security-scan/) | ✓ | ✓ | ✓ | ✓ | +| [Use existing configuration to create additional clusters](../how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration.md)| ✓ | ✓ |✓ | | +| [Ability to rotate certificates](../how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates.md) | ✓ | | ✓ | | +| Ability to [backup](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters.md) and [restore](../how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup.md) Rancher-launched clusters | ✓ | ✓ | | ✓3 | +| [Cleaning Kubernetes components when clusters are no longer reachable from Rancher](../how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes.md) | ✓ | | | | +| [Configuring Pod Security Policies](../how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy.md) | ✓ | | ✓ | | +| [Authorized Cluster Endpoint](cluster-provisioning/rke-clusters/options/#authorized-cluster-endpoint) | ✓ | | | + +1. 
Cluster configuration options can't be edited for registered clusters, except for [K3s and RKE2 clusters.](../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters.md) + +2. For registered cluster nodes, the Rancher UI exposes the ability to cordon, drain, and edit the node. + +3. For registered clusters using etcd as a control plane, snapshots must be taken manually outside of the Rancher UI to use for backup and recovery. + + + diff --git a/versioned_docs/version-2.5/shared-files/_common-ports-table.md b/versioned_docs/version-2.5/shared-files/_common-ports-table.md new file mode 100644 index 0000000000..1835beba03 --- /dev/null +++ b/versioned_docs/version-2.5/shared-files/_common-ports-table.md @@ -0,0 +1,19 @@ +| Protocol | Port | Description | +|:--------: |:----------------: |---------------------------------------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 179 | Calico BGP Port | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 8443 | Rancher webhook | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | +| TCP | 9443 | Rancher webhook | +| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | Metrics server communication with all nodes API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-32767 | NodePort port range | diff --git a/versioned_docs/version-2.5/troubleshooting.md b/versioned_docs/version-2.5/troubleshooting.md new file mode 100644 index 0000000000..d280c7459f --- /dev/null +++ b/versioned_docs/version-2.5/troubleshooting.md @@ -0,0 +1,40 @@ +--- +title: Troubleshooting +weight: 26 +aliases: + - /rancher/v2.x/en/troubleshooting/ +--- + +This section contains information to help you troubleshoot issues when using Rancher. + +- [Kubernetes components](pages-for-subheaders/kubernetes-components.md) + + If you need help troubleshooting core Kubernetes cluster components like: + * `etcd` + * `kube-apiserver` + * `kube-controller-manager` + * `kube-scheduler` + * `kubelet` + * `kube-proxy` + * `nginx-proxy` + +- [Kubernetes resources](troubleshooting/other-troubleshooting-tips/kubernetes-resources.md) + + Options for troubleshooting Kubernetes resources like Nodes, Ingress Controller and Rancher Agents are described in this section. + +- [Networking](troubleshooting/other-troubleshooting-tips/networking.md) + + Steps to troubleshoot networking issues can be found here. + +- [DNS](troubleshooting/other-troubleshooting-tips/dns.md) + + When you experience name resolution issues in your cluster. + +- [Troubleshooting Rancher installed on Kubernetes](troubleshooting/other-troubleshooting-tips/rancher-ha.md) + + If you experience issues with your [Rancher server installed on Kubernetes](pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster.md) + +- [Logging](troubleshooting/other-troubleshooting-tips/logging.md) + + Read more about what log levels can be configured and how to configure a log level. 
+ diff --git a/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md new file mode 100644 index 0000000000..4e1425e770 --- /dev/null +++ b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes.md @@ -0,0 +1,42 @@ +--- +title: Troubleshooting Controlplane Nodes +weight: 2 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-components/controlplane/ +--- + +This section applies to nodes with the `controlplane` role. + +# Check if the Controlplane Containers are Running + +There are three specific containers launched on nodes with the `controlplane` role: + +* `kube-apiserver` +* `kube-controller-manager` +* `kube-scheduler` + +The containers should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name='kube-apiserver|kube-controller-manager|kube-scheduler' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +26c7159abbcc rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-apiserver +f3d287ca4549 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-scheduler +bdf3898b8063 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-controller-manager +``` + +# Controlplane Container Logging + +> **Note:** If you added multiple nodes with the `controlplane` role, both `kube-controller-manager` and `kube-scheduler` use a leader election process to determine the leader. Only the current leader will log the performed actions. See [Kubernetes leader election](../other-troubleshooting-tips/kubernetes-resources.md#kubernetes-leader-election) how to retrieve the current leader. + +The logging of the containers can contain information on what the problem could be. 
+ +``` +docker logs kube-apiserver +docker logs kube-controller-manager +docker logs kube-scheduler +``` \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/kubernetes-components/etcd/_index.md rename to versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-etcd-nodes.md diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/kubernetes-components/nginx-proxy/_index.md rename to versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-nginx-proxy.md diff --git a/content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md b/versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/kubernetes-components/worker-and-generic/_index.md rename to versioned_docs/version-2.5/troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components.md diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md new file mode 100644 index 0000000000..344343007a --- /dev/null +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/dns.md @@ -0,0 +1,219 @@ +--- +title: DNS +weight: 103 +aliases: + - /rancher/v2.x/en/troubleshooting/dns/ +--- + +The commands/steps listed on this page can be used to check name resolution issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +Before running the DNS checks, check the [default DNS provider](../../reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration.md#de) for your cluster and make sure that [the overlay network is functioning correctly](networking.md#check-if-overlay-network-is-functioning-correctly) as this can also be the reason why DNS resolution (partly) fails. + +### Check if DNS pods are running + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns +``` + +Example output when using CoreDNS: +``` +NAME READY STATUS RESTARTS AGE +coredns-799dffd9c4-6jhlz 1/1 Running 0 76m +``` + +Example output when using kube-dns: +``` +NAME READY STATUS RESTARTS AGE +kube-dns-5fd74c7488-h6f7n 3/3 Running 0 4m13s +``` + +### Check if the DNS service is present with the correct cluster-ip + +``` +kubectl -n kube-system get svc -l k8s-app=kube-dns +``` + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kube-dns ClusterIP 10.43.0.10 53/UDP,53/TCP 4m13s +``` + +### Check if domain names are resolving + +Check if internal cluster names are resolving (in this example, `kubernetes.default`), the IP shown after `Server:` should be the same as the `CLUSTER-IP` from the `kube-dns` service. 
+ +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup kubernetes.default +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: kubernetes.default +Address 1: 10.43.0.1 kubernetes.default.svc.cluster.local +pod "busybox" deleted +``` + +Check if external names are resolving (in this example, `www.google.com`) + +``` +kubectl run -it --rm --restart=Never busybox --image=busybox:1.28 -- nslookup www.google.com +``` + +Example output: +``` +Server: 10.43.0.10 +Address 1: 10.43.0.10 kube-dns.kube-system.svc.cluster.local + +Name: www.google.com +Address 1: 2a00:1450:4009:80b::2004 lhr35s04-in-x04.1e100.net +Address 2: 216.58.211.100 ams15s32-in-f4.1e100.net +pod "busybox" deleted +``` + +If you want to check resolving of domain names on all of the hosts, execute the following steps: + +1. Save the following file as `ds-dnstest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: dnstest + spec: + selector: + matchLabels: + name: dnstest + template: + metadata: + labels: + name: dnstest + spec: + tolerations: + - operator: Exists + containers: + - image: busybox:1.28 + imagePullPolicy: Always + name: alpine + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + ``` + +2. Launch it using `kubectl create -f ds-dnstest.yml` +3. Wait until `kubectl rollout status ds/dnstest -w` returns: `daemon set "dnstest" successfully rolled out`. +4. Configure the environment variable `DOMAIN` to a fully qualified domain name (FQDN) that the host should be able to resolve (`www.google.com` is used as an example) and run the following command to let each container on every host resolve the configured domain name (it's a single line command). + + ``` + export DOMAIN=www.google.com; echo "=> Start DNS resolve test"; kubectl get pods -l name=dnstest --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do kubectl exec $pod -- /bin/sh -c "nslookup $DOMAIN > /dev/null 2>&1"; RC=$?; if [ $RC -ne 0 ]; then echo $host cannot resolve $DOMAIN; fi; done; echo "=> End DNS resolve test" + ``` + +5. When this command has finished running, the output indicating everything is correct is: + + ``` + => Start DNS resolve test + => End DNS resolve test + ``` + +If you see error in the output, that means that the mentioned host(s) is/are not able to resolve the given FQDN. + +Example error output of a situation where host with IP 209.97.182.150 had the UDP ports blocked. + +``` +=> Start DNS resolve test +command terminated with exit code 1 +209.97.182.150 cannot resolve www.google.com +=> End DNS resolve test +``` + +Cleanup the alpine DaemonSet by running `kubectl delete ds/dnstest`. + +### CoreDNS specific + +#### Check CoreDNS logging + +``` +kubectl -n kube-system logs -l k8s-app=kube-dns +``` + +#### Check configuration + +CoreDNS configuration is stored in the configmap `coredns` in the `kube-system` namespace. + +``` +kubectl -n kube-system get configmap coredns -o go-template={{.data.Corefile}} +``` + +#### Check upstream nameservers in resolv.conf + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for CoreDNS. You can check this file on the host or run the following Pod with `dnsPolicy` set to `Default`, which will inherit the `/etc/resolv.conf` from the host it is running on. 
+ +``` +kubectl run -i --restart=Never --rm test-${RANDOM} --image=ubuntu --overrides='{"kind":"Pod", "apiVersion":"v1", "spec": {"dnsPolicy":"Default"}}' -- sh -c 'cat /etc/resolv.conf' +``` + +#### Enable query logging + +Enabling query logging can be done by enabling the [log plugin](https://coredns.io/plugins/log/) in the Corefile configuration in the configmap `coredns`. You can do so by using `kubectl -n kube-system edit configmap coredns` or use the command below to replace the configuration in place: + +``` +kubectl get configmap -n kube-system coredns -o json | sed -e 's_loadbalance_log\\n loadbalance_g' | kubectl apply -f - +``` + +All queries will now be logged and can be checked using the command in [Check CoreDNS logging](#check-coredns-logging). + +### kube-dns specific + +#### Check upstream nameservers in kubedns container + +By default, the configured nameservers on the host (in `/etc/resolv.conf`) will be used as upstream nameservers for kube-dns. Sometimes the host will run a local caching DNS nameserver, which means the address in `/etc/resolv.conf` will point to an address in the loopback range (`127.0.0.0/8`) which will be unreachable by the container. In case of Ubuntu 18.04, this is done by `systemd-resolved`. We detect if `systemd-resolved` is running, and will automatically use the `/etc/resolv.conf` file with the correct upstream nameservers (which is located at `/run/systemd/resolve/resolv.conf`). + +Use the following command to check the upstream nameservers used by the kubedns container: + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns --no-headers -o custom-columns=NAME:.metadata.name,HOSTIP:.status.hostIP | while read pod host; do echo "Pod ${pod} on host ${host}"; kubectl -n kube-system exec $pod -c kubedns cat /etc/resolv.conf; done +``` + +Example output: +``` +Pod kube-dns-667c7cb9dd-z4dsf on host x.x.x.x +nameserver 1.1.1.1 +nameserver 8.8.4.4 +``` + +If the output shows an address in the loopback range (`127.0.0.0/8`), you can correct this in two ways: + +* Make sure the correct nameservers are listed in `/etc/resolv.conf` on your nodes in the cluster, please consult your operating system documentation on how to do this. Make sure you execute this before provisioning a cluster, or reboot the nodes after making the modification. +* Configure the `kubelet` to use a different file for resolving names, by using `extra_args` as shown below (where `/run/resolvconf/resolv.conf` is the file with the correct nameservers): + +``` +services: + kubelet: + extra_args: + resolv-conf: "/run/resolvconf/resolv.conf" +``` + +> **Note:** As the `kubelet` is running inside a container, the path for files located in `/etc` and `/usr` are in `/host/etc` and `/host/usr` inside the `kubelet` container. + +See [Editing Cluster as YAML](../../pages-for-subheaders/cluster-configuration.md#editing-clusters-with-yaml) how to apply this change. When the provisioning of the cluster has finished, you have to remove the kube-dns pod to activate the new setting in the pod: + +``` +kubectl delete pods -n kube-system -l k8s-app=kube-dns +pod "kube-dns-5fd74c7488-6pwsf" deleted +``` + +Try to resolve name again using [Check if domain names are resolving](#check-if-domain-names-are-resolving). 
+ +If you want to check the kube-dns configuration in your cluster (for example, to check if there are different upstream nameservers configured), you can run the following command to list the kube-dns configuration: + +``` +kubectl -n kube-system get configmap kube-dns -o go-template='{{range $key, $value := .data}}{{ $key }}{{":"}}{{ $value }}{{"\n"}}{{end}}' +``` + +Example output: +``` +upstreamNameservers:["1.1.1.1"] +``` diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md new file mode 100644 index 0000000000..2b23884dd8 --- /dev/null +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/kubernetes-resources.md @@ -0,0 +1,273 @@ +--- +title: Kubernetes resources +weight: 101 +aliases: + - /rancher/v2.x/en/troubleshooting/kubernetes-resources/ +--- + +The commands/steps listed on this page can be used to check the most important Kubernetes resources and apply to [Rancher Launched Kubernetes](../../pages-for-subheaders/launch-kubernetes-with-rancher.md) clusters. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +- [Nodes](#nodes) + - [Get nodes](#get-nodes) + - [Get node conditions](#get-node-conditions) +- [Kubernetes leader election](#kubernetes-leader-election) + - [Kubernetes controller manager leader](#kubernetes-controller-manager-leader) + - [Kubernetes scheduler leader](#kubernetes-scheduler-leader) +- [Ingress controller](#ingress-controller) + - [Pod details](#pod-details) + - [Pod container logs](#pod-container-logs) + - [Namespace events](#namespace-events) + - [Debug logging](#debug-logging) + - [Check configuration](#check-configuration) +- [Rancher agents](#rancher-agents) + - [cattle-node-agent](#cattle-node-agent) + - [cattle-cluster-agent](#cattle-cluster-agent) +- [Jobs and pods](#jobs-and-pods) + - [Check that pods or jobs have status Running/Completed](#check-that-pods-or-jobs-have-status-running-completed) + - [Describe pod](#describe-pod) + - [Pod container logs](#pod-container-logs) + - [Describe job](#describe-job) + - [Logs from the containers of pods of the job](#logs-from-the-containers-of-pods-of-the-job) + - [Evicted pods](#evicted-pods) + - [Job does not complete](#job-does-not-complete) + +# Nodes + +### Get nodes + +Run the command below and check the following: + +- All nodes in your cluster should be listed, make sure there is not one missing. +- All nodes should have the **Ready** status (if not in **Ready** state, check the `kubelet` container logs on that node using `docker logs kubelet`) +- Check if all nodes report the correct version. 
+- Check if OS/Kernel/Docker values are shown as expected (possibly you can relate issues due to upgraded OS/Kernel/Docker) + + +``` +kubectl get nodes -o wide +``` + +Example output: + +``` +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +controlplane-0 Ready controlplane 31m v1.13.5 138.68.188.91 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +etcd-0 Ready etcd 31m v1.13.5 138.68.180.33 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +worker-0 Ready worker 30m v1.13.5 139.59.179.88 Ubuntu 18.04.2 LTS 4.15.0-47-generic docker://18.9.5 +``` + +### Get node conditions + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{end}}' +``` + +Run the command below to list nodes with [Node Conditions](https://kubernetes.io/docs/concepts/architecture/nodes/#condition) that are active that could prevent normal operation. + +``` +kubectl get nodes -o go-template='{{range .items}}{{$node := .}}{{range .status.conditions}}{{if ne .type "Ready"}}{{if eq .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{":"}}{{.status}}{{"\n"}}{{end}}{{else}}{{if ne .status "True"}}{{$node.metadata.name}}{{": "}}{{.type}}{{": "}}{{.status}}{{"\n"}}{{end}}{{end}}{{end}}{{end}}' +``` + +Example output: + +``` +worker-0: DiskPressure:True +``` + +# Kubernetes leader election + +### Kubernetes Controller Manager leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-controller-manager` endpoint (in this example, `controlplane-0`). + +``` +kubectl -n kube-system get endpoints kube-controller-manager -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +### Kubernetes Scheduler leader + +The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `kube-scheduler` endpoint (in this example, `controlplane-0`). + +``` +kubectl -n kube-system get endpoints kube-scheduler -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"controlplane-0_xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx","leaseDurationSeconds":15,"acquireTime":"2018-12-27T08:59:45Z","renewTime":"2018-12-27T09:44:57Z","leaderTransitions":0}> +``` + +# Ingress Controller + +The default Ingress Controller is NGINX and is deployed as a DaemonSet in the `ingress-nginx` namespace. The pods are only scheduled to nodes with the `worker` role. 
+ +Check if the pods are running on all nodes: + +``` +kubectl -n ingress-nginx get pods -o wide +``` + +Example output: + +``` +kubectl -n ingress-nginx get pods -o wide +NAME READY STATUS RESTARTS AGE IP NODE +default-http-backend-797c5bc547-kwwlq 1/1 Running 0 17m x.x.x.x worker-1 +nginx-ingress-controller-4qd64 1/1 Running 0 14m x.x.x.x worker-1 +nginx-ingress-controller-8wxhm 1/1 Running 0 13m x.x.x.x worker-0 +``` + +If a pod is unable to run (Status is not **Running**, Ready status is not showing `1/1` or you see a high count of Restarts), check the pod details, logs and namespace events. + +### Pod details + +``` +kubectl -n ingress-nginx describe pods -l app=ingress-nginx +``` + +### Pod container logs + +``` +kubectl -n ingress-nginx logs -l app=ingress-nginx +``` + +### Namespace events + +``` +kubectl -n ingress-nginx get events +``` + +### Debug logging + +To enable debug logging: + +``` +kubectl -n ingress-nginx patch ds nginx-ingress-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--v=5"}]' +``` + +### Check configuration + +Retrieve generated configuration in each pod: + +``` +kubectl -n ingress-nginx get pods -l app=ingress-nginx --no-headers -o custom-columns=.NAME:.metadata.name | while read pod; do kubectl -n ingress-nginx exec $pod -- cat /etc/nginx/nginx.conf; done +``` + +# Rancher agents + +Communication to the cluster (Kubernetes API via `cattle-cluster-agent`) and communication to the nodes (cluster provisioning via `cattle-node-agent`) is done through Rancher agents. + +#### cattle-node-agent + +Check if the cattle-node-agent pods are present on each node, have status **Running** and don't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-node-agent-4gc2p 1/1 Running 0 2h x.x.x.x worker-1 +cattle-node-agent-8cxkk 1/1 Running 0 2h x.x.x.x etcd-1 +cattle-node-agent-kzrlg 1/1 Running 0 2h x.x.x.x etcd-0 +cattle-node-agent-nclz9 1/1 Running 0 2h x.x.x.x controlplane-0 +cattle-node-agent-pwxp7 1/1 Running 0 2h x.x.x.x worker-0 +cattle-node-agent-t5484 1/1 Running 0 2h x.x.x.x controlplane-1 +cattle-node-agent-t8mtz 1/1 Running 0 2h x.x.x.x etcd-2 +``` + +Check logging of a specific cattle-node-agent pod or all cattle-node-agent pods: + +``` +kubectl -n cattle-system logs -l app=cattle-agent +``` + +#### cattle-cluster-agent + +Check if the cattle-cluster-agent pod is present in the cluster, has status **Running** and doesn't have a high count of Restarts: + +``` +kubectl -n cattle-system get pods -l app=cattle-cluster-agent -o wide +``` + +Example output: + +``` +NAME READY STATUS RESTARTS AGE IP NODE +cattle-cluster-agent-54d7c6c54d-ht9h4 1/1 Running 0 2h x.x.x.x worker-1 +``` + +Check logging of cattle-cluster-agent pod: + +``` +kubectl -n cattle-system logs -l app=cattle-cluster-agent +``` + +# Jobs and Pods + +### Check that pods or jobs have status **Running**/**Completed** + +To check, run the command: + +``` +kubectl get pods --all-namespaces +``` + +If a pod is not in **Running** state, you can dig into the root cause by running: + +### Describe pod + +``` +kubectl describe pod POD_NAME -n NAMESPACE +``` + +### Pod container logs + +``` +kubectl logs POD_NAME -n NAMESPACE +``` + +If a job is not in **Completed** state, you can dig into the root cause by running: + +### Describe job + +``` +kubectl describe job JOB_NAME -n NAMESPACE +``` + +### Logs from the containers of pods 
of the job + +``` +kubectl logs -l job-name=JOB_NAME -n NAMESPACE +``` + +### Evicted pods + +Pods can be evicted based on [eviction signals](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/#eviction-policy). + +Retrieve a list of evicted pods (podname and namespace): + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' +``` + +To delete all evicted pods: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace delete pod $epod; done +``` + +Retrieve a list of evicted pods, scheduled node and the reason: + +``` +kubectl get pods --all-namespaces -o go-template='{{range .items}}{{if eq .status.phase "Failed"}}{{if eq .status.reason "Evicted"}}{{.metadata.name}}{{" "}}{{.metadata.namespace}}{{"\n"}}{{end}}{{end}}{{end}}' | while read epod enamespace; do kubectl -n $enamespace get pod $epod -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName,MSG:.status.message; done +``` + +### Job does not complete + +If you have enabled Istio, and you are having issues with a Job you deployed not completing, you will need to add an annotation to your pod using [these steps.](../../how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace.md) + +Since Istio Sidecars run indefinitely, a Job cannot be considered complete even after its task has completed. This is a temporary workaround and will disable Istio for any traffic to/from the annotated Pod. Keep in mind this may not allow you to continue to use a Job for integration testing, as the Job will not have access to the service mesh. \ No newline at end of file diff --git a/content/rancher/v2.5/en/troubleshooting/logging/_index.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/logging/_index.md rename to versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/logging.md diff --git a/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md new file mode 100644 index 0000000000..ff1efadd61 --- /dev/null +++ b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/networking.md @@ -0,0 +1,120 @@ +--- +title: Networking +weight: 102 +aliases: + - /rancher/v2.x/en/troubleshooting/networking/ +--- + +The commands/steps listed on this page can be used to check networking related issues in your cluster. + +Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml` for Rancher HA) or are using the embedded kubectl via the UI. + +### Double check if all the required ports are opened in your (host) firewall + +Double check if all the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) are opened in your (host) firewall. The overlay network uses UDP in comparison to all other required ports which are TCP. 
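Before moving on to the overlay test, it can help to probe individual ports between two nodes. This is a minimal sketch (the node IP and ports are placeholders, and it assumes the `nc` utility from netcat is installed on the host); note that a UDP probe with `nc` only shows that packets are not actively rejected, so treat it as a hint rather than proof:

```
# TCP: verify that the Kubernetes API server port on a controlplane node is reachable
nc -z -v -w 5 203.0.113.10 6443

# UDP: probe the VXLAN overlay port used by Canal/Flannel
nc -z -v -u -w 5 203.0.113.10 8472
```

The overlay network test below remains the more reliable check, because it verifies actual pod-to-pod connectivity instead of a single port.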
+### Check if overlay network is functioning correctly + +The pod can be scheduled to any of the hosts you used for your cluster, but that means that the NGINX ingress controller needs to be able to route the request from `NODE_1` to `NODE_2`. This happens over the overlay network. If the overlay network is not functioning, you will experience intermittent TCP/HTTP connection failures due to the NGINX ingress controller not being able to route to the pod. + +To test the overlay network, you can launch the following `DaemonSet` definition. This will run a `swiss-army-knife` container on every host (image was developed by Rancher engineers and can be found here: https://github.com/rancherlabs/swiss-army-knife), which we will use to run a `ping` test between containers on all hosts. + +> **Note:** This container [does not support ARM nodes](https://github.com/leodotcloud/swiss-army-knife/issues/18), such as a Raspberry Pi. This will be seen in the pod logs as `exec user process caused: exec format error`. + +1. Save the following file as `overlaytest.yml` + + ``` + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: overlaytest + spec: + selector: + matchLabels: + name: overlaytest + template: + metadata: + labels: + name: overlaytest + spec: + tolerations: + - operator: Exists + containers: + - image: rancherlabs/swiss-army-knife + imagePullPolicy: Always + name: overlaytest + command: ["sh", "-c", "tail -f /dev/null"] + terminationMessagePath: /dev/termination-log + + ``` + +2. Launch it using `kubectl create -f overlaytest.yml` +3. Wait until `kubectl rollout status ds/overlaytest -w` returns: `daemon set "overlaytest" successfully rolled out`. +4. Run the following script, from the same location. It will have each `overlaytest` container on every host ping each other: + ``` + #!/bin/bash + echo "=> Start network overlay test" + kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.metadata.name}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read spod shost + do kubectl get pods -l name=overlaytest -o jsonpath='{range .items[*]}{@.status.podIP}{" "}{@.spec.nodeName}{"\n"}{end}' | + while read tip thost + do kubectl --request-timeout='10s' exec $spod -c overlaytest -- /bin/sh -c "ping -c2 $tip > /dev/null 2>&1" + RC=$? + if [ $RC -ne 0 ] + then echo FAIL: $spod on $shost cannot reach pod IP $tip on $thost + else echo $shost can reach $thost + fi + done + done + echo "=> End network overlay test" + ``` + +5. When this command has finished running, it will output the state of each route: + + ``` + => Start network overlay test + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.7.3 on wk2 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.0.5 on cp1 + Error from server (NotFound): pods "wk2" not found + FAIL: overlaytest-5bglp on wk2 cannot reach pod IP 10.42.2.12 on wk1 + command terminated with exit code 1 + FAIL: overlaytest-v4qkl on cp1 cannot reach pod IP 10.42.7.3 on wk2 + cp1 can reach cp1 + cp1 can reach wk1 + command terminated with exit code 1 + FAIL: overlaytest-xpxwp on wk1 cannot reach pod IP 10.42.7.3 on wk2 + wk1 can reach cp1 + wk1 can reach wk1 + => End network overlay test + ``` + If you see error in the output, there is some issue with the route between the pods on the two hosts. In the above output the node `wk2` has no connectivity over the overlay network. 
This could be because the [required ports](../../how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters.md#networking-requirements) for overlay networking are not opened for `wk2`. +6. You can now clean up the DaemonSet by running `kubectl delete ds/overlaytest`. + + +### Check if MTU is correctly configured on hosts and on peering/tunnel appliances/devices + +When the MTU is incorrectly configured (either on hosts running Rancher, nodes in created/imported clusters or on appliances/devices in between), error messages will be logged in Rancher and in the agents, similar to: + +* `websocket: bad handshake` +* `Failed to connect to proxy` +* `read tcp: i/o timeout` + +See [Google Cloud VPN: MTU Considerations](https://cloud.google.com/vpn/docs/concepts/mtu-considerations#gateway_mtu_vs_system_mtu) for an example how to configure MTU correctly when using Google Cloud VPN between Rancher and cluster nodes. + +### Resolved issues + +#### Overlay network broken when using Canal/Flannel due to missing node annotations + +| | | +|------------|------------| +| GitHub issue | [#13644](https://github.com/rancher/rancher/issues/13644) | +| Resolved in | v2.1.2 | + +To check if your cluster is affected, the following command will list nodes that are broken (this command requires `jq` to be installed): + +``` +kubectl get nodes -o json | jq '.items[].metadata | select(.annotations["flannel.alpha.coreos.com/public-ip"] == null or .annotations["flannel.alpha.coreos.com/kube-subnet-manager"] == null or .annotations["flannel.alpha.coreos.com/backend-type"] == null or .annotations["flannel.alpha.coreos.com/backend-data"] == null) | .name' +``` + +If there is no output, the cluster is not affected. 
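To help confirm an MTU problem like the one described in the MTU section above, the sketch below sends non-fragmentable ICMP packets of a chosen size. It assumes Linux `ping` (iputils), a nominal 1500-byte MTU, and a placeholder `TARGET_IP` for the Rancher server or a cluster node.

```
# A 1500-byte MTU minus 28 bytes of IP/ICMP headers leaves a 1472-byte payload
ping -M do -s 1472 -c 3 TARGET_IP

# If the command above fails (e.g. "Frag needed") while a smaller payload succeeds,
# a device on the path uses a lower MTU than the hosts expect
ping -M do -s 1372 -c 3 TARGET_IP
```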
diff --git a/content/rancher/v2.5/en/troubleshooting/rancherha/_index.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/rancherha/_index.md rename to versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/rancher-ha.md diff --git a/content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md b/versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md similarity index 100% rename from content/rancher/v2.5/en/troubleshooting/imported-clusters/_index.md rename to versioned_docs/version-2.5/troubleshooting/other-troubleshooting-tips/registered-clusters.md diff --git a/content/rancher/v2.6/en/admin-settings/_index.md b/versioned_docs/version-2.6/admin-settings/admin-settings.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/_index.md rename to versioned_docs/version-2.6/admin-settings/admin-settings.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/ad/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/ad/ad.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/ad/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/ad/ad.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/authentication.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/authentication.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/azure-ad/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/azure-ad/azure-ad.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/azure-ad/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/azure-ad/azure-ad.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/freeipa/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/freeipa/freeipa.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/freeipa/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/freeipa/freeipa.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/github/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/github/github.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/github/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/github/github.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/google/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/google/google.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/google/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/google/google.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/keycloak-oidc/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/keycloak-oidc/keycloak-oidc.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/keycloak-oidc/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/keycloak-oidc/keycloak-oidc.md diff --git 
a/versioned_docs/version-2.6/admin-settings/authentication/keycloak-saml/keycloak-saml.md b/versioned_docs/version-2.6/admin-settings/authentication/keycloak-saml/keycloak-saml.md new file mode 100644 index 0000000000..3ddca672fa --- /dev/null +++ b/versioned_docs/version-2.6/admin-settings/authentication/keycloak-saml/keycloak-saml.md @@ -0,0 +1,179 @@ +--- +title: Configuring Keycloak (SAML) +description: Create a Keycloak SAML client and configure Rancher to work with Keycloak. By the end your users will be able to sign into Rancher using their Keycloak logins +weight: 1200 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If your organization uses Keycloak Identity Provider (IdP) for user authentication, you can configure Rancher to allow your users to log in using their IdP credentials. + +## Prerequisites + +- You must have a [Keycloak IdP Server](https://www.keycloak.org/docs/latest/server_installation/) configured. +- In Keycloak, create a [new SAML client](https://www.keycloak.org/docs/latest/server_admin/#saml-clients), with the settings below. See the [Keycloak documentation](https://www.keycloak.org/docs/latest/server_admin/#saml-clients) for help. + + Setting | Value + ------------|------------ + `Sign Documents` | `ON` 1 + `Sign Assertions` | `ON` 1 + All other `ON/OFF` Settings | `OFF` + `Client ID` | Either `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` or the value configured in the `Entry ID Field` of the Rancher Keycloak configuration2 + `Client Name` | (e.g. `rancher`) + `Client Protocol` | `SAML` + `Valid Redirect URI` | `https://yourRancherHostURL/v1-saml/keycloak/saml/acs` + + >1: Optionally, you can enable either one or both of these settings. + >2: Rancher SAML metadata won't be generated until a SAML provider is configured and saved. + + {{< img "/img/rancher/keycloak/keycloak-saml-client-configuration.png" "">}} + +- In the new SAML client, create Mappers to expose the users fields + - Add all "Builtin Protocol Mappers" + {{< img "/img/rancher/keycloak/keycloak-saml-client-builtin-mappers.png" "">}} + - Create a new "Group list" mapper to map the member attribute to a user's groups + {{< img "/img/rancher/keycloak/keycloak-saml-client-group-mapper.png" "">}} + +## Getting the IDP Metadata + + + + +To get the IDP metadata, export a `metadata.xml` file from your Keycloak client. +From the **Installation** tab, choose the **SAML Metadata IDPSSODescriptor** format option and download your file. + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. + +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. 
Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + +The XML obtained contains `EntitiesDescriptor` as the root element. Rancher expects the root element to be `EntityDescriptor` rather than `EntitiesDescriptor`. So before passing this XML to Rancher, follow these steps to adjust it: + +1. Copy all the attributes from `EntitiesDescriptor` to the `EntityDescriptor` that are not present. +1. Remove the `<EntitiesDescriptor>` tag from the beginning. +1. Remove the `</EntitiesDescriptor>` from the end of the xml. + +You are left with something similar to the example below: + +``` +<EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" ...> +.... +</EntityDescriptor> +``` + + + + +1. From the **Configure** section, click the **Realm Settings** tab. +1. Click the **General** tab. +1. From the **Endpoints** field, click **SAML 2.0 Identity Provider Metadata**. + +Verify the IDP metadata contains the following attributes: + +``` +xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" +xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" +xmlns:ds="http://www.w3.org/2000/09/xmldsig#" +``` + +Some browsers, such as Firefox, may render/process the document such that the contents appear to have been modified, and some attributes appear to be missing. In this situation, use the raw response data that can be found using your browser. + +The following is an example process for Firefox, but will vary slightly for other browsers: + +1. Press **F12** to access the developer console. +1. Click the **Network** tab. +1. From the table, click the row containing `descriptor`. +1. From the details pane, click the **Response** tab. +1. Copy the raw response data. + + + + +## Configuring Keycloak in Rancher + + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Auth Provider**. +1. Click **Keycloak SAML**. +1. Complete the **Configure Keycloak Account** form. For help with filling the form, see the [configuration reference](#configuration-reference). +1. After you complete the **Configure a Keycloak Account** form, click **Enable**. + + Rancher redirects you to the IdP login page. Enter credentials that authenticate with Keycloak IdP to validate your Rancher Keycloak configuration. + + >**Note:** You may have to disable your popup blocker to see the IdP login page. + +**Result:** Rancher is configured to work with Keycloak. Your users can now sign into Rancher using their Keycloak logins. + +{{< saml_caveats >}} + +## Configuration Reference + +| Field | Description | +| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Display Name Field | The attribute that contains the display name of users.

Example: `givenName` | +| User Name Field | The attribute that contains the user name/given name. Example: `email` | +| UID Field | An attribute that is unique to every user. Example: `email` | +| Groups Field | Make entries for managing group memberships. Example: `member` | +| Entity ID Field | The ID that needs to be configured as a client ID in the Keycloak client.
    Default: `https://yourRancherHostURL/v1-saml/keycloak/saml/metadata` | +| Rancher API Host | The URL for your Rancher Server. | +| Private Key / Certificate | A key/certificate pair to create a secure shell between Rancher and your IdP. | +| IDP-metadata | The `metadata.xml` file that you exported from your IdP server. | + +>**Tip:** You can generate a key/certificate pair using an openssl command. For example: +> +> openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout myservice.key -out myservice.cert + +## Annex: Troubleshooting + +If you are experiencing issues while testing the connection to the Keycloak server, first double-check the configuration option of your SAML client. You may also inspect the Rancher logs to help pinpointing the problem cause. Debug logs may contain more detailed information about the error. Please refer to [How can I enable debug logging]({{}}/rancher/v2.6/en/faq/technical/#how-can-i-enable-debug-logging) in this documentation. + +### You are not redirected to Keycloak + +When you click on **Authenticate with Keycloak**, you are not redirected to your IdP. + + * Verify your Keycloak client configuration. + * Make sure `Force Post Binding` set to `OFF`. + + +### Forbidden message displayed after IdP login + +You are correctly redirected to your IdP login page and you are able to enter your credentials, however you get a `Forbidden` message afterwards. + + * Check the Rancher debug log. + * If the log displays `ERROR: either the Response or Assertion must be signed`, make sure either `Sign Documents` or `Sign assertions` is set to `ON` in your Keycloak client. + +### HTTP 502 when trying to access /v1-saml/keycloak/saml/metadata + +This is usually due to the metadata not being created until a SAML provider is configured. +Try configuring and saving keycloak as your SAML provider and then accessing the metadata. + +### Keycloak Error: "We're sorry, failed to process response" + + * Check your Keycloak log. + * If the log displays `failed: org.keycloak.common.VerificationException: Client does not have a public key`, set `Encrypt Assertions` to `OFF` in your Keycloak client. + +### Keycloak Error: "We're sorry, invalid requester" + + * Check your Keycloak log. + * If the log displays `request validation failed: org.keycloak.common.VerificationException: SigAlg was null`, set `Client Signature Required` to `OFF` in your Keycloak client. diff --git a/versioned_docs/version-2.6/admin-settings/authentication/local/local.md b/versioned_docs/version-2.6/admin-settings/authentication/local/local.md new file mode 100644 index 0000000000..b7dea95468 --- /dev/null +++ b/versioned_docs/version-2.6/admin-settings/authentication/local/local.md @@ -0,0 +1,16 @@ +--- +title: Local Authentication +weight: 1111 +--- + +Local authentication is the default until you configure an external authentication provider. Local authentication is where Rancher stores the user information, i.e. names and passwords, of who can log in to Rancher. By default, the `admin` user that logs in to Rancher for the first time is a local user. + +## Adding Local Users + +Regardless of whether you use external authentication, you should create a few local authentication users so that you can continue using Rancher if your external authentication service encounters issues. + +1. In the top left corner, click **☰ > Users & Authentication**. +1. In the left navigation menu, click **Users**. +1. Click **Create**. +1. Complete the **Add User** form. +1. 
Click **Create**. diff --git a/content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/microsoft-adfs-setup/microsoft-adfs-setup.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/microsoft-adfs.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/microsoft-adfs.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/microsoft-adfs/rancher-adfs-setup/rancher-adfs-setup.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/okta/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/okta/okta.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/okta/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/okta/okta.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/openldap/openldap-config/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/openldap/openldap-config/openldap-config.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/openldap/openldap-config/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/openldap/openldap-config/openldap-config.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/openldap/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/openldap/openldap.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/openldap/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/openldap/openldap.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/ping-federate/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/ping-federate/ping-federate.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/ping-federate/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/ping-federate/ping-federate.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/shibboleth/about/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/shibboleth/about/about.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/shibboleth/about/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/shibboleth/about/about.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/shibboleth/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/shibboleth/shibboleth.md similarity index 100% rename from 
content/rancher/v2.6/en/admin-settings/authentication/shibboleth/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/shibboleth/shibboleth.md diff --git a/content/rancher/v2.6/en/admin-settings/authentication/user-groups/_index.md b/versioned_docs/version-2.6/admin-settings/authentication/user-groups/user-groups.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/authentication/user-groups/_index.md rename to versioned_docs/version-2.6/admin-settings/authentication/user-groups/user-groups.md diff --git a/versioned_docs/version-2.6/admin-settings/branding/branding.md b/versioned_docs/version-2.6/admin-settings/branding/branding.md new file mode 100644 index 0000000000..fcd8e53345 --- /dev/null +++ b/versioned_docs/version-2.6/admin-settings/branding/branding.md @@ -0,0 +1,213 @@ +--- +title: Custom Branding +weight: 90 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Rancher v2.6 introduced the ability to customize Rancher’s branding and navigation links. + +- [Changing Brand Settings](#changing-brand-settings) +- [Brand Configuration](#brand-configuration) +- [Custom Navigation Links](#custom-navigation-links) +- [Link Configuration](#link-configuration) +- [Link Examples](#link-examples) + +# Changing Brand Settings + +> **Prerequisite:** You will need to have at least cluster member permissions. + +To configure the brand settings, + +1. Click **☰ > Global settings**. +2. Click **Branding**. + +# Brand Configuration + +### Private Label Company Name + +This option replaces “Rancher” with the value you provide in most places. Files that need to have Rancher in the name, such as “rancher-compose.yml”, will not be changed. + +### Support Links + +Use a url address to send new "File an Issue" reports instead of sending users to the Github issues page. Optionally show Rancher community support links. + +### Logo + +Upload light and dark logos to replace the Rancher logo in the top-level navigation header. + +### Primary Color + +You can override the primary color used throughout the UI with a custom color of your choice. + +### Fixed Banners + + + + +Display a custom fixed banner in the header, footer, or both. + + + + +Display a custom fixed banner in the header, footer, or both. + +As of Rancher v2.6.4, configuration of fixed banners has moved from the **Branding** tab to the **Banners** tab. + +To configure banner settings, + +1. Click **☰ > Global settings**. +2. Click **Banners**. + + + + +# Custom Navigation Links + +In this section, you'll learn how to configure the links in the left navigation bar of the **Cluster Dashboard**. To get to the cluster dashboard, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want custom navigation links and click **Explore**. + +It can be useful to add a link for quick access to services installed on a cluster. For example, you could add a link to the Kiali UI for clusters with Istio installed, or you could add a link to the Grafana UI for clusters with Rancher monitoring installed. + +The custom links don't affect who has access to each service. + +Links can be created at the top level and multiple links can be grouped together. + +### Adding a Custom Navigation Link + +> **Prerequisite:** You will need to have at least cluster member or project member permissions. + +1. Click **☰ > Cluster Management**. +1. 
On the **Clusters** page, go to the cluster where you would like to add custom navigation links and click **Explore**. +2. In the top navigation menu, click **🔍 (Resource Search)**. +3. Type **Nav** and click **Nav Links**. +4. Click **Create from YAML**. +5. The simplest way to create a navigation link is to add these fields: + + name: linkname + toURL: https://example.com + + For more details on setting up links, including optional fields, see [Link Configuration.](#link-configuration) +6. Click **Create**. + +# Link Configuration + +### `name` + +Display name for the link. Required. + +### `group` + +Name of a group of links that expands when clicked. + +Optional. If not provided, the link appears standalone. + +Groups are displayed separately from standalone links, as shown below: + +![Screenshot of group and standalone link]({{}}/img/rancher/grouped-vs-standalone-links.png) + +### `iconSrc` + +Icon source in in base64 format. + +Below is an example of the Grafana logo in base64 format: + +``` +data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAgAElEQVR4Aey9d5xkZZn3zb/P+3mffZ9nDcDAzHTuqs49PZEhCBhBJc10nO7pHKbD9PR07p5AWlEEZM2ioph3dXGNqLgCxhVBRIYRmNy5cjqnTlWdqu/7ue5zTk+DiNIsCn4suOacrjrxvq/fle/7PofX+ycDZNT/pIAUaUUmaRBKp8FMQ1L9qL6S4+VYUikIeuBrn+LppiuJNG/BvHYd7HbhbXLBZ/dB8AkwFokCHsAH6Kb8IxTHJIyBHwgDMTBT6h7yWAmb1L3sxyEjfxkYxDDQMa0nWV0vyE2slwZ5JtUO8v5JUhgk0EkRw5TnUg9sgJm03lsezkyTIU0C2VrNJU2WsdtTPVQyAmYU4mGIhDESaULAcSG5RjwJEQ8YsxgEOZoOcSxjvXsyKM8nL24QJ4UmF5TnlL7IWLure/G/3lnn/PVu9SrdaQVApO3/CCAZAYjNSLIVBrD/VMwSnsf4/B38ofWthFu3krhhPZmaLJZuyEY7vJPIV++AwEnImJwJ66qTFcMLSswkGWKkiKqtMIICwB890zL/2lwoHGLYnCIMtMqP3Md5N7mMDRDTBkhKAURAqNBs3TOdspjWERgrmkYuJbQMEPUeKdAEIBrIuSIKEiZ+B0ShADz7W/Tjv8TMLOIjybxcJwNJOUgAkjHQSFoAkedMWn2g7rXKV/9rnfZ3DRCRjgoN0ik2A0nDLgMkbYD3aU7dPcRT7ZfhadmEVldA/IZ1RN6TRahxO8f37CTyxU/B4pySvOlYHNM0sZhQRKgwTRrRWALOZc0lWksoY30n0lPkuMBCPYCcKn/Ic632Iy/ivNvy+4k2EOAamDbJ/rIKFYEhx74AWPIWcpRhv8dyu5mQTEEkmUYTjSsaMBWDiA9mjsF/foWTH76Z33zqEL6jD5IgRkTeR64valfOQQSL6My0Ap/c32qvlNJgq339v8Z5fzcAkcayBKrFkNIBCiDOD/Lj8jG2iZWOwvGHefxQNY+3beHk7grCDS7067JhRwn6dZX4d72L3zVei/6Vz0HYB/EQJINKawRTQXVP+UfYQEjdRphITBlTNIrFqBopIsqcsZnaQYww0iv5yA1XkuJ+eRJREStJvrMFhnO8A5S09ewCDoMkCQUw67KxOMQMCBkpkok4JIIQnYGf3k/s7mlO7N3Fw7VX8J2Ot3Pi/rvBXMLMJKxbx+UR5L4CEJ0IKYUbeV0xAUXDZVSrvZIGeHXPff0DRDGE9PRZPhGec8jhBWcr30uXCWEG4Xf/wW+H38ajXVUcaS7GX+dGvyYX6jeQvL6MZO1lzFx7Mc81XkPyM3eC/xlIz5LJzBIz/bbUtUyKZaksUtPUIS06wzK/LGluM6jwrVg9wkCvECDOe51lE2kL5w2drdU+Ths5bSBbMacsVMtGtKDFug5+5Q00Iw2JFOhhmD0C3/goS6M1HKvfiqfxMo7t3MLv2i7F97nDoJ+BpG45IXLysgYRgIhvJPeRu4QVibZ7LX/+rgDiNLTT58IADjM4rPI8HyXpgYc+yXODF3G0ZyPPtZSxUFeM/9p8MrUVJK4rIX5NMfEdm1jauZ1j7Vfj/exNcPoRSB2HxIICiHI+Hb4U00mYSWkP4RAhHTKiUexggfCEPJDiUOepX/5W3tN5R9m3PpZZJ6bdWbJ+kWPkto51JyaVxbBnpYtcT35XwFDXt8Ee8cJ//wj9X6c40fF2TtZU4qspJV5bidawCU/HxWgfHoTwccuhl4srE0saRnwQwwaIZQKa+BH6B0CcfnvVttIBK8jmFId5zjKEmA+WhJTIkeoYYxY+t5/FtmJ8zeUEWzdypqmM07VFhGpK0W9wYVyXh15dQLxnG/NdF/NE00V4PzgAv/0OLB5RbyWKQD2CML5wV1KMbIlmCSgkapQEkcLCNPJ72mJAsdXl+Vb7cRh+mcnlQvKl0IomUfs2mOT28rwCaiErgiW+hXWOaBSnzRSw4/Mw/wR87zN4xht55vqNzNxQQXj3VoyWzRjX5ZPcWUigrozozU0QeMbSNAnIyA0EcaQRE1N8EPWn0hoSDxSSRnntfl73GkTMAsvXsDnCYZAXMERc2dei2i0HVnWMdpro4etYuv58orUujLatLLZsZKapkqXaYqINRZi7XWQ63ASacwh2lhPtvZwjdVs4M94ETz4M8ajFjI5TLlsJLavwsu0GCA84JpX4uEAAVHBYGHa1H3lVuZaQxXgvAIh86QDFDqu+ECDSIstS3AGWnCdmUnwRjv4Y/XMHON51OSerSwjv2kCqdRta81ZiDZWwqwjq8onWFKIfrIPQs5CIKl/ekJvZDyagFJJbWKFuiQFLePwfJtZq+//PnieNLc64lUuwTYoXMITwZowMgbSu3EIjE8NMiKMdhmd/zlzrRjJ12UQb3IQaKojUbiRUW8VSQynzjQV4mtfjbz6fSNN5hBvXsrjbzXzTZ
jz1V/Bs0/Xw0A9g7qRy3E3DRzARUJpKni0ZSljpEUkcCEBsQR3BYIEIC2mxw+WBV/dx3v950TP5UshpBxskqURG+cvCjtImwqyyDYZ9pDPiMKfR4hHQY5aJdPIIPPg1jrS/nZndW/E0lRJodBHY5SbYUEq4biOx2goi16+D1iLCO/PwDL0HfvdD5X9JFNwXP+vjyL2UMJDnUs8kRpzkjv4BkNX1/l9wlmiOhHL4RIbaDrA0vs0UwifSMVEkuSWJsyTxRACMIKSi8Nj3WWyphLr16PWFaPVlGDs3ou2swldXpgCy0LoWT+t5RJreqEASaMpRDGLs2E6w+u2c7mkgcM/t8IdHID2PSZAQUaJmSrkAypgXXrClaTIj5kZcRXXiKlH4ygAibXA2Yme3wUqAJDMWWDJgGEmiWgzDFL1hCRcjHkWP+kgaPkgHQUyqIz8l+fHDzLa/i0DdVrTaUmL1LsINBTZIignXVRCpK8W3cx3Jdjehnfl4970bHnsA0rpi/QWxLqUf7SiZ2pd/BBPio0kQQyVO/4LO/hsd8ro2sYQxRPYJSJSZJYwhHSBkWoJUABJRqTLRM0m08KKlPYwgiR99kZOdlRi71pOuKSS90wJIrLqK2M4yZTaEGy0N4ml9M96W89Hqs0lVu0nt2Ii+YztPv8XF8ZZ3EPrkAfj9dyF50gaJhscwEP5U0twGrZlKkEhKwDNkZ7gV27yC7ndUxYtfwkymMZMp5L7KF0pJNj2OmYiR0MMkRWsIoyY8MPsYia/fyZmxGzjdsInAjjKM6nIS1aUKJOH6UgK7LAo2FBPc5WKh9kJiYoJWF7I08G5SP7sfMrryLuZsYCjZJYlFp39EWKhIn4TBVy8gXvyN/2e/fd0DREoY4sqvsEo8lHQSCSV8Z5c8RDJJxAeRUKvmm4FEGDQ/S1+6k2NdG4g1ZWPWFJLZWUZi5wbiOypJXF9GfIcbrT6HYNNaPC0X4G2+AF0BxEVqZ7k6NvRuN96dGzjR+VZOva+LyMNfgOizYrwQJkIkY1qmhcMcSZGccTJmCF0XJ/UVAESuuRytOssY8rWjSGVf12MYsbDKyYh0z4SXyIQWIB60hIU2D7/6FjN37uVo56XMNpQSqXdj1LpIVBdj1JSi1WwgUrtBaQ4xscTc8u7O40z9hYTa3UR2uvD2vpPIA/dBJooXmLOfQwkIx7SS13X6RwIY/wDI2Y77n94TDWKVMCQtP0Q6QRpfJJRsJXyfgWhaoCFaJk5SACKM4ZnlxIemOd2+mWhTnvJB4nUlmDsqMK8vVwDRdxYr0yLSkEekIYfwrhyCjXkqmajVFlvMc0M++k43vh2lnKzfxqmpRrRvfRwWHlPVW7G0D40YyaSBGRVJLV66xSWmZPIV96yyZRQSnIy9JRDkK7m6NIES1CIYMgkykv0W3yvug+giaB7LnDrxK1LfvYeT040cqdvEUm0JdFRhtpbhr84mXF9MuL4crWYjevVG9JoKYhIKbypgsTmH0w3rCLcWkbjBTbD9SrxfuxvSfnyYLDmvpQIXKzSpoFceVJlX/9AgTjP9j2/FsYyRVKQcdWlr4QrHIzQhYUIsbSq7Py2Z8/CC0h4cO8rRw3uZb9mKvstFZJdoixISO8tJ3lCuTKxItUjSYiVNtTqXAotyVBtd6ju91gWNLpI7skheV4BevYHFhks4PnA9S/feQup3D4B+0o7WxIjrYUxdEogWs4j584o+DkCk3kku9YJIlTjikZROUsIGKfEvvJDyg+EB3wkSj3yD2dt7OTnwdk7UbySwq4pE0wYiNYUEavOJNJfiF5OqoVw55HpNOUICEKVBmgo4U7+OsBR2Xu8itvsyFu65GVIegsRUpkN1hiRNRVvY7+3UellZ9FfYBq+oAf/8ya9rE0sAItlZAclLAUTLpIkmJfQoUZoASNLrsV/x9Gg3vqaLSNcWkagtQq8rUY6nxPT9tWXKUQ/WVygJKlJUnFIHMFKSEqvPg+4K0rU5pG7IgdpSkjdUMn/NBmbarmLu1n7MR/4NPE8qkMTQCKSlhtcSnlIoqyTpn++nFz9CACKaUsiWyqI9RD5ICkLyDhF0YmYYQ1+CmJhVi7BwlNPf+zK/PtBGtPsSwlefy+w7z1P+RazrYhYbN3K6upTFunIFDvE35H0jDQVKMKg2qCtV2mWxJksFOHivi2TdxczdNQqJWaKZABKtIxO2KhZsh1we09JuVtWxFaJ/8dd7LXz7ugaINLb4FpZ/YYd5RZ07XJK0SoESaZOY5icj9dcSvZLy7Ie/y+mBBsINW0nXlJKsKVEAEfva11CCTyI0dWUogNRV2FEbkZ6lSoKKjR6pd5FoLMLcVUSqzkXyhgL09+ShS/a9fjvB7qs5PlJP8vufhugzKjEmxp4wsDyiVIorS8PGibP/oowhP4oIdrbOvi2V5VfHtLJyDhLhk5yPbmmN2Ax4j5L61bc58pGDPDTcyFNd78SoLoTq9dBUjL67grn6ck5Ul3GmpgJPQxXB+nLCDW4VwXLMSzG7YrVWW/h2ZqHX5cF1hcTrt+G5bS/EThNP+YjKmwowklEwJWJllZo4Cl4EnHqdF33h18aXr2uAqCa0TQtpaKvCVuobjGVTS3zAuCHJvCCYAZICEM2DcecowV0X2aZDKbo4ohLOFC2xTGJarKA66xjruGIideUqbyI5AUvTyPluxBwzatzqev66TXj73g2fvRWOPQpxj2KaU1qIZ2NhO5ttMbcw+LJ5btsjmYyE44QEHBKIcLZytDU2Q5KOkmqRrELELsBPZOKQDkPKC8YMnPkl+r/fydHhnfy+YRvzjdtJtl+GUVNMosaFVudW7y2ACDRUqPdR4Kh3tKZoEREK8t5WG4mppTfkE63OVu8c7ryMk/vq4fhvVeInYJorhJVIAyvULhUN8p6veXQAr2+ACCrEvLATcAlV8xNUA4WUHo+DRDWTCRnLEADTgxxDeAbz5naSNWXLwBCmf7kkibJw7VYCddvwN2zE21iKv9GlnHkV7arJg8ZKaLmUeP8NxO+cgF9+T4VUpZJ1MR1TCbuzDvXZ6FPaCYvKOzpaQrbCWYoEKOJbpfASZyEVYTERJGRKUFukdRik1mzpaXjk6yzePcLxve9ltmkbkboK0tUlZKrd6LVnGd5h/L90K8IgUZdPpCbb8km6L2W2fycc+ZXKhQTkOaV/RGWIv5XRVd9IxbAChy3cXhu64sWf4u8OIHGkFF23OiYOetQkrQoHxbySMYE6zD5FdLzOju8XK5NJpOHLJqVdJPy5Udnq3sZiPLsL8O7OIdyYrULEyVo3qcbNeOsv4Wjj2zl2yyCZn/4nhE6qkXppCSRImZZpkRPkUmkLqWdaaTeJ8xJNk4mYZMIp0mHNMhkTS6rM3Kpt8oG5CN4j8Mwj+L54JzOHezjW/HbmqrcSq5YQtovUNdkY165Hr7W1ggpGSEDiLycBiFGbpwAigsHTtZ2Z/hvg1z9WWjKsRk39AyAvDr2/xrciXYWBVmiQuCqjtgFiQDRkmSLL5obk1Z98mKW+d2PsLEHCtS8bGDaYRONIIk2rLVcOqwzT
XWjJY6ElB//ubKVJQjuyiVaLlN7CfMOlHG27iuMHOkh842Nw9BcQ84IeAc2maBhiMuYiCJEgEm3j2afh6FNw5En4/eOkn3iUzOO/hsd+Bk88BI//CJ78L3juF3Div+G3P8T4z3vwfewQj7VdzTONV6roWrLxIlXGn7khH65bR2ZHNhKJezmgWHmsACReY2kQ365Cljq3MrPnGvjJf6j8SlTU4PM0iDUGRHyjf2iQvyZARHur6Ig4wFGVEFQdk4BYWPS8HCCOokhXP+Eff4XZjiuIVxe9IoCIeZLaKTa85bB7mwqYb81jti1HgUQ0iUR+gtflEb2uGKPhIoJNlzHbdDmefTtI3L4HfnUf/Pwe+OHd8B+3wudGMT7cQ/C23XhursV7Yw3ew9V4DlzP4tS1LIy/h7nRq5nd/w7m9l1FaLgeT88OllquI9xRjdm/G/qbibVdz2LNFczVXsJ8/RYWaivw1rgxGlywuwh25UN1tkoGrmT6l7MvANGr84hU5+JpzGexfSMz3e+Cb34GdC9xGaIrALESMsrekuG/UtHwD4D8DQAiDS9DPsVZd0I6CTFLRJJJNEUAEjnJwlc+yJmWi/4HAOLG3OkiVS0h3wKVPJtvKWCmLQ/Zirnlr8kjurOA5A1ukteXkthZgV5bRbhhM77GSuYb3cwJsHa7WGh2sdTsxtPqwt/qVhRocRNsLiDYlE+gMQ9/Qw6++vV469bhq87FaN5GtHoLwasriF1VBddeDDVXQt1boe5StOrNxHZVqfox784cwrXryezOh0YpmblQDTF+OaBYeawARNuZvwyQhfYK5trfCl+4EyLzmKrU3/ZDRE5lZEShjLBcAZC/Bp+8gnu8/n0Q24GV9heAWMM4ZRyG3TGSaLYdRFI+mP8tpz48ohKEEmmSTrZCti9/K5EfAUequkAlEcUOX2h2M9viZr65GE9TMUvX55KQ7HRtKZnrCkm8NwdzRyHpWpcybxJdm9G6thDr3EqkfSPR1irCzZWEdlcQ3F2Kt86Ft64An02B2gKVxJNEnqIGF776QjUSMlBfonI2oZoygjfIwK9CNfhL8jvplgqSzUWEG7Lw1a5hqfZNeOrWqPCtFcaVUO7LI2m72I48wjtzWGrMYb6tlPnWizE/cRiC4mNJlMQ2g6WfVFhXQs82QETtv8Y/r2+ASOPakRALICKh7LCW9YX6XUq9JcRI0gfHH+H0+9rxNm9RodhXChAJkUrNkoAssEtAUcrC7nI8jRX4d1UQra9QtUzmDS7YUQA78mBnNunq9aTq85h/73pmr8ln/rpCFm9w4a0uIVgrSbhyIg1lBGqKCFbLAC6poC2y/SUr6y8l+r7GXPxt+QQ6ixR5W4tZairEU5uPrzqPeEOxer5kvYtUc5Eqq5mvXcOJujczs+tCAo0FLxsYDpCk7aLXWwBZbMpisa2IxeatpP51BHzPWZM7SD8ICRgUIOz+Wf77tY2Q1z9A7Ia2+kEST45TbneMjHKNZ0gbGsTm4dff4Ll9V+HdVUFkR54aKBWuc7EakkFWMaGaIsK1JWpUnb+uEl99FYHaKjWuRKJGcSlf2VGiRt4ZNdnEa9cRq19DtH6dun+0RiqHy4jWlqhtrKYEi4rUtbWaIixyEa92Ea8pVNtoXT6epizmWtcx15JlUy4Lu3PxNeYTqi8kVluIVmNRTLROg5yTy1xLLvPNuYhzHWhwrYqkzVKitXbmEel2M9ucQ6BlI6cHr4VnfmbNpyUdY/uIFhRWRFVe29hQT/d3ARDBiAMQ9VYKNHZ9kgKIOIoJ0Bbgh59kpu9ygo1lRHfmvyKAWKCywBGqKyNUW6lAEarZpPyC2M5NaIoq0apLFLOG6yVnsA7f7jX4G9cpYMZqyrBIgCH7Jep4BYrqEhscAhKXTWcB4m/MwrN7Hb6mdch+qEEKDHMRMGi1+QpMAhABijyvgMG3y4WnsVCRf5XgkOvI9TKNlerawW4XpxsvVLVbZ/a8Ax7/nnLUpWOk9spRIhZabCH2D4C8+i3gaGoLILYaV2aX3QkSaZTaCylBic3BFw7i6d5MrEkYNl+Vi0id0epIRtcVWyPspCSjbgOxmo3Eajaj79yKXr3ZcpKlTFyNpXCrEvHFlmzm29YiWzlfEncWSb2X7MvWrv1a3p7NaIs5p0LT9S60+jwStdnKbEPqwa4VyoNrC1QBYXqHW5mSco5VmWuFtSUCJyFu+V7GdayG5Fx2b1ZmnL87n5O7zlNaa777Cvjp1yC2ZJm4diBL+mi5ImA5+/nq88grucPrXoM42kPgIPtn292ydSURJ9l09aPvGJHbWwm3lxFvLlYRHLHjpZJ3NRRqLCLYWKK0kfgL4m9odVUqShWv2YReu4loXZUayhtoKsHb7GapJZ+FtlwW2rNYaslV58t50Xo53yJ1rYYSIg0lRO1tZFcRFp191uguqX/KJ1mXT6Y6H3bkg+Q4ri+EGwphhxuzxi7ErC9S1crisCdqyzCry1TwQMbdhxrdqyI5l8aNGLtK8HbmcqrxXPRdBYR6r4AHP2dVDcuQAztXKH1k9Y/s2QLslXDvX+Hcvz+ALKsSVd+gZsuURLoCyPHH8Ey8B6PFhdkiYz0KiO0qItq4Ogo3FRFqKlYUbSwmtqsUvaGUeH05upSIN5QTbiwluLsYX0sRvhYXntYCfK15ioLNecQaXarcXkruHbK+KyDWWIDeKMesoKZcYjbJOJbw7gKCzS517aVWF/PtFkmoWL6X90vWF6mK5UxNMUIpSXDWW88qzx3aXbQqiu0qJlVTqTSIpyub2eY3k2krIrnvbfDtj1rjTaQWzh4DvwwQmfFFjc957YPkdQ0QR3sIJqSplQax2l6VlMiUm6rOT6KKaQj88gGWBq+E1nwyLUVEanPQmtzEmopXRZHdxUR2uxXJdYTijdY21uQm3Owm0FqIt80if2uh+jvcXIjWlE+8KR+tSfbPUmx3IbHd+RYJgGyKNOcRacl5PjXnWfdoKWapvZjZzmJOdbs52eNmpsvNggyFbZH3c5PY5casd5NqKCbRUEx8V4kyM+Udws2rIzFT9etLlgGy0HYedBeT2Xcl+hduBn1WFVeenWLIcRb/AZC/gvKzAOGAYxkgdhRRaq5UqbeNIhFaz/3nF/ANXKoAQoubUHWWYh6HyVezFYYWZtd259lkMXGoNQd/Ww6ejhyWOmWbR6A1TzF+QrLYdYVkJMrU5CYkz/I8soAk4LIoX4Vy/W15rKRQSz7xxmJl4lgMbwFWQCFg9LQXstRhkewLUOU3Aa7zrrK/WlJ+3LVFGA1leLuzWGw/HzpdJHsu4rRUCcwfeXGAqBGVMsJTek0lSP4q/LKam7yGNYg03AtImH0l2X9KM4udq5xASzjZVq8M2LF/MBIcue9u/AOXQ0chtOYS3HkuWnMBEdEmq6Boi4toSwFaS766jlxL9iOt+QTb8vF15CKmh5C3M5tgezZ6cy5mYz7UCxWo84JtBTyfXATank++dhe+9gLObgsIt7owmmQ8SolNRSTFZNttPVegvQBvRwGLXfksdMlWzrfuFWktUM8uzy/7kRb
Xi9AL26WESMsKai7FU+0i1FaqAOJrPRfackk2l3F85Fr43Q9UTZb0i5hZ0keqS6WPVg7DVUEV6UyJBzsduAI49kzyZ7veCuf/NQD2NweIemm7ASwbyW5FaShFYh8JOWMhbFTY2DH0FKFYnEBaJmawf9OTEI1DPIVKEkpnhDw8dfdBwmPXIqFW/7X/C63h/6J3FBDrKH4F5LbPLUVrq0Br20CsfQORjgrCnaUEu9wEevII9uQQ6c5C78wi1ZYDLXkgJlPTeQR3WxRqXkOweR3B3VmEmnMJiXllP1u0qwShSFcZEdnvLkXvKiXVWUK6vRizrYhUq4u4Ddjobjk/l+DuHIIt2YRa8wi35RNtK0BvdxHvcBPvLCTTX0Zqj7UfFaEhxyvKJbS7AL2t1KZy9LYK9NYNyxTs3MCZvnJO9RcR6F6P2boGGtbArlx8HZt5Yu+74blHIHpazSujy8yWUuErGJCpkJYk9C6DWVIQS9jDcmWAlcxbJgdYCRQVJl4xQbhUTJiq5k7mPZaLvXqfvz1AnBj5WfFgN4wARIAhWmAFQBylImLJljwpE8JpmXTZBoiIKl2mNMkQlepYmazg+GMcOdSJv/cdBGtz0Rr/mVjLG9A684h0Fq+Kwp3FBLtKCXQJEEoJd5YT6Sgn1lFqkwBPAOQm1ulC63Ap5jTaC0i0uZBttD2HyDIJE+cqRhZmDreKlrA0RailUDndgd2FquZLEnzeXXnonUXqurF2l2J+0QYWMCxm1zrc6nfZxtoLcY6LthUSEQ3TkoO3JQtfS5YCUrglj1hrAUabm1R7MYlWtyKjrRi9vRit3Xq3SEcp/p5iTvWXcKbfRbgzF7NlPTQK5aK1lnFqz0Wc/FAXxq+/BimZ4ySKaWhktKQFDBlyvDLEJdEUGXkY91ujEJVQtPIoAgM51NIvUq4SVmR98/cMEOHpleCQ/WU9LA6FTUqb2ClZOcYGhxxtJFLEjJTS2nYLKmUkQzp1meTTnIOf/RvHB65Fb96OUZ2P2VeAv20Nka4CIp3uVZGAwtu9AU9PBd49bvx7cgj3XECs+3z0rgswOrIsBmupING8WRUW6i1b0No2Kk0T7thAcM8lBHovw997Ef6erfh6qvB1leHvKsLfUUi8v1xRYk8ZiT0VJLorMbo2kOisIt6xAb13I9G+DUR6qwjvqSDUU0mop5xgdwXB7jKWWouQ6Ja3vQRfRzGBrnL1e3jPBoL9Vcz3livy9pap4/WOEhKtxdaUq40F0JxHujWHVJul/cI96/H3rmexfy3evvWEOwrR2oqItZURtSncXkKoo4iFnjKe6d3MiffVE/7OR+HMb0Am7ktrpE2ZsClD1ATNnsPMshIMezoima9LhuxafS7yUMAhW+vj8IYwwtH2mK0AACAASURBVKv3+ZtrkBf6FC8KDtU0KxpiBaDiCQMhmR9agUa2KUvaxJUenwf9d6S+cjNL3ZdD6zaodcPQRubb1qsSiWhXEashMXVC3WUEe0oI7ikk2JtFqG8N0T3nE+9eQ7IzC7O1ALO5HHP3ZhK7t6G1bCfcth1/5za8ndsI9V9OsO8tCij+nm0Eujfh79qAv70Uf0cx3uZ8fM35BHbnK5NHImDK92kW/8ClGFGAtJIsX8XyN8I9xaykUHcRDgX2lLDQVcRiVyn+7lJlthldZUpzpFvdpFsKSLflYrZnk+jMQutZR6j3Qnz9F7K0d41633SzG5rE9NpIoHMjiz0bWNhTgWdPsSU0ess51V7BM3vfhudTY/D7H0JK1qAKq8k2ZhI6c8kUkUzGsrClRF40iZhYQmI9KCjZUUpHOFqqxP7y7xkg8m4OwysOFyDI2zsSwop2yOwXzmErtzK6Wf5W/5j2HAEx0FOaWhIMjsLv72Pp5huItGyAzougqRRz31Zm2wsJdYttvzqA6J2FinnS7VkkOnPRugqXr6V1WXZ9ol2kbw6pVrH93Yjd7unaxkzPW5jv2U5kTxnx7gLiXUWWuSQmTGs5sdZKoi2WTyO+jfgClrlUgN6Vi96VTbxrHVrHhWida5ZJNNdKCrW8iXDrm4m0nUus43x1XLz7QoyetaR6sqC3CPaUwZ5i6Cki2eNSzxPtzifcnUeoJ5/Qntw/okhPrvJj2LUBdm0m0bJBaadZ8UkGyjk9UM7inlLlR9FZgtnmxtNejveDTfDYVyEzq2Zc8WCySEZNESTTM1n9qEqwQeYSUHPAyJgFMaDtyQGFPeRPoRVy89WAyd9egyyDwgGGAwirLFqcMGuCaqfe6vlbOdoBiClDVMW3k/GrMsWPcZzQo5/lzGc6ebarglBjIXRuhLYK4n2bWOouI9xTSqy7eFUU73JBe7aiTHuBkrzJ9kqMjg3EOyuJdxUrHyfWlaUcdHHU/T1uPD3lLPRsxttdgdaejdl+gTJhku35JDrcGJ2l6nytu4pY9xYi3VsI7dlCsG8zgf6N+Pduwje4Ed9QFWe6rNzH6Z4SzuwpZqa3jJneEoRRZ/tKWdi7gYW9FcwPVDLXX6a+l9/P7CllsdtNqrecdFcRiY5Com15hNoLCHQVEuotItxfQrC3yNrfU0S0pwi9u0iBOdlZRLqtlExzJWZLFVpHJYEeuUclswMVLPaVEepxk27NgqYLQbY9JSwObufY+xsJPHQvxI+rhT/DagWqJFEzoeYPVh0qnSrLRygUSKeqjrU6X76WiMzfP0DssJ74Fyu0howXsIBhgUN8CecIZyu2qFAsmSSeThNPmWoOWjVBmRGGM0/AL77KsX9t46n9F3O6w0203QWdpdBdTqijhEhvJdGeYtXx0vkvl2KKWYpJdhSTaSuGllKbykm3laN3lePtKWa+z83sgIv5/jy8vVmEe9aS6DxfASO9ex1mSzbptnzMrnxSfS4Sg0XEhyuIjFUxt7eS0/u3cmb87cwcvJ75W1tZ+mA/3rtGmf/INOZ370V/4F7iP7gP44efJ/ngl0j9+IuY//UV0j/5Mvz062qrP/B5fP/xUc588Q6euecmnvzIAf5wRz8nJt/J3P4tnOyt4GRnKTMdxfh6KtH3biWz/2IFVKOjklS79U7yXplWi8SJj3bnEugtZKm3ksU9W/D2bCXSWYnZng9ta6BnHVrdPyEROnPsYkKjb+NY18WEpnfCN2+HM/9F2jyhpmoVq0F8SS1h8b70tQjHsyCRCejsUYoCDvHa/741iAMQx5yytgIQmQjO+k/A8XyAOOCQBtRUOtCad1eNGpRZA71HSXzn4xw9UM+xfVfyXFsZsaHNJPZWqmiS0Vem6qBifWWEbcn4csEhx4v2CfVsINwtGqOcVHupKrVQ5RYSnu0uxttTynxfKbP9xcz3F+DvzVJOvNnxz5jt5xHtKFQOdaC3iuC+LQRGLsY/fQWLN76D2Vuvgf84AN+9HX5yLzz2HXjuUVg6DiEZHSnj1kPWeHZdVqKVCNCfIRn7LueF/OA/BY/+O/z4bmJfmWLu7g5OHtzByeGrmN/7Nvx9VxDu3I7esZVU20YyrZXQWm4JgdZiUh0u5YcsDmSz2F+EV4IEXRXEO0rItOVC6zqMhjcqX0zrkQ
BBKaHujST7Loaei1jq3U7gK1Okn/0OJGfUNEWJpEnAgGDaWqBKcGCFciWaac/Q6Mx9JoygzIdXw7iyrvk3NrEEIHYo9wU+h0gOAUYsqas1ti2QpNXU/VpcxzBkjiWZORD8yTAZAUZiFk79HP2rt3D64E4W979DRYciPZuI9FQR2VNBuLeEUL+bUH8h4b5CIr1uIr3Fq6JwbxmhgYsI7NtOaGgL4X0VBAeK8PVk42lfy1LzGrQ9Vpg30laI1llEvLcUY28xWl8eS31uToxdzLGb3sviv7ahf3EafvBJ+M234blfw+yzEPFDLAq6zPUlQ4nFiXVyRLIvs+OtklKylqBMYC1zZy2A/1l4+mFS3/8c/k8eYPHWDuZGriW4753oA5eS7tsCPRXQ6YaOPOjMJT5USnS/1F/lYu5dT7L3QrSutfhac5nfXajMQ61rE2ZHBbQXQ1shtBdidhYjkbRj+7ZjfGUcnvsRhE+QNmJqfi9ZWkfm+1KznCnr24CUzOIijrvARozrV//zGgGIo0EsA8oBh4BCZpaV5Q3ipoFuRLEmfLYYJGnGmUv41YqzSgL9/rtEPjPK7ORVeIYuITl2GWLHG50Wad0bCPWV4B9w49+bq8gCyWoBUkJo72Z8e7fgFZ9gpJLAeAWBURf+gWx8XRdi9rthsBIGt2Ds3YZv8BIWxt6B933VLH1sD8YPP0ryv78Gxx4C/x9AZlrXRDNEIBg5G/wXv1XW+FieVtQywxWLCAOthjIQ0ZJoyYwValeTLAgYF8H3DMz9Gn79NTJfO8zcjdfybO9G5nsr0PZvwhypwtxXQbK3HGOPi8SeCzH6ziW+91wi+9bhHShmsbeKQM+laB1vISPRw5YKCyAdORh7soj05ZIa2Yxn5C2EPjEIR36gZoGUyR7mE7AohpcpARd5P5lXQISpRjIZQktbK7682lOXvgYAskIa2supiea0oCLbNEY6QVQLocl0OCr0JwkliWzEVJ4jkzoGj99P7GNDLAxchlcy2AMlxPaVou8pJdlVCe0bSXdUofWWEtjrYnF/LovD2QT3uoj2Fa2aRFJG9pSgDVcR2l+JZ28xnoECtJEiOLABvS9XZdElJzAz9nY8HxtSfgKnnrKWbwuKiSTTg9raQGL/GV2tgmUtj+CYmxZWBCDin0oCWvalrVb7EQmsEttSki5zc6mtRAXl6iLDpdjwGOiPw6lvk3j4Q8x+dg9P3nw1j49cwrHeKlLid7WXkt5bTniomDP7cjk5uJ6FoXxi+ysxujeT7LiEZNtbSXRcjtazmUi/i+DQm4kOvgm616vk5cz+K1n8zCippx8EM6T632OkCMQzhGPidwhARAoY6EkfYdOrVl5XczKvtgH+gvP+tgBRkQp7PT+pwzkbrX0+QJKamhldqVgjjBlcsObXTcv6GsfxPPYFTt3ejm/gbZh9WzD7iwkN5OIbyEbrc5HsLoH2ckWJnlIFioXhXIReKUCMgVJCnYUsteYQ6HRhDFVhjm9BG97AQn8R0ZveQuTO64l/aQR+/jmYewIkiGAzpRIEUhKehFjcJBo3iCY04qZGAs2eBURmApGAhQWI/8mtU8Yhgb9oKkM4lSSc0YgisSUfCRaAU8AxQDTcb0g8803+8O338+Rd7cr8MnsvJdS3nfl+GW5bxtxgEYGhIuJD5eji8HdvJdJzGYHeS/AObMSzz4V3+M1E972BVOMboK8cfeRijo69g2c/PUbm2YfU9EwChlQiSTQSJ67ZdXWyTHVGpuQOqxls/r5LTQQgCiRnnS0l1VZoEGEGNQuGWndcJK3M0O6B6DzEjhH41Sc59slWZgcuJdWzFfZUoHVmsbh3HeEDLqKDOaR686FD7GY3qT3FRAfceIfcLO13E9orf6+OIgOl+PaUEeqtwOgpJdNdCj3lsHczxtTb8PzLTvj5p+H4g5A4LXljtU6JP5ViMawzGwgRNDQFCN1MIsuzyfuKNWGaGSsq5xTwKfPCWbZM5hqWBXGseiWnGVe3XVn45wRJ4qRFi2WiJNJBYkkfkZRHMWWcKBphvJlFwv6nmP/mR/D+6xBnRq9lrutSol2byfRvxezfRKyvAt9gBYv7y5gbKWVuxM3ScD6+/XmE9uUR788j05YNXW6SQ5vxT17Oczdew4l79pF54n5IzEBKSoV04ppOMmXlwtIk1eJ7ii9e5TDW316DOAARBWIXLQqTiGRVklJpFjHD7BIEWedCZicJncH87bd47Jb3MDu+lVTfRugsJ9GWh6f9As4MrcN/MJ/ovrWk+9ZCVxZ05ZDszVcACQ6KJikj8goBstTlVgDR+qvQ+jajD10Od7TC9z8Kz8oquIvq2WViackfz6QzLKUFKhLUzBCKB4kmIyQyoiXOJkOV7y02j+NbSORGrbEh64s4AJGrnHVWpSkdLfyXbOXiGbWIj5irTjLOWqJN1i6MabJMnMwSLzPoy8TYsGgmmU0nmFeGWIq0/wQceRi+fQ/6HYOE915NqGWLKtgUcza4rxDvcBaLYxewNHoBvuF1BPflERkoUwCS+jTJvxgD5SQnt7M0spWjo5exdO8gPHW/NfF2Okg6k0A3TeIpazHSjCwrZ8gqodY7v1r//u0B4jDACwCiwCHS1JnNXCIYRggML/hP4v31jzh272HmhreSEQ3R6YIOF+E9LuYHCzgxms2Z4QuI71sD/edC77nQvwZtMJfwYAnC0JIs1KSj9hatirS9LpjcgD5UxPzQJvx3NsGDn4b5p8CIKZQLvrUURAzQhcdVT6ZVLZKmiZ0vDJ5W852bmRSptEhHK/n5ohpBhIhNZ5ljZSO+jH2ZyC0WISMr28rAfclkv4gfGE0k8IdlASA7ky2HxSGyFLAEl0j5yAI8+RDpL91B+OYmYvsvJj3ottp+4J/IDP4TqcE3EB9cp9o6vHeTigDOdJYQGChH31tKRLL2XbkEhysJvO89nP54Jzz9HYgdVwBWII2nSciO4DlqC5BXCx0yu7u8qwVCJyehQgbqO8dRtjpVDnT8BZHo9okrStXVdV7498t+eOlgub5M/iYmlTT+HERmQJ8Bz+8JPvh5fvPhcZ44cAPcciUMCkByoN+NNlLG3LCL40NZzA+vUwDJDJyLUHLvGqL7cgnvKyLeV0Wytwqjv4j4gMvqtMESooNFimKDLrS9BcQHLBIQRQdLCA+WEdpnkX9ISr3LiN1VAz+5C+Z+CvE5VWQnTq/Id6nkliWRRQEobSDOpvRwRhxhXUlwicwlUjpGUkNPxRFzSyI5MuRLcmEOOf3hbJf7ZYXHptruL/1bhI+Ej6XUPGaQihsqfK4nU8iiQ/KEihclepZIk1RIF7SnIJqBeFqZPoYCV8oSCoFZePS7ZD49RnjyCuJ788kMnAf9b1JgSQ2st9p7oJLA3q3M79uOZ/92fH3l+NrWo7WdR2ZvLsbERmYPvgX/v43CyR9BJqiESzSWsCbhcBpFAdriWcWTTjvL1uFRtSti56yGtnj+zzPnOdIIEuZLRHzWRMpGkIweJRQHX8Jad0KTq0lvSLw9IYVkMjm0tVCNDGlVK4DZW8GQREaU8ycxB
zt8n5Sl+WxGERvbuaSEcWUFc7VMmJQTqHU8vJCYA/0EBJ6EmZ/CE1+HH3yI1OeG8XygnpMT7+TkwEai+0tIDuaTHCwkPlRIdL+L0IiL4GghoZF8tOFc4vuzFen7c4kOW8doQyUqY53oySLZl4e+v5zo+EZi4xuIjZYQGxJgZMFQLum9uUT6i1jqK2F+oJyF4U0EDmxn7rZrmPv6nYR++6AFYDH/TA0jFkXTk6TsUv5lIbTcYbYQULkfQ6U6rVUUrVYQplT9smIrESvhCetazvmy1LSMo7BzI85WKjdtyiSd1W1F4NjHiaZQJHMi2ReWrf2gspF7Oc8hW/lbfSQULLNgJERD6giYxPSSY6TfVWjNG4Hjz8ETD+O9o5fZ4Svw7CmFsUoV2ZP2jPesx9hfgXffpfgmrsI79lY8feWkutfAwAUwlEVsbw6nRyvQ75+CpafsnE8aM2pYkb+krpZ6ULP5y4vI/QUrK7MG0lRKP69MPC+/qv1Sf3pzjrycNL6KMZuy4IosmSXrOFg2pyzMIq6gxdEyCCmo1vjLxCIktJgFBFuxpFMmYhsSF3tCA01UtzSmbkkXZxZzZ/bymAf96E+JHvkRoce/jf8XX2PpwXtY+vYdeP79MIEvj3Lqrnrmb78e7/veReCmywkf2kZ0spLoWCmx0SKL4YXpV0H6UCHJvhxM0RLDZUTHNxCcqCI4WkpoqIDI3vUw7ibRn6Wknj6xhfmRbfxh/3ZCH2uFR7+szD1JWjqfRCJBLBZD0zQljZ3vX3wrvWnZ/zJ+XiJVKxnTAYnDoMK4Vu/rloMumeWUQSadIiMqSqSVSB9RVbL5c1slzUTQ2bafLXUdPpNnEZKntLFjD2Kzw+xqQRxL0zjaRiaxFPmZ8SVhdhGO/Abzm59k7uYmTvZvwz9YRmaiBEbzVHLRN1BFYPhyAiOXEhnaBMMlMFkEY9kk+y9Q/ey9/Rq0733YWutEVqtKGKTj0ioy1WzMjmbZAHEALw8u+zZABB4yq7z4U897nxfvmOVvzxEAyMupj7Lzg6QTIaJxHa9usBCPs2RohBMBjKQPM+0V11I9WIogaWbI8BzwDKSPgnEEor+DwOPgeRROPgJP/wh+9Z8gSwR/+cNkPn4Lidsnid/aS+TA9cSm30ls+h1Ep68gOnUZkaltxKY2E5ssJzRWQHgsh/D4WiJj5xMZfxORsf9DZOz/IzL6RqIj2asChwBK2+9SmeDEUBnJkSLi48X4xzewOFbJ4nAxS4NuVQ+VGK2AkQLM0VICt1xF9L4x+O13rYnolJMr89IliMfjpFIpMlK6ncmofadpX3wrjCzhSyHLbBXmdCT4SuZUHa0Uhy3BJR8kmfCMwMoKdr7YViw6iY4JU8jvcm2HrCSb46BLyMAyQZbVuxxonWidZONOvhKRICR5KusJFCdawJRdeXjh4XgKluYxHrqfUx8Z4pnxK5kbLCYxlAWj64n2riXen0+mJ59kr5vwyDYiBy4iPOrC07de8cLMyEV4b3sP/PgOCB612wuloSWJLIsiSTDheRpkBQqsuj4Bk5AVKZRXUzLkxTtm+dtzfAYE4qBpcUjIEgGiQXyoiZ5lXTtZjEXq95NnVHUskT9A6Aj4nwLvf5M59XUyz34W44kPE3zkfSx8e5JTX+jj+CdaOHlXPSduuZaZm65h6dA1BKbeTXT83ehjV5EYfhfsv4K0lB60rrOofS10rCXTfSFmz/kkxLEeW09mYh2pqTUkp8/DOPBm9ANvInbwjcQOnE90NIfYiGt1NOzGGKtSlBgrQRsrwj9RwcJ4BXOj5cwNl+Ef30RkfBOJySrSH7gavvMvajkzlctIWhJbgBGJRBRAlltWBJiYpC/1UWgQ6b1CggtzvZBWMqpjPigT12JKh4dfbCsOdixpoKVSyq8xMk4pqMXcsqSoQUwZuSJd5Vbq/nIxYXKH5L5iItt8L5aHkPxsiWo7BJ0W/8qW5vJ4coK0kyyc+uzPCX39Vv5w8F0cE59utABz34UwKIGUtUqTL+zfyqmxS5gbrWRufwmBqYsIT28jNl1B5K6rMR7+FETFz4OIWPzqGawckWJ4555yXxsBFkAERJJ0PeuHOL+/VBedE08lEScrbUSs9fOiJyzJ/8y34NHPwoN3wPdvg/tvIfXlQ0TvGWPpQwPMvL+ThVvqWRi9FN/wJvwjm/GNbVQMFZjYiH9qE/6pjYQObyZwuArfwUoWD5QyN1nEmYkCTo/msTCUA6NlsK8Y9hXBYBH0F5Lqz1cx8uhArpLy0RE3oVE3ofEiAlPF+KdL8B8sJ3SgVEmaVwKQ1HgF8fEqgpOb8E5swDfhxjPhZmZqA3OTGwlOVOKd3Er0I+3w8/vAd8Raa1ySe7bzqurCEgkr4ma3tmiUaFTc9D/zkU50ACGdqrhcfAMxfSSDbPsJDvfLMSIsbV9PggGJjJVDEfloaRFHq5hKeyjZmZHsQep5f8tKHWFSyh6I2KFcuc3zJPELASIxhhf4RkozpXTSMlow4YOMU0Wlq+XvTPFX0mIO+sHzFNGHP8cfPt7PsQNXkh7Khv1vhulcMjeWEzq4lYWJS5mfuBzPgSuUsIrfUoV5sBD/SCGhuxvgN99S682L5SPPIs8szaIwYfu/0qwWCXjEz3PIEir2j3+mc+Ac9VKiNdIRCBwn9N9f59i9+znz/muIve8KEoc2Yx7YTHzqEiITbyE49jb8I1cSGroMfe8WGK6CoVIYKoEhqTsqJL0vj8S+XLShLEJDa/HvX4t/bC2+qfV4D67Hd2M2gZtzid5UQGK8EEaKyIyWYI6VKDMmNVZOcqxSRTK00SoiYxsJj2wiNLoF/8hWvCNb8I5sVcCMjJagjbpXTfGxMiLj5XgnqlicLMc/6cY37cJ7eAPew1uZP7Ad874BePJ7EJoDQypKlz0Ba3KOdFqZVIq3bPPKMbNeqgfOduJyb2LNdCdOthTnOSTOtQUMcYSFKcR6ERNHysPjCYmAxe0QsZSKOBGbF9864WQtIwCxAjGOLyq3UcyzUhLb9xZQOiFm59klEKFAmohjGCFMQxYp8trl60FSpoDFHs8hyU0zCAkv0T/8kvl/uw3/eBXpobXo0wXoN1eg37gR7/gmTo9eyszEJSyNFhCbyiE5uZ7wcA7+qYuJfX4M89nHl4fjOs8ijynkyBILOKI1rECI1WK2KeYIJTn5JT7npPynSIdnIBmA8Ck8D3+e37x/J8+OFmMeLoDRN8LIG0mOZhMdLyI0uYHQpER7yjAmpPAvm8DeLCKDOWj78zFGXSQnXKQni8hMuUmM5dqUTWIsG2M0i8ToOoyRtcRG1xM56CZ4qJjAwRKC08X4J4vwjxcTGCkjMFKhIkcS3ZDx0+GBzeiD2zGGLiO1/0qSIxersG5srJDVUGSsSDnkgZES/KMFeMfy8EwXEjjsJnmzm+htF6F9dRKe+xnEJaMLft3qbmlXMyVh2hSmaSpSOZuXaOwX/mR1rGPDixx27CdhfWdfutkyfWTPAYcDEHVN5b+IDyN5ColWSeTKIJOMk9AiJPUoqXjMmjBBQrLyezqpQC3Xs3wJ
xyKRu8i9rXs6v8tWgUcBR7SaDV47IJYwIW5CTGaXkSy8MsAkT7IEyNqQHtLxBTJSjStMLNp39jm0f59Ev+sqZqc3sTRRijblUn6nRK/OjJah7V9LYug8jLE8olOleCY3MHvr1cS+eYe1arDSThaopT3l6eV9HLJmQHkBQBzwy/bPAURlZePyItKwUcK/e4AnPtzOsclyuNUF+/8X7P9/SIydS2wyj/DBUsKHK4gfLiFxuJTgeAH+cReBCTehCTeRiSKi4y70CTfx8UIFltR4AamxfMyRPFJDOST3ZZMaXI+2fz3esRwWpnJZmipQzOmbLiI4XUJ0qhxtagPahPgIm0iObiY1shVz+CIYvkRRemSbKgpcDTjknPC4G+9IMZ4RFwHJ8I5egH86i9BNbqLv38T83deC/7dqzUDhKbF5fRlL4lpdIZ2dUqaVOOcrASKgEd/kpT/SQ1bnOSsvWdEWcTytDLbjBJ/VWfYKthkJKftAPw3h58D3NMz9Fk78itQfHiH++x8TfeKHJJ9+CPOZn6rv1e/+oxA7AfHTYMxb0UUpo3cYXkwhO6sugBVj7awTLuFKu6hSwsiqzN72n+xKY9FEEviRtXZ1Ff4JQnqRdPy0BZC0rFNoB4bk9aNPwX99kOMf2MGxye0EDlWg31RM+MZStEMu0vvXwPCFpCbdyg9ZHC9jZnIz0Q/VkfrG+6wlLdSzW0EOAbGAYzmqpt7BgYsEQ+zkohz4lwBE5a1slS0Pju7H+8h9PHf7e5mfKCA2/Ab0oX8iPHIBwSkXnuly5ibK8E+Xod20geBUGcGpCkWhyQqEIpNliqITJQjFbNLHixGSaJFFbvSJfPTJvGWKT+SzTOOFygQTMywxJtrITVJotFiR/C1AjE0Wroqik26lrbxKjWeRPHgh8Yl/JvIvJfDgYdCetMpaZGJ4ex1ykaSW0HEkvLTyS38EPLpujWER08v5SA4jri2RSPqUByDgEEhJ5wqjiQcTjmtkpP5MCjNTpyH0Gzj2Tfjlx0g/cCOBTzQS/WgN4bt34L/rOjwfeA/zt13F7C3v4Mwtb+fY9GWcOHwFc7e+E9+d16J9tJbUZ3bDfR3w5X2qnJ2nH4KFp60aNwmjSoLWtH3S9JI1mMk4qRK1GVl70IgSNNJEpTRAluU1ksrmk5GAQUAMLClVl9IaaTe1aKdjZknEzp6pRDGoVEZEn4Hf34/vi8OceP+78N6yidShtaT2/2+Sg2/CHF5HeiKPuPieN27Ff+M2jOkNykcJf+cTVhY/EcaMSvGqNf3TYjyCpoSM5YNYsEla95YucwDidMaf2J4jLyDk5DvEZDCO/YLFL45wfHoL4YlczIn1JCfy0cZF/ZUpM0tCsGJmCRhCU2VEJmxgONtlkJQRnRCywLJyG5sQoLjPgmA8n4RNFkjyiE+cBY/Yos+jyTxik/kvCQ5R2S9F/uFcQuM5hCbWEphah3nXBnigD05/H+ILymwRwSFttBwOtxX4yymWE5CIMy+RLdEulo8i/kJSMVw4GkI3pHpXACg9KEwXAP8JMid+jv6rL+K7/zBzn2rm1O3v5PTN21g4UI55axXcVAaHy0kdLCI5VUR8sgB9rIDYmGSkXepv0ebaeN7y97GRPPxjpfhueQ/zd7TgvWeU2P13wi++Bsd/CsGnWewSOQAAIABJREFUrahl4jikjoM5Axlh+6gan+PLZPALMGI6BMOkQgaiMB1eEpDMZ2TuEqvd5L2U1lUmoJPIdELWsrLvKTVoau7rh/j9wUtYGvg/MPZPcFMuifG1aENr0MfzCB3cRHi6HCZySB7exKm7u+Hx74MmGnEJUrIMuEwimCaUljCEFbZQ7SlWkoDT0RxnZdWfgAeoRKG8lLyIkPrI6Lxf3Mfs+68ndnAzTBXDaCEM5sCgC0bKYX8pZl8+KQmPThYRn7C22qSzLVLfy29/mkrQJsrRJiptKle5j9hkKdHpEsIHilRFbuigi9DBAgKH8vEfzrUpm8ChXGLTBS8JgJcChzBScjKb6NhavBPZ+O66nPSDN8HSTyB5gozhU1BQhpI0pmpQm3ntBJ/V2k7D/fFWolkCCgGIbJ19OVKYRq4dk7XSRTWpS8cgeAaO/hfmw/eyeO8w3o8147ntXSxOb2RJqgRGckiM55GeyleCy5i0BIs+nqtsdW0sh/hoLrLlcCncWAIHizEPujGnXDjHByeLmb3xLZw4fCUz0xfhnd6Cfsul8OHr4EsD8K33wZmfQeiYmi5GTC0pwg8SJYIMWJIKWystYbstjnqVOfsIJayRJSJYxMhRIJE2VOZZ1Brbk05jZsScFDEUJn3mlyx8dRrPwU1oI2tI35hN4uA64pNr0cfFhy0iNV4E09nKxD86fSVz98mIxAdAgVnK8wXESfyGGHp2vylgiI8mZDvqf9xdf/TNOVa1pl37lcpYGVnJpi88hv6Nmwh94N1KnaWH1sPgBTAsodkC2F+golWm+Bc2JScKEDImz5IwoZA2ZZEw9DJNFdmAqLSy41PlyvcIT5cSXgGQ8IECwgfzCR3KJXQo26b1hA9m/1mA6NNu/hQZ0wVwcxHBiRw8H3gLPHirFeJOLZJJBkiZcdWxwruKeUX6qQiRdKZjqyvD9I8a1vlC8iOiORz/RLYCFkVSniP952BOxpk/8wsSD3wC/z19LP3L1fimNiFCJzNZCFP5MJUH03lwoAAOFpAczyE+kWMx/VQeyelCktP5pA64SB0oIDFVsPy3hErTh4rIHHYr0Ji3lBG4uRLfjeVEDxRgjGeTHlkLYtIe2IL/pqsI3TuM/zsfRzvykLUwJ34y+EixoMaLCOOr9hHGl3yORN5kdsSkTsaQ3IP1+/OOU+UqkuiU8K+pJsEU40hpaIl0Lf0GfvAvRD70LuZG1pO8OZ/MbRXKWtCGs2HKBYfEesjDe/PFPHvL1YTvPwCen4J5EtIe9ERImaeqH6R9paHl+eTeapIQK8Ln9NOf2p6jXkYShAkdMx5VJM46yVk48QCeL/Vx5raLmBk+j8jUGzBvXoN2+Hy8h9YSuTGflHTYuE0TeTCRp+xFczJP/SYdJIwoYTztQAGxgxZFDhUgFDvgPksHC9EOCBWgH8jDmM4jMZ1DaipLkTm1HqH01FpF5lSWurZ1jnPu87f6QRcvRfFDbvzvu4zQNw/D4qNKikXSCbwJqYsS7rWljlOEqVryLwfICxteTCtHm6hybcl1BL2YR39J8PufZunTQyze9l6CUxswhBluzIcDa2HiPBg9F/avIbN/HYmRXKUlBBzxqWzVVsmDeaQOFZK+sZDMTUVkbnKp743pHLWV4+JTuep4fVLOuwB97P8lMfW/SRw4n8RNBRg3V2Hcsp34TZeh3XQ5wUOXMX/wck697z0sfWEvyUfvBd8vgTNqftyQDNu1eU/5LhLmjS9ZVdcyMExK9O2aNAGSEgiqUSypEIj6kJiXaFJxaSxTSIYzHIHffZngnVeRvHUzocMb8U4XEz6YS+qmfNKHRBMWwPu3MH9oI8++/53Ef/JBMJ6EzDwyGjMptWdyTUGpyDYFEvlHqgYkAGFVD7y
wj1b+fQ4pAYc4ZGEy8TAyDkBFtERCmrPEfvkZTt1Ty7ED2YRuPI/UbRcQuvHNzNy4Dv9tJYqRxYESElA4JMBJKHo+QFaCRMAifys6mId2MG8ZGNKpFjhyMCdzSCvKgoksGLdIvhPwvRRA/n/e3gO6ruu689b62swksS1ZYhFI9F4JgBSpFpdxquM4TuI4zngmseOZzKRMkkkmK7YlkUTvAEH0QoIA2FQs23KXYjuO4xa5SLIkq8vsRH29l9+3/vvcB0KyLUfkzGCtg/vKfe/du/f+73b22UcA+GlD2biLLXcQvffvXNOAxJLNtCrQlEYT482tMq2z4T14BJZ2FISMq5tp+hMfK+4QMF7xlwzAc18i8+URLs7+Oc93/ibnD9yB/0CTaX8+tgP+7kb4nze4499tJfP3BaQ+VkHi7hpi+2sJfqyUwN2FBO8uIbS/mMiBcqItZUQPVhBrLScukLRVEGuptOc65t6PHyyEljeTPfgm4i03E2wpZf1gLWsHGlm/R9a8nszBRuJ3KdOnJQT1vNT5qyyf+Esy/zIHF75pzTKcKlHZiwJ8zXOsQUxLE/TcZZfMs/KsiYDiwJIhkgkSS0eJe3OiTq8nHG3TF+HRY8RnPsgLd+3j/D2NRLtqibeVEd9fDa31cLDMUvLP3r2Lp0beR+KJU5A556JGscbhwf2gQKLrsdIUxSeql35t/l1nlWWZhMuZJxLmOMjUKS4x5q/8kMgX+swFibdUQlsJyXuKWLm7hrXWZvwHawgdrLQROVCJRnT/lRG7pwo3aoypYuyVUUXi7ioS98ikv2rodRubz68jcXcdybvcMCG5p+Y1LUT8YA0/bSy338nlEx+DJ78IMZXSLBGO+S17pOktBZqiw4YLYb6ry17JZ47napdeIfU//kRxh4oXFY/oT2BZWVkh9NSXiB95L/RWE7pnB/67dhBuKSPRXUeso45AazXLd5Wwur+SYGsdkY5dxDp2EWmrJXSgytzOtf1lFospPlNMpqPvYBm+gxV2zL2v5/6WcgKtVQTbKgm11xDqrGWltYLltjKbvF1rKcR/sIDAgZ2EDuwkcqCQ5b/dRuxAFfTshZ7biO/fbe5w4uBt+A69h8Sj04TOPALhZ7117EpbuwIAzY3YxKJueqOCO7cAK2OrK6XJ1YQhHlGdX2xjnkeTj6aok0skHr2XlZkPstZ1p5tMPFhF9GA1mdZy+Nib4WAeq+31fL/jLTx2/G8Jq/5PYLVCtE0+3gZABA7FKWGviu3HeZZ75Tp/yN1QOu1KTsRCmbt1FeXqiUozH/88q0f+C6udd5Bsryd9sIrwXXVu7uNgDb7WSnwtVfhaagwwAk3wYJUxUYCJSfhfIfB1JO5qMGF/NUB07mZQRQWAe2Sp6ojsbyB6T8OmY50BUlozdLDcgdPOd6DRb4dbq4m0ViNwJw6WE28pJ9xaZ77rM92/R+Kxh12aUKXqcR/B0DqBVNqyesvqGu/R44olcbZapDFxlzR4SsjccM+i67H9STCSKbLWmCFmWRbOP84LDx/l8dEPsL5/O9x1HRz4t2Rb30yybQfR9lLCHbUEOxpZ72hmtXMPa+1NJgTrrVWst5ThaynCBLqjkvWOatbaq/C1ubHeVk5uBDqr0PB3VNr7662VaKy1CBiVnG2t5XxHLcudVQS7y4h17iTevo1YyxYi+7eSaCsmcHc+vr8vspQ8B13GjL8vx/+3pTx5cB/Pz3+I0LcnYPnrNuchDS3ayL5KX28EyoYT5ZVkc1RZmySuiT4VXmqZg601d+ltNbSOJNLus5EL8J1jrE7+By7cs4fQQSmJeoKysAe2kr3rBuhvYqnnTr7R8hYufL7TpY7Da+4idCGObRvunmZpsqb+XmXVc3zzjtcJDLoZVw9qmXd3aznG68sTARKPf5zHR97PSx23EOtugP0lcFcByZYqp9F0we27CHgj2F5PpK3GzHuypYy0xsEKktLoLbtsDiXS2kS8tda5AXIFNkYN8dYfH5pA0vkbx7Yqou3FRDoKCXYWE2qvInGwieSBZvt8qKOCpdZSIr01JNuLyLTuINmxgzP3lPPs4T8h/O0vuDb7tt5bvrKIdWXWWs82GGwEExLcMACozGI15NJQXgmIKKg158FU0vbCSMdyRVspiKzC9+7n8tQfcv5gFYGOmwn2bCXYcxOhru1EOgWOYosFRKf0gVrSB+pJHqwl0eJoGekoJtSZT7B7B76efNa7K1jvqmG9qwrfxqgg0PmTxuZzquwzq91VrHZX4OsuJdhVSLRjB4n2PJLteaTbdpBsyzeQyE1ztN9FtKXZSkKiLU1mXaQYz/XcwYXjf0zyiaOQesqcVFkE0UmeZUpC5imSZEa9zlQR7pSznaS5l2zcQCWrLbmU6Pm1nkWPkkvw1KcIHvvPLB/cQ/hgLQw1EW8vhJ5yaKm1xESwpZhLfXtIffEeiDwJkRDZtRgRrb7wem35jLFayeb3MiSvQsWmp7ZgShdyBSBrriGARfwuKWGB1vJTXPiHQzw19E4utdVDWxns30HmYKkJtsy+QCGA+Dp2EeioNYGNtpcZgdMtpQgoidYqoq31hFqbCbU1GbiiEvSrGe1lRDrzCXXtINBVTLCjinjLKwHi76/h/P6bCR7YQranEH9bHi9338rlz47DyiWX1dhEkNf1UAwPu10qxcdg0jWyE4OV4wqq4Zssh+q3tCXyN+9nfeqP8Xc2Q+cW6L/BgLHeU4yvu5hgZymR9jKibRUOJC1VZq1NsYh2baXEPYUQ6nRKQdbB11lLoKPGhmggxaDvsdFRahZJVkm8yA29F+oo2wBSsLOMiM7tKCTRnm/AEDiSbYXud9sqTOFJCfram1lv32v8o10ZpUJz5852N/PS7Hu4/PB+ki9/BmIvuSJFT6/YOvtE1lZPJtTAIhs0bFyxzq4qV+DQkECvqdOKiCsARV6C7x8jevQPWWm5Bd+BagLt5QQPFpFRVq+1Cva/mfWPXk94+l3wvWMQugShhLUz1QLnFa30FGpl1mJa3++h9qcwfsOC2Ps2kSJz57Wdt2rVlNXykF6Di9/g/MJf8lLb7SQ6a6GlwPLUsg7S/rIYwXYxS2C5ApB4WymJVoHDnScLIOsRad1FtK3m6sBh1qPMNJ40b7CjglC7rEu95celcQW61FA9F+/eynrrVoLdJbzcUs3K/B/Dy/9sltHSfj+FOP+ql0XfpDCgYj0tNHMzuaYBVfekVObqM2T+aZqzh3+f8wf3EO+phwFljPJItlQ469DqrlfCHegsM42+3l1qR1+XE2RZSCmieIvucZdZy2irrPEuu29peN23rLpAJeXFwWJQMH5AI3/T2GHvyVLLXdH3ihfio7JZOX4Zz9pUSLiZv/WeEqwnoc8drLCM5lJrOc8dqOCp7lu5cOrPiH57DvzPeVbatTbS3jnmlJAgquSQVUCahnbk1ky7uWAu/ltVWbspa7li6mTztLlbvpk/4kd3NxHpamTlYzutRCjTWQWtO4jdvZPVrn28NPZBOPeoldEIDz6to1e1SQ4gWuJqrsBP5/R1uhxZHPsTsy01JofNXW
hErT9jEYfg+EUyXz/GhfEPsNzZRORgMdnWEmgpIdVWRqK9nGhHtfnPCgDDHdVEOyqJdZSTaK90o63aiBpra8BGey2x9uqrGx3l5jPHOguIttdZABdrq7PvT7bWWnYj2V1JrLcUf28BL7SW8MLgu+BbRyH2sosHbG4jR4DXd8zRWRv4JBPSd45msWQIdX20tjzrPyTw5RFeGHgnZ/dXE+2uhqF6/N3VXPhoIamWBrIHGu1owtZRTaizEn93OWu9Zaz1lrPWU4m/q5ZgZwOh9mairbegIDlxcB+Jlt0kWptJtjSSbGkg1VJHVu6H5joOVMKBcpsz4ECJc4v3F4GNAns9c7Bh4zs2f0/C6FhHzPhT6/FVvNX1uRHsrMXfXkegrY5Ie4XxOXCwiMsHyjjXfRsvj70P/5cOk9I2bEnFGBmLm0MpBeqebHoAkQCbrFrHlpAp+JxLJKoaLa29rEpTnod/WWB56kOsdr2V1YNVrHeWEuoqIbq/kExbPdGefTyx/y2saf1I+Ix9uRZ+ZjX772UlfxY49LPXuWpHl4qzqzKnW0hxacxQJkI8mySrzmJqjLz8FMmHB3l54Je4IC0lgBwsgBb5+CUeSCoJd3pDgOmsNALHBBIDiKuWzT2+FoAkOneS6Cgi1tpMrHU3UWNoJQJIpqXatEl2oJKLPSU83Xcrvi8M2T2QuAgpr0jz9eFi42wx1NyBTMra0rhIUAyIeKnOF7n8xT7OzPwel9oriXfsINtdQLgtn4utNSx37SHc3kiitYGcQG4+CuzR9gbCNhoJdjTj79iNr/MWfB37CLbfYvctcCRa3RBIHFAcWDIH67DRUmvAEXjcqCZ7sI7sgWYyB1T7tM+GyjcSLfre3UTbmu367Pc7aj2elhPtLCPaWUKoqxxfbyPB3iYHkLt3krpru1ksxSyXWhv5Yfcvc+nBj8JLD7vFd7h17FpLY4pZRPQeO4B4JfGeJZa7agAhSSq86rqvyN3yPQ/fu48LYx/kcvcdrPcXEuzdYUWvvv3NZHpvY7XvLTx39I9IvPAZNx8jFNr+h2tuojYHyg2O/viD61wkL6Z6pkRXrce6IOKEslpxBiGVsvojbrHQi1/iwvE/4+Xe20mq4rc1H1oLoa2YdHspSdPsFUS6qgh31diIdNYQ66wh3lFFsqOCdJsb+ny8Q69fxeisINlZaL+Xat1NsnUPkc46Yp1VpNrqoKXGViKGW4t5oaeWix//b7CkAsSwt1xWMx6yllf3J3qHbLbdBfZJdSuRo61KhMuPE/nnKX44/C6WDu0h1qfrvJHowZsIthbh67mF4ODb8HfvJty5y1K48fZ6Um31ZFtUHqIcvzumW3eRbNM5TQS6drPevZvVnj2s9zQR76izCcF0Ww2p9hq772R7nWUb9X3x9l02Yu1NvHok25rg4G44sAe07ufgXlIttxJvvZVI+62EO/YS7thDuLOJcFed8TPWVU68q5h4Vz6R7mJLgvh76o23sbsLSH30ZtifDy3FxPYXs9JWw/Pdt/PywodJPn7CGuhp8ZTcHO2zmrMcOtqfqollRWwSz4FDHR8jiaRrqCehlnyqgjd+iew3Frk4+X6ebdnGxfYtpjwCrXeQ6GgmPngrz/btZfWLH4Nz33OVy+qpFl8hop3Jchm23G//hON1Lpz0lkjqKjWsVMXlq0PeSjMtLU5Z0VAYwi8T+5djnJn9AKHOJmgthjYBpJBsezGpjmLineVEuioIddcQ6qqzISJLeAUgAcmBqcJe0+uvd8QNIKWk2itwQtREpLPeQCIhsaCtQynREi7M/hbZpxe9+EqqX9ZDSdxrAYgaFsQ2Mi7GOJWOX36O5FcmuDj2XlZ7dxEfLCbRl0eo4yYC7fnE+htI9N9GsGsfa917We/eY3TUHEe6VcBW4FvhuUflcLAcWitMGYmGEkh9r7+nhnRHPrRth/Y8aN9JuqNwE/2dggp0y6XbZUOg0ljrabLfNDAeEBh3kVWCo203sfY9BoxQp65rt513BSClxLsKSXXuJNmVz/KBbfjaipECTHU0uMLJllrMpTuQB4MV+A7czPP7izk/9154TIHzC+ojxLpfcyE5C5GTfEmpBND8IZsLUTtWvysYNqsTVY1XDlnhs6S+Ps6PRu/kTEcJgf47ifW9zVnezkp8PWVcPHwnwS90w+XHbFWj1qRoN6t/JUAkIF6K0wq6nN3R78tPVMQvXzDnM2ZDPrde/fJ3OHf/3Sz33EmqvYp0e7mBQwBJdxSTVEamq9wAEuiuQ0wSUGRVBJBsuwOTgPR6gbH5fFkk+31p0LY608ahrl2mWdNtVXCokXDvHmKf/xisP+Z8YNMUihY1HbgRgf0E/fHaL4lGIrI/rZy+Z3mjQVJfe4DlsT9iuaWRbG859OWRHMonNlxOeKCBgK6vvYlE5257LOENdeWArRKRGgeSlgpXaqJyk7ZSUh2lxEXX7lJCPToWQsdWaL8B2m+0x9mOPBNeaXk7r7uCQE+VgUmA2jzCXVVk25QidQAUvaRYBFRZDSm/QJfGrldYkGRnManOfJJdO8kOlRnoV/aXEm5vJttzJ3TeCm210FHkZeu2kurbwaWeal6eei/+r4zD0nPWTUV0Ew0tHthoLqHn8mpUzRG1OrbcBIR6GKtNlarP7XNKKCWeg0eHeHn4VzjX2kS4u5FYdx3J7nIYKuXiR29ibeJd8Ox9toZGa22UZRQ4za17DTZfl7swd3ruY1dWk+kiNPRlTpSEbK+kIPoMkfkP4ZPpb62GgXrSncpW7STdXUqsq5RQdxUCiLSejuHuCmNytiMfjVhHGeHO8qsaSgCkuhqtvit2oIB0Z4XNxay1NhLurCfZowRAJckj74FnPmlBuXzai+pkbskI3ZX5k69Bop/+lughJ031SM56aFL1q5wb/nNSg78BPU3QkU+m62bCfSWs99ey3ttMuKuZTFsDaNXigTxC7TuMVom+GpJ9TcS7mwm2NrC+v8YeJ7oaSHdVkeosJNN+M5n2G8h0/Dy0/zy03QBtN5Jpu4lMxzZSHZrr2WlaPiZXqLuEaGcR4bYdBFu2ET6wlcjBrTYRmGrdBl159p2a95C7muqrRNcR7Kpktb3ceGZ866rZcLGk/OQl6PxYdz7hnmIDeKBzL+H2O4m13UpaZSDyKto10/3/km7/OeL9hVzuv4VzY79H5stjth+I5E9S5/jgLIr60qXkQtkut5I397oBwnNyVAakoaJEsqrdet7iy5WhO4n3lZEarLbro7MADlWS6Skhcuo/WlcU/d6lgN+aWPxsgEg+7Cx9TDiVCLl8tK039laKCekK6LM57Cn1pqzCPw4RmX4vSwoauxtJ99aQ6Sol011MqrfMABHoqWG9VyCpI9RdgRgn10Aj1lFyVeAwUHVUk+7Za5mWRPsO0t3FRLt3E+y5lWDfLgK9Daz17IaH/hYuPm7VnBJoFUSL2BllM67hT59e9poMWhXrxSfJPjRIeOB3Sbfshu46aC8i0V1CoK+a9b4GfL3NRDsbnYZtKyPdmUei52biPQWEZRm6a1jvrGe5vZHLrY3Ehn+RcP8tb
ib8wA4C+28k1no9dL8RBncQ6akk0NNowbK/bzf+3lvw9d1CoG8vgf49BHsaCPfUkuiuId1dTqazyEBLWx60bSF04I1Eu24i2b+D1EAB0d4CAp078XUU4tOkZLfc5Arjo1zmWKcUXLl5AbJmod5iAr0CUj0BuWQdil/2uoqL9lLo2k6m5Q2k2n+BdO92/B2lXGrfRWL29+Gro1bek0jEyCTlp7j2RLYNQyJFRk0Kvepbiagstv501NmSVFecouW/wIUfwmf/gtWeUtZbtsOhCtBE4oGtZFpuIDhxJ0vfOkI4eMHm8TW/8rMBol8zJSqAyCf3AiRVYOY60lg1pkCTm573Yhah/OI34B86OT/8W5xr20usrxH6q8n2FMFgGdGeMoK9lfj6am3ocbSnhHRXgQ1ZGRH+qob83r59pHoarYgy3p1Hoq+ZyNAd+AaaOdfTxNLEb9rkkm1Kk4b1jNv5QgBJZLwNQD3Cv96DeLLkbZlH/DzZR4aJDP0WdN9maxfoqUPaP6R4oW+XgTba00CyS8CpIdtZSaq/iPhgAdGBIhM2X5c3M97TZMK+2r3bQO6XsA80ERxsIDBYjb+vlIt9jVw++iHOHP0fNs4e+2vOzf01F4/9FctH/4K1o39CYOYPCYz9Dv6+X8anyb2DSgTUgaohhhoI9exgvXc7Po2ePAI9eYR68wn3FRDtKzZeiYcacVn/rkobic5qIt3VxtP1vnoCvfVEuutJdNaT6qgl1VlNQuf2VeDvyDfQxfpLCHcVsX73zcRbS4iOvxOe/gSsv+S2V1PK3eaO0taFUzsebHg43ky7BFp013DuWYqQukSav5uBHz3I+vHfZFUKoDMP5NJ/JI9Y+82sDNTy4vT7SDz9oMm5Xw2wfwbTr7MzDEa6misWxC5MwNmwMLn3BZKUazOpb9e65pcfZuXev+OZjrey0tVEqq+ajPzjoRLiPUWEe8vw9TliCiCRnjJS3QU24t1lRLsrr2pEumuJ9ewhOdBMrCePcOcW4v01RIf3cXloN88PvoXApz8CF9R0IYlaNkW9mEoaKPKvXzfzE8mo2zf3QAHlS//A2ux/InigzhV03pVHtqeGUE+TaXRpdwEl0V1FuqvSwKGj6p/8A+VEhmoID1YT7K8k0FdBeLCO8OEm/MO3sHJoHxcHbuPs0Nu5OPluAif/mNRn/ob0l/tg+XFYfdEbL8CqxnOw8jQsfx9e/BI8fi98aZDU6f9JaPwD+Pp/g/WOt7PWuY+zHRWc7ytldagS36EKAkNlhAdLLLGQGiol0V3gjRLEK4HE8auacHc9a32NrPU3EOytJdpdQaqrhHRnCQkpvZ5KQkNNLHXXsNRZRWigllRfubl0tLyJTF8Nl+Y/TPrpT0HqsqvgCF22tqZG282pYC3BSIdMLC3es5WCScLErIe1lbLY9N2L8MMp0vO/xtpHt0KPJlQriQ/UstZfy/nOBlKf/4htnaFCSc86/ET+6kUvBtFD11XDlZyYSXHoNYBonkSi4LLSKhE25Jnd0z5350k9usjzY3/AGWVAuivIdBdAXz7JXmmiUoL9VajsQ8dIXzmJ3kKSPYXEesuJyqpczeipMdMeHdxFsDuP9Y43E+0vwD9UxdnhvTw/+3vw9AMQPKPmUWSEDrOGyj5lbYbW7uOnkudnveFlW8IvEvxcK0tDbzd3KttyI3Tnk+gpJ9DfjG/AuTvR3l0ke6vMH870FhHvreJSSx3LvXsJDu4lNCRNXMx69834+rfhGypgbayWlZk7WT/1BwQ/9zGS356H5/4Zln8EIc0uez6HbmTzkIq1Xsra3FPNHc65FPczD8PXjhH+ZA/rp/6Wx9ru4OXBO1mbejuh6bcQOiw3rdw0vcAhPmZ6dprCS/YUb/BL9W3BvgazjP7+BuOfeJru2Umydyex3mLjdXBkn9VGXehsZqWnkWg4+/l8AAAgAElEQVR/FdnefOi5AQYK+VF7E2v3/xU892mIvuB6Msd91g4sF/ua/pYFSav61lkPFxXHrdG3XLJkWP2lBR25Pefg0UGCw2/D39ZoMd/64K2s9zeR6CojPfsr8MRJW1hl2bLXYPN1OQTphzcPez2XSVCmwIYKykT5K2ZOKTerk1n6Lv7PHOTy8C+ZH20A6dlOujfP5gCkGX0DNfgHqgj1a3a7mERvsZngWF8VVzOiskrdtUSG6vF1F+Lr2kp0cAvLvTfzo7HbWfrsRyHwDKgSQK37NVQ5l3VrloNZr1r0NQj0mm8py6LdWZ/7JC9O/h5Lw7eSHa0g1v4GGMoj1ZdPuL/WQKKYINTXbPeZ7Csk1b/d/P611hJLYEQE8v4qVjuLudRZyNJABcvje0k++B/gKx+BZ4+D//uuMZs1lfO692hNkDwMrU60NqNXeCO8RCPy5dWxRn661v6sQfSitwfhN4h8vp31U/+VsyPv5kzf21jpv5XwQJNZg2iLXJSd0J1Htnsn6Z4C45sUnK412F9DpLeOWE+V8TLRt5NY/3aiA3kEB4rxidejt7M2eDuXuvey1NnEuiaQ5V733ES2P8+U6bnOvayc+jN4+XOg5hCJgDU9F0A0TOIsw5rT+HpFHo1iFK2CxeKVVDrrMlsCidrePjrKub63s9631yo/ZMEZqiMy2Ezsof8Boe+49SuvwWRvHkT9ZF13buvMbh/QBag8YNXrqKElkl4DM9PC7hJXVEem89We9NkHSN37QWKDzdBbAN0qENxOoreA4EC5EUxEE1hkVQSSWF/FVYFDgBJAZJUiI40OeAMFxIffyMXuX+D8kXfAD+bNLEuRpiVU2nBFBYQJVUzJiniW8DUI9JpvqT5t7Z9JfPYj/HDoHVwYv43wdA3BwZvIDLwJDm0l3V9gbl9gYB/rg/vwDzUQGSoiPaQg+9+Q7f6/oP/nYXArkd4iVnsaWBv5VaKn/pLUI0Nw6VFYeR4CWqUXcQrSEw8THGUU1fFEbYDMRVYFhGuCLQ6aK6ljNkMsk7BCwbSWxKpTSuwiBB+3lXvh+z/CxdH3szr4a0SGfpFkT70VKtK5Ezq3G0hkSTThGeovxT9QaYou21WMMkWyHAKGf2gn64cKWD0kt62KwKFmwkP7iA7eRrB3N6tdtaz0VLA2WEZwsNDiVK0/We65lewX7rGl3lK4ygpq2wjZhNwwS2LzI86TMWWtxtsJFc+rQthlu9RoxXVb+RHhz/xPwlNvs0VW9NWTGHkLl/puJTX7a/C9Qbdy9jWY/GMA2VjgYggV0QWQNWclcl3APTdFDFjSmgkz7apYfQ6+0odv8teJDtaS7M0j03ezaVIBISyNMqhjGbF+N+KyJgOVXM0xMlhNeLiO2FgjoUNVxEeKyYy9mbNdN/Hi8T+CpW9btaYW7jjbHCezvmSbZ+bW4hvRN/Vr3WxF7b1cHOatRReznGCKMVqWPI9v+le5MHQna6O3EhirIDqyg0D3z8Ooywzp/oKDjQQGmwgYQIpJHfoFGLoO+q9zQBnaYe9dGn03a/ffBd/9NFx+CdS+VJtYumpw85py+UYrJldvrOQFSC07PuGaiGpBqbSvRElDQMlp5Ki2184k
Sce1BuYyRF6Ey9+Fby5wee5veLHrnSy330aqvxm6S6Bnh41M3w5iA4UWo4QGHf/oLoLuQpL9+YSH8vENF7J6uIjVw2X4hlx5vWJEhu8ge2gfkcEmUxKrY3VGKwZ2wGCxxSO+sV8n/vVJp2yzKfwh3cUVi3gFIKKAZ02sY13MmtWFBRPNk0QduKx+68JDxD7xYWJDt8LoHQRGf8lcPsYa4BMfgOQLr/BMc/x3nlWK69zPu5jDXUAOTnpNFyEYeBOJVz7t+YLq46Qafq+Vik5dfp7ww/2cHXsHofE6ksMFMFwMh+rJ9lcbIeP9+SQGK0gMVJE4VEnsUOVVHquJDCgoryYyWkh2ugRmKjnbu5tL37qXpLZgyAm4lTXrfpRk8NYh6Fb1unZa8jSVYd1r2qbHJlXm2kZtn75lEq77i0qlg0+yNPMrpMbrjPkM7CEzWGpZqdBgCb7BcvyHqu0Y6S+0ib1wa4mluePDNxDr+39gVtm+Yla6dnFp6vdJf/soqD9yKkTCr7okL7ZQutMrstNLEnaR2/iXm3U2fgm+Wrh65aO5r9h8NF4bcVyhpTbPsS0rLr1A5GunWZn7c1b63krmUJNNtikFnBgoIDZURHyohPShMjhcTXKwjsRgnfEgNFJKcLTERvhwqfE0018HfQ3QI6+ikdSAOzc4Uk5wtIjo0M0k+7ZDbyGx/jpWFn8bnj1mnktOHjPaPiOrqUFHDyW7tMAvpcV8Of6+KkRw9xeG2LPw0oNcuv8veHbg7Vwc3kd4uBr63kx48hYuP3GvWZ71RIalQNqslegkmU/FVgSQq/9TQB+3jSmTLqsl+QtFyTz1CEsP/gXnx24lMFxKWqa0Twvsy8n2FZAa2ElS5d6DHkCGy22WWTPNr2sckqtWTWSwnPj4DtKTO2Gshosjv8nFpx4l4CmZjXy1uR6qLXOiZcLn5dklVnpVR7lfAr1ZCiX2JI3am5tLXLJmz0JT2LJ3genbYLQYBhpNAOgrJSH/e6iapUN1rIzsYn2onFh/Hpkel3ZUGjw+mkf40E0wVE5q8DbiJz4M35kDdT5Mhm22WNfvmOUlAxTzKC70wKzrdYJwdTzUd+s7AllYkfepG9YXqhXt9+8n+sm/4/LAW0iN7iNzuIZY906ivfkWZ6WGS1lu3070UB3hQw2EDlcTGiknNFJCeKSE2HAJqcFyS/nTWwc9DeiYGagmcajczgmNFhAbzSc5tMMSOqn+Utan9xH+yt/C0jddBa8tB9e+NRc9d9/F4bpMgcSuVyDRCxvE0Au6mSiZ6Hnr65V85jTPzP8hL/TfYpk6+m9k7VAVz365i0T2slUeBsIZ2yrFyuGzKbJJ37UCRH58VN2RHCNFcU3uBF+C70xzfvY9LA3X45f57Soi2VNKpr/EtGxSbtZguRHrdYFiM4g8gMgnjo/l20gdqiJ0/EPEL71o/aZMw9iEjoRIvvmrAKL3tDegJyy6BblfapBpANF0rZUFrZJkBbc7t9ImPlLfmMU3uQ9Gi2BAmrIe+svRvfkP1bA8Usf6WL2lT02w+opgaBccbiA6VoJvpIz1Q7eROPmf4bE5iD1j20/okgJagOix2dSkFfFpnY4uxpvI3SwTV4MRTQLr67z14i6DKfdFTRdegDMPE/9cK6Hp9xHpaoYele5UkBwsZKW/gKWhYqLDlabUoodL0ZDl0BBPk0OVBohsfy301ZoHkRrMnV9CdKSAzHgR6ZECkoMlluZeGmnkR3O/SeCbYxBTwzq/xZGZ5IoJbA4I4pOWnOeebxw9OrhmDHGS2hE5ex7ijxH6p27OzvwmlwcaifQVsDJUxeOL7ye7/AVXaRyJWB4jJYUopyiTuVaAgPabCKXFMKFWKk8r6Fbh4jeJfKGFy7O/wuWhSlY6dxLuKSQ9VGNDLlZ8qIzkcAXxw1c5hquI9FZZajQxXkBoZIelSnnkHtsJS6tdnYvi9Y81i/cqgJjr5dYnyJNyNNdzJ4TZHECiasK8ip+EbdiiJg+XH/gYvom9ZEeKwbRlFQxUkRysxne4hpXRWoLTjYQOK41dQLa3FA7vIjVcz9qhCi6O30Lw/j+F7y+4HrWpVeu4KBHVzyp2MJAat+ROKJvo/G9dp4RkQ2leLUD0dfZF+iX9oiaLFXsG3A6za98j85VDBMd/l8TQbWQPNRDpL7YYI3K03uK+9HA+qeFikgLGSBkx8XO4iuShGlLG7yrSQxoVHr/L7DzFjAIH46UWQ64N1bI03MgLg7dx4fR/gR/e56p/CZNKh60DpQXmuHUlWrPuGCYllxvuJUcb9R9Tlx7F0IqzHib7T21cnvkNLvXWszLSzGMje0l8tx2C33W7pwVd11VVIgmA1+hiOQaqeEyz7M5HVDcJBZbL8NKX8D30V1yafSsX+vIJDOSTGd5FZqjBxSCHSkkOlxE/fJXDA4jqnGITBQRGCvGP3Q6PTdtKPtMwopRUssBiluHVAHGvb86USPu4lpVeYki3lnAdyjUxZfe59G1emv0g/pE9pA8JIIUwUAoDNSQ1nzFax/p4DdHZBmLjtbZoS6U3HK61DNDZwTrOzv82PPMA+J51lkPdTuKwnAGVUQokuq5XWJBNbta1AiQHsmgsQcKv5MXKxv7rcl8StlOvD0JPw2NHuXzs/ZzpaSR4uIHUzC0kJ6tJH94OwzeRHd5pdDCFN1xDXMmT4ToDilOApcQPbx5lBqjE4E7S45WsjdZxabiB9dE9LA81szKhXmV/CuvfMIumWbqgJnvNgiYIxZIEtPmoaTSl7jUEkisAEX2Up0yoF0D8AiSfg0ufI/bFuzk3/m5e7N/NC4criXzyt+Hl+yB0zpltTR0pJLOJQmPA1f0zAstMW38usVN7QwRA2wpr5ZbM2+MnWP/EBzk/Usn6cB6ZkV2khxotQBcxEyPl1zAqbeIpKndtogDfZDnB+d+Cs582S2baN+eOGu2037cAolvPaU73MAcQ90xkFUu89I+2G7ZOWZeJmZZdgR/cy5nx38F/uJnkIQXaO2CokKyC1uFGgnKtJqoJTVSRnKwl0VdtFQbpkRqWBqo4N/F2/A/fDckX0W5VyZTbi1zrps8m4Ly3gaqzICLw/3oLovtTuiIUD5MIr0My6LZ1SyZtbkEC5hcvZU1SzxF+bJYfHf+PnBt5C6HRvTBZC4e3wMj1cPhmGC4iPVxpFjIx3EB0pJ7oaDWRsTIi40VEx4qIjxWRGCkznqcOlxMfLCIxVsHSeB3nDzcQHN1Damw38ZFaLkrZPTEBseeNF/6E5ntjqOmDwBHJlUIYODyQbEpQiHaK2BJqdapFbNotLfgkvPhpgp89wA8G38HFsRpWj94GT6gJ9osbKT9tuKZK9mu2IMKBJqiypu/E3iiJeNQaNlou2vc80X/q5vyRJtbH8yzjkz7UQHywhuRIHYmRSuKj5Vc19FnNzCoTFh3PJ3Cknuin/gR834JUzK7LLIdTJUbkDYAYup2PoocCiAmjnlga2+u6pzcCApTmGi7YnkwkzhD+UgerU79O4NAuEkPFZIa3wKE80oPSms0EJxrwT1WxcqiA5Fg1GZVZDNS
RGG/g0kgTq/d9EF76lKVmtVgzqOF13VjKuhovPdflGJsNIF6ud1PnFfe+g/Xr/S9L6U+qC7oSxk77EtdKJm9vMz1MaxdbpTUElDMknrufyx//c/xj/x6mmmBkC4y+EQ5vM4BkDlUj/uYAEh6rJDhRQnCygOBkvgPJaAmpw5WkDlfbMThcweXxKi6N1jgXeawRFKMNlrL2iT+Giw/bfiYCSMTbFTkmoklvmdVQsC4o5LIyjpfGOq0jyRFJezqGzkLoOXj+YZYf+ihnJ27n/NSd8MQshF6EkLrVO6Wv/Mw1AySkCgDjjITIbylUlZL7c/6BOpp/7xTnjt/B6tQ2UhPSMLVunmS0kfg1AiQ2ICJXG0D8c3uIPHIXxJ81Kyba/WsAousXMe0+vM+oTseCVs1S+5VilHU8R1pBY+QF1j/xF0SPvIPAUJ2lPpPDbyZ1eItpz9jIbkKTuwhMV3OpZ5tpS1kWWc7k9G4uz7yV4MMHIKGgXLu0OsuuBgXy/kU6m7fIXVCuosEL0MVvfUYjx/vXCw6dL1Bo+zaXtXOFBrZE3wiSJOpXax53LYpMNB2npt688HFin/pzVoabLDGSHruJzIjc5zIyh2o3AaSW4EQ5vqkifNP5BKbyCU8UEM8BZLiWzESzxWMrY2WsjpVZZTCHqmC6kMxoAWfHb4UfztoqwFA8ixbwqVWpFzo6gOi6cgDxKj1yPF2JqHWQRyvpAK1n0vp47Wf/wuf5wcRv89z078Czn4DIefArRPBCh/8VABEBjY8miRIqEdxlRmVZzJu5+AOWHnwna7PbiI8VkB1zmiI62kRsrIbYePlVjcRoFYw2GUAyR8pZOnorPLvgUoK2/NW7OF2gN/t/xYJ4cYn3+gZAdK4pU1ebZmWllni/DFllRCKw8n3WTn+A6HgzsdFdJEcryIxvsZEYrSEy1oR/0lkQuRdyKZK9FWRHmgjP7GP9vt+HMw9BTFsKXJkMywm96OcEXxci5lsazfnZnqXLzYM42l8NPLzPbNDFeRf6Xl2Hl7qzZuY5QKrmyWbr0y+Q/eExzp1+PxfEg+N1ZKdrLHhnpJHMcAMRrSuZqCM0WY5/upj12QJ8MwWEpgqJjZeSHqkwDyIyupvg+C5CU8XEJgqRC8pwBRzeCqNbCY5Xc2bud2H9Sbt/TYMabHVRqo4wLSiKeRbEAOLmgXQfup/cJKnOMsKqIkTLrtWX4IXPwsuftyXSBC+5LKzR2GUyr9mC6EeNSTltbZNsTgvq4izRvvQjVj71btaObUXZJgEkeGgXIk70WgAyVgEjDUZUAWR54S2e2xIgq15KuYvTtW0IgtwFuSqvBIi0qLkZ3mdkPdTcOKNcu24ivQTZFcgmrEo2ePp3iY9XkhitMx86PbGF5MQWYmMVhMZ34RNAJmtsXiAzVkFmsJzsaCOBI3cQ+NyfweVvubZDpvlEqNyfLjYXdG4Gh3MfROsc40XfawGIZEyf1y1vFiRl8DSZakLnJThU7xTXHmuWRVsmtfYtVr8zzMr9v41vdg/B8SZSU3tgtJn0oVo313GoiNBkqQeQQtZnigkKCALIaDnJkRpCY7cYQCQX6fE8A05mRHzdbgBJjxWyMv1Wkt9dtNWAiiC1qYEtqLK9F0Q3j2YbINHzV96XQKI72pBV3ZdcrrWXwH8G1IVRnek9vidto4fAtbpYLh0qAhuVxUOvRFkmWV6rcXN9hfXPvIfgwjbSE8UwXk9guN6IGpuoJj5RcVUjOV5pAMmM1pIWQE7/GsmLX7OLyajn0WaAiB4mCDmAvLKuTEG5neEdBBYtEEvZPuhx0iq3kfOTUYDwfSKn30VqopDkWC26jtTkzSSnthKbLCM0VYt/ahcBjaEimKgETZiONeA79g4y39TWYS9dabG0YSFUfKdgfNOw9xw4JNC6vBxANhhu4vD6/+m7xCMNPXb/5SpruCSFZNC0bjpDMqJ9Gl3NV1Z7eYQeI/Hdfl4+8uucGb2T5Pw7SE40kzpUQXa0DH/fNqJTJQRmSvDNluGbrSA0XWE0yoyVkRqrJDTRbO5oZmIHjG/DvS6LXEh2fAeM7SQy2cT5U/8NVtRVft21LY2onsxZCtHFgcRztSxod5edo9VmK2LnC0O54SlPR1vJtFqnrhPj8rUBxE2oOR92g3Oe3Akg8qWNm/41Ap97D+GF7WQnq2G8Gd9IHb6pOqKTlVcFDoFKgpk9XG8WKX2kEt8nf5ek7wemDV1libeQRoTwgKvSGGvRLSJ66lcHWQsTEB1EZ7OEWmvu2h9pP1jLM4m6y08Qu/dXyU7m2TXEx6tJTu4gPrWN6HQhgekKA0hoopFAfxHYPZeQGq9hfUFroxdd5xO7rlyGKpel2lw57SYFxUn9bE7bO0Y6cXbCYRLyuv9JeLwCDvt+A6atu5CFdb+nCnJHpjjp+JrbxXajFiFkmw1d/Mzf8dzku1mefgexid3IYjJWSPzQNuKTRYSnyvBPV9sITVUSmxAAikiNlxGdqCc6UUt2PB/GbyY1XkR8ooz4RIkpICYKyEzV8OKht8D3pyFzxs2Qp5MkE25qM4dho8iGS6pXHY2kSGQhddQ92/2IcJ5M5N7XOe48RVtrpFi6doDEbPGit6xOv+RZkZxmMuD4zhL6zDuJzefB2G4Yu4P18SrWj1QSmS4nPnV1IzVRZQBhvJaUtNMX/iOpxHmb6jKOW5m3R5GfBBBHQyPYhkshCnrpdVcjKosjpyxiwaxpnaUnSdz3yzC5xZIOiYkaElNFxGfyCB/JI3CkGP9Mk9X6hAcqYKoSJnaSmq5m7eR74dIjZj0sRjNGea7Uj1kOd4GbgaHHV0AhhF39Xw4gpsjEOE3yqu5J5bCebtEuUcpxpQmTZp1Map2stxGnlWQkLpF+/iFevPeveGb47QSndsOUXKRtMJVvHkNsqsomTAPTTYSn64hPVpKdKLKhpE1istImCxFoJouIyw3zZILpMpit57JKWj75XyDwLbd5qZKLXqcTiZ0DcS5m87J9KhfxQOKx1cTxCv3c5/SegJGznTG0DEJrT66x1EQWRGJj+5YKGDmYZlxgJLNmV7T6FIGH3k78iCaVfhFGf5n16Qp8CyWEZ0qvASCVZIbrYKLBABL/yp+g7rjatsCIEPfUn2dBHCFkQVSyobUhTric0Hno1sEDSMx2kEgQsSSn61huMeHK0yTueztMv5nURDnxyRoSk6XEZnYSnNuCf26nA8jEPhKH60ECMLGV7JEqC+7xPWrmXfS5wlhdjOcimJZxVkOv/jhzJQhe4G6IvTqQODdSFlKiIffOS9/oorziA4UdcjWdIxYkk9GcTRS1EHVLYiWlZ1n/+gTPTP8WgZk9MFsGozfC7E4k9FIgKgwMTu4jPNVIfLKK7EQBTOTZMauYZLzKKZupAmIzBUSmK4lN1pCcqoIj9eZl+KZ/EZ5ZsK6Y4q+WO0sRS+w8Vjq6mKLRO1dcRd2SznG8lkA4emfScVKag9K0ne5JsZadp+RJ+NosiH4i7NUt2VoRXamG139VP2SLlC79M74H9hGd2goDvwoj78Z/pBL/qXzCs8XXBBAFhE
zuMoBkvv6X5hKpKMQAoopbj9l64bUB4gmJBxB9TABxu0gkCXo70Cq2Y/kZDyBvIjVZagCRFRRjg3M34p/bbgAJTtxOdnw3jJfD2BthoYr1+z4EgadsT8L17BXmOsY5Qde163lOs+X0ziuEwFuC6kovrg4gBrLkZbfc1eusLtdYbtcGeFXMqeBVrVWTMdsXPpnN2PqSUEaWRWCKwvkv4//8X+Of20tmpgCmboLJmwwgqnaOjN+K6BGe3G0AscLSyS0wsQ3GC0iMNxKbaDQaxmbzDCCRqTqbcE3N1MNMoZXs8MW/h7Xv2e9e2HStopX9SXFYSY7WyXi1axZPuSSMc8oEeDFaMx1K3/vNauo2teWH9iYRraUYrimLJeK4qlepm9QVjnp5Z110JrgK5z6P73QzkfGtMPjvYfRdBI9UET5ZRHS2hMR0xVUNaRdNFqand6G6oPQ3/952JpIFMYFTJZ4eSOF6LpbsnWnMTVksCaRbn+y5il54ogU4mqXQ5JQ2rzQmCCArT+N78L2EjpQRmakmOlNNcrqE5MwO4nM3EDl6M6HpZqITt8FMswlJcuwGEieauPTgn0LwaaKRDD51c93kG4spulwNPdbviY25c/SaZdqkgZQ8sGXQYrbe0U16msG0gycvGy/pwjedY0pUM2JaA67da3WPrqGF6KfH9jXWKTIO4bDtcaLYTtfn9nF3v2yZvfgZeHKeS/O/juIM5gvIjr4BpnaQnKogOtVEdHIviYm9JCbrSczuID0rEG2HiWKSE7uITzYRnykheiTfaBs8Us3KWBXRiWqYyLe6r8SJ34LlLxKN+FHxT27OSBRw9/cqgJgVcTQSONxw0xHO/njrnTIpuw31IVGJl+PDNRYrOoaI6B7hRXRx1cOKbVGWUlnGAmtzjXCsGCb3kZ64g9hMA7HZSpKzVab9FUO83pE8UmHfEZlrwH/irfDUIeuWKIFywuTRzNMIrqAy559urmLdJFtecJoTTgmC7iORWkcVpbZ4LPUCgX8Z4fuTv8ZLU00Ej1TAfBEcuxlmbyQ5sZ3YaAPp0SY4vMOC+fhiHWcW38aTn/rvEH8SUhHXpdSzwB7ZbNmAkVPVCd61KMMmbaYNZ3Rebuge3TZwcicEGC91vQkLDmy50hmvXkkf3PgSCZRgqHPcy3qmx/Zn37mJxxuAE82SxBKCk1Zpxmzb7PVHOjl/5JdJzVeTmLmR+PxNxOa3E58rJj1db/FndnIvoYVCQovbSR0phekqmKqxuZTkbA2RuQr8i4WsHy/EN1tCbKqSzFgJmdlaLszW4//a39iW1NpuIncbjre6MV19LprwvAJzEV1Bbe7WdZZS+y5oFoCuKI/NeuaaLIgxRBpG6BPh9E+/5S2V1LwnqQvwgzn8RwWQQjIzDTabnJzSsYbUNQAkfrSC8NFSfAsNrJ78JXhyzNYY54TCBEgaT4v/jLEyDXK7LE11BUSbCtxyBLxCeImJAljtuXfR7X+XfpHVJz/J149+mOen9xI6Wgzz2+HYFpjZ7vzqsToYb4DRfDJT+QQWannp1Nt47KE/Bf8/u7XQlkN1gqlr1hANbWwIonxht5+eSz++2qpIGwog7p7sS/RFHr/d/UiQ9D06Ry6Tlh97aJCMXPWfmO0t9xXvEwkyT36OCyc/RPBYI/GFbYSO30R48SaSx7aTnVFscgeM324AWDu5ndhcmQHDgvHpCpIz9YTnalg7kc/ayTz8R4tIzAggFaRmarg4V8rKlz4A4ccM2LonKQ9XO6fr+UlDVBCdHaB067KQOopUr/V3bQCRiJmZ1yo9XaT3i0Z/IVZFbpccQObqYD6P9Gwt8dldJGaqScyWkzxSdtUjfrTMABKY9wDy1Ajax0TXIZJoCByvBZDNbol47ATKASr3Hc790szrZZArkT5D4uzX+N6J/86FiWYS09vJLNxIdnEr6aNlpGeqbGUjM7Wg1YbTVQSPbmflvjqee+B9ZF7+uPWINV7mfkSCugFUWQyXTFBCQWlpCYGbzPQUkWcldK9OezpQXLlx933ufrQOxgmSA5IHEr2pm77qP31BiHD4svsa9d4JvET4iy2sCiAndxJazCO8kGcAYaYURm83gAQWClk/uZXYXCnpmQr0ngDkAFJr1mP9xA6CR4vJHK0iO15OarqSpWNFnP3UO0mc+bR5C8o+ilqiwZWkb26idTNxHUAcPa7oh591+9cOEAt0AsvFCHwAACAASURBVF4a0HNQDSDy9TU7uWTNE/xz1bC4hdTRSuJHaokdKSF2tIjUXBmpo+VXNRJz5Q4gC3WsnnobPHXYNSfecE2c16FGFFcsiLp7iICe9hCFPOHMvZYja+5oK9ekb5JrrtgtcxF8T/PiQ/fgn2qG6ZuIL9xA9MR2YvMVpI7Vw9EaOFoP07eQmakjOPMGIvflc+H0W4g92g8KjqXCcppckm4AcRZDOzA5cAgoEgBPlj1gGBA8cus67R5ymkFKVOd54NHnlbJWulpAM8DbZJpnZtypV/FfQpnAH1hxIFWAHPHBE/dy+fg+Qid3ElwsIbJQQlru50wJjN1mbnZkoYDAyW3E54tMqXCkBFVDJI7WETlWR+B4CYHjRUSPlsJRxSBlpKdKWDtWxEsnbmftu4fdQigVx+aUYY5GdvOeW5h77FVK5EiU+8z/ZoCIE8p7aFLFZ8Q3ThpAVFK2Cl4M4jtWCYs3GCBiR6uIzu0kOpdH4liJA4mA8jpHYq7MCBicr2P91C/C04OWI7/iu3vh0StcLKUynWCIWBuC5Qle7rM5cNhRfXWk7lMh0kG5WW59gf8bo6Rnb4GZm2wSdP3kTgKnikicqAbFXEf2wuxe0keqic38HKkTb8Z/ooLgwx+G4DNYxzNxSmT0LkYa0c055DSj6zbzyuvU5KJLY+r6DPy6fu8ynel00uLcCvUOUBCudkculnHrXTwUXQU0ch+JRuOEoxGi6q6iOjW1Vlp5nNVP/QbLxwsILFYSPVZO5uhOmCmCsVtgcg/RhZ2ET2whPr+T9FGBxAEl7gEkuFBOcLEMeQkGkKkyMlPFBOcKeWmunkuP/A3EvmcWLCfsuv0NgdeDVw97yVkS0cVInruRn3K8RgsizrrOJwnW3Eo7/arlkj2AJNetSZdvvhxOXI+EOjJXRfjYzYTnt5GcLyZ9rPyqRvJYuQeQWnynboOne627R07IjWCeT29CJMmR9XAGJCeTzq/yAGJafJMF0mSe+i0ZktIxsjGf279Cu+I+/0k4/jY4VkDweAHLp3fiv/dm4qdK4KiAcxvZI3tIzZaRnXsTyfl/R+z4Vvwf/yV45iFYWbkCDuOsxF0mxeXvjcE5wc8hVs+lqY3usjLuE3aJuuHceXbzkhH53SmbrwqT0tq8jbkrJSDsZ3+KcPxrXg4EEpYWjWT8ZLNhL610kdS3/orLp+vwHa8nNl8FitNmS2GiCaZ3kVjIJ3piK8mFPNLz+aTnC0nOl5I4Vkd0vp7IQhXRhQrzOCw+mSknO11kivXisUouffJ9cP5+U845gOjWc5jYeJB7IXf0LInTjD9bQfwvAEiALGvE8RkjctpL5
lwzkWgfusfvZW2+0gASP1ZOeK6K0PzNhBa2kFhwhBFxXvc4Vk7sSCnh+Wr8p/bC0x1uUcyGL38l4HXC4Pnem+JZ08yirATqp4FEk2WpjCtcTGsWUdYkAr7vwIPvg5O7CJ0sY+X0DgL3XU/slGKtW0hO3E5qtpH4bDGczCMy8/+ROnG9CU30kTY4q/0qvASHSaMuwAuk9VDXJR2UG7lrtDe0TZlK9zyA6PP6jM7JBV5Zl/uXACkolc3JDfUEU4pep1/1nzqze1NNccFO/bb0Q9EAnJth+aFfZvXELgeQI+UwXQGTdTBTRXp+J4njW0kubietGGUhn8R8KbH5GgNIdL6GxLEqMrOVruTkaDnM5BM7utOs0tp9byf5WDdwecPF+tcCxBFKZ2uIaD/97xoBoh/QlHzAupuIEcZMS/OK8VrXHDKA+I5Vw/EbiM9VWpYiOJ9HaH7btQFEBD1SQuRYNcFTzfB0K6TPmxrZUBgeWFyFqjfP8WqAmEBtAoho5lkefY/ejicypHNd9bJpsmqynDxD8rN/Dff/EqHT1ayf2kbw3jcSO7mV+Gw9sfFbSM40EZ0qgvsLWZ/8v0mduhHfXBmB0x+A5z/t9loxRomWHjhela7dDN7cfclF0CdeARAxW7PhueGVWogvSn1LdnNDDpyjyWsLyE8XHUejXN9oS0OnI07m1Jwv+lVWHv5Dlk7tIrpQDrM1lso1kBwpJDOfR1JJjQUHElmU2EIpUVmO+ToDSvJYFdmjFdYBhvlSOLKTxNGdxE+UEzrZSOSrfw7ZM0YH0ws/yYJ4vNywKAYIj9b/+wGiX48Ty4RYT/qNWQbnhJo56JKV+QmR+fYCsZO7SR65gfBMuQVhMd3kwg4SC8XmZsnVev2j1Fyz1Ik6/MdryHzzv9uiJk3+iiAJW5TuzK6yOJbl8LSxCb64bzNfV5ILRj9PCnPCuPlohNbn9GJqDZa/QuJz/5W1kzUkPr6D0OIbCc/dQPZkDSzuIT27l+RcvblWodNvImiZnRpiR29nae79pH74WQhdtM4dmpHOxCOkUgnDhG4jlFaoriDbyZ6oamlgZay9+Sa954ot5ZopFrgyg6wO9loguBJO4Yu7NKeyYQnNxMuGaH/FTMZGNpu1phGvCYrNb+bcVytHcXM1Ya3I04XFniHx/S7OLNYRO14Es41WxY2s6dFtJI9uIXnsJtKLWwwoscWdRI4XE1msMJAkjtWQnquCuTKYK7KJR+YLyS6UkjleRvJEGZfu/RVY/66xIpFME405axzTfSZc4bW9uZmBUj5KUGyY5ddWENdoQUzCiGcS1oI+Z0HEn1AsSkp9YONr8Ogs8ePNJKevJzpTQXxxF4lTNQTnCzyAyM0SQF7vsZjssQoyJ2os+E1/408grd1rnQCr2lMEEjDcDPoVCyJBs4kiESznkohWr9I4r6Cthwu9Zn8qsfA/D49PEPjEW/EvbiN5agepE3nEFgvJnqgnfWS3ASR0Mg/fvVtZO1VOaLEGZitIHLmdcw/8BclnP2kd/qzDeWydYGgdXzyCL6vlrqou04y+A4mu29DjTa/ruQO/nCclTLyOJBbHZAhFde+OJFpfZDeogFrd1JVlzAgkVw8Q0ctau3qxTiBqUmA9gONPHuPsqSZiJ/JhugkmG+FoPtn5LaSOaW7kJjILW8yKRI/nEzpeTOh4KZHFcmQ9MnPlFt9xbAcs7iS7WAiLVbBYAcdvZv30bbD0Vbu7dCpBQp0WBPtEBtWpWkX3qxmoC/4/CRAZCi29EFnECAFTfRuCwSDx8GXX3PnRw8QXmkhMXE9itpTMyWbS9zYRPFpCbKHYAjbFIjKzr+coUDFfSWaxitDJEmJf/U9uua2shGbPdbTyAjkUShp4KterYVRcYieKkhqmXTwUGKmvIOLVdDaQ6PxYABUfJr/631iaqyB5qhTuKyZ8bCeZE5WmBcXs4IlSVk+Vs3KqjpAYfGQrHMnn/HQTa5/9Y/jBGCz/o9sLnCTBDJyNWR5QuUDWSVviN6kYSF0y5CuZPCgF7FkOA0cOIK57pLXv9DS9aVNtkJk8A2vfIqG9XdIqQLxKgIg80jkxZ3k00+JPu6JOKZ3Ui1/k4ifuIHpyB8zsghlNFheY1UgJJPNbPIBsJ7pYSPBEsQ1ZEiVgsnPFsLATjm8nfSKP1PFi0ov1MC93fQvhU03wo4ecxUyGScmdV0ugZBrNwUrvbeZbjqWeuvCsiGmNK2+96tE1WxBrTOilUS2TIqaF427f8PQyxB6Df2kltdBAavx6kmq1eV8z3L+X4NEy4oslJBcLrmqkFmR6nUaJnCok8MhvQ+AJd/+qp/EQq5lm5W9+HCAyyR5IcsccRV9FKD3NvbX5aCDMLMOzR1i7/zfwK7g8WWL3FZ8vILOQj64zdLyW9ZMNrJ1qIHq8DI69CY69gfjCDpYWGrh477sIf+0gXPoyiG4qlMzmaqOyrKvlaTpMUn2e1FZJpR1S3RvaMGdBZEW0j4abuNVOssJTStZUiQVtDbD+T0S+P8xL2l8ktXL1ABHwFJtph9e0RSH4iFoiQG4gS4+z8vl3EjldADPVcHQXmYUiEioxOb7NgMLCNjKLO4ieKDElFzpVQPREAZn5Emc9FreRPbGFxGmXHUwu7IGFBlh8s3khsafnIbPutHJc27ElkTVRvJhbcGhu6Sb+/R8FiMmV/H1ZEYFDWw2oc7hptYtw+eMkv/6nZBY12XM96aN58EADPHALIU0aCiDH869qpBYLYEGFcaXETu1k9TO/CivfdvcvcJg/Yo1trC731QDR5JmbgZX4vwoor3j+E9DiEdw8CpWB+n5A6ls9nJ+/Df9CGdn76okc2UbmxPWkjm8lutBAaGGvAcVcjoU3wOK/IXP852w+QDHM2ifehf/Lf0/iBydh5QnH9HQuThCYZadlOhRnBLxq1Zya1A2LAa73sJ7p7KglGFQ2HHYZvsSj8OQQlz71+zzzwIcgffaaAGLOfjwqBFpdmCJRVQObcQudIfyP/4nwac2GF8HxalKL5cidSp/aTnrxBgwgSvkeLyNwqoTA6R1ET95MZqEA5nfAyTeTPn0Dkfu3E7q3jMTx22H+Fli8kezJKgL/Mux6XqmJtxoWqrIjE7e19IpBJQI5UdBjBxbRUu6F3vnfaUGkJfT9qkpVoy0pMRWvZdVHVcszn2XlsTYCj7wbTpbB9PVkj94I95fDx/cQPVZN4ngpqRMFVzXSx51Pmj1WQvz0Di4++Fay5796RUHYvcv9CFi7HgtkJTWeiyWAKA5xhHMpUWdRRLhXj80AcoDR7UtDxzQ5Jq1+8R9Z+9yHCdx3KzxYR3zuTWRP/QLpk1vMgibmbyG1WEXqxE7Sp64nffrnSJ1+I4lTNxK7txj/6V1cPvGLXLrv9wl/tRWefdAqf4mqe7sAoXuR6GkyUW6jHO1NfN6It5TCzcHFuRpypUg9C+cWWP38B7h8qonAI38AqeevHiD6cbl8aW14rmVVMes8qcXJcnYkC4lv/iXhU+Wkj26HUyU2iRo9XkH6vjxTHizkWWwRO1FJ8HQ5wfvyiNy7jezx
nbCoz7yJ5H1vIPjAVgL3VxLVvNP87bBwo8nU0lfarMsMGVUmqLGGytxjqLFXPOEqEF7NSSlKDVeaYkLykzXgtXY1MZ8jnbGsogxHPCIOnQdeIBl8maUXP8sTX/gAlx/aC6eLYe4GMnM/D/fmw8ebiS/UGkASJwq4mmEAOV4Nx4qI33szZ+69lcTL/+BkW0DYAIhm+VUOc6UOX6DQXI3iEEdAN8MqojnC6VUv5bWhbXKkdmARQMICiAJmE9RlePE42X/4ANmPF8Dpf+uAcCKPrCzdXA3MVZJdLCB83434HthC7BPVhE5W2ERj/HQhiVOFRBaKiJ9qJv3J98BjnfDsKbj4bQircYRL2arzoqINSxbqXnVpnjHR01xaN6wgOp0iE3yJ1I8+ZZWwZxabid5fAI99ANJPXj1AFIOpEljzHxpErOOI1uPo+lTgmf3ufoKnakgeeyM8kE/8RD2h4/VkH1CM9iYEEHkAkRNVBE6XE7h/O5F7t5A9ngeL2+D0z5P4+C/g/8QWfA9UEznxKzD/Vpi/CY6Xcf5zH3HN4DLnXZeS/5+99wCvK7vuey+HBexEryQI9jKcrhLLlhRFUYrlOLFjRYmduDyX51jOs2PHzy+2FSdOHMe27Eh2JCeO7TiSZoYVBAESIFhBdICoLGCvYEXH7fee8sv33/scEBwNZ4ag9OaT7MtvcV/ce8655+y9/qvttdfWPimKEmn9TkZCxGrSQC4GvrICFyFI3hUgQc/qgUKNoFZkR/3x3p/5zp6nxFgJEa2nMWkG3g3w+0kPH+Rq62/Q9+bL3Nu7GjQgXyvA+4vl8GYF7H2FzNd2PBNAHGkQOcL/ew3ZXWUMv/4iXN4T2OjGQw8yYZXwJ6n7iOctQEJwSAmGAJEm0bO9Ez0CiNZsG7lu1JBWil2G/t9hatcLUFNBamcpmTdWY8D81XXwlxvwv7aO6d2rGd9bSaz6BSbf2GzylvzqCqguwH9zqZWuezbh7PsY3pGfgP4/hLuHIXnZaESbfBgkmYWKZbbVYMZPs3j3IN5L+vrr3G/+Ja7v/hRX/mIb6Vr11S+aiolzdtJNBFAMoHX00hvTJuImD2pC/aF0mL4vMfn6R0h+dSXUFJHauY2YUnH2VuK/XmCjU1/dgNUgG4jvKie1s8Q45Xy9BH9XLpm9uUxXlzG1dwupNz6K/7W/AV8thTe2cefAL8Jkn82yVgV4E8lThzikJLUDs0qjKWGouZ/3Cg6dG1FKulmPrQ7VQ4V8IR4wyzoVU5ddp/yfADPqGMMWWjgfOMNG2GoBzg0YP0i29xe5Vf0Sk3VFZGqWwe4iUDRj52rYuQHvzZdxRbu24O5eP2ealk9TvRn2V5KQ3d/4y5A6C9nbSqNDm6rIbZVE1eOFzyAnUo8a4j1sTY+GBz2xtUfpfHlbijKNKe6uPsjEYLIbzvxH4oe/l7Fdm4ntrSSzbzXe3rW4O9eTfXMLqV3Pk9i9nWT1RpLVlST3lhDfk0ty7zKy1ctwDuTi1JYQ21/JWPU27u77EMO1f5/Rpp8ideY/wJ3/CeMHIN4N0SsQGzPBEeOiyOSTuZHthZt/SLL9hxmv/zATh17l9u5XuPS1DxNv1sKtertPn32cp/9fnRbyCbIcVJrVztZLYpsgwnAz0T0/QqZ6De6BxWRqVxPbWYH7xhp4vQp2rjf84O7ahLN7A+7uKkPsqsLQ7tW4e1bj7Kkis2cD7s7n8XZugzcrcXa+yoM9Pws3DgfFHMaIE2XKi5NUiXYDYPGt5sBsJFOC0pimM/f+zo8dUYRnBiB62BAkRgJoMY11euxcQdghYg2hNGvX6MjgNQDRhu7tOKd/k9G672GqutyCozoHdufDLgFkA7z5PO7OF3F37kAdM1eAqENjr6/F27ce9pfh7NxItu5HYKIR3CtkmWQKz6h7yRITZVOniYLGasp37qQnfasu0nUFQJXFlDa1/TAJY21w7U94eOQHGa3/AGP7q5jcW0Jq7xq8vZtgz0uw+wXYtwl33xqye8rJ7Com9WYuyTdXkti5koTs75oy4nvLGNtZxv3X13D/jY2M73uFaP3HSDV+H+7Rn8A98StkW36fdMefEe38KhMtX2Hi5K8TO/rPyTZ+GK++Cu9gCfGaNdza9SpXq/8JsbN/CpnLgTnypCd8D58bgOjBh4F7xoQROyjEapK0bvUxtednSO5fS6ZuIdmDhcR2FeMKHPJLJSx3rZ8TJXe9xujuH4aLX7fzX0wHAtEnrXQgSSyFMs2CMIX5Y0E0U3wdKAMB5R1ej4d5jRgN5gTCuQFdaDaZC4ZXDxlCnSHtcRku/1eGD34vt9/YTqx6E44xr4pgT67VIm9uw3vzRZxdz+Ps3mKkhrNnPXOi3RvI7N5GRtJmf4H5reje74NbO8G5Y6I+kvCKqsxokFByCCDmed+hd97tq/BainKmPDzVtVXXCDlmYdZNkre+yoPun+PSgRe5truA2IEiOFQJ+7QKcT18bQN8bRN8XZNfWyxpnuSrG+z3e7fDni2wax3+m3Luy3Dk4O4rxzlQydiu1Yzs38BI/YuMHHmVh0df4cHhrYzUljCxbxmxNyL4e5ea4yd27uBuzQ8ycfr38cds1XSL6Hd70Hf4Xs9rQs1Wg0hzPAKIA7d6mNr706Rq1pOpW0TmUCHx3SV4byg/7dkAkt69g5E9fwuv9z9C9Kzpc5Ut0+/LjEopcBIKRFum26xR8iXcQ0Vg+PnJzxeZYX7zRpe2mmHGHwnBYa4RRnoeuSjupBbxqlTMLbzLf8ndY/+Y4dpXmTrwMun9L+Lu3o6/ewPsLYU95fi7tuLu3kF2z1Yy+zbi7t0wd9oj0+pVUm+sxd+/HKe6kLGd3w1n/hhSDw2zChgCiRk0PUPIwCbi8+SOeU/f6Fq6sCjj4afTpNJZEloKaraukFq5Rfx+Dfd7/jW3Gz/JWN1WsjUVsEtOZoExFYxW3bUZdj0PO1+ANxTnfwG+qkVmz5tiD3x9E7y5DnZXwp4y2FcK1SVk9xeTqCliqjaPsbqlTNTnEDu8AKdxHhyeD/ueg+oiMw5Te/8uifbfgtFmM4v+XqI479oPBiD6TzPzY2/RIA7c7mZq708agGTrlpA5VPw4QHYpJL5hTpTZu5XJ/a8wffJHYVg75Gr9kd3xV0ZVLJMwOLAYkJYL54qCWgUCybsCRAfp+YxtoAtYBycEiMn7MULRLuSxSxxtRXdjxEdVCmIYRvfzoOknuLhnB6N1W3GPvIpb+xLerpfwd76Av3uz6QR37zoy1etIVW80lN0ngKybGwkgNR+0E0v7c0jvX8bIzhdItfwaTN6dCecKJNZJD8KiQce8S9+8K2/Y5w9imhKdXoqEN8GoO8KIM82Yp0rAQmIC4lfg+k7iLf83o9UvMbVzMfHdEZyGlWQO5ZOpKzLmlHyV7M4qk2vkfG0L3td3wNdfwn/dkv5Of3ULib/cQPx/rYc9r8Gul3B2bia+s5LpXcXE9xXg7s+D/dLcq6H6Faj/AWj7DbhVD9mHZng10uqKZ3qpE01HWic91CAS3CbfY7iV6L4fJ1OzCad2BdlDZSR
2l+FrScCbmwxPeHvWMRcSH8Xrt/Cw9ruh5/dg+oKZb1AQS+WKNNFqx143Ewh/41dbnjf3+C5MEDGDrPNNjdhAe+hPhbi1LmKmGohmozWfoOWfds2RERfJcXjQSKbvl7hT/z3c2rve2NvZui0QAuTN1/B3voK7ZyvZ6jWkasqJ799gHNTMMwFkI96+l40GcaoXk6pZzET1ZkYbfgLuXLRYN4l0EixKD1e0JeAKo0H0xzO8dLrsN8kVk+ikvpEdPMUUUSbNAiXbl7avJuBBA5z5fxk/9ho39y5j5GABD2vzeLg/l4f78pjYV0S8uoJMzQao3YovU2zvVti7w5AmIOWoOm9ux1M0aOfL8OYOM7ckXyZRXczk3gLGdxUz+vpaxt/4AG79j0L/H8Gdk6AFX1m7F59qQH3zACJPzFaqFytagCThbhPR6h8xAHHrVpE9VE5sT5kJVjwrQLL71pKuX8+DvVtIHFZ29Fdh/ApMJMzWcvIPZT2ofJMBiHwSw7wWIIGcfEcGiFgnRikLIhtINxGeIIIoiaDQWMasGNTKQaVsWDVmBv1hO5z+PA8PfZwHNZuYrN1IbH8Vmeq1ULcdf8/zsOtV/F0fxNnzItmaNaRqy4gfWE+iZiPZ/etxqqvmSBvJ7txKdvc6MvuWkqhZSrxhHXdqvheuttjZKj2AAYPmQSatsDMC4R375T1+6eE7SbxsAleFkM3MvECiPlL8TJVGAuFlR8ok8ZHpxbn9RUZO/xhjJ/8eE43fxcO6l7i/bxOj1WuJ1a3Bb6yE42tIV6/A2bfMkLdvJX51oBkOlMEBTaSthF1L8WqeI1kfYeJohAfHl3P/2HpuH/448bZfhbO74e51u1+3wKx5PUWkg4Vj7/Fh3/Yw48eZb3Rhm4elrBYjiBTRu9dAouYzFiC1BTj1FcT3lpvADLs2wt71+Puq5kTim/SBDUzv20h0/wtwSnuu1MDEPTN3qTuysA3sYIFDKiUYFv35LgqEiPHwTY2lIIw7S2uE6tKGxkIG088Gxyrdu//3SDR8L8NvbuDhntWk6jbi12+GgxvgwEb8vZsMSLzdL+PsfYF0TRWJ2jXEa6sMZfdX4eyvtAB52rZ6vdUee6pI7c0nXrOC9LE13K7+EFzYA9H4jEulaTWl/tkZ1PfQM2/LDo9/aFfraalQnKnsJAnHrs03PyqzypTCCUJd6rZQQWu2lxtAD9z8czj728Ta/hX3Gz7D7eq/yZ3qVxk9sI3pQ+tIHV5Dur6EVF0+yZpckjX5ZA8U49aVwcE1UF2Jt7+CWG0po42rGWnZzlj/p5i69C9JXP0CRLshqRBwIE7VamCVGiSz/BlfhsH0nzFd5BrbyxspqjX892tIHPh+sgc249YWBQApMwAxvocBSGUAkKdrFb1M7Nlorh3bnUviwA4499swNWhuQjETC5AgvPgNAJGn8s5WRMSIEhMKCyWt1UIWHCrXrIJpGVw/Y2qymlTd7Ah+8gZMH+b+wU+QqN1MsuZF0vt34NSsw62pxN+/FqrXQfVGQ17NetyazWRqXiB14HmSB6tIHlqDW7sW74Co6m1bt2atud7bt2vN6j1qN+HUrCVzsIL08RLuHtjCaOMvwd0ztockQJgmyR0eRm+RNqkINrbwLPyh/g75TuVJVWjBqi3lJgWaI5BYxsWzCtrwpz42zGX2c7wNYwNw6yjZ83/B9On/xFjL5xg98c+41/ApHjR8jJGGv8FYwwcZa3iNcVH9Bxk/9D1EG/4xsaM/Q6Ll10gP/BHO1Wq7tcLUdUjIaQ2KgolHdE+iQIOa33+WDtClpJ3N9SQ0fbPxpX7CAuQBXPwTotWfNP4R9atNQCG2rwRqt8C+DbB/PdSshZqqp2/3byaz60VczYMdWkaqeinj1R+Bi182m+RIu92Pxon6Lqr270xmbPqBKS3rEc3Kong3gISdFhhkOkErD2xVjRgJN246wfrwStWYAO8q7tgBJgd/gYn6DWTqCnD2b8WpfhF3/6YAHAJIJdSsgQMVeLUVZGvXk6l5idSBl0geXEvqUBlu7ZoAGCFQHm8Ftnei7G519nrky6TqqsicLOZhXQXjDZ+BK/tgUqvbNGJavz3CpDOM1k+LO73A/Jorj6jrHgHEbjFtfkyTqmLIwC80bWDs6yfDuROda8ZH32mFT3oSErfsjrfRdpg+Qvbqn+Fc+zL+tS/CtS/Atd+Ha79r6eofwd39JgeMBwMwph2SMvam9NshKAJA6Oc0vtKidjZZUcm5v3SuwZveCCkz/l7wXM4wXPg9YjUfNRVdLEAKie8vhroNVoDOAEMgeUravxktmzA+Wl0O1D5H4kAVbudPwugpM5OvqKI2/jGRaPGBOt8YQFlSbuI9AEQjpo40QFJ+ihxxrfO9h8MYWSWhqbNlQ0tAph7AZD0T5/8frh1+nujhlbgHF+PuX4e7bwf+vu341QJJJf6BEqgrwj9YhFe3Gq92A86BHWRrt5M9WIlzsNx+iNxyWgAAIABJREFUXleJ9wRyDqzmSeTWrMZTuLNmrVG1ybqNOKdKGT9cyOjBD+F1fR7GtB2b/CsFGSZJ8ZCoP4JWnRkp9wwcolOtpp2xWiy36QsxvbhnFpMaHAS40TgJINNmpl+bwqiCvNXWnjbpEYWJgEpTl6bJToEzYZfpOqopO4HvTpF1EmZDmWzaMRntM4drvPSjgeYSKFRjK8U0CcZNa1cizg0keszw8ULNFMpbA5jsFTjz6yRrP0hS0bRDFcRri+xc0EFN7lbBgSr82so5ETVKen0Z9m2DmuVwKIfsoVwmG14mPvAbJsUGd1ZIVzyuLcudJI6XfU8r8iNGus0ARPlKIzgMGzLLddQLAocy47Sr6EQP8b7Pc+Poa9xtzCd1eAkcXAZyGvfLnHoeb/8WozG8ukL8Q3kBQKrwajcFtM4C4z0ARBrmSSStRE2ZAWNszwYSBy1Aoo25jNRUMnXkH8GNg5AYMdJEjnMKpSOMkxBoxDx6vmd46XQTCg9MDRPnEEOK9JkJENif0s/ZteTKIrabKkSJMUXMGIBy6xUWNqFJreEIuU9t+F4caC9kri0MKJCmIVKuqOZfdJ7Bls4xzyetoXI/KvIQI8UkCSMs5DPpgnN76dLKhlYxa7PMOdAguj1T8Do5iN/zOdIHXzKpNn5dGfHaAkPGfzqwZk7ACAHFgXVwQDlvyu3Kx9+/FL9+KWM1hYwc/bv4V/8bZIZnKQBFZWM4fswsNdbYvNsrYkSg6SOpW0VfBJAHRnuYkpZCnVFNHkzfhPN/wp2Df4fhA4VkmwvIHsrHqyvGq8vHqy3Fq9mIe2Ajbl0x7sFc/Pp8/EPlJmTJgW1QVwEHpWLLg/dr8A8+mdzaCp5EBiC1miRcS6x6PbH6KtInS4keWcF47UpGa1+CPjltKlOpNAPVhRKDxNEaZpM79W499I7fiwMlOZTNaplY2lYRIpn+0i6PAhzKeQu9dHW4zpVWGwm09YiJss3YR8pk0DWFlpCkyeV5aofXjLaHU4DACxxRa9EZ5jSo1Y3rBFWdUXBC46qJvOngnuzl3wOPPLEHZK4pwqm0Dp
NmMwMQDy8zZdKO0m0/TKZ+G87+CqgtJXEw35B3sAxqy22gQcGGuVDdWji0CQ5uxtldQWpXHv6hXBIHcxmv28B002fhYYOZ95GNZarSe6rTr37ROAWq9YlPqF1u1aPGvLILi6yJpTmDQLJIa5g9z4fh0k4mjv4YD/dvJ3E4F5oEDqnIdVZLHMpDD+4cKsOpV0ivALe+HP+gkL4NDmyGulI4mA91AUgOVrwjQIxpJvPsbUiShAPrze/L6Y83VBI/WkyscQWx+kVM1FUwffKfw/B+8GQ6iv0yaJcpMYZjp7vfoXve7St1tFIVVSvLmlSKlivmod+ypL4ctyWQjFEVLJZWv2vQXG31NULWH8fxdJ1gIZRSVVRqXNmgIam0p8iohwwyxaQTQs2T9rWKzsNxHLOXeNrXdpcqxyRgKEQvsMjcnLm5Z9KgFiBBxUbFdmcAksVT5GzsKNMnvx+ncaPxQ2Vyi3mTh1aZ3DAOCiASmHMFSAXUr4H6dXj7NpLctcYIaw4Xkj5YxKiiWmd/C8aVOTBNwneY9jX6Fhi++PpdJETESixZw7bOklSmBtYkmylOrm18YsMwXM10288b2z7VsA4aC6FOam0HXs2Lxodw6nNxGueTPTKfbEMe2UOrceu24tdug9qNIMRLatQWQ22eBcrBclxFs55AzsHVPIkETmoEuk1kG9cSP1LK1KE84oeX4xxdQOpIIQ/qP0ri7Bcg8cDIApkk0pMaXLNB57t00DtDRKiQJpBmUB+K9F5kxH3wffDeJNAFzKnTgnXT0jTqcwk0Mbg2qElnYmTMfoCPrqfUEOEqBJ/RFvoj9CNVFE9FGHx5M3ESZvmrZmVmfFOrNnQ74a3q/Dm+LEDSpPwk2cCn0z2ZxVyJh/j3DzF65JM4R6qMteDVFBpwpBpW4R4shEMSls8AEAMwCdtSOLQDr+5F3AProaES6guJ7i8n3vSPyF79Y9zUJZR4orCvFLIBhgSQ1Q5P7IGIzaGRhLOTPDpZp5n13Or41Bjc3k+i++eZOPZxEkd34BxbS7ZhJXHlPx3YbgDi1FXh1OeRPTLPkgBysAq3bpsh/+BajMSQ31BbasBlNIkBSEUAkG9sLTjKA5A83kqrULsODm3AO1ZF8shqJmuKiB/Kh5OL8JpWcL9uPZMdPwsT/Qb1inzLsJEv4D1m2AfqVoB5K5lONGp2piPDQ+wH4jJBT9pEqyHU6u9gSwIdPDv5U1wUAEQ8rbQIs2RZZpmksOeazUMdXwaU9vASqWio/tnx0TjJjDNRGXn8Kd9qGhNREUDSZgY5hK05XvehC+iPbwJALJ95qJBENp0x2knmi5ZQuLHbcGcfIw0fgSMSjJX4e0vJ1JWQqi8kVZ9PtrHImN/+IQuSp20NPx1YhX+gAI6+CMc/aMx7o5GOFuDUrWKiYQuJgZ/FGzthxkQjpa4y5rUZUv1n1wBJAIVkgaP9QdwL4N03kkn9LItapL1nTOdPNOIP/hzRxh1m0so7+SLJk5t5cHIZd48uJN24Gk8MWl+J11CGd7jAkt7rs0PrLdUL1astqUPqywKqwGuowKtfPYe2DBoKoKEEt76KbP0WMod2kG1YT/bYQjInIsRPFDJxXFt3/RkkboIW9gdCdzKrFGhxipg5ULe2v6ykNe8FHKlSda3pUYMfKylDDa3P9b06LaTg+BBJT2hnO/LhIRZ04Y2Eg/Z4oqh+X8eb/3SoOXn2OXYBmO5CNHP8o0NmfWh/8an/12+q+0yKm2z7MaLcI+3fhvR1HrT/LtOHP2yiV1RvsNGmA9vM+ERPFDB9MpdsY4kZd/GG+OBpWuor4FCZvf6hqhleM583LsM9ugSvpYzx4x/GH/oSxIcgOYyTiptKPNFE0G3BpkrihTAlySwBIU3ELJF1Y6YCRMyz4FDI0Umrds8AXPk8tH836YNL4NBiOLKZ5PGtPGjK5cGp5WSOluI3rJk7HV6NP1dqLIYjK/AbC/AaNuIdegHn0Es4DRtxji0ne2IBiWPLiB/fQbrj5+DmPltWx1UMHKKeCg1I6UokBAAJOT/g70c5ajJ1JH5tp4aHGSZ9as76DjlBYAtki8cUSYZJowW3Kit0nJHWf0288RU4pIRJpZVshwPbcQ5vYPJUIZPNeZZ/5jj+NJRDQymmrV+L3yBSmk4x/tHleMeW4p0sJN30Aqn2H4crfwqxQcjGSacgqjVVZjwVvbVmaTj/p4CKQBIxYy7fL6mCW5YFHH8YEtUw/pskWz9K9nAZ1ETMRIx1qDYYcyZ1fA1O4zOAQw9jOqcc//AcSB0RAqR+qwWIwCGt1liKc6SYeH0BiRObud/wYaZO/yKkZGrFSCWSJnPK5mfJeQ0UxGMiV3+IA2ymqhWXdnLNdux3CKPP9TFkqpj6COqTSZL+/UCI3CF2/Q8Ybf00ySNbLUBq1oGobi1OYznRk8VMNRWSlYCdy9gfLofDxXA4DznlHLIpTv7hUrwjefhH8gwPZA9vxm1+jQdHNnO/9R+Zbau1dimbcEwdcw28xlK+t5YxK/iufwqLi8x6EBN5kWUQ6uJUL/7Ir+Fe/hST9ZW4tQVQuxRqta5Ys+Mb4dB6aNxgESsmD5n9ads5do7pVAOQPPzGQryGDTgNmw1gDUAa1uEcXke8roTEkUruH67iYcvfgzt/AdnL4GgpsawMJRfK1LKZy+E8hkWMNIY0jAASgkQdJdH51y8xlqpqJh2bnJlOaxJTxfmGGOn/UUabt5I5oiiTQvqiUuM8e0cKiZ8oJnailOyROQjGkGdkQTSugsY8oz28hnV4jeV44gsB5fBa3MPbcI5v537jSm4c3Ub6whdtpRhV59dgy4IOMtY1stYHF1gUrPKIRLlPQkvtLYxAD/mwlvTQDzPduo2E0tYP7oD656FuG+zfDvuVyr4eDlaCNMiRCvy5tjr3SNmcicYyOFKCf6QE92gJ7pE1uI3r8Ru24TdsIXmwlOThIlKtFUy2v8h474/C2C6zTUJoVdn5ChsMmq1AbM0s63Q+8s6kUWRu6ci/2kDRVECcaSY91X1Ut2gtfBRGD/Kg5UNMnVqFe7QIDhdBQyE05MORlfjHckkfLzOJpe4zjL/GnSOFcCQXjq3AP1qIf3ijGXsatsDh9XBsDenGQmInyhk9uZV77Z+G+18CX0JSmylZB01NCBALEssPkYdcYJrbJlpialqNNeFc+vdEOz7CZGMp2cMb4ejLcOQDcPAFqN0OhzbbUJom/BpD5g4Z/Wnb8Py5tBVwuMoA1D2+EufECpxjZThH1hvJIYBogU5aczany0h2r+P28RdJX/lVu5FmQuvtbUc8Aomteh7+bftPjrK80TCEIa2iCNU3IV/821gRaeJ1iklGmcTsMSQOmxyAi/+esRNVJE8shKO50JgPjSuNv2gY+ViuHaejFbhH5zLu4TkCSBEcXYl/IgfxgNu4EbfhRfz6HdCwEY7bbA+/ZS2pjm3cPL6O6NA/hWSdWQVrXIxZ4XaBwwQFA20SGeE8ca7hch9nopn4lV9lsvtjTDVVkjieS+p4Hl5TKeljVcTq15I6vBZOrYNTRbhHlsOxY
jhaCkfL59jq3DnSkUo4vBWOrMM7uRS3aRHuiSLco2vxjmyynzeU4B/Nh5Z8Uq0FPDhRznjH98L1P4eoainZ2sICgt3yTHudaDsHK1FCoEhaKnxpTS2p578GiGbRJ5hmTCaqOEuK9eGfE2/7hNEQcpQ5mmfp+HI4sRKOFbwNz8xx/MV75nqr8E6swj1RgHusEq9RAvwFOLwJGpfhH1sEp8rJNK/l4ckiRjtewb/xKyYbnawmZAMtEqxAFEBCikS5RUZrE9whssP/i8nuTzF2tJh4YwE0lxA/lkPs5BLGGldy9+AqJo8V4Xeuga5csicW4R2f9cAhWN5rO1dghOeFAGncYDuoaRFOkzRJEd7R1SBqLIKThWQOLyJ+dAmp1jU8PPYSMc2NxNpmOkeTXjaLQLPOsRl1GwJFALKhP4FExQCCGXGZpn9FX7LTJ0iaYnFW7I7BvX/DpMyeYyU2BG8kfAHeiZW4J1fhHRMYVsORUjgSCte5AqQIjq2C46vwjhfhHi+1dGw13pEN0LgWrYOnKRfv6ApSR1eRbClnpKmKye6PwvgfgDKOBRAZA8EiwVCLCCSRTDZqioulbuxntOVzJJtfhpMlODUqCFBEtG4BY4efY+T4Ukab8phoKSTenofTtRg6l+E15eKfKMY/XvK2LSdKzfXettV3z0LHK+HIFji6BY5X4jcV4LUuNERTnr22BuJoARxfin8yl8zJKmInX2Sy6XsYPqnatNfMwiY/JSfclkBS/ELLUdVRIoHEmlryOYI5Eb0VfYcDJJlM4rrGw5gRA0plUbE59Uk438h0FG7W4PR+F7GGCN6BFXB0HWgW/dhqOzanivCPr4Nj66zFIRAdfxYeKIETeXCyAP9EFd7JKmPteKfyMON/UsK7HATKk0vh1EqyJzcRP7WFyY4S7rV/GGInbE2xVAIv7ZqJWo23lOG0r1ysWILszS7GOn6V0caPkT2sybw8qF8LTdugZQ20l5LtLCPRUUa8o5hE6yrSzYtwTi0OAFJowXHiG1uBjZOS4m/X6rOA3glIOubtvj8hLbEBjm6CYxvwmkpw2ubjtEfwmxcbyYGcRCPFVhnV7h3bSPbEVuLN6xhp+Qjc/e8Q7wHtXJuJ4SY8m2wYzLiGGkSmlmETASLAyaPZtxne+Y57k1UF+VkvgSOVSpHJZMxkm9GsWpE60YV76TdJd1bhHo9Ag6JLSknaEoxNqVmK4J3YYAEipj1WBCeeYfwNXxSYa4QA0XIHp2UlfstCaM6xADy22gLpRAH+8U2kT1UR75rHWOdquP3FIMviAXhR9LwSjgKIwB9h/C7x7v/O2PHvMrOOnFA4Ls+mghypgJPr4dQGaN0E7evw2yugvQRayuCUkFlkpIPfNJdW54RUErx/760BzbEqK6mOS3qU4LTm4LRF8Noi0LwQjss5FEgqbMqDgHSykmzLSqbaS7nb8Qmc2/8Zkn2QiJuisr7WUwWJd9bcUgasXU6qyKARnValfMdrkFnYMFojnU4jraLWJHuqH5K38O/9IdG+v028qQhOSVrLzKqAxu2g9KSmSpymciPlpe2NVDcAeYbxF0CkgY6X4mvsT5WSPbXaZJmLB/zWCDTJTy4H45NughOr8ZpXku6MMNWZS7Tvx+HO18HrM9u5+dkJ4umMqYov6yHC6HmiLb/JyPHV0LMYWjT5kgtHFkDjAlAlioPFpOvLSB8pIXN8hbH3OVYGRyugqdR2hjpkTlQMp+ZI+u3ja+DYWjhRgd9cjNu6Cqd9MW5HBNrmQdMSOFYIjZq3kaapNOrXb1tAqnMxd04UMNb1d+HG/4DoDSs2BBAzy6p1G9q9V6T3wfJSM2nyV8PECgGi7dlkakmDhCaWieIlJ01KefLSZxlrryB6LA9OFUCr+KECjm6DYy/gN63DbS6DpjI4KbOq2FLTHMdePKPxP1Fh6VQxbnMxmZZSMq35OO0L8NvnQctiOKmMi+ct6bdbF+F2Rkh0LmOi9WOkz/0bmHzd1ghQqkx6ykasDUCifcT6/h13W9cy0TmPZPNyaFkCp8VgEWhZhH8qzyDUbcrHPbUQ9+RCM/egORBOlUFLwZOpOR/eieYEqgCM6tzjZdb8kt3ZnIfTIhVbGHRQBP/UAjiuUKMiGpIgCj4shrbncDqeI9a8gPGmtaS7fgzu7oLkHe1nYBas+07UOO42/UCTR3Yy0ZhYf0V8kLCwtVqB5LGXKqmPt8Dl32SyZzvjrYtJyKIwTFgIHRqbdXBiI37TGiPANEacWgUnCi3jPuv4Gx9WwKvAbS4l07aCdMcy0vKTW/OgdbH9vaObQdSkiOYi/LbnSLevINYmf+Rv41xWTed9wEV8Z8xsnaDxjpC+Cnd3M33lJ5m88Alig99Dov9lol1FjJzIIdO5AbdtB27LDvzW9fgdi6H9OWjRepBiKykkLZ5E7wQefddcNHc6Jd9GpOvkGpC6LavJtpbjti/Ha8+xdujJXDhaaemkgCSALIT2+dC6ALelkHTLh/H6fgZG3gDnnNlsJjl536TFS3M8CveGVpU+lX0upHznvkKN8VZH3Txx5oZZJ+/2fpqp9nyi7QtJN5eQPVlmwdBRbiKhSHM0yyzPh9YlVgCfyoemkrmPvfjmlPwXgbAcmlbjtZSQ6VxKunM5mbYysq1l0Cq+yIWT5ZbEJ61LoXUlbls+XncZE80bmOz6Xrj/e+B2KlZt19QYJ13FAlLjuNNncSaPQ3QPzv0vMXH+3zLe86+Y6vgcsbafJtn8z0m3fJxM1woy3REyHRGy0jDtK6BVKvUJpE55Ij0jQKSZZO/KzhRYTpXjtpTbzmkrwW0rwD+10n5/cjmcXAUGIOqkJQFp0EqMepY0yV74LEx+GZIDZKeUOvFoD0aBRGkpZnvrmXV8Asp37is0q6RBwpc+m5qaInG/iWj3D+C0VxqJnWpbbswbmTlO8xoj0WlbDm3q65XQovcSSvOtQNOYPZOAVPBGjC8AFuC15ZLpyCfZWUC6XQCRv1EZgFPAXGjvwQjmcgMoTq8g1Z7PWPsHmTr3L2F8f1CIW7V9IaI1I1pPnFbRQVMoSTVWr4FzEbyLMNIM9w7BzTfg8m+RufgPiJ17kYnezYx3bCTd+jJOy6s4LS/itm7DbduI37YaX458Rz62g4TYgNRJLeqs3MD0eitIZLIV4bWtxGtbjteabx7EPuhqa8pJKrQKeJIOS+CUACJptBq3tYxsW4mRHnrvyR4WSKQ1TikmLpNPYNY9rIQuResK8ZoWku5YQqy3itjQ98OD/2krhodhLNXaDXK3lOCoORNVC58Jos947gJMSKEdFrLWW9vw+0fM99YjvvFvHTv7+sERb7F+vvG8Wcfp2JCMFgwnPYPU/mBdhCZHwyW11gOTxozhJm8yMdzC5MUv8PDoFtzWZdBZQKYtj3RrLm5HCdmWSpInCvDalwSaejm0CiALLZnx19iHJLBIy4iK8FpFBUbKu225hvnFC9ac17EKEgXgMD7PcmM1CCDpjiIyhgfK8Zqr7LHmd2X5iBdK4FQVXrP4NA+vp5ypzud50PV9pG79PtBrsibcdErp
7rM6y3SyOkFBrqQtja9dUc1S0HG0nx3eADgdkGmD1Ammu3+HVOevkez4HIn2Hyba/kmibS8Saysn0bYMvzcHTs+DLmkbRZbmwcn5cCwHjkj654LUrbRBSwlKCXAUVj49n2xPhGRbDpm2cpyWl8k0bSPVtIx0i663AnpLwNiZYvhi/NZSQ15bMV6bfe+3hWZgqOH0d/CZAhIKNhzXdZZC53y8vgWk+0pJnP4E453K/r0KsYytjKCZI8OeWr6kPKQkqewkjjONp6WyBjBxfD+K40Uxc0yGmR9lAAdsOhMrDhfoaKJy9lA89kf4hTHnwvFREFKMHVxKuAmPe1Ib4nF2kCGtPdqDfdqVm5SeJp0cIZoeYcofNxOBYyZbz8wMmN3DmK4hdeHfMNr0UTJtimzm4bcJDCEVojHw2wrx21fhy8poywtoFbSJgvFoXmV8RxMRba7Ab60w45/pLCTVvYrk6aUkexaTOr2ITPdSnI5VZmy9lg34LeusCdW2xJjTbscSAxIDKAnW1gBIBnShQC2E5lJoroDmdbintnGvbjX3jlYy2vMhRs78E+LDvwsZ1TEYeRuAPKFzTeKjQp8qjZX2SKdc/MQUZJWuMQSJdhivgTtfxr/6a2Qv/ATZc/+QB03PM9a0jqlTFSSay0g3y4kuNSFlMTRdq/E6y030IXGqmERTKanWPLI9C/AGFpLpWoXftQE6PoDb+hLptkJSbYtwupbC6VVmYASCuRD6fWO/llrV27nQBid6l+L2rCfd9Sm48QV4cBQS9yFjCiqZ6Q8rQlwyaKGQdiHRbubSKAk8L2FLkqrqvUkPDpg3QIedtbf1qbRG3G4oGWiRsP9DZp7dmu/0gUAi1faWRV76Ssc8FkV4y0rJ8HohSJJBKSEJQbN92SiOPxqUBkrywFHdSNcsozXrPKL7cK/8IomeD5HUOLYXBmNQ8NStAY20T+uKwDKwgi3bUUyqq4Dk6RUGHMmeBaR6FpDpXozTucIAT/NzApPAkO1YOousJnHb5WMoB68YTpdCt3itFDpk2ZSYuT3a1kLXazjN20l2ljPds5oH3duZOP9ZmPg6+L08vj/II/H2De9kXphCG9peN+uS0QaJaS3JUpEBbUg/aXdl1TpqLdNNaOuvq7ao2d1quPUVvKufJzX0fxEd/D7G+j7CWM8Oome2MN27jrG29Yw2lTHZstSEX+nLh4FKaFday0Y4vRm6NuO0bTKZuYnOJWROL8KXGddeNDdqk6ottoMjqdaeCx1F0FmB172WdO9Wov0fxLn+4zD9v8A7axfOyMIxeTvSIirZM2FaUw5ADKjvQ69erfg5OEcdqzRqFQ5QqoYFR3hAwMxvZeKQmdXqOuHfBhCBRjG5YdIooU04u9Uxb0M6X1pR92h8qmEy3CLBXVOlS1eQTDC/qe2nJ0+QvfILTHa/xlR7LunORfid6rP8OZLOlXmWAxJOXTn4nctxO3NxO/Mtda00n9G5HDpWgpjemNgy11YabZPqqCDVWUqqU8DKM0I1073MaJx01xIe0VLrQ3etItOVZzQe/VXQVwI9EdLdEaY7lzLR/Tzu5Z+H+J+/O0AEjJBCgGgPakfFBVzHbHWVcFLGoTECVuMQMofpYY2N6uA8hPQgJI5A9HX86S/jTf4W0Zs/TvTqP2bq/A8x2fdJpk9vJdFdQbZD4FhrHT1FyLoK8bvKcNu3km7fQrxbEmaZ6UTaCwOAzKHVtY26z8NeZzV0VOJ1VZDpLWaqbwWT59YRv/YDMPoFSPWCmEUZJ1qVSNqYIZOoBI9vhIhhKPVByKN6L0YTQwZvw69kYs0s6Q21TQgQtSEYZrch0+ozk1EcACNYjz5jIpu5YJliYUJIYDrPmNCPcmm0Pj/JQ6a4yzSjpsCBWQKh8Uw8hFvVpPp/gWjnh4h2FpLuysE5LYZeOUdw5Btw6Xy/axm+wBEAxABOQk8CrK3IRkhbQodeJpL8xxzr/LeXmCCB07Eap6MCp7MEp6sQpyufbHcusZYc4go/ty0h2b6UlELAinLJMjGgWwYdEdxmmfMR3IEI8Z4iJrv+Jt7Fn353gMzUGnjrxlOqwOFbASTrW6TiZSFpd1UzZZ/FgEemmeeEeymo5pO83jH8dDskD8LUXnjwP+Dar+Ke/Qzprg+Sbi0j0zwfpyVCtjWC07aEbMdqMt2bSfZUkTxdjttVbBxEOiX5C3jqVuAyWiiUgrpeMV5XAU7PCtKDOUwN5jAyWMjEhe8iO/xvYbIBEhNGEIj39cyBR2Aqk5jl66GgCJk5ZPqA58OPH/Mhws42TC/zaRbjP/Ze380qHxSCT79hflzw0x3J5HvkG1ng6MZ0QmCO6TIKZHqPjtaZGipzmPacvPtnuAN/n2RrCamWZZaxThfA6aDPu/JgDmT6uLsUp6sct7PcmNq0r4Y2BWPWQnOQOd6kQMoaE8o1Gl9zKc2Knsq3CfwZ+Zptpfbc1rXQKv8kIJlSum57CX5HIZ78mI4VZsrCO6G5sgiJpgjxtgjZMxGifUsZ73wJZ+gH3h0g0haimbELNEooCbWb6Zg/kwQeBD89s/zd1uNQ7SbPTN0nHEhqP/UEJGKgTUgd1XkyVUDugXcJEi0w8jrc/E9w9V8YU8fpXkCyPUKiPUK6vYRM90bSfevJ9K3F7Q6BIXA8JWlQu1daEsA6iq0W6cjF61xKtlt+0HxSg88xPfgco70FjPZ+F6kLKox9GOIjNgYc+g1GSfhknCzxRIppVZcPlnTObi13iptnuQvSBobpW/u1AAAgAElEQVTBw0iSmFwSX+wqCqW/PhdApHmCc2bODZjecLaAIAqON5GpxwMBOl3Va6TgtUZf1T5mLC79XvYc2cu/Ded/EPrXQvcC6BDlQFe+te3nAIwQTJ7ModMFZE4X48gX7VDe3xpoXWNzAAWQpg3QpOUVa6BZ5rAm/wSMFdAus2uJpfaV0C5zW2NYZq/TVgkdG6FjA7Svx2+rxGkuI3WqhGRTEfGTinZuxu3cQra3nNRgKfGBKsb7XyN16Sdg/D+/O0A0UaS4dzijOns2VR0cDqM6Vqu8tVN2nAfEuE2Cm6S4Tdps7jhhlLhklzHZxAeu3Wdc+1QYZ1bF0hRvFnomJmCyBy7+PAy9QmZgAfFuAaSMbNdG3L5S/L58/O58mCudXgH986FP6l3Om6RMkYm6eJ0RXEXe+nLw+paT6VtJ7HQhk53rifV+DIZ+Dq5+Be6dhrG7kBizRd/MRpEJprwkDzOquWs1jPJ6JArUZ9Zx12KtoPxpaD4ZgIQH6Q91jCgAkzk3+D441jr5ykLW/Iwt56/fMBQIs3AjJNO+xWrzFVTw0wYoRvkId6lpstNv4D78Wcb7N+D0F0LfGji9Gjplii4BZXP3LA8ETC50Pz35PcvIDkQMub0r8but9raCShpB0UUBpgI6ZGYvg57n8PsieL05NkLasxB65sPpoO3VeNoxpW8x9OVCfwl+/zr8/m3Q9xp+/3dB/8dxej8DF34Xd+jLcPO/woMvkb31X0nf/EtTYhfn3nsHSAgSI/Vm/acCcyILHMFFal2p45pPuYNnViu
qHcFjHOkWVQT0Xc9YWcbq0vyC4QchJli4p+IYIknqG79AamgLsf4ckt0FuKfL8HuKoEca4OkHJjxHA8Tgc/j98w3QvM5S/I4AIB3zcZXL02s1TLYzD6e7FG9gLe7AJjI9zxNt+YTdj+LqmzB2DNyztqIHWnSl1XYm99HssiugCCTCgl0LHdQoDRjdcrTtWBvynV3uZ5Yror4K+svCR9saq2qkStbY2YoQVo8BIzxvFkCM/2PGSoWwo5CagOlrZO/tYfziP+Nezxrigwtx+iK2n00EqBDkd3QrfC9aMecx8HtW4PblkO1fhNe7BL9nCfSE11WrlKcV+KdzcXsKSfWWEOtbw1R/FRP9m5ns20ayZysp0entJHu2E+/ZQaz3RaZ7X2Gq50Okzv0d0kM/gHv5x+DmL8Dd34SRL8DEV2ByP4xfIHvnEkQvgn/Njp+CTupETRTO4vU5vBUyZMTOIv09Q2FUJTQVQps4XLoqyRdIOyP9BLBAaEolhYffbyZ55WeZPqsiYItwJSVkTyoPTCZWz6q5Ue9y/L7F+P05RlPQuxxkW0ubGCqC7lXQLSCWQPdq6KrE75FEkgRbSeJUFdnOT8LQ5+D+VyDeaje1N7UxrPYI/TOjZV3XhMiduEs2ljUVFNPOFBlPJULF6KpwKVJwVaVoRCqXak2gpI8xV5Ne0JpAlN0YT6asjpMuMUVrfG2HbPfsMAWmFa71M2Qczd/ESKfDQndX7bzWw68Qv/gjTPR+gMmuzcS7V5s5oWz/KpyBHDzzzHm2j06vhNPqrzn2vcasu5xsy3YTmfT7lsPZCJyP4A3KUY4w2RkhdSFC8sJSYhe3Ebv4aaYv/Sumr/4Hpm7+B6K3fpvYtf9C4voXyd7+U3iwEyZk+nZCesguqXVGzF4hpjq+CnVoBaFK+GjORwWFJbFMirYV7D7jQSEP64c9I0AEs3BCUVPy+kFbKcIgcLbpICfG2MQCi25GIAm26TXhTlUWETPYpEBpFqPydfhkGu/OHuKX/qlxmJNKpOyQk7jeDpak/BzJ712FGRxjaklNSzqWQKfs10roEkgEkIKABMjlMBCx1CtToxS371WcMz+Ie+XX4OHXINkO/k18Rs0KxaSXJOlmTd1ps/4o0BxZL0nW1NC1GyGEhctsETNVXFHFS81oPwqKGbAEGkmgC0ldpe8eeR2BSacKHdkkmeQkWWkJV7pNY3AP6IP4Trw7nyd1/h+QHqjC61sJfUUwUIHTU066v4jk2RxSZxeSHViO17fKAuNZwBEAhK4PQOfL+D0FeAPzcM5FSJ2PED0zj9GBFSRvvED85qeJ3/4lUvf/B9mxo/iJPvDOABfsgjd3GNwR+1wh0wcaYCaiqh0lAtktv8vUT9beQmHcyFS/j5tKxyr7E/Zx5Bsmld46yfSOf+suQi9Erf7WyIcOp1UP8jmkKozvEah4fWTUh0BkTpF5oOrrsrJkuz8we1joigZX02M4t/6EyfNVTPVG8HsXwflSAwwx+JxI4JA2kLlm7NWF0KsJSGmRSujURFJpICVzQJqrV8eEtNisrNQMb6p7Gan+IjLnN+Fc+wT+7Z/CHf7PkD4JqSugOSP5zDJJg6Wd5tlCFaruMCnlvgmfy9FPOtrfS7P2tuC2ClUn3/Jv0plmyomZIngJ3xZTNf2tPhUJWWHgylTxiEL6ISRuQ6KD2J3PE7v+A8TOvkqmrwp61tgMhe4VZl5C/ldqYBWxMwXEzq4ieXYx2QEJBYEkz7TSpHMhc43+EhhcA4MbcPrKiPYuY2KwgOjlV0ne+imI1sPEdZiSbyrhK6tDGla1q4JAhfrQc021+2wmTiodJZGeJpqaCjYLst0gDn0raQysHBf/2cqKj8xVlR41vRj25tO2spPtTHCYKhEqDdMGdm94E2pnkwFJeEJQh0gAiZJkmltEGTaAMbeoJxs5TfL6DzN1tojU4AI4vwA0WP3LoF8O91O2koS9hdBTDL35oL/NZ7n2MzMLq+/0uX7nORiYB/0LoHeFOTbVtYBk7zwy/RGcs/Pwzi3COZtHZnAj8b6Pw93fgIf/G6ZaIH0zcOS1eUvW1OD1ZDNJBejBxcxhZwVDoWohUspWfEibqD69FnBpjYpMMn1jB9lEGnWNUEmbCIFCh5q3eQDuZUi2wsguYtf+iPGhX+Th4HczMbiGeO9yXK0HklaQ+dS1yKTe+P0ryAzkkzhTSvxsMalzS3DOLLL9HfbXXNv+JXjdETi3DC5WkT2zgdHTVYye+QjZe78M6Xq7kWKgIhXpVoFvdVnMc4m5jonAKZ9Q2M9mfLPSMZNNkHaiyHQN657ZfrLWibpodldbGaUIn0o8CYA2amgrKz4DQGzKxONMbwAgtRXSrJvRoiO7tkKFEe6ZTTWN3hNIBKYA4eoPYVll/RNmB4yslYKxEbMpZOrazzBxMY/xM2LK56B/SQCOubSLoE+OpsKYMtnKLEgGnoPBSHBtgUHREIFQQFF0ZAX+wDK8C4twLi4ge24h7uCSQJLmQW8xfs8asn1byJ79CM7lz+Ld/XUY/7pNy3HugqJ2GqnZYi3U7eFnihQnbAVDL+0bU0m5X9o2IasJSwFJnR5eR+CwHWjb2ChM9cCD1+H2b5C69hmmL36IyaF1TA+VkDxbQuZMAU7vIrPKLq3VmFps1rsAzhZBr8zHMpz+YgMUZyAXd2AZDEg4LXjUP6Zv1D9PQQOLyZ6O4A5GyAxFSFwoIXrxk2Tu/DokjoP/wFTz1opeJ4UpF6qa0Yp6ahMgU+ZUyHgrxxsmnFENto8MCtRZYehbroFsrVC6BBZN+L2ZdFXp0W8FQGZpCv1+KOGsWpR61IYuD5BDZMwsHSB//y1jLR4xm904U2aDeFIqqHoPRvYyeePT3DuziszZlTCwdI60BAYWghzEnnLoqYRe2d5LrcMop3FwCd5gLt5AkSEGVsLgUhhcjndmCdmhRTiXFsOlfLhQCufLYKDISGLNEDunl5DoXs5EVyljp19kcvAfkrr6y3D/yza12u8B9xw4lyGj4toPQflRiRTE3EfgERNkpU60h7N8Ny17jEImjukXTS5pdZ9209I+h8mzkFQm9p/DzX9n0vhj/a8x1VNMtH8B6XMRGFK4VH2QB2dXwJnnyPZFSPdE8Pol2eXbCSRltu0rAFH/SnMsZ2z/mD5RvzwtDS6BcxHivRHGTkeIDr0AD38dsifBVdjcagWlnaeUca6ijYGbq02BTJhTfaF+MTRr7/JQwDwGFplockQUPAoA8tj3gVkq4ATHRd5G/s9i7/DsJ7UhGnU3eh8au0Jq8LL6y3L/W9+H0k+XD7SIjb9oNyYb01fJRy+TxpeZoH23tVdf7AqJe/uYvvxLJBXbVkfPhfqXW80gp3Qgx4R8zaCfmR8AYyXeQB7uQBHuQImlwQLcMyvxzi7EPbuQZJ9MkGI4VwEXVsOFcjhTjNujXKXlMFiC119CuqeIeHehSWNI9pfinFtL8uJ2Jm/8LcZv/xDT93+S9Oj/B9NfhMROSJ+ATD8412xCaHYEU9hPrcwlkXcrAFcbJPeTnfxjEg9+hanhzzJ57b
uZurKVyaFyEufzyJ5bhX9mFQzkQ48icuUm58wsk+4shDOFcGkV/pUcspeso5wxgQgFLpbbYEWnzivARPsG50PQTwwoaDEHOpODfz5CvC+HidOvkLn4CzDRaKteah2OapYZ5ah6AFpeoK0lFPtXHyjJUoEGmUWWFMOTESVBrKkEV6onVABv5T39Lf4LNUjY6jOT8mN5O9Aguo2QW5+21U1Ir4tC2M4Cy+z8ovAm1M7GXPi5btpcR4FRXctmz5pGE4jZuO21dJLM5D0y9w8x1vd9JAfXkRkswT2z3A7aGZlHOVYTDIpJl+KdEc0C0oDMseXQK6koiRh5pDXOKtS43IJDgAgoa9o88zvuufkGIJwXcFS1cRVRrSfpzCGl+ZUzBTBURqYnD29A70vgSjFcWW4kt8yK6b4Ik5dzGL2Sz/iVtUxce4mpqx8nevUHiV/5KWKXfxnvzn/Bu/0l3Nt/gnvrT8ne/jMyt//MhDWzw3/M9LXfYPL6zzNx7UcYu/JJRi5t4eHFlYwMqdxAhNGBCNGBiIkOcUFh1FwYkLbcAF3roXuNiQhmOpeQ6FtI9sIS3GuL8a9H4FLE9ouWLGi+qUuRw1zokwZdaISE6degj42Qmq3N1e/qx8GVVgsP6rdX2n7vk+ZaTrwvzwQI0hc/B7e/DmMXIWHNJpM5bgAi01xb1N0K9pdX1oXdnFOTnOH+KWFrLRWBJvSPQ/4OzKgQLGL1t5LhQZkzOkfFq81LR+k1l1bnhNz+1l8L7CZdOryp2a0On/23uQd7YzN3Hn4fPokBXPgAw8Qe/jfunP8h7g++SOJSMdmhYGCv5sKFIvzBfOM0Z8+tIHt+Kd65pXBGJlng0A/KtJBJtQDOzTetf24R/tml+GeXPUbezN9Lsccstt+fWYEvmvl+1nmPfW7Ps781D//8PJyh+WSHFpAdWkxmaBmZ83lkzhaTOVNBZnAdqcGNpAY3kxrcSmpgu6HkYNhuJXlmM/Gz60meW0PyfCmpoVVkhhaTvTCf7IUIzoUI3lDESGr7fIvs8+q5B6VRVoHpg2Wmb7xzi3HOL8I7vwDf9IeOXwyDmlQNyPTXYnO8NJP61j+72PbhwCIQnVEfLsYZWk7qfB6pc6U4Z8vgrLSVwsjSTCXcaf0oYxd/Bab2WG2onXy1D6MsoWCLd+tiaApAQQlpEjnRllcfTarKyVbQSK2lGR6a4WvDYI/+C3krbB99M4OcZ5wHeeyK78Mfk2TSzTy8818YufpZoldfJHrWJpxxYQWcL8Y7U0j2bKFhnNTQMpzzGkgNYuC3nBWTv090TqCcZ+xw2eJzITGxf07MPFeyjDyXPpDAyJ7LI3s23wgII2gGF4EijGd1P4vInFtC4twK4mfzyZ4rsZpUWqx/vqm1Nn7mXzI9/HVIdmH3qlG6keVPhaslLi1ANBckTaKoXbBNYCBfv5WM920OEEmSGzjJg2Qe/g7pW59lerCS6e75NtJyvgD3bBHOuRLSQ/lkLyzHHVpow8NnckB0LgfOv1+ke3nOzB7PrdW5CnfrOnOlZ3j2czJp7YSiWbszKA2x/LE+VUg4O5hDdnAhvo6/WAgX8412muqvxB/5HfyYIlZK9VAoOmXTxGUkBJFNa5/IDxE47D6Nssol+L/Vr29zgMjXUSRMdmsjjH2BzOVPm5VhSUWmhlbinCsykit7vojs0ErcC2IoSevn4OxCOCfGegYmeaZz9du6HzH6XCiYC5ozOJ7x2c/Jj5A2CEAyWIF7Lh/3/DLcoRz8oQX4Z+bBWaslvbPPkT6zgtT5CjJXXiB54zPgVQPnbA6UP24mVJ1k1gaRAh9aGiPc0MZuSRGA4/8HhHybA0TLSpQHFofMXRvWfPgfyVz4W8QHC3AuLsW5UIBzvtSQO7QKLs7DOqvz4Px8OL8ILryPJI32rHRhIcyZnuHZBUwBQNGsc4vh/AqcoWIyFwpxLy6Ciza3yrSXrB80PbiUiaEPkb3972HyEDAUVBEZA2cSJxkjGU+YZd2O6xt/QuB45HAHfu3b+g3ffH3ybQ0Q9VEyyBq3AbQHkKiD4Z8mfqGM9JX5ZC6uMoMmkLhDeXbQNHBy5gUQMef7CpBFAUDm2M4ZGCGongEgQ4H/pPki9eeF+TgXVpG5mGcBoiiY+vpqBK5F8C+tJHbhg8Su/zKMd9mJUi3ZVl6eF8XNREmnEqb2bzqbQbvnWsfbAsR6JIFtFcaDvsVa5NseIJkw3V7uiJYxpgfgweeJXqkgcT1C5tJynKFC3POleOdnA2TeI8n9zEwWMtvTtrOYc2ixBerTtu/bvetZA2AYcFgweJciuJfmmwwDo60FjqsREzb2br8Gd34fJs7a1BpZyCYY5Zg9610nY0ubellcX4ZVOF0QtoH20DlyQtT+NUCerDbVN4pwKK0lE7VFOUwh5YdfYPL6ahK3IqQvLzKawztXDAKIBtVoEJk2YsqnZepv9vHSHIvnRkbzfbPv52muJ60R9Kf69DGah3dpHt61CPGLS5m8uJXM8Ocg3mUHLVwVocltOeOhoDM8H4ZsBQyFtNTOWkH51wB5MihmfxMCRGsjTDxQc5XOBKS/xtjtSkavRshcXYB3cSWcL7STZBpEqf4LS/AvrMC/mIN/acH7RIvs719cgj8n0r0vep/uXX22CG4sw72cg2f8uQVwcQlcXoh/KULsfITJC0VMX/t7ZLQvYKzT1uCSz+hlTI0CZTYrSjV7rtgu5HoLOKQuNOAijXdIsxniW/D+29zEUjzcIe27NpVewsZVbtJeRu+s5+H1COnr8+CSIlrSHsvh0jy4PM/Yw97lVfiXF5sB1aC+L3Rpsb2HS0ufukU5YJdz3p/7vrwQ70oO8cs5pK6thGv5cKUALmqCdjnOhRyiQ2vI3v8XpB9+hezEOUgZO9gWkvAn8bLKywusgEDGWQyEk8WB5ngrOEKQqP0Wv77NAaKYuGZYlbvl4GeVyKe850OM3N3G6M0QIDKllsIFMVMErszDubyK7JU8vCuL8a/Mf59oocl9EqMJqE/bKm/Kv6JrPAvN/dmz1+YzdT1CbHgp7s0KuFJhtfKFUvwrH8e9qYrpbZAew/fs/IWiUWaxljuFrw2LvgEgdgnFjGllEfNIY4RFML7FwAgv/20NEClnVTPMKn6ueirelF0qljnF6L0PMHYrggaRSznW11CI93IE/6qK2q8kbQAiJps7kzzrud5VK4kFjqemq88CDJ37bM+tvo3fWcz0neVMX80jeqGY5MX1cOP7YOSLEDsNWWUdWjNK/qIAYUBiFnBoCfAjDTLznQ1JfqPfEVhZOk6LpRT+tcgJ2fmb336bA8TD99K4qupoikQoy1MiqZ+Je59i4lYOzjXZxEpHj1jtEURU0tdXkL6+yiTmce053he6ugD/2iLcOZLO5apowbPRHJ9ffZu6XcbEjXxGrkcYv1lK+s6/gIk3ID5uI1WBby1RpjWAqqWmajZmnLQc4zGAyDmXnSxnUl58wP86XuCYmVlXzq6WI2vlvQb8W/f69gaIOtBRRpvWWA/bNdYSKpnrTN39DNO3i3CursLY6gLIFRuPd
29ESN1YQuLmCsOc7ws4rj2Hb2gB7vUFFiRzaN8vcKjP3KsrSF5ea9Lqo3eex5n6p5B5E3zt92gDUI7WcHguGT9lSNEqJSEaVaLIVYAVtdb8CjPDNQEcHKcvtRRmJjdLK/VjM6b1tw4ez1zV5Ft5a+/h2gKIBI4q0JlU6LvBwDwgevcnid3egHe1CC4vseBQTP5GBO9WhMRwDonbS3FvLAQ58u8LzYfr8/FuiBbOoV0I156Rruse5vj8V3Nxr+yAG/8ERv4Akk1mrYakuoYl5vlmWWzKyZr14mZfu0BDSFPITJL8F+m91gLNFMkzi5ZmAWSWtlFJpYTZy16ZvZKI37rXt78Gyfi2IIJZTKNUaYFmgqm7v8T07VfIXiuxTBTM5gog7q0IyeH5hrybFjT6/BGJYcQ4Ak9Ilpm5MW/WcW85R98F5z1i+Hl4TzxH54fnBNc357/H999wXd3bbGafHwBoCVxdakkmp55p9rla+xHchwVreB2dn2OvoWMM5cD1HNRv2WtFMPxD8FD+Rp9d3aiSs6aMaYqEaoR4abP2/tEiJIFAzrnW1atVnlWoPULzSu1jDsfbahBpkb82sd5ROEj2hLULg8VVEiiph7jRL5Gd+H4SwyW4dyJwOwK3xAg5cHMZ7vB8nLsR0Hez6XYE//ZC/FvLcG8tnyH97d/KgdvPwXDE0h0da8m9GcG58RzpazmkruaQuLqI+JUFZG8vIDs8D2c4gnMngjdsSeeZ+7n5HNycPze6FcG/E8G9Z1u9N/cWXts8rwCRC0ZQlMD1fNBz6BkeO34+7u0cnGHRQtzh5+zz3iiAaysMOPxrEdwruTg38snciTA9vB7YbXaHxZ0GN2N3AJgpMiGtELzMG9lMoc7Qd/Z7e0w4saHv9X7Wef+nvXMNte266vg65+xzzj7Pe+7Nuff03pt3G1objG1ME7W9IpVQCTVSqpZUKkZMiSik2pp+CKgfzAcpVBFbNdoiFB/UQCBSIaCxRaGlEUKj1UaTe85+v9+P9V4/+c+51r773ua2+ZDmZF+yYbDW2WudNedce/zHGHOOMcfQDenqlT21pSNMVpP5e7P/eQ2Pi61BjLpo2fyFenN6twKI1yAYfgGv+wCj8gFBykAGJIe7cLhHUlw2zDVj9pTpzffFVcssxQ3C4gaRoXUiAUf/V7IMaRhMIBOVVgxDhUeb+IfbeEebuEdr+KVlw0xBeQ4gApWYV9pLx1ekZSjM0woUrqDisgG/gB6lNGN68/xlOBRAduHidRYcR1skxRU79jmA6DuBIyht4JUFEgsYLp6Bl09YgBw6hC/t4R+ewa+t06v+KPCNWVraLD3t/PE15NVjedSCA0RBbG0SJfkUQLIZn9cmGn+JsP8rDCoHeLWUoSVZ5wEiUBxtw+EpC5qjbZLChmEgaZ2M6SSZDSikPTIm1bmeV1mGSg7Ka1DKQ3ELSlvEpTxROWe0lJ5zuXRfsYx7tGo1wByDZ22a9tUHUUm0bLSeQJqRwBsW9ggLu/Z6phHNuASOdduOjuqrKAW3GY/uS7/XM/W8oLx+OUAO94y3XCBOjhyiQwmNMwT1c4waHzTlyuYBceX5sXD1a9jowgNEkaAJw0srI9IickxNvkwyfJhh5Rx+PZXyxSU4OgFHJ43Ej8s5OHwLXLzJHgsnoZg31y4zu8pLICqtXKJiDor6O2OyHJRWobgOZdEqSWWZuOIQV+fMOPWhoBCNbShsEpdXCGvWTAqrVx6XiCpLhFUdVwirK5cdo3KeqLBvSIBUW7N+q39qR6Q2pS10vWr7Y8ZeEqgtaOLSGmF5E7+Sx6+uEVbWDMiTw004WjX3xUUHX1TZJ27eRdB+xGQ3VMK7q9FryKvH8qhrACADU/4sVLmRTIOoypVqjkw/ybBy/rsBUjhBXF5CTBEfnic+vIm4cNL8PQNCYQcO9y1wdCxsg5hODJWZJjqXmXSZqSTQrBmQJBULkkQAEXOKStI8q1DYgKNdKJwkKl5HXLiOqHiSuHiCqLRDLO90OY8YWeBQfzXXENiiWnrUuUBS2jb3azy2jysWFCnzZyDIwGPGXs4ZcGZjEcAMQKpr+FVpPgFkjeRIgmDJ9F3j8CoOXv0MtD8Cg88DxauCQ6BZ9M+CA0TqYmQAIoetqvVaM0t5tP4VvN9nVLmZoGElp2XOXSjuoh87ruaIi6dJCgck5S0SmUoymaQBCqfg6ACOzkJhH4oblrkFDgMQSdVV4osOiZaOj7R8rIntBklph6SyTVzbJK6vkEiDZSSwGI0iIOVtO4XTcHTatqN2i3tQ3IGS2pQ2kgknU+6SFphpA2mfWo64sk5S3ki119y92f9Iu5W27DjLW8QpeM1zypqPbRFVdghqqwS1HFFV4F414zL3qJ26w7Tu4LbOQ+dRGP9zmsl/0WFw9f4vPEC0dq4sF0EYm4TEBiDBANxvgv8ZhtVb8RsOSU2MLem9C5LQ6Q8e1WQGrVqGETBKYsydOQYVYFKmFrPpXPMMAefoPMHhJkEhT1DcJSjvE1UPiOpvIWyeIWrv4TVX8VoOQdtS3HKImw5JMwWNeZ6kdEoFmWnSMK90FHA1n0iPpRxx3TECQMxrxqL5lsYqyjSXQCYAHp0nKZwnKe4bQOkdWNBdDSA5Qk329ZyWQ9R0GDUdJu3boPtn4B7aBGxX56+Fv7LgALGhCcqDpGTP0iIGICai97/A/3OGldssQIzUFhMKAFsWIA2HSFRPtUl5awYgM58QIPR/lzGbJvGnSApniUoHTKs7TGu7TGvXM629E7f2Ptz6B/DqP4vfuJ9h6f0My+9lXHkX49oPMandilu/Aa9+lrB+ykr8kky3V0tWc2VzHb+2wbSxjltfNeOUttSYYgkFM/fSsvSOAXRydKMBiLRmXNkgrl7SNJdpkPqy1SBVh1CTeD2n5eA3Hfoth3H73baisetjizIuPA6uOoCFB4h2nYmU7yJK4nQeotpuSjL2JOPq7YQtB2RmVVahvA2VzRlAdM1I9NoqSXUTypv2PgFDjNFO/7eYJzw8QVg4RVw5CXrZgg4AAAtoSURBVO1NktEq3bbDoJ/HG78L/Acg/AxET0P0gu1DUAXvJZh+E8ZfgcET0H2cpPVJks4DJK0fgc710DpH3NwnrO/h17bwqmu41WW8umx+B6/x3TSt5Um6dzOu30W3dJpBZQWv48DQgZEDfS3drpKUdqFyGqpnoLZPXN4lKGziyjko8NeXobpDUjtB1FgjauZIGjk7fr23ngNdh2HNodtycMf3wvg/bKzVnMviqly2wBcWHCByKcndZGM7zTZNpThX2Luv3K1fYVK7IwWImECTZwEkTyIp23TMtVAmT32JpLYC1ZRpxDiipv7OQ/mAuHQ9fuU00/oWk7ZDv+vQ7p+mN76bIHwQ+CPgqxBXLfMoM2ZGZiuEijMewvQ5m5zZ+xuYPgbex8F9CNwHYfIxkvFHiEcfJhrdz7DxfobNCwxb72XY+rHLqf4zMHiUuPfbBIMP4vVvZ9o9ybTtGLNO4Pc1udcSdG0DGinVNohKm4SFHMjUayxBfZu4sUvcXCdurUBTpPEL
aBlAlui0tnHHD8BEmeJTjb3AAPh+XV98gJgANoFEOkSe3DSpq+KzkmcZ1d5NIKmqH7y6DtUtqK0bE0TAkLQUGUbINI00R00T9h2oHED5RpLqzfiNc0xaJ+h3lul2HarNk/T6v8Zw9DmI/xH4lg2YNHn6bbIVw0RZaQOFsmq/ihIuKwI5Ua5ZBVkeXkEXbTkwlQTzXgTvf9KjzkWqN/J/4L4I4+fBfxaSv4LwEdz+Bbr1M3SqDoOqw7TmmFU8CQOjDVtL0MpBbQ2qcyCob84BRPdYs8ocew5xZ4lBbYtu++1Mp58CL417e1ODfD+MHd/1zDeo1V3l3FNu1kRhpAohVV3j+BvG/Ai7S9CUFM1DbRsa6ySadIrqW0SSrGIgMYWOAkhVK0L7UD5PVLkBt3aGcWufQfeAweAWBoMfptv7RfzpU/iT75AombRSYipUNTH1XAh8peyPbck1PyJRhgl5+udJ4eAaQLZMHdnVOFVDMqtyuq65laK/rzjOIgdUU8/klnoK3MeYdO6jU7uJTjXHpOnga5wyFWe0DNIS+jsDQiNP3Ngmaq0TtZcu3avrPYewk6PfOE2v89P43mchTOv4Lf5K7vdk4IXWIBlAxDeKH1X5MlOlT3HRRv0/z6R+D1FvGdrr1sSo70Azbxggbmsyu2fImFIZQBrSNntQvc7Y5V4jz6jt0Ovs0u/dyWT0q0STJ0imXzf17xJhMiviog2NgUK8lSAwxo083GiKH3oEQWTvU6oi5eFWOq8AVB5bKf7NMbT15ZXuX3Xm9RyVZ1ZMplFEc0cpJBNBYMKYhKgaRP8O7ufwex9l1Lwdt3OCsOsQSYvOAJKedx3Ivm+tkbS2iNt54s6y/V7XDUBWCDp5hs23Mug+SBT+A9A3+zNM+9+TxRb74jUEEC33XgmQbzNt/ARxfwU6eWhuQmPbAqTjkHQEkG2o71oTbAYQzTs2jV0uDeO39ph0rmc6uIA3fgS8v4egOB93Zwq8yKpTni5PlY5izYtk9qkYp6JOxzY1v8K2Vc5R6wjTtE55Visv3RCUKhS7IGeUi4LzsgC9YC7LoA0X97VXQgGwZhVPCRFeAO8JGD5E3LsLBmegl7NMn40xM7kEGoGklSNpbbwyQDrrBJ0NRq07mPQfI4m+ZnZwqp/XuALJsrsvJsqNBpltotEOs4HdU6BfTltEoheZNt9HPJREzENrywKktW5XZnoOSVvmRg5km18pYVs5osZJovZ7oKeUNV8A7zkIW2memjTwVB2RRSdGjxLCWKtq6o9KgGXFgurEtEzcWKLiLaaSZEzsTYi9MYk/MWSrsGpvvW/J7ItIH64G5klGZTRmjGtSH6kP1nyTSvoOeE9C52EY/DgMdu2YBRBjQq5BffWSpmivQnuDpLNB0l0xq1ZaudJ7STqb+J0dxq27mfY/C8l/ktAzuzmUSf1a/iy+BplJ3WzDjSoHmdJUEB/Rb96HPxRzbEB7DVoyr1ZIeg7RwCHpWk0ibWKo6xB3HWNSuK0bGFcvEDQ/AcOnIaxdmj+oqJFKHqkSkeFKa+jZdTUFUSqZRB8vruFTMRSmZecS7Z8XghPXFoFRIRjZW7Fr615okm9SqgagIhmKo8lIdpUSganZOGIYdEyZumy/t9EiMi81B4vK0Pw76PwOcfcCQec0QTN1LMr7LoAIBKIUIFq+pnNJgFjBsYHbyzPs3sN08NeQaGGhk9by07iv3c9CA0RAMOE+EmLSGjqa8lppYQma+NGniZO7jEc7C/kwXmzXIZSvYOLA2B4j18FzHSbuHsPBXXRbvwxodep/LcOKKWecqMYyu0bnGYlZdG73O1hO1n0Z6SGXwDQ7NzPwbD/qFUcB/pXIlAHQ8oTmX5bsttW0CfVV5L4Mk7/EG/wS7vBtuON1omkOvHW8wSphb90KjqY07J4xtYKuJubWl8JwnUnPYTB8D/CMWYyIgxrE3VQ4vAmQN+wbmAEkE+RGoqcOQwZMkj8k4l6C3qaVlNmk1F3DFUCCJZg6uAP5Ndbo925hNLmPIPhd4lCZxwvWrMmePzO89YVE9fFJUJk3mulYcNgkBjo3XbQYtQARSLxvEbt/QeB+FHf6TsajLYajJcbjNYLhBnRWQaZnW9p225il8UDaJQ+DTaaDJUbjnySOv27BqoDQWJrw+Mb/ejDlYmuQbJIoZsgY+DKAjHD5Y+B+kuGe9TDL6dVeIh6fYNhdI/KW8CcO49Eqo94dTFufhsHTELycMoCYQMtOWWNiCGkDgUNHXTiejwWIACESWNJIm6yrpm/qZ5r1JX4egi8RTn7DLFO3uuuM/FWCqQMCgzzm/TUQYCZ5GG9D6xR0zuIODphMfoFQBUfN+5apl6YsOZ7hvy6tLjRAZHWIKYz5MbNabLoY41tgyjT+PAk/TzLcTwGi5cwNgv4NdJtnGfZPMRnfSOz/FISPgvcv4CoaODVPDP8LfQKD3OFyjeuo744PHJY7sjiCOXCkczKZWppIJ5RT56Wck/JdfBvcv8V1H2Ls3s3QO4fnrs8BxCEZrRBNNmC8B63T0LqFYHAn/vS3CIKL1m+TCST9CNfw55oAiPmNMvEp71qWewmPSfBF4vhjBL2zYPwh20T1A/zunXRq9zDs30sw+TgEXwSesxNQ1S9XbXIpjxkOBAyFr6RfqtHjxkfaB5mZOrUaRWDRUvDYACSmSmKqwk7AV7HxLgT/DclTwOOMph/And5KMsgZEzToOfgDB3+cIx5uQ/skNG8j6X+I2PtT/KBpMi3JspL/xzT8JkDemG9ATJFVHJrZF2ZZNPVOyzoPv0wS/Tpe+3oSrWI194kb74DRzzHuChh/AsE/QfCSRYRQEcuZEZo0v+a5BggyVQQO1dhOwaHvdX5cH7UtzWnUqLqiRGpKhdMzWUOSNHtIGA8J/AmhOzXLyoQaR9XsJ/emjxO5H4bRLdBfwu87TAcOk5FDMNIqV564+TYYPAzBk3hB2zgtzWt+EyDH9cu/unYlMTVFNdktLFpsPIYWgQzTyAR5BpJH8btvNev5tG+G3gWIPwHTL0DUtNaTvG3hBCLtb1fohkgJ6dKJuJ4vEijm6dV19Qdzl/ohJpWWM1pEDskyCSUSswwbGLdJpKGFIUE0IIx7JhvlDPj+C2ZbANMPwehGgvEy3sRhqlU+1yHpO8Stm2D0KYifwY2qxtgMtYqctvuDGdwb46kLbmLNAyQtMGEcaRlA9JL/Dfg9wt7boasVmnfA+D7gD4BnLf9rSiFGk3POVDxSphTFVknKyqxKJ+kCXSaxj1t7aGjqg6ZGM0kuR+nLJLyUAiSe1d2QqIgZEtEhSnzj+Y+MM1UhKl8F/zfBvYPE3cL3HfzQIQwdkoEins/B+DHga0ySkpmeyRUj18yxatDXAUP/Dzulf7mPSc8LAAAAAElFTkSuQmCC +``` + +### `annotation` + +A Kubernetes annotation for the NavLink custom resource. 
+ +### `label` + +A Kubernetes label for the NavLink custom resource. + +### `sideLabel` + +Label that appears in the left navigation bar. + +### `target` + +Sets the target property of the link's anchor tag (`<a>`), which (depending on the browser) determines whether it opens in a new window or in an existing tab. + +The default value is `_self`, which opens the link on the current tab. To open the link in a new window or tab, set the target to `_blank`. + +For more information about the target property, see [this page](https://www.w3schools.com/tags/att_a_target.asp). + +### `toService` + +Has five fields that are combined to construct a URL of the following form: `https://<rancher-server>/k8s/clusters/<cluster-id>/k8s/namespace/<namespace>/service/<scheme>:<name>:<port>/proxy/<path>` + +For example, a link to a monitoring service can be set up as follows: + +- name: `rancher-monitoring-grafana` +- namespace: `cattle-monitoring-system` +- path: `proxy/?orgId=1` +- port: `"80"` +- scheme: `http` + +It is required to provide either the `toService` directive or the `toURL` directive. + +### `toUrl` + +Can be any link, including links outside of the cluster. + +It is required to provide either the `toService` directive or the `toURL` directive. + +# Link Examples + +### Example of Link with `toUrl` + +This example NavLink YAML configures a link to a Grafana dashboard: + +```yaml +apiVersion: ui.cattle.io/v1 +kind: NavLink +metadata: + name: grafana +spec: + group: "Monitoring Dashboards" + toURL: https:///api/v1/namespaces/cattle-monitoring-system/services/http:rancher-monitoring-grafana:80/proxy/?orgId=1 +``` + +Adding the above YAML results in a link to Grafana being created, as shown in the following screenshot: + +![Screenshot of Grafana Link]({{< baseurl >}}/img/rancher/example-grafana-link.png) + +### Example of Link with `toService` + +This example YAML uses `toService` for the link target: + +```yaml +apiVersion: ui.cattle.io/v1 +kind: NavLink +metadata: + annotations: + key: annotation + labels: + key: label + name: navlinkname +spec: + description: This is a description field # Optional. + group: "group1" # Optional. If not provided, the links appear standalone. + iconSrc: data:image/jpeg;base64,[icon source string is clipped for brevity] + label: This is a label # Optional. + sideLabel: A side label. # Optional. + target: _blank #Optional. _blank opens the link in a new tab or window. + toService: # toService or #toUrl needs to be provided. 
+ name: rancher-monitoring-grafana + namespace: cattle-monitoring-system + path: proxy/?orgId=1 + port: "80" + scheme: http +``` + +Adding the `toService` parameters above results in a link to Grafana being created, as shown in the following screenshot: + +![Screenshot of Grafana Link]({{< baseurl >}}/img/rancher/example-service-link.png) + diff --git a/content/rancher/v2.6/en/admin-settings/cluster-templates/_index.md b/versioned_docs/version-2.6/admin-settings/cluster-templates/cluster-templates.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/cluster-templates/_index.md rename to versioned_docs/version-2.6/admin-settings/cluster-templates/cluster-templates.md diff --git a/content/rancher/v2.6/en/admin-settings/config-private-registry/_index.md b/versioned_docs/version-2.6/admin-settings/config-private-registry/config-private-registry.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/config-private-registry/_index.md rename to versioned_docs/version-2.6/admin-settings/config-private-registry/config-private-registry.md diff --git a/content/rancher/v2.6/en/admin-settings/drivers/cluster-drivers/_index.md b/versioned_docs/version-2.6/admin-settings/drivers/cluster-drivers/cluster-drivers.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/drivers/cluster-drivers/_index.md rename to versioned_docs/version-2.6/admin-settings/drivers/cluster-drivers/cluster-drivers.md diff --git a/content/rancher/v2.6/en/admin-settings/drivers/_index.md b/versioned_docs/version-2.6/admin-settings/drivers/drivers.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/drivers/_index.md rename to versioned_docs/version-2.6/admin-settings/drivers/drivers.md diff --git a/content/rancher/v2.6/en/admin-settings/drivers/node-drivers/_index.md b/versioned_docs/version-2.6/admin-settings/drivers/node-drivers/node-drivers.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/drivers/node-drivers/_index.md rename to versioned_docs/version-2.6/admin-settings/drivers/node-drivers/node-drivers.md diff --git a/content/rancher/v2.6/en/admin-settings/k8s-metadata/_index.md b/versioned_docs/version-2.6/admin-settings/k8s-metadata/k8s-metadata.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/k8s-metadata/_index.md rename to versioned_docs/version-2.6/admin-settings/k8s-metadata/k8s-metadata.md diff --git a/content/rancher/v2.6/en/admin-settings/pod-security-policies/_index.md b/versioned_docs/version-2.6/admin-settings/pod-security-policies/pod-security-policies.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/pod-security-policies/_index.md rename to versioned_docs/version-2.6/admin-settings/pod-security-policies/pod-security-policies.md diff --git a/versioned_docs/version-2.6/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md b/versioned_docs/version-2.6/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md new file mode 100644 index 0000000000..8d1d1fb702 --- /dev/null +++ b/versioned_docs/version-2.6/admin-settings/rbac/cluster-project-roles/cluster-project-roles.md @@ -0,0 +1,225 @@ +--- +title: Cluster and Project Roles +weight: 1127 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Cluster and project roles define user authorization inside a cluster or project. + +To manage these roles, + +1. Click **☰ > Users & Authentication**. +1. 
In the left navigation bar, click **Roles** and go to the **Cluster** or **Project/Namespaces** tab. + +### Membership and Role Assignment + +The projects and clusters accessible to non-administrative users are determined by _membership_. Membership is a list of users who have access to a specific cluster or project based on the roles they were assigned in that cluster or project. Each cluster and project includes a tab that a user with the appropriate permissions can use to manage membership. + +When you create a cluster or project, Rancher automatically assigns you as the `Owner` for it. Users assigned the `Owner` role can assign other users roles in the cluster or project. + +> **Note:** Non-administrative users cannot access any existing projects/clusters by default. A user with appropriate permissions (typically the owner) must explicitly assign the project and cluster membership. + +### Cluster Roles + +_Cluster roles_ are roles that you can assign to users, granting them access to a cluster. There are two primary cluster roles: `Owner` and `Member`. + +- **Cluster Owner:** + + These users have full control over the cluster and all resources in it. + +- **Cluster Member:** + + These users can view most cluster-level resources and create new projects. + +#### Custom Cluster Roles + +Rancher lets you assign _custom cluster roles_ to a standard user instead of the typical `Owner` or `Member` roles. These roles can be either a built-in custom cluster role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a cluster. See the table below for a list of built-in custom cluster roles. + +#### Cluster Role Reference + +The following table lists each built-in custom cluster role available and whether that level of access is included in the default cluster-level permissions, `Cluster Owner` and `Cluster Member`. + +| Built-in Cluster Role | Owner | Member | +| ---------------------------------- | ------------- | --------------------------------- | +| Create Projects | ✓ | ✓ | +| Manage Cluster Backups | ✓ | | +| Manage Cluster Catalogs | ✓ | | +| Manage Cluster Members | ✓ | | +| Manage Nodes [(see table below)](#manage-nodes-permissions)| ✓ | | +| Manage Storage | ✓ | | +| View All Projects | ✓ | | +| View Cluster Catalogs | ✓ | ✓ | +| View Cluster Members | ✓ | ✓ | +| View Nodes | ✓ | ✓ | + +#### Manage Nodes Permissions + +The following table lists the permissions available for the `Manage Nodes` role in RKE and RKE2. + +| Manage Nodes Permissions | RKE | RKE2 | +|-----------------------------|-------- |--------- | +| SSH Access | ✓ | ✓ | +| Delete Nodes | ✓ | ✓ | +| Scale Clusters Up and Down | ✓ | * | +\* **In RKE2, you must have permission to edit a cluster to be able to scale clusters up and down.** +
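+Under the hood, both the built-in roles above and administrator-defined custom roles are stored as `RoleTemplate` objects in the Rancher management cluster. The following is a minimal sketch of a custom cluster role that only grants read access to nodes; it assumes the `management.cattle.io/v3` RoleTemplate schema, and the object name and rules shown here are illustrative only:
+
+```yaml
+apiVersion: management.cattle.io/v3
+kind: RoleTemplate
+metadata:
+  name: view-nodes-example        # illustrative name
+displayName: View Nodes (Example)
+context: cluster                  # surfaces the role on the cluster membership pages
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+```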
+ +For details on how each cluster role can access Kubernetes resources, you can look them up in the Rancher UI: + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Click the **Cluster** tab. +1. Click the name of an individual role. The table shows all of the operations and resources that are permitted by the role. + +> **Note:** +>When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. + +### Giving a Custom Cluster Role to a Cluster Member + +After an administrator [sets up a custom cluster role]({{<baseurl>}}/rancher/v2.6/en/admin-settings/rbac/default-custom-roles/), cluster owners and admins can then assign those roles to cluster members. + +To assign a custom role to a new cluster member, you can use the Rancher UI. To modify the permissions of an existing member, you will need to use the Rancher API view. + +To assign the role to a new cluster member, + + + + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to assign a role to a member and click **Explore**. +1. Click **RBAC > Cluster Members**. +1. Click **Add**. +1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create**. + + + + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to assign a role to a member and click **Explore**. +1. Click **Cluster > Cluster Members**. +1. Click **Add**. +1. In the **Cluster Permissions** section, choose the custom cluster role that should be assigned to the member. +1. Click **Create**. + + + + +**Result:** The member has the assigned role. + +To assign any custom role to an existing cluster member, + +1. Click **☰ > Users & Authentication**. +1. Go to the member you want to give the role to. Click **⋮ > Edit Config**. +1. If you have added custom roles, they will show in the **Custom** section. Choose the role you want to assign to the member. +1. Click **Save**. + +**Result:** The member has the assigned role. + +### Project Roles + +_Project roles_ are roles that can be used to grant users access to a project. There are three primary project roles: `Owner`, `Member`, and `Read Only`. + +- **Project Owner:** + + These users have full control over the project and all resources in it. + +- **Project Member:** + + These users can manage project-scoped resources like namespaces and workloads, but cannot manage other project members. + + >**Note:** + > + >By default, the Rancher role of `project-member` inherits from the `Kubernetes-edit` role, and the `project-owner` role inherits from the `Kubernetes-admin` role. As such, both `project-member` and `project-owner` roles will allow for namespace management, including the ability to create and delete namespaces. + +- **Read Only:** + + These users can view everything in the project but cannot create, update, or delete anything. + + >**Caveat:** + > + >Users assigned the `Owner` or `Member` role for a project automatically inherit the `namespace creation` role. However, this role is a [Kubernetes ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole), meaning its scope extends to all projects in the cluster.
Therefore, users explicitly assigned the `owner` or `member` role for a project can create namespaces in other projects they're assigned to, even with only the `Read Only` role assigned. + +#### Custom Project Roles + +Rancher lets you assign _custom project roles_ to a standard user instead of the typical `Owner`, `Member`, or `Read Only` roles. These roles can be either a built-in custom project role or one defined by a Rancher administrator. They are convenient for defining narrow or specialized access for a standard user within a project. See the table below for a list of built-in custom project roles. + +#### Project Role Reference + +The following table lists each built-in custom project role available in Rancher and whether it is also granted by the `Owner`, `Member`, or `Read Only` role. + +| Built-in Project Role | Owner | Member | Read Only | +| ---------------------------------- | ------------- | ----------------------------- | ------------- | +| Manage Project Members | ✓ | | | +| Create Namespaces | ✓ | ✓ | | +| Manage Config Maps | ✓ | ✓ | | +| Manage Ingress | ✓ | ✓ | | +| Manage Project Catalogs | ✓ | | | +| Manage Secrets | ✓ | ✓ | | +| Manage Service Accounts | ✓ | ✓ | | +| Manage Services | ✓ | ✓ | | +| Manage Volumes | ✓ | ✓ | | +| Manage Workloads | ✓ | ✓ | | +| View Secrets | ✓ | ✓ | | +| View Config Maps | ✓ | ✓ | ✓ | +| View Ingress | ✓ | ✓ | ✓ | +| View Project Members | ✓ | ✓ | ✓ | +| View Project Catalogs | ✓ | ✓ | ✓ | +| View Service Accounts | ✓ | ✓ | ✓ | +| View Services | ✓ | ✓ | ✓ | +| View Volumes | ✓ | ✓ | ✓ | +| View Workloads | ✓ | ✓ | ✓ | + +> **Notes:** +> +>- Each project role listed above, including `Owner`, `Member`, and `Read Only`, is comprised of multiple rules granting access to various resources. You can view the roles and their rules on the Global > Security > Roles page. +>- When viewing the resources associated with default roles created by Rancher, if there are multiple Kubernetes API resources on one line item, the resource will have `(Custom)` appended to it. These are not custom resources but just an indication that there are multiple Kubernetes API resources as one resource. +>- The `Manage Project Members` role allows the project owner to manage any members of the project **and** grant them any project scoped role regardless of their access to the project resources. Be cautious when assigning this role out individually. + +### Defining Custom Roles +As previously mentioned, custom roles can be defined for use at the cluster or project level. The context field defines whether the role will appear on the cluster member page, project member page, or both. + +When defining a custom role, you can grant access to specific resources or specify roles from which the custom role should inherit. A custom role can be made up of a combination of specific grants and inherited roles. All grants are additive. This means that defining a narrower grant for a specific resource **will not** override a broader grant defined in a role that the custom role is inheriting from. + +### Default Cluster and Project Roles + +By default, when a standard user creates a new cluster or project, they are automatically assigned an ownership role: either [cluster owner](#cluster-roles) or [project owner](#project-roles). However, in some organizations, these roles may overextend administrative access. In this use case, you can change the default role to something more restrictive, such as a set of individual roles or a custom role. 
+ +There are two methods for changing default cluster/project roles: + +- **Assign Custom Roles**: Create a [custom role]({{}}/rancher/v2.6/en/admin-settings/rbac/default-custom-roles) for either your [cluster](#custom-cluster-roles) or [project](#custom-project-roles), and then set the custom role as default. + +- **Assign Individual Roles**: Configure multiple [cluster](#cluster-role-reference)/[project](#project-role-reference) roles as default for assignment to the creating user. + + For example, instead of assigning a role that inherits other roles (such as `cluster owner`), you can choose a mix of individual roles (such as `manage nodes` and `manage storage`). + +>**Note:** +> +>- Although you can [lock]({{}}/rancher/v2.6/en/admin-settings/rbac/locked-roles/) a default role, the system still assigns the role to users who create a cluster/project. +>- Only users that create clusters/projects inherit their roles. Users added to the cluster/project membership afterward must be explicitly assigned their roles. + +### Configuring Default Roles for Cluster and Project Creators + +You can change the cluster or project role(s) that are automatically assigned to the creating user. + +1. In the upper left corner, click **☰ > Users & Authentication**. +1. In the left navigation bar, click **Roles**. +1. Click the **Cluster** or **Project/Namespaces** tab. +1. Find the custom or individual role that you want to use as default. Then edit the role by selecting **⋮ > Edit Config**. +1. In the **Cluster Creator Default** or **Project Creator Default** section, enable the role as the default. +1. Click **Save**. + +**Result:** The default roles are configured based on your changes. Roles assigned to cluster/project creators display a check in the **Cluster/Project Creator Default** column. + +If you want to remove a default role, edit the permission and select **No** from the default roles option. + +### Cluster Membership Revocation Behavior + +When you revoke the cluster membership for a standard user that's explicitly assigned membership to both the cluster _and_ a project within the cluster, that standard user [loses their cluster roles](#clus-roles) but [retains their project roles](#proj-roles). In other words, although you have revoked the user's permissions to access the cluster and its nodes, the standard user can still: + +- Access the projects they hold membership in. +- Exercise any [individual project roles](#project-role-reference) they are assigned. + +If you want to completely revoke a user's access within a cluster, revoke both their cluster and project memberships. 
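+To double-check that nothing was left behind after revoking access, the memberships themselves are visible as Rancher custom resources in the local cluster. The following is a minimal sketch, assuming `kubectl` access to the local cluster; the cluster ID `c-m-abc123`, project ID `p-abc12`, and user ID `u-abcde` are hypothetical placeholders.
+
+```bash
+# Cluster memberships: one binding per user/role pair, stored in the cluster ID namespace.
+kubectl get clusterroletemplatebindings.management.cattle.io -n c-m-abc123
+
+# Project memberships: one binding per user/role pair, stored in the project ID namespace.
+kubectl get projectroletemplatebindings.management.cattle.io -n p-abc12
+
+# Search all namespaces for bindings that still reference a given user ID.
+kubectl get clusterroletemplatebindings.management.cattle.io,projectroletemplatebindings.management.cattle.io \
+  -A -o yaml | grep u-abcde
+```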
diff --git a/content/rancher/v2.6/en/admin-settings/rbac/default-custom-roles/_index.md b/versioned_docs/version-2.6/admin-settings/rbac/default-custom-roles/default-custom-roles.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rbac/default-custom-roles/_index.md rename to versioned_docs/version-2.6/admin-settings/rbac/default-custom-roles/default-custom-roles.md diff --git a/content/rancher/v2.6/en/admin-settings/rbac/global-permissions/_index.md b/versioned_docs/version-2.6/admin-settings/rbac/global-permissions/global-permissions.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rbac/global-permissions/_index.md rename to versioned_docs/version-2.6/admin-settings/rbac/global-permissions/global-permissions.md diff --git a/content/rancher/v2.6/en/admin-settings/rbac/locked-roles/_index.md b/versioned_docs/version-2.6/admin-settings/rbac/locked-roles/locked-roles.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rbac/locked-roles/_index.md rename to versioned_docs/version-2.6/admin-settings/rbac/locked-roles/locked-roles.md diff --git a/content/rancher/v2.6/en/admin-settings/rbac/_index.md b/versioned_docs/version-2.6/admin-settings/rbac/rbac.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rbac/_index.md rename to versioned_docs/version-2.6/admin-settings/rbac/rbac.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/applying-templates/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/applying-templates/applying-templates.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/applying-templates/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/applying-templates/applying-templates.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/creating-and-revising/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/creating-and-revising/creating-and-revising.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/creating-and-revising/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/creating-and-revising/creating-and-revising.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/creator-permissions/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/creator-permissions/creator-permissions.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/creator-permissions/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/creator-permissions/creator-permissions.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/enforcement/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/enforcement/enforcement.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/enforcement/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/enforcement/enforcement.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/example-scenarios/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/example-scenarios/example-scenarios.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/example-scenarios/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/example-scenarios/example-scenarios.md diff --git a/versioned_docs/version-2.6/admin-settings/rke-templates/example-yaml/example-yaml.md 
b/versioned_docs/version-2.6/admin-settings/rke-templates/example-yaml/example-yaml.md new file mode 100644 index 0000000000..3c85e86d61 --- /dev/null +++ b/versioned_docs/version-2.6/admin-settings/rke-templates/example-yaml/example-yaml.md @@ -0,0 +1,112 @@ +--- +title: Example YAML +weight: 60 +--- + +Below is an example RKE template configuration file for reference. + +The YAML in the RKE template uses the same customization that is used when you create an RKE cluster. However, since the YAML is within the context of a Rancher provisioned RKE cluster, the customization from the RKE docs needs to be nested under the `rancher_kubernetes_engine` directive. + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker + +enable_cluster_alerting: false +# This setting is not enforced. Clusters +# created with this sample template +# would have alerting turned off by default, +# but end users could still turn alerting +# on or off. + +enable_cluster_monitoring: true +# This setting is not enforced. Clusters +# created with this sample template +# would have monitoring turned on +# by default, but end users could still +# turn monitoring on or off. + +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` \ No newline at end of file diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/overrides/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/overrides/overrides.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/overrides/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/overrides/overrides.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md similarity index 100% rename from 
content/rancher/v2.6/en/admin-settings/rke-templates/rke-templates-and-hardware/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/rke-templates-and-hardware/rke-templates-and-hardware.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/rke-templates.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/rke-templates.md diff --git a/content/rancher/v2.6/en/admin-settings/rke-templates/template-access-and-sharing/_index.md b/versioned_docs/version-2.6/admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md similarity index 100% rename from content/rancher/v2.6/en/admin-settings/rke-templates/template-access-and-sharing/_index.md rename to versioned_docs/version-2.6/admin-settings/rke-templates/template-access-and-sharing/template-access-and-sharing.md diff --git a/content/rancher/v2.6/en/api/api-tokens/_index.md b/versioned_docs/version-2.6/api/api-tokens/api-tokens.md similarity index 100% rename from content/rancher/v2.6/en/api/api-tokens/_index.md rename to versioned_docs/version-2.6/api/api-tokens/api-tokens.md diff --git a/versioned_docs/version-2.6/api/api.md b/versioned_docs/version-2.6/api/api.md new file mode 100644 index 0000000000..746895d3d0 --- /dev/null +++ b/versioned_docs/version-2.6/api/api.md @@ -0,0 +1,84 @@ +--- +title: API +weight: 24 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## How to use the API + +The API has its own user interface accessible from a web browser. This is an easy way to see resources, perform actions, and see the equivalent cURL or HTTP request & response. To access it: + + + + +1. Click on your user avatar in the upper right corner. +1. Click **Account & API Keys**. +1. Under the **API Keys** section, find the **API Endpoint** field and click the link. The link will look something like `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. + + + + +Go to the URL endpoint at `https:///v3`, where `` is the fully qualified domain name of your Rancher deployment. + + + + +## Authentication + +API requests must include authentication information. Authentication is done with HTTP basic authentication using [API Keys]({{}}/rancher/v2.6/en/user-settings/api-keys/). API keys can create new clusters and have access to multiple clusters via `/v3/clusters/`. [Cluster and project roles]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/) apply to these keys and restrict what clusters and projects the account can see and what actions they can take. + +By default, some cluster-level API tokens are generated with infinite time-to-live (`ttl=0`). In other words, API tokens with `ttl=0` never expire unless you invalidate them. For details on how to invalidate them, refer to the [API tokens page]({{}}/rancher/v2.6/en/api/api-tokens). + +## Making requests + +The API is generally RESTful but has several features to make the definition of everything discoverable by a client so that generic clients can be written instead of having to write specific code for every type of resource. For detailed info about the generic API spec, [see here](https://github.com/rancher/api-spec/blob/master/specification.md). 
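+Before walking through the details, here is a minimal sketch of an authenticated request made with cURL. It assumes an API key created under **Account & API Keys**; `rancher.example.com`, `token-abcde`, `<secret-key>`, and `<cluster-id>` are hypothetical placeholders.
+
+```bash
+# An API key is used as an HTTP basic auth credential (access key : secret key).
+RANCHER_URL="https://rancher.example.com"
+RANCHER_TOKEN="token-abcde:<secret-key>"
+
+# List the clusters visible to this API key.
+curl -s -u "$RANCHER_TOKEN" "$RANCHER_URL/v3/clusters"
+
+# Fetch a single resource, then follow the URLs in its "links" map rather than
+# constructing deeper paths by hand.
+curl -s -u "$RANCHER_TOKEN" "$RANCHER_URL/v3/clusters/<cluster-id>"
+```
+
+The points below describe the structure these requests rely on.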
+ +- Every type has a Schema which describes: + - The URL to get to the collection of this type of resources + - Every field the resource can have, along with their type, basic validation rules, whether they are required or optional, etc. + - Every action that is possible on this type of resource, with their inputs and outputs (also as schemas). + - Every field that filtering is allowed on + - What HTTP verb methods are available for the collection itself, or for individual resources in the collection. + + +- So the theory is that you can load just the list of schemas and know everything about the API. This is in fact how the UI for the API works, it contains no code specific to Rancher itself. The URL to get Schemas is sent in every HTTP response as a `X-Api-Schemas` header. From there you can follow the `collection` link on each schema to know where to list resources, and other `links` inside of the returned resources to get any other information. + +- In practice, you will probably just want to construct URL strings. We highly suggest limiting this to the top-level to list a collection (`/v3/`) or get a specific resource (`/v3//`). Anything deeper than that is subject to change in future releases. + +- Resources have relationships between each other called links. Each resource includes a map of `links` with the name of the link and the URL to retrieve that information. Again you should `GET` the resource and then follow the URL in the `links` map, not construct these strings yourself. + +- Most resources have actions, which do something or change the state of the resource. To use these, send a HTTP `POST` to the URL in the `actions` map for the action you want. Some actions require input or produce output, see the individual documentation for each type or the schemas for specific information. + +- To edit a resource, send a HTTP `PUT` to the `links.update` link on the resource with the fields that you want to change. If the link is missing then you don't have permission to update the resource. Unknown fields and ones that are not editable are ignored. + +- To delete a resource, send a HTTP `DELETE` to the `links.remove` link on the resource. If the link is missing then you don't have permission to update the resource. + +- To create a new resource, HTTP `POST` to the collection URL in the schema (which is `/v3/`). + +## Filtering + +Most collections can be filtered on the server-side by common fields using HTTP query parameters. The `filters` map shows you what fields can be filtered on and what the filtered values were for the request you made. The API UI has controls to setup filtering and show you the appropriate request. For simple "equals" matches it's just `field=value`. Modifiers can be added to the field name, e.g. `field_gt=42` for "field is greater than 42". See the [API spec](https://github.com/rancher/api-spec/blob/master/specification.md#filtering) for full details. + +## Sorting + +Most collections can be sorted on the server-side by common fields using HTTP query parameters. The `sortLinks` map shows you what sorts are available, along with the URL to get the collection sorted by that. It also includes info about what the current response was sorted by, if specified. + +## Pagination + +API responses are paginated with a limit of 100 resources per page by default. This can be changed with the `limit` query parameter, up to a maximum of 1000, e.g. `/v3/pods?limit=1000`. 
The `pagination` map in collection responses tells you whether or not you have the full result set and has a link to the next page if you do not. + +## Capturing Rancher API Calls + +You can use browser developer tools to capture how the Rancher API is called. For example, you could follow these steps to use the Chrome developer tools to get the API call for provisioning an RKE cluster: + +1. In the Rancher UI, go to **Cluster Management** and click **Create.** +1. Click one of the cluster types. This example uses Digital Ocean. +1. Fill out the form with a cluster name and node template, but don't click **Create**. +1. You will need to open the developer tools before the cluster creation to see the API call being recorded. To open the tools, right-click on the Rancher UI and click **Inspect.** +1. In the developer tools, click the **Network** tab. +1. On the **Network** tab, make sure **Fetch/XHR** is selected. +1. In the Rancher UI, click **Create**. In the developer tools, you should see a new network request with the name `cluster?_replace=true`. +1. Right-click `cluster?_replace=true` and click **Copy > Copy as cURL.** +1. Paste the result into any text editor. You will be able to see the POST request, including the URL it was sent to, all of the headers, and the full body of the request. This command can be used to create a cluster from the command line. Note: The request should be stored in a safe place because it contains credentials. diff --git a/versioned_docs/version-2.6/backups/back-up-rancher/back-up-rancher.md b/versioned_docs/version-2.6/backups/back-up-rancher/back-up-rancher.md new file mode 100644 index 0000000000..629a7ad2c6 --- /dev/null +++ b/versioned_docs/version-2.6/backups/back-up-rancher/back-up-rancher.md @@ -0,0 +1,76 @@ +--- +title: Backing up Rancher +weight: 1 +--- + +In this section, you'll learn how to back up Rancher running on any Kubernetes cluster. To backup Rancher installed with Docker, refer the instructions for [single node backups]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. The backup and restore operations are performed only in the local Kubernetes cluster. + +Note that the rancher-backup operator version 2.x.x is for Rancher v2.6.x. + +> When restoring a backup into a new Rancher setup, the version of the new setup should be the same as the one where the backup is made. The Kubernetes version should also be considered when restoring a backup, since the supported apiVersion in the cluster and in the backup file could be different. + +### Prerequisites + +The Rancher version must be v2.5.0 and up. + +Refer [here]({{}}/rancher/v2.6/en/backups/migrating-rancher/#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. + +### 1. Install the Rancher Backups operator + +The backup storage location is an operator-level setting, so it needs to be configured when the Rancher Backups application is installed or upgraded. + +Backups are created as .tar.gz files. These files can be pushed to S3 or Minio, or they can be stored in a persistent volume. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. The `local` cluster runs the Rancher server. +1. Click **Apps & Marketplace > Charts**. +1. Click **Rancher Backups**. +1. Click **Install**. +1. 
Configure the default storage location. For help, refer to the [storage configuration section.](../configuration/storage-config) +1. Click **Install**. + +>**NOTE:** There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}rancher/v2.6/en/deploy-across-clusters/fleet/#troubleshooting) for a workaround. + +### 2. Perform a Backup + +To perform a backup, a custom resource of type Backup must be created. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. +1. In the left navigation bar, click **Rancher Backups > Backups**. +1. Click **Create**. +1. Create the Backup with the form, or with the YAML editor. +1. For configuring the Backup details using the form, click **Create** and refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) +1. For using the YAML editor, we can click **Create > Create from YAML**. Enter the Backup YAML. This example Backup custom resource would create encrypted recurring backups in S3. The app uses the `credentialSecretNamespace` value to determine where to look for the S3 backup secret: + + ```yaml + apiVersion: resources.cattle.io/v1 + kind: Backup + metadata: + name: s3-recurring-backup + spec: + storageLocation: + s3: + credentialSecretName: s3-creds + credentialSecretNamespace: default + bucketName: rancher-backups + folder: rancher + region: us-west-2 + endpoint: s3.us-west-2.amazonaws.com + resourceSetName: rancher-resource-set + encryptionConfigSecretName: encryptionconfig + schedule: "@every 1h" + retentionCount: 10 + ``` + + > **Note:** When creating the Backup resource using YAML editor, the `resourceSetName` must be set to `rancher-resource-set` + + For help configuring the Backup, refer to the [configuration reference](../configuration/backup-config) and to the [examples.](../examples/#backup) + + > **Important:** The `rancher-backup` operator doesn't save the EncryptionConfiguration file. The contents of the EncryptionConfiguration file must be saved when an encrypted backup is created, and the same file must be used when restoring from this backup. +1. Click **Create**. + +**Result:** The backup file is created in the storage location configured in the Backup custom resource. The name of this file is used when performing a restore. + diff --git a/versioned_docs/version-2.6/backups/backups.md b/versioned_docs/version-2.6/backups/backups.md new file mode 100644 index 0000000000..d9e4dd9606 --- /dev/null +++ b/versioned_docs/version-2.6/backups/backups.md @@ -0,0 +1,99 @@ +--- +title: Backups and Disaster Recovery +weight: 5 +--- + +In this section, you'll learn how to create backups of Rancher, how to restore Rancher from backup, and how to migrate Rancher to a new Kubernetes cluster. + +The `rancher-backup` operator is used to backup and restore Rancher on any Kubernetes cluster. This application is a Helm chart, and it can be deployed through the Rancher **Apps & Marketplace** page, or by using the Helm CLI. The `rancher-backup` Helm chart is [here.](https://github.com/rancher/charts/tree/release-v2.6/charts/rancher-backup) + +The backup-restore operator needs to be installed in the local cluster, and only backs up the Rancher app. 
The backup and restore operations are performed only in the local Kubernetes cluster. + +- [Backup and Restore for Rancher installed with Docker](#backup-and-restore-for-rancher-installed-with-docker) +- [How Backups and Restores Work](#how-backups-and-restores-work) +- [Installing the rancher-backup Operator](#installing-the-rancher-backup-operator) + - [Installing rancher-backup with the Rancher UI](#installing-rancher-backup-with-the-rancher-ui) + - [Installing rancher-backup with the Helm CLI](#installing-rancher-backup-with-the-helm-cli) + - [RBAC](#rbac) +- [Backing up Rancher](#backing-up-rancher) +- [Restoring Rancher](#restoring-rancher) +- [Migrating Rancher to a New Cluster](#migrating-rancher-to-a-new-cluster) +- [Default Storage Location Configuration](#default-storage-location-configuration) + - [Example values.yaml for the rancher-backup Helm Chart](#example-values-yaml-for-the-rancher-backup-helm-chart) + +# Backup and Restore for Rancher installed with Docker + +For Rancher installed with Docker, refer to [this page](./docker-installs/docker-backups) to perform backups and [this page](./docker-installs/docker-restores) to perform restores. + +# How Backups and Restores Work + +The `rancher-backup` operator introduces three custom resources: Backups, Restores, and ResourceSets. The following cluster-scoped custom resource definitions are added to the cluster: + +- `backups.resources.cattle.io` +- `resourcesets.resources.cattle.io` +- `restores.resources.cattle.io` + +The ResourceSet defines which Kubernetes resources need to be backed up. The ResourceSet is not available to be configured in the Rancher UI because the values required to back up Rancher are predefined. This ResourceSet should not be modified. + +When a Backup custom resource is created, the `rancher-backup` operator calls the `kube-apiserver` to get the resources in the ResourceSet (specifically, the predefined `rancher-resource-set`) that the Backup custom resource refers to. + +The operator then creates the backup file in the .tar.gz format and stores it in the location configured in the Backup resource. + +When a Restore custom resource is created, the operator accesses the backup .tar.gz file specified by the Restore, and restores the application from that file. + +The Backup and Restore custom resources can be created in the Rancher UI, or by using `kubectl apply`. + +>**Note:** Refer [here]({{}}/rancher/v2.6/en/backups/migrating-rancher/#2-restore-from-backup-using-a-restore-custom-resource) for help on restoring an existing backup file into a v1.22 cluster in Rancher v2.6.3. + +# Installing the rancher-backup Operator + +The `rancher-backup` operator can be installed from the Rancher UI, or with the Helm CLI. In both cases, the `rancher-backup` Helm chart is installed on the Kubernetes cluster running the Rancher server. It is a cluster-admin only feature and available only for the **local** cluster. (*If you do not see `rancher-backup` in the Rancher UI, you may have selected the wrong cluster.*) + +>**NOTE:** There is a known issue in Fleet that occurs after performing a restoration using the backup-restore-operator: Secrets used for clientSecretName and helmSecretName are not included in Fleet gitrepos. Refer [here]({{}}rancher/v2.6/en/deploy-across-clusters/fleet/#troubleshooting) for a workaround. + +### Installing rancher-backup with the Rancher UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the `local` cluster and click **Explore**. +1. 
In the left navigation bar, **Apps & Marketplace > Charts**. +1. Click **Rancher Backups**. +1. Click **Install**. +1. Optional: Configure the default storage location. For help, refer to the [configuration section.](./configuration/storage-config) +1. Click **Install**. + +**Result:** The `rancher-backup` operator is installed. + +From the **Cluster Dashboard,** you can see the `rancher-backup` operator listed under **Deployments**. + +To configure the backup app in Rancher, go to the left navigation menu and click **Rancher Backups**. + +### RBAC + +Only the rancher admins and the local cluster’s cluster-owner can: + +* Install the Chart +* See the navigation links for Backup and Restore CRDs +* Perform a backup or restore by creating a Backup CR and Restore CR respectively +* List backups/restores performed so far + +# Backing up Rancher + +A backup is performed by creating a Backup custom resource. For a tutorial, refer to [this page.](./back-up-rancher) + +# Restoring Rancher + +A restore is performed by creating a Restore custom resource. For a tutorial, refer to [this page.](./restoring-rancher) + +# Migrating Rancher to a New Cluster + +A migration is performed by following [these steps.]({{}}/rancher/v2.6/en/backups/migrating-rancher) + +# Default Storage Location Configuration + +Configure a storage location where all backups are saved by default. You will have the option to override this with each backup, but will be limited to using an S3-compatible or Minio object store. + +For information on configuring these options, refer to [this page.](./configuration/storage-config) + +### Example values.yaml for the rancher-backup Helm Chart + +The example [values.yaml file](./configuration/storage-config/#example-values-yaml-for-the-rancher-backup-helm-chart) can be used to configure the `rancher-backup` operator when the Helm CLI is used to install it. 
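+If you are going the CLI route, the following is a minimal, non-authoritative sketch of that flow. It assumes the `rancher-charts` Helm repository at `https://charts.rancher.io` and the default `cattle-resources-system` namespace; the `values.yaml` path is a placeholder for your own file.
+
+```bash
+# Add the Rancher charts repository and refresh the local index.
+helm repo add rancher-charts https://charts.rancher.io
+helm repo update
+
+# Install the CRDs first, then the operator, optionally passing a values.yaml
+# that sets the default storage location.
+helm install rancher-backup-crd rancher-charts/rancher-backup-crd \
+  -n cattle-resources-system --create-namespace
+helm install rancher-backup rancher-charts/rancher-backup \
+  -n cattle-resources-system -f values.yaml
+```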
diff --git a/content/rancher/v2.6/en/backups/configuration/backup-config/_index.md b/versioned_docs/version-2.6/backups/configuration/backup-config/backup-config.md similarity index 100% rename from content/rancher/v2.6/en/backups/configuration/backup-config/_index.md rename to versioned_docs/version-2.6/backups/configuration/backup-config/backup-config.md diff --git a/content/rancher/v2.6/en/backups/configuration/_index.md b/versioned_docs/version-2.6/backups/configuration/configuration.md similarity index 100% rename from content/rancher/v2.6/en/backups/configuration/_index.md rename to versioned_docs/version-2.6/backups/configuration/configuration.md diff --git a/content/rancher/v2.6/en/backups/configuration/restore-config/_index.md b/versioned_docs/version-2.6/backups/configuration/restore-config/restore-config.md similarity index 100% rename from content/rancher/v2.6/en/backups/configuration/restore-config/_index.md rename to versioned_docs/version-2.6/backups/configuration/restore-config/restore-config.md diff --git a/content/rancher/v2.6/en/backups/configuration/storage-config/_index.md b/versioned_docs/version-2.6/backups/configuration/storage-config/storage-config.md similarity index 100% rename from content/rancher/v2.6/en/backups/configuration/storage-config/_index.md rename to versioned_docs/version-2.6/backups/configuration/storage-config/storage-config.md diff --git a/versioned_docs/version-2.6/backups/docker-installs/docker-backups/docker-backups.md b/versioned_docs/version-2.6/backups/docker-installs/docker-backups/docker-backups.md new file mode 100644 index 0000000000..274a0ece9e --- /dev/null +++ b/versioned_docs/version-2.6/backups/docker-installs/docker-backups/docker-backups.md @@ -0,0 +1,71 @@ +--- +title: Backing up Rancher Installed with Docker +shortTitle: Backups +weight: 3 +--- + +After completing your Docker installation of Rancher, we recommend creating backups of it on a regular basis. Having a recent backup will let you recover quickly from an unexpected disaster. + +## Before You Start + +During the creation of your backup, you'll enter a series of commands, replacing placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from rancher-data- -v $PWD:/backup busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher +``` + +In this command, `` is a placeholder for the date that the data container and backup were created. `9-27-18` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the [procedure below](#creating-a-backup). + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that you're creating a backup for. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped with `docker ps -a`. Use these commands for help anytime while creating backups. + +## Creating a Backup + +This procedure creates a backup that you can restore if Rancher encounters a disaster scenario. + + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` +1.
    Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data- rancher/rancher: + ``` + +1. From the data container that you just created (rancher-data-<DATE>), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). Use the following command, replacing each placeholder: + + ``` + docker run --volumes-from rancher-data- -v $PWD:/backup:z busybox tar pzcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** A stream of commands runs on the screen. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Move your backup tarball to a safe location external to your Rancher Server. Then delete the `rancher-data-` container from your Rancher Server. + +1. Restart Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker start + ``` + +**Result:** A backup tarball of your Rancher Server data is created. See [Restoring Backups: Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-restores) if you need to restore backup data. diff --git a/content/rancher/v2.6/en/backups/docker-installs/_index.md b/versioned_docs/version-2.6/backups/docker-installs/docker-installs.md similarity index 100% rename from content/rancher/v2.6/en/backups/docker-installs/_index.md rename to versioned_docs/version-2.6/backups/docker-installs/docker-installs.md diff --git a/versioned_docs/version-2.6/backups/docker-installs/docker-restores/docker-restores.md b/versioned_docs/version-2.6/backups/docker-installs/docker-restores/docker-restores.md new file mode 100644 index 0000000000..23e002610c --- /dev/null +++ b/versioned_docs/version-2.6/backups/docker-installs/docker-restores/docker-restores.md @@ -0,0 +1,70 @@ +--- +title: Restoring Backups—Docker Installs +shortTitle: Restores +weight: 3 +--- + +If you encounter a disaster scenario, you can restore your Rancher Server to your most recent backup. + +## Before You Start + +During restore of your backup, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker run --volumes-from -v $PWD:/backup \ +busybox sh -c "rm /var/lib/rancher/* -rf && \ +tar pzxvf /backup/rancher-data-backup--" +``` + +In this command, `` and `-` are environment variables for your Rancher deployment. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version number for your Rancher backup. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Restoring Backups + +Using a [backup]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) that you created earlier, restore Rancher to its last known healthy state. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container: + + ``` + docker stop + ``` +1. Move the backup tarball that you created during completion of [Creating Backups—Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Creating Backups—Docker Installs]({{}}/rancher/v2.6/en/backups/docker-installs/docker-backups/), it will have a name similar to `rancher-data-backup--.tar.gz`. + +1. Enter the following command to delete your current state data and replace it with your backup data, replacing the placeholders. Don't forget to close the quotes. + + >**Warning!** This command deletes all current state data from your Rancher Server container. Any changes saved after your backup tarball was created will be lost. + + ``` + docker run --volumes-from -v $PWD:/backup \ + busybox sh -c "rm /var/lib/rancher/* -rf && \ + tar pzxvf /backup/rancher-data-backup--.tar.gz" + ``` + + **Step Result:** A series of commands should run. + +1. Restart your Rancher Server container, replacing the placeholder. It will restart using your backup data. + + ``` + docker start + ``` + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the restore succeeded and that your data is restored. 
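+If the UI takes a few minutes to come back, tailing the container logs is a quick way to confirm that the server restarted cleanly. This is a minimal sketch using the standard Docker CLI; replace the container-name placeholder with the value from the table above.
+
+```bash
+# Follow the Rancher Server logs and watch for the listener to come up.
+docker logs -f <RANCHER_CONTAINER_NAME>
+
+# Confirm the container is running and note how long it has been up.
+docker ps --filter "name=<RANCHER_CONTAINER_NAME>" --format "table {{.Names}}\t{{.Status}}"
+```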
diff --git a/content/rancher/v2.6/en/backups/examples/_index.md b/versioned_docs/version-2.6/backups/examples/examples.md similarity index 100% rename from content/rancher/v2.6/en/backups/examples/_index.md rename to versioned_docs/version-2.6/backups/examples/examples.md diff --git a/content/rancher/v2.6/en/backups/migrating-rancher/_index.md b/versioned_docs/version-2.6/backups/migrating-rancher/migrating-rancher.md similarity index 100% rename from content/rancher/v2.6/en/backups/migrating-rancher/_index.md rename to versioned_docs/version-2.6/backups/migrating-rancher/migrating-rancher.md diff --git a/content/rancher/v2.6/en/backups/restoring-rancher/_index.md b/versioned_docs/version-2.6/backups/restoring-rancher/restoring-rancher.md similarity index 100% rename from content/rancher/v2.6/en/backups/restoring-rancher/_index.md rename to versioned_docs/version-2.6/backups/restoring-rancher/restoring-rancher.md diff --git a/content/rancher/v2.6/en/best-practices/_index.md b/versioned_docs/version-2.6/best-practices/best-practices.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/_index.md rename to versioned_docs/version-2.6/best-practices/best-practices.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-managed/containers/_index.md b/versioned_docs/version-2.6/best-practices/rancher-managed/containers/containers.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-managed/containers/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-managed/containers/containers.md diff --git a/versioned_docs/version-2.6/best-practices/rancher-managed/logging/logging.md b/versioned_docs/version-2.6/best-practices/rancher-managed/logging/logging.md new file mode 100644 index 0000000000..42edadf1bd --- /dev/null +++ b/versioned_docs/version-2.6/best-practices/rancher-managed/logging/logging.md @@ -0,0 +1,88 @@ +--- +title: Logging Best Practices +weight: 1 +--- +In this guide, we recommend best practices for cluster-level logging and application logging. + +- [Cluster-level Logging](#cluster-level-logging) +- [Application Logging](#application-logging) +- [General Best Practices](#general-best-practices) + +Before Rancher v2.5, logging in Rancher has historically been a pretty static integration. There were a fixed list of aggregators to choose from (ElasticSearch, Splunk, Kafka, Fluentd and Syslog), and only two configuration points to choose (Cluster-level and Project-level). + +Rancher provides a flexible experience for log aggregation. With the logging feature, administrators and users alike can deploy logging that meets fine-grained collection criteria while offering a wider array of destinations and configuration options. + +"Under the hood", Rancher logging uses the Banzai Cloud logging operator. We provide manageability of this operator (and its resources), and tie that experience in with managing your Rancher clusters. + +# Cluster-level Logging + +### Cluster-wide Scraping + +For some users, it is desirable to scrape logs from every container running in the cluster. This usually coincides with your security team's request (or requirement) to collect all logs from all points of execution. + +In this scenario, it is recommended to create at least two _ClusterOutput_ objects - one for your security team (if you have that requirement), and one for yourselves, the cluster administrators. 
When creating these objects take care to choose an output endpoint that can handle the significant log traffic coming from the entire cluster. Also make sure to choose an appropriate index to receive all these logs. + +Once you have created these _ClusterOutput_ objects, create a _ClusterFlow_ to collect all the logs. Do not define any _Include_ or _Exclude_ rules on this flow. This will ensure that all logs from across the cluster are collected. If you have two _ClusterOutputs_, make sure to send logs to both of them. + +### Kubernetes Components + +_ClusterFlows_ have the ability to collect logs from all containers on all hosts in the Kubernetes cluster. This works well in cases where those containers are part of a Kubernetes pod; however, RKE containers exist outside of the scope of Kubernetes. + +Currently the logs from RKE containers are collected, but are not able to easily be filtered. This is because those logs do not contain information as to the source container (e.g. `etcd` or `kube-apiserver`). + +A future release of Rancher will include the source container name which will enable filtering of these component logs. Once that change is made, you will be able to customize a _ClusterFlow_ to retrieve **only** the Kubernetes component logs, and direct them to an appropriate output. + +# Application Logging + +Best practice not only in Kubernetes but in all container-based applications is to direct application logs to `stdout`/`stderr`. The container runtime will then trap these logs and do **something** with them - typically writing them to a file. Depending on the container runtime (and its configuration), these logs can end up in any number of locations. + +In the case of writing the logs to a file, Kubernetes helps by creating a `/var/log/containers` directory on each host. This directory symlinks the log files to their actual destination (which can differ based on configuration or container runtime). + +Rancher logging will read all log entries in `/var/log/containers`, ensuring that all log entries from all containers (assuming a default configuration) will have the opportunity to be collected and processed. + +### Specific Log Files + +Log collection only retrieves `stdout`/`stderr` logs from pods in Kubernetes. But what if we want to collect logs from other files that are generated by applications? Here, a log streaming sidecar (or two) may come in handy. + +The goal of setting up a streaming sidecar is to take log files that are written to disk, and have their contents streamed to `stdout`. This way, the Banzai Logging Operator can pick up those logs and send them to your desired output. + +To set this up, edit your workload resource (e.g. Deployment) and add the following sidecar definition: + +``` +... +containers: +- args: + - -F + - /path/to/your/log/file.log + command: + - tail + image: busybox + name: stream-log-file-[name] + volumeMounts: + - mountPath: /path/to/your/log + name: mounted-log +... +``` + +This will add a container to your workload definition that will now stream the contents of (in this example) `/path/to/your/log/file.log` to `stdout`. + +This log stream is then automatically collected according to any _Flows_ or _ClusterFlows_ you have setup. You may also wish to consider creating a _Flow_ specifically for this log file by targeting the name of the container. See example: + +``` +... +spec: + match: + - select: + container_names: + - stream-log-file-name +... +``` + + +# General Best Practices + +- Where possible, output structured log entries (e.g. 
`syslog`, JSON). This makes handling of the log entry easier as there are already parsers written for these formats. +- Try to provide the name of the application that is creating the log entry, in the entry itself. This can make troubleshooting easier as Kubernetes objects do not always carry the name of the application as the object name. For instance, a pod ID may be something like `myapp-098kjhsdf098sdf98` which does not provide much information about the application running inside the container. +- Except in the case of collecting all logs cluster-wide, try to scope your _Flow_ and _ClusterFlow_ objects tightly. This makes it easier to troubleshoot when problems arise, and also helps ensure unrelated log entries do not show up in your aggregator. An example of tight scoping would be to constrain a _Flow_ to a single _Deployment_ in a namespace, or perhaps even a single container within a _Pod_. +- Keep the log verbosity down except when troubleshooting. High log verbosity poses a number of issues, chief among them being **noise**: significant events can be drowned out in a sea of `DEBUG` messages. This is somewhat mitigated with automated alerting and scripting, but highly verbose logging still places an inordinate amount of stress on the logging infrastructure. +- Where possible, try to provide a transaction or request ID with the log entry. This can make tracing application activity across multiple log sources easier, especially when dealing with distributed applications. diff --git a/content/rancher/v2.6/en/best-practices/rancher-managed/managed-vsphere/_index.md b/versioned_docs/version-2.6/best-practices/rancher-managed/managed-vsphere/managed-vsphere.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-managed/managed-vsphere/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-managed/managed-vsphere/managed-vsphere.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-managed/monitoring/_index.md b/versioned_docs/version-2.6/best-practices/rancher-managed/monitoring/monitoring.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-managed/monitoring/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-managed/monitoring/monitoring.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-managed/_index.md b/versioned_docs/version-2.6/best-practices/rancher-managed/rancher-managed.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-managed/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-managed/rancher-managed.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-server/deployment-strategies/_index.md b/versioned_docs/version-2.6/best-practices/rancher-server/deployment-strategies/deployment-strategies.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-server/deployment-strategies/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-server/deployment-strategies/deployment-strategies.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-server/deployment-types/_index.md b/versioned_docs/version-2.6/best-practices/rancher-server/deployment-types/deployment-types.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-server/deployment-types/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-server/deployment-types/deployment-types.md diff --git 
a/content/rancher/v2.6/en/best-practices/rancher-server/rancher-in-vsphere/_index.md b/versioned_docs/version-2.6/best-practices/rancher-server/rancher-in-vsphere/rancher-in-vsphere.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-server/rancher-in-vsphere/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-server/rancher-in-vsphere/rancher-in-vsphere.md diff --git a/content/rancher/v2.6/en/best-practices/rancher-server/_index.md b/versioned_docs/version-2.6/best-practices/rancher-server/rancher-server.md similarity index 100% rename from content/rancher/v2.6/en/best-practices/rancher-server/_index.md rename to versioned_docs/version-2.6/best-practices/rancher-server/rancher-server.md diff --git a/content/rancher/v2.6/en/cis-scans/_index.md b/versioned_docs/version-2.6/cis-scans/cis-scans.md similarity index 100% rename from content/rancher/v2.6/en/cis-scans/_index.md rename to versioned_docs/version-2.6/cis-scans/cis-scans.md diff --git a/content/rancher/v2.6/en/cis-scans/configuration/_index.md b/versioned_docs/version-2.6/cis-scans/configuration/configuration.md similarity index 100% rename from content/rancher/v2.6/en/cis-scans/configuration/_index.md rename to versioned_docs/version-2.6/cis-scans/configuration/configuration.md diff --git a/versioned_docs/version-2.6/cis-scans/custom-benchmark/custom-benchmark.md b/versioned_docs/version-2.6/cis-scans/custom-benchmark/custom-benchmark.md new file mode 100644 index 0000000000..36f70ccaa5 --- /dev/null +++ b/versioned_docs/version-2.6/cis-scans/custom-benchmark/custom-benchmark.md @@ -0,0 +1,84 @@ +--- +title: Creating a Custom Benchmark Version for Running a Cluster Scan +weight: 4 +--- + +Each Benchmark Version defines a set of test configuration files that define the CIS tests to be run by the kube-bench tool. +The `rancher-cis-benchmark` application installs a few default Benchmark Versions which are listed under CIS Benchmark application menu. + +But there could be some Kubernetes cluster setups that require custom configurations of the Benchmark tests. For example, the path to the Kubernetes config files or certs might be different than the standard location where the upstream CIS Benchmarks look for them. + +It is now possible to create a custom Benchmark Version for running a cluster scan using the `rancher-cis-benchmark` application. + +When a cluster scan is run, you need to select a Profile which points to a specific Benchmark Version. + +Follow all the steps below to add a custom Benchmark Version and run a scan using it. + +1. [Prepare the Custom Benchmark Version ConfigMap](#1-prepare-the-custom-benchmark-version-configmap) +2. [Add a Custom Benchmark Version to a Cluster](#2-add-a-custom-benchmark-version-to-a-cluster) +3. [Create a New Profile for the Custom Benchmark Version](#3-create-a-new-profile-for-the-custom-benchmark-version) +4. [Run a Scan Using the Custom Benchmark Version](#4-run-a-scan-using-the-custom-benchmark-version) + +### 1. Prepare the Custom Benchmark Version ConfigMap + +To create a custom benchmark version, first you need to create a ConfigMap containing the benchmark version's config files and upload it to your Kubernetes cluster where you want to run the scan. + +To prepare a custom benchmark version ConfigMap, suppose we want to add a custom Benchmark Version named `foo`. + +1. Create a directory named `foo` and inside this directory, place all the config YAML files that the kube-bench tool looks for. 
For example, here are the config YAML files for a Generic CIS 1.5 Benchmark Version https://github.com/aquasecurity/kube-bench/tree/master/cfg/cis-1.5 +1. Place the complete `config.yaml` file, which includes all the components that should be tested. +1. Add the Benchmark version name to the `target_mapping` section of the `config.yaml`: + + ```yaml + target_mapping: + "foo": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + ``` +1. Upload this directory to your Kubernetes Cluster by creating a ConfigMap: + + ```yaml + kubectl create configmap -n foo --from-file= + ``` + +### 2. Add a Custom Benchmark Version to a Cluster + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. +1. In the left navigation bar, click **CIS Benchmark > Benchmark Version**. +1. Click **Create**. +1. Enter the **Name** and a description for your custom benchmark version. +1. Choose the cluster provider that your benchmark version applies to. +1. Choose the ConfigMap you have uploaded from the dropdown. +1. Add the minimum and maximum Kubernetes version limits applicable, if any. +1. Click **Create**. + +### 3. Create a New Profile for the Custom Benchmark Version + +To run a scan using your custom benchmark version, you need to add a new Profile pointing to this benchmark version. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. +1. In the left navigation bar, click **CIS Benchmark > Profile**. +1. Click **Create**. +1. Provide a **Name** and description. In this example, we name it `foo-profile`. +1. Choose the Benchmark Version from the dropdown. +1. Click **Create**. + +### 4. Run a Scan Using the Custom Benchmark Version + +Once the Profile pointing to your custom benchmark version `foo` has been created, you can create a new Scan to run the custom test configs in the Benchmark Version. + +To run a scan, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to add a custom benchmark and click **Explore**. +1. In the left navigation bar, click **CIS Benchmark > Scan**. +1. Click **Create**. +1. Choose the new cluster scan profile. +1. Click **Create**. + +**Result:** A report is generated with the scan results. To see the results, click the name of the scan that appears. diff --git a/versioned_docs/version-2.6/cis-scans/rbac/rbac.md b/versioned_docs/version-2.6/cis-scans/rbac/rbac.md new file mode 100644 index 0000000000..ad2b47bff2 --- /dev/null +++ b/versioned_docs/version-2.6/cis-scans/rbac/rbac.md @@ -0,0 +1,50 @@ +--- +title: Roles-based Access Control +shortTitle: RBAC +weight: 3 +--- + +This section describes the permissions required to use the rancher-cis-benchmark App. + +The rancher-cis-benchmark is a cluster-admin only feature by default. + +However, the `rancher-cis-benchmark` chart installs these two default `ClusterRoles`: + +- cis-admin +- cis-view + +In Rancher, only cluster owners and global administrators have `cis-admin` access by default. + +Note: If you were using the `cis-edit` role added in Rancher v2.5 setup, it has now been removed since +Rancher v2.5.2 because it essentially is same as `cis-admin`. 
If you happen to create any clusterrolebindings +for `cis-edit`, please update them to use `cis-admin` ClusterRole instead. + +# Cluster-Admin Access + +Rancher CIS Scans is a cluster-admin only feature by default. +This means only the Rancher global admins, and the cluster’s cluster-owner can: + +- Install/Uninstall the rancher-cis-benchmark App +- See the navigation links for CIS Benchmark CRDs - ClusterScanBenchmarks, ClusterScanProfiles, ClusterScans +- List the default ClusterScanBenchmarks and ClusterScanProfiles +- Create/Edit/Delete new ClusterScanProfiles +- Create/Edit/Delete a new ClusterScan to run the CIS scan on the cluster +- View and Download the ClusterScanReport created after the ClusterScan is complete + + +# Summary of Default Permissions for Kubernetes Default Roles + +The rancher-cis-benchmark creates three `ClusterRoles` and adds the CIS Benchmark CRD access to the following default K8s `ClusterRoles`: + +| ClusterRole created by chart | Default K8s ClusterRole | Permissions given with Role +| ------------------------------| ---------------------------| ---------------------------| +| `cis-admin` | `admin`| Ability to CRUD clusterscanbenchmarks, clusterscanprofiles, clusterscans, clusterscanreports CR +| `cis-view` | `view `| Ability to List(R) clusterscanbenchmarks, clusterscanprofiles, clusterscans, clusterscanreports CR + + +By default only cluster-owner role will have ability to manage and use `rancher-cis-benchmark` feature. + +The other Rancher roles (cluster-member, project-owner, project-member) do not have any default permissions to manage and use rancher-cis-benchmark resources. + +But if a cluster-owner wants to delegate access to other users, they can do so by creating ClusterRoleBindings between these users and the above CIS ClusterRoles manually. +There is no automatic role aggregation supported for the `rancher-cis-benchmark` ClusterRoles. diff --git a/versioned_docs/version-2.6/cis-scans/skipped-tests/skipped-tests.md b/versioned_docs/version-2.6/cis-scans/skipped-tests/skipped-tests.md new file mode 100644 index 0000000000..f2b125c026 --- /dev/null +++ b/versioned_docs/version-2.6/cis-scans/skipped-tests/skipped-tests.md @@ -0,0 +1,54 @@ +--- +title: Skipped and Not Applicable Tests +weight: 3 +--- + +This section lists the tests that are skipped in the permissive test profile for RKE. + +> All the tests that are skipped and not applicable on this page will be counted as Not Applicable in the v2.5 generated report. The skipped test count will only mention the user-defined skipped tests. This allows user-skipped tests to be distinguished from the tests that are skipped by default in the RKE permissive test profile. + +# CIS Benchmark v1.5 + +### CIS Benchmark v1.5 Skipped Tests + +| Number | Description | Reason for Skipping | +| ---------- | ------------- | --------- | +| 1.1.12 | Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) | A system service account is required for etcd data directory ownership. Refer to Rancher's hardening guide for more details on how to configure this ownership. | +| 1.2.6 | Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | +| 1.2.16 | Ensure that the admission control plugin PodSecurityPolicy is set (Automated) | Enabling Pod Security Policy can cause applications to unexpectedly fail. 
| +| 1.2.33 | Ensure that the --encryption-provider-config argument is set as appropriate (Manual) | Enabling encryption changes how data can be recovered as data is encrypted. | +| 1.2.34 | Ensure that encryption providers are appropriately configured (Manual) | Enabling encryption changes how data can be recovered as data is encrypted. | +| 4.2.6 | Ensure that the --protect-kernel-defaults argument is set to true (Automated) | System level configurations are required before provisioning the cluster in order for this argument to be set to true. | +| 4.2.10 | Ensure that the--tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) | When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. | +| 5.1.5 | Ensure that default service accounts are not actively used. (Automated) | Kubernetes provides default service accounts to be used. | +| 5.2.2 | Minimize the admission of containers wishing to share the host process ID namespace (Automated) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | +| 5.2.3 | Minimize the admission of containers wishing to share the host IPC namespace (Automated) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | +| 5.2.4 | Minimize the admission of containers wishing to share the host network namespace (Automated) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | +| 5.2.5 | Minimize the admission of containers with allowPrivilegeEscalation (Automated) | Enabling Pod Security Policy can cause applications to unexpectedly fail. | +| 5.3.2 | Ensure that all Namespaces have Network Policies defined (Automated) | Enabling Network Policies can prevent certain applications from communicating with each other. | +| 5.6.4 | The default namespace should not be used (Automated) | Kubernetes provides a default namespace. | + +### CIS Benchmark v1.5 Not Applicable Tests + +| Number | Description | Reason for being not applicable | +| ---------- | ------------- | --------- | +| 1.1.1 | Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | +| 1.1.2 | Ensure that the API server pod specification file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. | +| 1.1.3 | Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | +| 1.1.4 | Ensure that the controller manager pod specification file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | +| 1.1.5 | Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. 
| +| 1.1.6 | Ensure that the scheduler pod specification file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | +| 1.1.7 | Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | +| 1.1.8 | Ensure that the etcd pod specification file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for etcd. All configuration is passed in as arguments at container run time. | +| 1.1.13 | Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | +| 1.1.14 | Ensure that the admin.conf file ownership is set to root:root (Automated) | Clusters provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. | +| 1.1.15 | Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | +| 1.1.16 | Ensure that the scheduler.conf file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for scheduler. All configuration is passed in as arguments at container run time. | +| 1.1.17 | Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | +| 1.1.18 | Ensure that the controller-manager.conf file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn't require or maintain a configuration file for controller-manager. All configuration is passed in as arguments at container run time. | +| 1.3.6 | Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) | Clusters provisioned by RKE handles certificate rotation directly through RKE. | +| 4.1.1 | Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | +| 4.1.2 | Ensure that the kubelet service file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. All configuration is passed in as arguments at container run time. | +| 4.1.9 | Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. | +| 4.1.10 | Ensure that the kubelet configuration file ownership is set to root:root (Automated) | Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. All configuration is passed in as arguments at container run time. 
| 4.2.12 | Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) | Clusters provisioned by RKE handles certificate rotation directly through RKE. | \ No newline at end of file diff --git a/versioned_docs/version-2.6/cli/cli.md b/versioned_docs/version-2.6/cli/cli.md new file mode 100644 index 0000000000..67c688c3b8 --- /dev/null +++ b/versioned_docs/version-2.6/cli/cli.md @@ -0,0 +1,133 @@ +--- +title: CLI with Rancher +description: Interact with Rancher using command line interface (CLI) tools from your workstation. +weight: 21 +--- + +- [Rancher CLI](#rancher-cli) + - [Download Rancher CLI](#download-rancher-cli) + - [Requirements](#requirements) + - [CLI Authentication](#cli-authentication) + - [Project Selection](#project-selection) + - [Commands](#commands) + - [Rancher CLI Help](#rancher-cli-help) + - [Limitations](#limitations) +- [kubectl](#kubectl) + - [kubectl Utility](#kubectl-utility) + - [Authentication with kubectl and kubeconfig Tokens with TTL](#authentication-with-kubectl-and-kubeconfig-tokens-with-ttl) + +# Rancher CLI + +The Rancher CLI (Command Line Interface) is a unified tool that you can use to interact with Rancher. With this tool, you can operate Rancher from the command line rather than the GUI. + +### Download Rancher CLI + +The binary can be downloaded directly from the UI. The link can be found in the right-hand side of the footer in the UI. We have binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. + +1. In the upper left corner, click **☰**. +1. At the bottom, click **v2.6.x**, where **v2.6.x** is hyperlinked text indicating the installed Rancher version. +1. Under the **CLI Downloads** section, there are links to download the binaries for Windows, Mac, and Linux. You can also check the [releases page for our CLI](https://github.com/rancher/cli/releases) for direct downloads of the binary. + +### Requirements + +After you download the Rancher CLI, you need to make a few configurations. Rancher CLI requires: + +- Your Rancher Server URL, which is used to connect to Rancher Server. +- An API Bearer Token, which is used to authenticate with Rancher. For more information about obtaining a Bearer Token, see [Creating an API Key]({{}}/rancher/v2.6/en/user-settings/api-keys/). + +### CLI Authentication + +Before you can use Rancher CLI to control your Rancher Server, you must authenticate using an API Bearer Token. Log in using the following command (replace `<SERVER_URL>` and `<BEARER_TOKEN>` with your information): + +```bash +$ ./rancher login https://<SERVER_URL> --token <BEARER_TOKEN> +``` + +If Rancher Server uses a self-signed certificate, Rancher CLI prompts you to continue with the connection. + +### Project Selection + +Before you can perform any commands, you must select a Rancher project to perform those commands against. To select a [project]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/) to work on, use the command `./rancher context switch`. When you enter this command, a list of available projects displays. Enter a number to choose your project.
+ +**Example: `./rancher context switch` Output** +``` +User:rancher-cli-directory user$ ./rancher context switch +NUMBER CLUSTER NAME PROJECT ID PROJECT NAME +1 cluster-2 c-7q96s:p-h4tmb project-2 +2 cluster-2 c-7q96s:project-j6z6d Default +3 cluster-1 c-lchzv:p-xbpdt project-1 +4 cluster-1 c-lchzv:project-s2mch Default +Select a Project: +``` + +After you enter a number, the console displays a message that you've changed projects. + +``` +INFO[0005] Setting new context to project project-1 +INFO[0005] Saving config to /Users/markbishop/.rancher/cli2.json +``` + +Ensure you can run `rancher kubectl get pods` successfully. + +### Commands + +The following commands are available for use in Rancher CLI. + +| Command | Result | +|---|---| +| `apps, [app]` | Performs operations on catalog applications (i.e., individual [Helm charts](https://docs.helm.sh/developing_charts/)) or Rancher charts. | +| `catalog` | Performs operations on [catalogs]({{}}/rancher/v2.6/en/helm-charts/). | +| `clusters, [cluster]` | Performs operations on your [clusters]({{}}/rancher/v2.6/en/cluster-provisioning/). | +| `context` | Switches between Rancher [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/). For an example, see [Project Selection](#project-selection). | +| `inspect [OPTIONS] [RESOURCEID RESOURCENAME]` | Displays details about [Kubernetes resources](https://kubernetes.io/docs/reference/kubectl/cheatsheet/#resource-types) or Rancher resources (i.e.: [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/) and [workloads]({{}}/rancher/v2.6/en/k8s-in-rancher/workloads/)). Specify resources by name or ID. | +| `kubectl` |Runs [kubectl commands](https://kubernetes.io/docs/reference/kubectl/overview/#operations). | +| `login, [l]` | Logs into a Rancher Server. For an example, see [CLI Authentication](#cli-authentication). | +| `namespaces, [namespace]` |Performs operations on namespaces. | +| `nodes, [node]` |Performs operations on nodes. | +| `projects, [project]` | Performs operations on [projects]({{}}/rancher/v2.6/en/cluster-admin/projects-and-namespaces/). | +| `ps` | Displays [workloads]({{}}/rancher/v2.6/en/k8s-in-rancher/workloads) in a project. | +| `settings, [setting]` | Shows the current settings for your Rancher Server. | +| `ssh` | Connects to one of your cluster nodes using the SSH protocol. | +| `help, [h]` | Shows a list of commands or help for one command. | + + +### Rancher CLI Help + +Once logged into Rancher Server using the CLI, enter `./rancher --help` for a list of commands. + +All commands accept the `--help` flag, which documents each command's usage. + +### Limitations + +The Rancher CLI **cannot** be used to install [dashboard apps or Rancher feature charts](../helm-charts/). + +# kubectl + +Interact with Rancher using kubectl. + +### kubectl Utility + +Install the `kubectl` utility. See [install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +Configure kubectl by visiting your cluster in the Rancher Web UI, clicking on `Kubeconfig`, copying contents, and putting them into your `~/.kube/config` file. + +Run `kubectl cluster-info` or `kubectl get pods` successfully. 
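+ +For reference, the following is a minimal sketch of that setup from a workstation shell, assuming you copied the kubeconfig contents from the Rancher UI into a file; the path `~/Downloads/my-cluster.yaml` is only an illustrative placeholder: + +```bash +# Save the kubeconfig copied from the cluster's Kubeconfig view to the default location +mkdir -p ~/.kube +mv ~/Downloads/my-cluster.yaml ~/.kube/config +chmod 600 ~/.kube/config + +# Verify that kubectl can reach the cluster through Rancher +kubectl cluster-info +kubectl get pods --all-namespaces +```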
+ +### Authentication with kubectl and kubeconfig Tokens with TTL + +_Requirements_ + +If admins have [enforced TTL on kubeconfig tokens]({{}}/rancher/v2.6/en/api/api-tokens/#setting-ttl-on-kubeconfig-tokens), the kubeconfig file requires the [Rancher CLI](../cli) to be present in your PATH when you run `kubectl`. Otherwise, you’ll see an error like: +`Unable to connect to the server: getting credentials: exec: exec: "rancher": executable file not found in $PATH`. + +This feature enables kubectl to authenticate with the Rancher server and get a new kubeconfig token when required. The following auth providers are currently supported: + +1. Local +2. Active Directory (LDAP only) +3. FreeIPA +4. OpenLDAP +5. SAML providers: Ping, Okta, ADFS, Keycloak, Shibboleth + +When you first run kubectl, for example, `kubectl get pods`, it will ask you to pick an auth provider and log in with the Rancher server. +The kubeconfig token is cached in the path where you run kubectl under `./.cache/token`. This token is valid until [it expires](../api/api-tokens/#setting-ttl-on-kubeconfig-tokens-period), or [gets deleted from the Rancher server](../api/api-tokens/#deleting-tokens). +Upon expiration, the next `kubectl get pods` will ask you to log in with the Rancher server again. diff --git a/content/rancher/v2.6/en/cluster-admin/backing-up-etcd/_index.md b/versioned_docs/version-2.6/cluster-admin/backing-up-etcd/backing-up-etcd.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/backing-up-etcd/_index.md rename to versioned_docs/version-2.6/cluster-admin/backing-up-etcd/backing-up-etcd.md diff --git a/versioned_docs/version-2.6/cluster-admin/certificate-rotation/certificate-rotation.md b/versioned_docs/version-2.6/cluster-admin/certificate-rotation/certificate-rotation.md new file mode 100644 index 0000000000..e813cb5fe4 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/certificate-rotation/certificate-rotation.md @@ -0,0 +1,75 @@ +--- +title: Certificate Rotation +weight: 2040 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> **Warning:** Rotating Kubernetes certificates may result in your cluster being temporarily unavailable as components are restarted. For production environments, it's recommended to perform this action during a maintenance window. + +By default, Kubernetes clusters require certificates and Rancher launched Kubernetes clusters automatically generate certificates for the Kubernetes components. Rotating these certificates is important before the certificates expire as well as if a certificate is compromised. After the certificates are rotated, the Kubernetes components are automatically restarted. + +Certificates can be rotated for the following services: + + + + +- etcd +- kubelet (node certificate) +- kubelet (serving certificate, if [enabled]({{}}/rke/latest/en/config-options/services/#kubelet-options)) +- kube-apiserver +- kube-proxy +- kube-scheduler +- kube-controller-manager + + + + +- admin +- api-server +- controller-manager +- scheduler +- rke2-controller +- rke2-server +- cloud-controller +- etcd +- auth-proxy +- kubelet +- kube-proxy + + + + +> **Note:** For users who didn't rotate their webhook certificates, and they have expired after one year, please see this [page]({{}}/rancher/v2.6/en/troubleshooting/expired-webhook-certificates/) for help. + + +### Certificate Rotation + +Rancher launched Kubernetes clusters have the ability to rotate the auto-generated certificates through the UI. + +1. 
In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster you want to rotate certificates for and click **⋮ > Rotate Certificates**. +1. Select which certificates you want to rotate. + + * Rotate all Service certificates (keep the same CA) + * Rotate an individual service and choose one of the services from the drop-down menu + +1. Click **Save**. + +**Results:** The selected certificates will be rotated and the related services will be restarted to start using the new certificate. + +### Additional Notes + + + + +Even though the RKE CLI can use custom certificates for the Kubernetes cluster components, Rancher currently doesn't provide the ability to upload these in Rancher launched Kubernetes clusters. + + + + +In RKE2, both etcd and control plane nodes are treated as the same `server` concept. As such, rotating the certificates of services specific to either of these components will result in certificates being rotated on both. The certificates will only change for the specified service, but you will see nodes for both components go into an updating state. You may also see worker-only nodes go into an updating state. This is to restart the workers after a certificate change to ensure they get the latest client certs. + + + diff --git a/versioned_docs/version-2.6/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md b/versioned_docs/version-2.6/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md new file mode 100644 index 0000000000..72a62554f2 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/cleaning-cluster-nodes/cleaning-cluster-nodes.md @@ -0,0 +1,285 @@ +--- +title: Removing Kubernetes Components from Nodes +description: Learn about cluster cleanup when removing nodes from your Rancher-launched Kubernetes cluster. What is removed, how to do it manually +weight: 2055 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to disconnect a node from a Rancher-launched Kubernetes cluster and remove all of the Kubernetes components from the node. This process allows you to use the node for other purposes. + +When you use Rancher to install Kubernetes on new nodes in an infrastructure provider, resources (containers/virtual network interfaces) and configuration items (certificates/configuration files) are created. + +When removing nodes from your Rancher launched Kubernetes cluster (provided that they are in `Active` state), those resources are automatically cleaned up, and the only action needed is to restart the node. When a node has become unreachable and the automatic cleanup process cannot be used, this section describes the steps that need to be executed before the node can be added to a cluster again. + +## What Gets Removed? + +When cleaning nodes provisioned using Rancher, the following components are deleted based on the type of cluster node you're removing.
+ +| Removed Component | [Nodes Hosted by Infrastructure Provider][1] | [Custom Nodes][2] | [Hosted Cluster][3] | [Registered Nodes][4] | +| ------------------------------------------------------------------------------ | --------------- | ----------------- | ------------------- | ------------------- | +| The Rancher deployment namespace (`cattle-system` by default) | ✓ | ✓ | ✓ | ✓ | +| `serviceAccount`, `clusterRoles`, and `clusterRoleBindings` labeled by Rancher | ✓ | ✓ | ✓ | ✓ | +| Labels, Annotations, and Finalizers | ✓ | ✓ | ✓ | ✓ | +| Rancher Deployment | ✓ | ✓ | ✓ | | +| Machines, clusters, projects, and user custom resource definitions (CRDs) | ✓ | ✓ | ✓ | | +| All resources created under the `management.cattle.io` API Group | ✓ | ✓ | ✓ | | +| All CRDs created by Rancher v2.x | ✓ | ✓ | ✓ | | + +[1]: {{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ +[2]: {{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/ +[3]: {{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/ +[4]: {{}}/rancher/v2.6/en/cluster-provisioning/registered-clusters/ + +## Removing a Node from a Cluster by Rancher UI + +When the node is in `Active` state, removing the node from a cluster will trigger a process to clean up the node. Please restart the node after the automatic cleanup process is done to make sure any non-persistent data is properly removed. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +## Removing Rancher Components from a Cluster Manually + +When a node is unreachable, the automatic cleanup process can't be triggered when the node is removed from the cluster. Please follow the steps below to manually remove the Rancher components. + +>**Warning:** The commands listed below will remove data from the node. Make sure you have created a backup of files you want to keep before executing any of the commands as data will be lost. + +### Removing Rancher Components from Registered Clusters + +For registered clusters, the process for removing Rancher is a little different. You have the option of simply deleting the cluster in the Rancher UI, or you can run a script that removes Rancher components from the nodes. Both options make the same deletions. + +After the registered cluster is detached from Rancher, the cluster's workloads will be unaffected and you can access the cluster using the same methods that you did before the cluster was registered into Rancher. + + + + +>**Warning:** This process will remove data from your cluster. Make sure you have created a backup of files you want to keep before executing the command, as data will be lost. + +After you initiate the removal of a registered cluster using the Rancher UI (or API), the following events occur. + +1. Rancher creates a `serviceAccount` that it uses to remove the Rancher components from the cluster. This account is assigned the [clusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) and [clusterRoleBinding](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding) permissions, which are required to remove the Rancher components. + +1.
Using the `serviceAccount`, Rancher schedules and runs a [job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that cleans the Rancher components off of the cluster. This job also references the `serviceAccount` and its roles as dependencies, so the job deletes them before its completion. + +1. Rancher is removed from the cluster. However, the cluster persists, running the native version of Kubernetes. + +**Result:** All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + + +Rather than cleaning registered cluster nodes using the Rancher UI, you can run a script instead. + +>**Prerequisite:** +> +>Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). + +1. Open a web browser, navigate to [GitHub](https://github.com/rancher/rancher/blob/master/cleanup/user-cluster.sh), and download `user-cluster.sh`. + +1. Make the script executable by running the following command from the same directory as `user-cluster.sh`: + + ``` + chmod +x user-cluster.sh + ``` + +1. **Air Gap Environments Only:** Open `user-cluster.sh` and replace `yaml_url` with the URL in `user-cluster.yml`. + + If you don't have an air gap environment, skip this step. + +1. From the same directory, run the script and provide the `rancher/rancher-agent` image version which should be equal to the version of Rancher used to manage the cluster. (``): + + >**Tip:** + > + >Add the `-dry-run` flag to preview the script's outcome without making changes. + ``` + ./user-cluster.sh rancher/rancher-agent: + ``` + +**Result:** The script runs. All components listed for registered clusters in [What Gets Removed?](#what-gets-removed) are deleted. + + + + +### Windows Nodes + +To clean up a Windows node, you can run a cleanup script located in `c:\etc\rancher`. The script deletes Kubernetes generated resources and the execution binary. It also drops the firewall rules and network settings. + +To run the script, you can use this command in the PowerShell: + +``` +pushd c:\etc\rancher +.\cleanup.ps1 +popd +``` + +**Result:** The node is reset and can be re-added to a Kubernetes cluster. + +### Docker Containers, Images, and Volumes + +Based on what role you assigned to the node, there are Kubernetes components in containers, containers belonging to overlay networking, DNS, ingress controller and Rancher agent. (and pods you created that have been scheduled to this node) + +**To clean all Docker containers, images and volumes:** + +``` +docker rm -f $(docker ps -qa) +docker rmi -f $(docker images -q) +docker volume rm $(docker volume ls -q) +``` + +### Mounts + +Kubernetes components and secrets leave behind mounts on the system that need to be unmounted. + +Mounts | +--------| +`/var/lib/kubelet/pods/XXX` (miscellaneous mounts) | +`/var/lib/kubelet` | +`/var/lib/rancher` | + +**To unmount all mounts:** + +``` +for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done +``` + +### Directories and Files + +The following directories are used when adding a node to a cluster, and should be removed. You can remove a directory using `rm -rf /directory_name`. + +>**Note:** Depending on the role you assigned to the node, some of the directories will or won't be present on the node. 
+ +Directories | +--------| +`/etc/ceph` | +`/etc/cni` | +`/etc/kubernetes` | +`/opt/cni` | +`/opt/rke` | +`/run/secrets/kubernetes.io` | +`/run/calico` | +`/run/flannel` | +`/var/lib/calico` | +`/var/lib/etcd` | +`/var/lib/cni` | +`/var/lib/kubelet` | +`/var/lib/rancher/rke/log` | +`/var/log/containers` | +`/var/log/kube-audit` | +`/var/log/pods` | +`/var/run/calico` | + +**To clean the directories:** + +``` +rm -rf /etc/ceph \ + /etc/cni \ + /etc/kubernetes \ + /opt/cni \ + /opt/rke \ + /run/secrets/kubernetes.io \ + /run/calico \ + /run/flannel \ + /var/lib/calico \ + /var/lib/etcd \ + /var/lib/cni \ + /var/lib/kubelet \ + /var/lib/rancher/rke/log \ + /var/log/containers \ + /var/log/kube-audit \ + /var/log/pods \ + /var/run/calico +``` + +### Network Interfaces and Iptables + +The remaining two components that are changed/configured are (virtual) network interfaces and iptables rules. Both are non-persistent to the node, meaning that they will be cleared after a restart of the node. To remove these components, a restart is recommended. + +**To restart a node:** + +``` +# using reboot +$ sudo reboot + +# using shutdown +$ sudo shutdown -r now +``` + +If you want to know more on (virtual) network interfaces or iptables rules, please see the specific subjects below. + +### Network Interfaces + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the interfaces will or won't be present on the node. + +Interfaces | +--------| +`flannel.1` | +`cni0` | +`tunl0` | +`caliXXXXXXXXXXX` (random interface names) | +`vethXXXXXXXX` (random interface names) | + +**To list all interfaces:** + +``` +# Using ip +ip address show + +# Using ifconfig +ifconfig -a +``` + +**To remove an interface:** + +``` +ip link delete interface_name +``` + +### Iptables + +>**Note:** Depending on the network provider configured for the cluster the node was part of, some of the chains will or won't be present on the node. + +Iptables rules are used to route traffic from and to containers. The created rules are not persistent, so restarting the node will restore iptables to its original state. 
+ +Chains | +--------| +`cali-failsafe-in` | +`cali-failsafe-out` | +`cali-fip-dnat` | +`cali-fip-snat` | +`cali-from-hep-forward` | +`cali-from-host-endpoint` | +`cali-from-wl-dispatch` | +`cali-fw-caliXXXXXXXXXXX` (random chain names) | +`cali-nat-outgoing` | +`cali-pri-kns.NAMESPACE` (chain per namespace) | +`cali-pro-kns.NAMESPACE` (chain per namespace) | +`cali-to-hep-forward` | +`cali-to-host-endpoint` | +`cali-to-wl-dispatch` | +`cali-tw-caliXXXXXXXXXXX` (random chain names) | +`cali-wl-to-host` | +`KUBE-EXTERNAL-SERVICES` | +`KUBE-FIREWALL` | +`KUBE-MARK-DROP` | +`KUBE-MARK-MASQ` | +`KUBE-NODEPORTS` | +`KUBE-SEP-XXXXXXXXXXXXXXXX` (random chain names) | +`KUBE-SERVICES` | +`KUBE-SVC-XXXXXXXXXXXXXXXX` (random chain names) | + +**To list all iptables rules:** + +``` +iptables -L -t nat +iptables -L -t mangle +iptables -L +``` diff --git a/content/rancher/v2.6/en/cluster-admin/cloning-clusters/_index.md b/versioned_docs/version-2.6/cluster-admin/cloning-clusters/cloning-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cloning-clusters/_index.md rename to versioned_docs/version-2.6/cluster-admin/cloning-clusters/cloning-clusters.md diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-access/ace/_index.md b/versioned_docs/version-2.6/cluster-admin/cluster-access/ace/ace.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cluster-access/ace/_index.md rename to versioned_docs/version-2.6/cluster-admin/cluster-access/ace/ace.md diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-access/_index.md b/versioned_docs/version-2.6/cluster-admin/cluster-access/cluster-access.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cluster-access/_index.md rename to versioned_docs/version-2.6/cluster-admin/cluster-access/cluster-access.md diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-access/cluster-members/_index.md b/versioned_docs/version-2.6/cluster-admin/cluster-access/cluster-members/cluster-members.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cluster-access/cluster-members/_index.md rename to versioned_docs/version-2.6/cluster-admin/cluster-access/cluster-members/cluster-members.md diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/_index.md b/versioned_docs/version-2.6/cluster-admin/cluster-access/kubectl/kubectl.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/_index.md rename to versioned_docs/version-2.6/cluster-admin/cluster-access/kubectl/kubectl.md diff --git a/versioned_docs/version-2.6/cluster-admin/cluster-admin.md b/versioned_docs/version-2.6/cluster-admin/cluster-admin.md new file mode 100644 index 0000000000..ea79525e27 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/cluster-admin.md @@ -0,0 +1,35 @@ +--- +title: Cluster Administration +weight: 8 +--- + +After you provision a cluster in Rancher, you can begin using powerful Kubernetes features to deploy and scale your containerized applications in development, testing, or production environments. + +This page covers the following topics: + +- [Switching between clusters](#switching-between-clusters) +- [Managing clusters in Rancher](#managing-clusters-in-rancher) +- [Configuring tools](#configuring-tools) + +> This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.6/en/overview/concepts) page. 
+ +## Managing Clusters in Rancher + +After clusters have been [provisioned into Rancher]({{}}/rancher/v2.6/en/cluster-provisioning/), [cluster owners]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#cluster-roles) will need to manage these clusters. There are many different options of how to manage your cluster. + +import ClusterCapabilitiesTable from '/rancher/v2.6/en/shared-files/_cluster-capabilities-table.md'; + + + +## Configuring Tools + +Rancher contains a variety of tools that aren't included in Kubernetes to assist in your DevOps operations. Rancher can integrate with external services to help your clusters run more efficiently. Tools are divided into following categories: + +- Alerts +- Notifiers +- Logging +- Monitoring +- Istio Service Mesh +- OPA Gatekeeper + +Tools can be installed through **Apps & Marketplace.** \ No newline at end of file diff --git a/versioned_docs/version-2.6/cluster-admin/cluster-autoscaler/amazon/amazon.md b/versioned_docs/version-2.6/cluster-admin/cluster-autoscaler/amazon/amazon.md new file mode 100644 index 0000000000..717abd28a4 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/cluster-autoscaler/amazon/amazon.md @@ -0,0 +1,580 @@ +--- +title: Cluster Autoscaler with AWS EC2 Auto Scaling Groups +weight: 1 +--- + +This guide will show you how to install and use [Kubernetes cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/) on Rancher custom clusters using AWS EC2 Auto Scaling Groups. + +We are going to install a Rancher RKE custom cluster with a fixed number of nodes with the etcd and controlplane roles, and a variable nodes with the worker role, managed by `cluster-autoscaler`. + +- [Prerequisites](#prerequisites) +- [1. Create a Custom Cluster](#1-create-a-custom-cluster) +- [2. Configure the Cloud Provider](#2-configure-the-cloud-provider) +- [3. Deploy Nodes](#3-deploy-nodes) +- [4. Install cluster-autoscaler](#4-install-cluster-autoscaler) + - [Parameters](#parameters) + - [Deployment](#deployment) +- [Testing](#testing) + - [Generating Load](#generating-load) + - [Checking Scale](#checking-scale) + +# Prerequisites + +These elements are required to follow this guide: + +* The Rancher server is up and running +* You have an AWS EC2 user with proper permissions to create virtual machines, auto scaling groups, and IAM profiles and roles + +### 1. Create a Custom Cluster + +On Rancher server, we should create a custom k8s cluster v1.18.x. Be sure that cloud_provider name is set to `amazonec2`. Once cluster is created we need to get: + +* clusterID: `c-xxxxx` will be used on EC2 `kubernetes.io/cluster/` instance tag +* clusterName: will be used on EC2 `k8s.io/cluster-autoscaler/` instance tag +* nodeCommand: will be added on EC2 instance user_data to include new nodes on cluster + + ```sh + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum + ``` + +### 2. Configure the Cloud Provider + +On AWS EC2, we should create a few objects to configure our system. We've defined three distinct groups and IAM profiles to configure on AWS. + +1. Autoscaling group: Nodes that will be part of the EC2 Auto Scaling Group (ASG). The ASG will be used by `cluster-autoscaler` to scale up and down. + * IAM profile: Required by k8s nodes where cluster-autoscaler will be running. It is recommended for Kubernetes master nodes. 
This profile is called `K8sAutoscalerProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:DescribeTags", + "autoscaling:DescribeLaunchConfigurations", + "ec2:DescribeLaunchTemplateVersions" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + +2. Master group: Nodes that will be part of the Kubernetes etcd and/or control planes. This will be out of the ASG. + * IAM profile: Required by the Kubernetes cloud_provider integration. Optionally, `AWS_ACCESS_KEY` and `AWS_SECRET_KEY` can be used instead [using-aws-credentials.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#using-aws-credentials) This profile is called `K8sMasterProfile`. + + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ] + } + ] + } + ``` + + * IAM role: `K8sMasterRole: 
[K8sMasterProfile,K8sAutoscalerProfile]` + * Security group: `K8sMasterSg` More info at[RKE ports (custom nodes tab)]({{}}/rancher/v2.6/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) + * Tags: + `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add etcd+controlplane node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--etcd --controlplane" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +3. Worker group: Nodes that will be part of the k8s worker plane. Worker nodes will be scaled by cluster-autoscaler using the ASG. + * IAM profile: Provides cloud_provider worker integration. + This profile is called `K8sWorkerProfile`. 
+ + ```json + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:GetRepositoryPolicy", + "ecr:DescribeRepositories", + "ecr:ListImages", + "ecr:BatchGetImage" + ], + "Resource": "*" + } + ] + } + ``` + + * IAM role: `K8sWorkerRole: [K8sWorkerProfile]` + * Security group: `K8sWorkerSg` More info at [RKE ports (custom nodes tab)]({{}}/rancher/v2.6/en/installation/requirements/ports/#downstream-kubernetes-cluster-nodes) + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` Ubuntu 18.04(ami-0e11cbb34015ff725), installs docker and add worker node to the k8s cluster + + ```sh + #!/bin/bash -x + + cat < /etc/sysctl.d/90-kubelet.conf + vm.overcommit_memory = 1 + vm.panic_on_oom = 0 + kernel.panic = 10 + kernel.panic_on_oops = 1 + kernel.keys.root_maxkeys = 1000000 + kernel.keys.root_maxbytes = 25000000 + EOF + sysctl -p /etc/sysctl.d/90-kubelet.conf + + curl -sL https://releases.rancher.com/install-docker/19.03.sh | sh + sudo usermod -aG docker ubuntu + + TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + PRIVATE_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/local-ipv4) + PUBLIC_IP=$(curl -H "X-aws-ec2-metadata-token: ${TOKEN}" -s http://169.254.169.254/latest/meta-data/public-ipv4) + K8S_ROLES="--worker" + + sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent: --server https:// --token --ca-checksum --address ${PUBLIC_IP} --internal-address ${PRIVATE_IP} ${K8S_ROLES} + ``` + +More info is at [RKE clusters on AWS]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/) and [Cluster Autoscaler on AWS.](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) + +### 3. Deploy Nodes + +Once we've configured AWS, let's create VMs to bootstrap our cluster: + +* master (etcd+controlplane): Depending your needs, deploy three master instances with proper size. More info is at [the recommendations for production-ready clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/production/) + * IAM role: `K8sMasterRole` + * Security group: `K8sMasterSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * User data: `K8sMasterUserData` + +* worker: Define an ASG on EC2 with the following settings: + * Name: `K8sWorkerAsg` + * IAM role: `K8sWorkerRole` + * Security group: `K8sWorkerSg` + * Tags: + * `kubernetes.io/cluster/: owned` + * `k8s.io/cluster-autoscaler/: true` + * `k8s.io/cluster-autoscaler/enabled: true` + * User data: `K8sWorkerUserData` + * Instances: + * minimum: 2 + * desired: 2 + * maximum: 10 + +Once the VMs are deployed, you should have a Rancher custom cluster up and running with three master and two worker nodes. + +### 4. Install Cluster-autoscaler + +At this point, we should have rancher cluster up and running. We are going to install cluster-autoscaler on master nodes and `kube-system` namespace, following cluster-autoscaler recommendation. 
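+ +Before deploying, it can help to confirm that the master nodes carry the controlplane label and taint that the manifest below relies on for its `nodeSelector` and toleration. This is an optional sanity check, assuming the standard RKE node-role labels: + +```sh +# List the nodes that carry the controlplane role label used by the nodeSelector below +kubectl get nodes -l node-role.kubernetes.io/controlplane=true + +# Inspect their taints; the deployment tolerates node-role.kubernetes.io/controlplane=true:NoSchedule +kubectl describe nodes -l node-role.kubernetes.io/controlplane=true | grep -i taints +```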
+ +#### Parameters + +This table shows cluster-autoscaler parameters for fine tuning: + +| Parameter | Default | Description | +|---|---|---| +|cluster-name|-|Autoscaled cluster name, if available| +|address|:8085|The address to expose Prometheus metrics| +|kubernetes|-|Kubernetes master location. Leave blank for default| +|kubeconfig|-|Path to kubeconfig file with authorization and master location information| +|cloud-config|-|The path to the cloud provider configuration file. Empty string for no configuration file| +|namespace|"kube-system"|Namespace in which cluster-autoscaler run| +|scale-down-enabled|true|Should CA scale down the cluster| +|scale-down-delay-after-add|"10m"|How long after scale up that scale down evaluation resumes| +|scale-down-delay-after-delete|0|How long after node deletion that scale down evaluation resumes, defaults to scanInterval| +|scale-down-delay-after-failure|"3m"|How long after scale down failure that scale down evaluation resumes| +|scale-down-unneeded-time|"10m"|How long a node should be unneeded before it is eligible for scale down| +|scale-down-unready-time|"20m"|How long an unready node should be unneeded before it is eligible for scale down| +|scale-down-utilization-threshold|0.5|Sum of cpu or memory of all pods running on the node divided by node's corresponding allocatable resource, below which a node can be considered for scale down| +|scale-down-gpu-utilization-threshold|0.5|Sum of gpu requests of all pods running on the node divided by node's allocatable resource, below which a node can be considered for scale down| +|scale-down-non-empty-candidates-count|30|Maximum number of non empty nodes considered in one iteration as candidates for scale down with drain| +|scale-down-candidates-pool-ratio|0.1|A ratio of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|scale-down-candidates-pool-min-count|50|Minimum number of nodes that are considered as additional non empty candidates for scale down when some candidates from previous iteration are no longer valid| +|node-deletion-delay-timeout|"2m"|Maximum time CA waits for removing delay-deletion.cluster-autoscaler.kubernetes.io/ annotations before deleting the node| +|scan-interval|"10s"|How often cluster is reevaluated for scale up or down| +|max-nodes-total|0|Maximum number of nodes in all node groups. Cluster autoscaler will not grow the cluster beyond this number| +|cores-total|"0:320000"|Minimum and maximum number of cores in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +|memory-total|"0:6400000"|Minimum and maximum number of gigabytes of memory in cluster, in the format `:.` Cluster autoscaler will not scale the cluster beyond these numbers| +cloud-provider|-|Cloud provider type| +|max-bulk-soft-taint-count|10|Maximum number of nodes that can be tainted/untainted PreferNoSchedule at the same time. Set to 0 to turn off such tainting| +|max-bulk-soft-taint-time|"3s"|Maximum duration of tainting/untainting nodes as PreferNoSchedule at the same time| +|max-empty-bulk-delete|10|Maximum number of empty nodes that can be deleted at the same time| +|max-graceful-termination-sec|600|Maximum number of seconds CA waits for pod termination when trying to scale down a node| +|max-total-unready-percentage|45|Maximum percentage of unready nodes in the cluster. 
After this is exceeded, CA halts operations| +|ok-total-unready-count|3|Number of allowed unready nodes, irrespective of max-total-unready-percentage| +|scale-up-from-zero|true|Should CA scale up when there 0 ready nodes| +|max-node-provision-time|"15m"|Maximum time CA waits for node to be provisioned| +|nodes|-|sets min,max size and other configuration data for a node group in a format accepted by cloud provider. Can be used multiple times. Format: `::`| +|node-group-auto-discovery|-|One or more definition(s) of node group auto-discovery. A definition is expressed `:[[=]]`| +|estimator|-|"binpacking"|Type of resource estimator to be used in scale up. Available values: ["binpacking"]| +|expander|"random"|Type of node group expander to be used in scale up. Available values: `["random","most-pods","least-waste","price","priority"]`| +|ignore-daemonsets-utilization|false|Should CA ignore DaemonSet pods when calculating resource utilization for scaling down| +|ignore-mirror-pods-utilization|false|Should CA ignore Mirror pods when calculating resource utilization for scaling down| +|write-status-configmap|true|Should CA write status information to a configmap| +|max-inactivity|"10m"|Maximum time from last recorded autoscaler activity before automatic restart| +|max-failing-time|"15m"|Maximum time from last recorded successful autoscaler run before automatic restart| +|balance-similar-node-groups|false|Detect similar node groups and balance the number of nodes between them| +|node-autoprovisioning-enabled|false|Should CA autoprovision node groups when needed| +|max-autoprovisioned-node-group-count|15|The maximum number of autoprovisioned groups in the cluster| +|unremovable-node-recheck-timeout|"5m"|The timeout before we check again a node that couldn't be removed before| +|expendable-pods-priority-cutoff|-10|Pods with priority below cutoff will be expendable. They can be killed without any consideration during scale down and they don't cause scale up. Pods with null priority (PodPriority disabled) are non expendable| +|regional|false|Cluster is regional| +|new-pod-scale-up-delay|"0s"|Pods less than this old will not be considered for scale-up| +|ignore-taint|-|Specifies a taint to ignore in node templates when considering to scale a node group| +|balancing-ignore-label|-|Specifies a label to ignore in addition to the basic and cloud-provider set of labels when comparing if two node groups are similar| +|aws-use-static-instance-list|false|Should CA fetch instance types in runtime or use a static list. 
AWS only| +|profiling|false|Is debug/pprof endpoint enabled| + +#### Deployment + +Based on [cluster-autoscaler-run-on-master.yaml](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-run-on-master.yaml) example, we've created our own `cluster-autoscaler-deployment.yaml` to use preferred [auto-discovery setup](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup), updating tolerations, nodeSelector, image version and command config: + + +```yml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list", "get", "update"] + - apiGroups: [""] + resources: + - "pods" + - "services" + - "replicationcontrollers" + - "persistentvolumeclaims" + - "persistentvolumes" + verbs: ["watch", "list", "get"] + - apiGroups: ["extensions"] + resources: ["replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch", "list"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resourceNames: ["cluster-autoscaler"] + resources: ["leases"] + verbs: ["get", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create","list","watch"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] + verbs: ["delete", "get", "update", "watch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler 
+subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + app: cluster-autoscaler +spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler + template: + metadata: + labels: + app: cluster-autoscaler + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '8085' + spec: + serviceAccountName: cluster-autoscaler + tolerations: + - effect: NoSchedule + operator: "Equal" + value: "true" + key: node-role.kubernetes.io/controlplane + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + containers: + - image: eu.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.18.1 + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/ + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs/ca-certificates.crt + readOnly: true + imagePullPolicy: "Always" + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" + +``` + +Once the manifest file is prepared, deploy it in the Kubernetes cluster (Rancher UI can be used instead): + +```sh +kubectl -n kube-system apply -f cluster-autoscaler-deployment.yaml +``` + +**Note:** Cluster-autoscaler deployment can also be set up using [manual configuration](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#manual-configuration) + +# Testing + +At this point, we should have a cluster-scaler up and running in our Rancher custom cluster. Cluster-scale should manage `K8sWorkerAsg` ASG to scale up and down between 2 and 10 nodes, when one of the following conditions is true: + +* There are pods that failed to run in the cluster due to insufficient resources. In this case, the cluster is scaled up. +* There are nodes in the cluster that have been underutilized for an extended period of time and their pods can be placed on other existing nodes. In this case, the cluster is scaled down. + +### Generating Load + +We've prepared a `test-deployment.yaml` just to generate load on the Kubernetes cluster and see if cluster-autoscaler is working properly. The test deployment is requesting 1000m CPU and 1024Mi memory by three replicas. 
Adjust the requested resources and/or replica to be sure you exhaust the Kubernetes cluster resources: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: hello-world + name: hello-world +spec: + replicas: 3 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + ports: + - containerPort: 80 + protocol: TCP + resources: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi +``` + +Once the test deployment is prepared, deploy it in the Kubernetes cluster default namespace (Rancher UI can be used instead): + +``` +kubectl -n default apply -f test-deployment.yaml +``` + +### Checking Scale + +Once the Kubernetes resources got exhausted, cluster-autoscaler should scale up worker nodes where pods failed to be scheduled. It should scale up until up until all pods became scheduled. You should see the new nodes on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. + +Once scale up is checked, let check for scale down. To do it, reduce the replica number on the test deployment until you release enough Kubernetes cluster resources to scale down. You should see nodes disappear on the ASG and on the Kubernetes cluster. Check the logs on the `kube-system` cluster-autoscaler pod. diff --git a/content/rancher/v2.6/en/cluster-admin/cluster-autoscaler/_index.md b/versioned_docs/version-2.6/cluster-admin/cluster-autoscaler/cluster-autoscaler.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/cluster-autoscaler/_index.md rename to versioned_docs/version-2.6/cluster-admin/cluster-autoscaler/cluster-autoscaler.md diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/aks-config-reference/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/aks-config-reference/aks-config-reference.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/aks-config-reference/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/aks-config-reference/aks-config-reference.md diff --git a/versioned_docs/version-2.6/cluster-admin/editing-clusters/editing-clusters.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/editing-clusters.md new file mode 100644 index 0000000000..b6baba3117 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/editing-clusters/editing-clusters.md @@ -0,0 +1,30 @@ +--- +title: Cluster Configuration +weight: 2025 +--- + +After you provision a Kubernetes cluster using Rancher, you can still edit options and settings for the cluster. 
+ +For information on editing cluster membership, go to [this page.]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/cluster-members) + +### Cluster Configuration References + +The cluster configuration options depend on the type of Kubernetes cluster: + +- [RKE Cluster Configuration](./rke-config-reference) +- [RKE2 Cluster Configuration](./rke2-config-reference) +- [K3s Cluster Configuration](./k3s-config-reference) +- [EKS Cluster Configuration](./eks-config-reference) +- [GKE Cluster Configuration](./gke-config-reference) +- [AKS Cluster Configuration](./aks-config-reference) + +### Cluster Management Capabilities by Cluster Type + +The options and settings available for an existing cluster change based on the method that you used to provision it. + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '/rancher/v2.6/en/shared-files/_cluster-capabilities-table.md'; + + + diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/eks-config-reference/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/eks-config-reference/eks-config-reference.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/eks-config-reference/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/eks-config-reference/eks-config-reference.md diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/gke-config-reference/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/gke-config-reference/gke-config-reference.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/gke-config-reference/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/gke-config-reference/gke-config-reference.md diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/gke-config-reference/private-clusters/private-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/gke-config-reference/private-clusters/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/gke-config-reference/private-clusters/private-clusters.md diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/k3s-config-reference/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/k3s-config-reference/k3s-config-reference.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/k3s-config-reference/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/k3s-config-reference/k3s-config-reference.md diff --git a/versioned_docs/version-2.6/cluster-admin/editing-clusters/rke-config-reference/rke-config-reference.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/rke-config-reference/rke-config-reference.md new file mode 100644 index 0000000000..4c553c2995 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/editing-clusters/rke-config-reference/rke-config-reference.md @@ -0,0 +1,360 @@ +--- +title: RKE Cluster Configuration Reference +shortTitle: RKE Cluster Configuration +weight: 1 +--- + +When Rancher installs Kubernetes, it uses [RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) or [RKE2](https://docs.rke2.io/) as the Kubernetes distribution. 
+ +This section covers the configuration options that are available in Rancher for a new or existing RKE Kubernetes cluster. + +- [Overview](#overview) +- [Editing Clusters with a Form in the Rancher UI](#editing-clusters-with-a-form-in-the-rancher-ui) +- [Editing Clusters with YAML](#editing-clusters-with-yaml) +- [Configuration Options in the Rancher UI](#configuration-options-in-the-rancher-ui) + - [Kubernetes Version](#kubernetes-version) + - [Network Provider](#network-provider) + - [Project Network Isolation](#project-network-isolation) + - [Kubernetes Cloud Providers](#kubernetes-cloud-providers) + - [Private Registries](#private-registries) + - [Authorized Cluster Endpoint](#authorized-cluster-endpoint) + - [Node Pools](#node-pools) + - [NGINX Ingress](#nginx-ingress) + - [Metrics Server Monitoring](#metrics-server-monitoring) + - [Pod Security Policy Support](#pod-security-policy-support) + - [Docker Version on Nodes](#docker-version-on-nodes) + - [Docker Root Directory](#docker-root-directory) + - [Default Pod Security Policy](#default-pod-security-policy) + - [Node Port Range](#node-port-range) + - [Recurring etcd Snapshots](#recurring-etcd-snapshots) + - [Agent Environment Variables](#agent-environment-variables) + - [Updating ingress-nginx](#updating-ingress-nginx) +- [RKE Cluster Config File Reference](#rke-cluster-config-file-reference) + - [Config File Structure in Rancher](#config-file-structure-in-rancher) + - [Default DNS Provider](#default-dns-provider) +- [Rancher Specific Parameters in YAML](#rancher-specific-parameters-in-yaml) + - [docker_root_dir](#docker_root_dir) + - [enable_cluster_monitoring](#enable_cluster_monitoring) + - [enable_network_policy](#enable_network_policy) + - [local_cluster_auth_endpoint](#local_cluster_auth_endpoint) + - [Custom Network Plug-in](#custom-network-plug-in) + +# Overview + +You can configure the Kubernetes options one of two ways: + +- [Rancher UI](#rancher-ui-options): Use the Rancher UI to select options that are commonly customized when setting up a Kubernetes cluster. +- [Cluster Config File](#cluster-config-file): Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +The RKE cluster config options are nested under the `rancher_kubernetes_engine_config` directive. For more information, see the section about the [cluster config file.](#cluster-config-file) + +In [clusters launched by RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/), you can edit any of the remaining options that follow. + +For an example of RKE config file syntax, see the [RKE documentation]({{}}/rke/latest/en/example-yamls/). + +The forms in the Rancher UI don't include all advanced options for configuring RKE. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) + +# Editing Clusters with a Form in the Rancher UI + +To edit your cluster, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster you want to configure and click **⋮ > Edit Config**. + + +# Editing Clusters with YAML + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. 
Using a config file allows you to set any of the options available in an RKE installation, except for system_images configuration, by specifying them in YAML. + +RKE clusters (also called RKE1 clusters) are edited differently than RKE2 and K3s clusters. + +To edit an RKE config file directly from the Rancher UI, + +1. Click **☰ > Cluster Management**. +1. Go to the RKE cluster you want to configure and click **⋮ > Edit Config**. This takes you to the RKE configuration form. Note: Because cluster provisioning changed in Rancher 2.6, the **⋮ > Edit as YAML** option can be used for configuring RKE2 clusters, but it can't be used for editing RKE1 configuration. +1. In the configuration form, scroll down and click **Edit as YAML**. +1. Edit the RKE options under the `rancher_kubernetes_engine_config` directive. + +# Configuration Options in the Rancher UI + +> Some advanced configuration options are not exposed in the Rancher UI forms, but they can be enabled by editing the RKE cluster configuration file in YAML. For the complete reference of configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) + +### Kubernetes Version + +The version of Kubernetes installed on your cluster nodes. Rancher packages its own version of Kubernetes based on [hyperkube](https://github.com/rancher/hyperkube). + +For more detail, see [Upgrading Kubernetes]({{}}/rancher/v2.6/en/cluster-admin/upgrading-kubernetes). + +### Network Provider + +The [Network Provider](https://kubernetes.io/docs/concepts/cluster-administration/networking/) that the cluster uses. For more details on the different networking providers, please view our [Networking FAQ]({{}}/rancher/v2.6/en/faq/networking/cni-providers/). + +> After you launch the cluster, you cannot change your network provider. Therefore, choose which network provider you want to use carefully, as Kubernetes doesn't allow switching between network providers. Once a cluster is created with a network provider, changing network providers would require you to tear down the entire cluster and all its applications. + +Out of the box, Rancher is compatible with the following network providers: + +- [Canal](https://github.com/projectcalico/canal) +- [Flannel](https://github.com/coreos/flannel#flannel) +- [Calico](https://docs.projectcalico.org/v3.11/introduction/) +- [Weave](https://github.com/weaveworks/weave) + +**Notes on Weave:** + +When Weave is selected as the network provider, Rancher will automatically enable encryption by generating a random password. If you want to specify the password manually, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the [Weave Network Plug-in Options]({{}}/rke/latest/en/config-options/add-ons/network-plugins/#weave-network-plug-in-options). + +### Project Network Isolation + +If your network provider allows project network isolation, you can choose whether to enable or disable inter-project communication. + +Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin.
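
For illustration only, both of the settings above can also be expressed in the cluster config file described later on this page. The option names in this sketch follow the RKE Weave plug-in documentation and the example config file below; treat it as a sketch and verify the values against those references:

```yaml
# Sketch of a cluster config file that pins the Weave password and enables
# Project Network Isolation. All values are placeholders.
enable_network_policy: true        # Rancher-specific toggle, see "enable_network_policy" below
rancher_kubernetes_engine_config:
  network:
    plugin: weave
    weave_network_provider:
      password: "MyWeavePassword"  # replace with your own secret
```
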
+ +### Kubernetes Cloud Providers + +You can configure a [Kubernetes cloud provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers). If you want to use dynamically provisioned [volumes and storage]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) in Kubernetes, typically you must select the specific cloud provider in order to use it. For example, if you want to use Amazon EBS, you would need to select the `aws` cloud provider. + +>**Note:** If the cloud provider you want to use is not listed as an option, you will need to use the [config file option](#cluster-config-file) to configure the cloud provider. Please reference the [RKE cloud provider documentation]({{}}/rke/latest/en/config-options/cloud-providers/) on how to configure the cloud provider. + +### Private Registries + +The cluster-level private registry configuration is only used for provisioning clusters. + +There are two main ways to set up private registries in Rancher: by setting up the [global default registry]({{}}/rancher/v2.6/en/admin-settings/config-private-registry) through the **Settings** tab in the global view, and by setting up a private registry in the advanced options in the cluster-level settings. The global default registry is intended to be used for air-gapped setups, for registries that do not require credentials. The cluster-level private registry is intended to be used in all setups in which the private registry requires credentials. + +If your private registry requires credentials, you need to pass the credentials to Rancher by editing the cluster options for each cluster that needs to pull images from the registry. + +The private registry configuration option tells Rancher where to pull the [system images]({{}}/rke/latest/en/config-options/system-images/) or [addon images]({{}}/rke/latest/en/config-options/add-ons/) that will be used in your cluster. + +- **System images** are components needed to maintain the Kubernetes cluster. +- **Add-ons** are used to deploy several cluster components, including network plug-ins, the ingress controller, the DNS provider, or the metrics server. + +For more information on setting up a private registry for components applied during the provisioning of the cluster, see the [RKE documentation on private registries]({{}}/rke/latest/en/config-options/private-registries/). + +Rancher v2.6 introduced the ability to configure [ECR registries for RKE clusters]({{}}/rke/latest/en/config-options/private-registries/#amazon-elastic-container-registry-ecr-private-registry-setup). + +### Authorized Cluster Endpoint + +Authorized Cluster Endpoint can be used to directly access the Kubernetes API server, without requiring communication through Rancher. + +> The authorized cluster endpoint only works on Rancher-launched Kubernetes clusters. In other words, it only works in clusters where Rancher [used RKE]({{}}/rancher/v2.6/en/overview/architecture/#tools-for-provisioning-kubernetes-clusters) to provision the cluster. It is not available for clusters in a hosted Kubernetes provider, such as Amazon's EKS. + +This is enabled by default in Rancher-launched Kubernetes clusters, using the IP of the node with the `controlplane` role and the default Kubernetes self signed certificates. + +For more detail on how an authorized cluster endpoint works and why it is used, refer to the [architecture section.]({{}}/rancher/v2.6/en/overview/architecture/#4-authorized-cluster-endpoint) + +We recommend using a load balancer with the authorized cluster endpoint. 
For details, refer to the [recommended architecture section.]({{}}/rancher/v2.6/en/overview/architecture-recommendations/#architecture-for-an-authorized-cluster-endpoint) + +### Node Pools + +For information on using the Rancher UI to set up node pools in an RKE cluster, refer to [this page.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) + +### NGINX Ingress + +If you want to publish your applications in a high-availability configuration, and you're hosting your nodes with a cloud-provider that doesn't have a native load-balancing feature, enable this option to use NGINX Ingress within the cluster. + +### Metrics Server Monitoring + +Option to enable or disable [Metrics Server]({{}}/rke/latest/en/config-options/add-ons/metrics-server/). + +Each cloud provider capable of launching a cluster using RKE can collect metrics and monitor for your cluster nodes. Enable this option to view your node metrics from your cloud provider's portal. + +### Pod Security Policy Support + +Enables [pod security policies]({{}}/rancher/v2.6/en/admin-settings/pod-security-policies/) for the cluster. After enabling this option, choose a policy using the **Default Pod Security Policy** drop-down. + +You must have an existing Pod Security Policy configured before you can use this option. + +### Docker Version on Nodes + +Configures whether nodes are allowed to run versions of Docker that Rancher doesn't officially support. + +If you choose to require a supported Docker version, Rancher will stop pods from running on nodes that don't have a supported Docker version installed. + +For details on which Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +### Docker Root Directory + +If the nodes you are adding to the cluster have Docker configured with a non-default Docker Root Directory (default is `/var/lib/docker`), specify the correct Docker Root Directory in this option. + +### Default Pod Security Policy + +If you enable **Pod Security Policy Support**, use this drop-down to choose the pod security policy that's applied to the cluster. + +### Node Port Range + +Option to change the range of ports that can be used for [NodePort services](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport). Default is `30000-32767`. + +### Recurring etcd Snapshots + +Option to enable or disable [recurring etcd snapshots]({{}}/rke/latest/en/etcd-snapshots/#etcd-recurring-snapshots). + +### Agent Environment Variables + +Option to set environment variables for [rancher agents]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/). The environment variables can be set using key value pairs. If rancher agent requires use of proxy to communicate with Rancher server, `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment variables can be set using agent environment variables. + +### Updating ingress-nginx + +Clusters that were created before Kubernetes 1.16 will have an `ingress-nginx` `updateStrategy` of `OnDelete`. Clusters that were created with Kubernetes 1.16 or newer will have `RollingUpdate`. + +If the `updateStrategy` of `ingress-nginx` is `OnDelete`, you will need to delete these pods to get the correct version for your deployment. + + + +# RKE Cluster Config File Reference + +Instead of using the Rancher UI to choose Kubernetes options for the cluster, advanced users can create an RKE config file. 
Using a config file allows you to set any of the [options available]({{}}/rke/latest/en/config-options/) in an RKE installation, except for `system_images` configuration. The `system_images` option is not supported when creating a cluster with the Rancher UI or API. + +For the complete reference for configurable options for RKE Kubernetes clusters in YAML, see the [RKE documentation.]({{}}/rke/latest/en/config-options/) + +### Config File Structure in Rancher + +RKE (Rancher Kubernetes Engine) is the tool that Rancher uses to provision Kubernetes clusters. Rancher's cluster config files used to have the same structure as [RKE config files,]({{}}/rke/latest/en/example-yamls/) but the structure changed so that in Rancher, RKE cluster config items are separated from non-RKE config items. Therefore, configuration for your cluster needs to be nested under the `rancher_kubernetes_engine_config` directive in the cluster config file. Cluster config files created with earlier versions of Rancher will need to be updated for this format. An example cluster config file is included below. + +
    + Example Cluster Config File + +```yaml +# +# Cluster Config +# +docker_root_dir: /var/lib/docker +enable_cluster_alerting: false +enable_cluster_monitoring: false +enable_network_policy: false +local_cluster_auth_endpoint: + enabled: true +# +# Rancher Config +# +rancher_kubernetes_engine_config: # Your RKE template config goes here. + addon_job_timeout: 30 + authentication: + strategy: x509 + ignore_docker_version: true +# +# # Currently only nginx ingress provider is supported. +# # To disable ingress controller, set `provider: none` +# # To enable ingress on specific nodes, use the node_selector, eg: +# provider: nginx +# node_selector: +# app: ingress +# + ingress: + provider: nginx + kubernetes_version: v1.15.3-rancher3-1 + monitoring: + provider: metrics-server +# +# If you are using calico on AWS +# +# network: +# plugin: calico +# calico_network_provider: +# cloud_provider: aws +# +# # To specify flannel interface +# +# network: +# plugin: flannel +# flannel_network_provider: +# iface: eth1 +# +# # To specify flannel interface for canal plugin +# +# network: +# plugin: canal +# canal_network_provider: +# iface: eth1 +# + network: + options: + flannel_backend_type: vxlan + plugin: canal +# +# services: +# kube-api: +# service_cluster_ip_range: 10.43.0.0/16 +# kube-controller: +# cluster_cidr: 10.42.0.0/16 +# service_cluster_ip_range: 10.43.0.0/16 +# kubelet: +# cluster_domain: cluster.local +# cluster_dns_server: 10.43.0.10 +# + services: + etcd: + backup_config: + enabled: true + interval_hours: 12 + retention: 6 + safe_timestamp: false + creation: 12h + extra_args: + election-timeout: 5000 + heartbeat-interval: 500 + gid: 0 + retention: 72h + snapshot: false + uid: 0 + kube_api: + always_pull_images: false + pod_security_policy: false + service_node_port_range: 30000-32767 + ssh_agent_auth: false +windows_prefered_cluster: false +``` +
    + +### Default DNS provider + +The table below indicates what DNS provider is deployed by default. See [RKE documentation on DNS provider]({{}}/rke/latest/en/config-options/add-ons/dns/) for more information how to configure a different DNS provider. CoreDNS can only be used on Kubernetes v1.12.0 and higher. + +| Rancher version | Kubernetes version | Default DNS provider | +|-------------|--------------------|----------------------| +| v2.2.5 and higher | v1.14.0 and higher | CoreDNS | +| v2.2.5 and higher | v1.13.x and lower | kube-dns | +| v2.2.4 and lower | any | kube-dns | + +# Rancher Specific Parameters in YAML + +Besides the RKE config file options, there are also Rancher specific settings that can be configured in the Config File (YAML): + +### docker_root_dir + +See [Docker Root Directory](#docker-root-directory). + +### enable_cluster_monitoring + +Option to enable or disable [Cluster Monitoring]({{}}/rancher/v2.6/en/monitoring-alerting/). + +### enable_network_policy + +Option to enable or disable Project Network Isolation. + +Project network isolation is available if you are using any RKE network plugin that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin. + +### local_cluster_auth_endpoint + +See [Authorized Cluster Endpoint](#authorized-cluster-endpoint). + +Example: + +```yaml +local_cluster_auth_endpoint: + enabled: true + fqdn: "FQDN" + ca_certs: |- + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +### Custom Network Plug-in + +You can add a custom network plug-in by using the [user-defined add-on functionality]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/) of RKE. You define any add-on that you want deployed after the Kubernetes cluster is deployed. + +There are two ways that you can specify an add-on: + +- [In-line Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#in-line-add-ons) +- [Referencing YAML Files for Add-ons]({{}}/rke/latest/en/config-options/add-ons/user-defined-add-ons/#referencing-yaml-files-for-add-ons) + +For an example of how to configure a custom network plug-in by editing the `cluster.yml`, refer to the [RKE documentation.]({{}}/rke/latest/en/config-options/add-ons/network-plugins/custom-network-plugin-example) diff --git a/content/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/_index.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/rke2-config-reference/rke2-config-reference.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/_index.md rename to versioned_docs/version-2.6/cluster-admin/editing-clusters/rke2-config-reference/rke2-config-reference.md diff --git a/versioned_docs/version-2.6/cluster-admin/editing-clusters/syncing/syncing.md b/versioned_docs/version-2.6/cluster-admin/editing-clusters/syncing/syncing.md new file mode 100644 index 0000000000..0280aad934 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-admin/editing-clusters/syncing/syncing.md @@ -0,0 +1,35 @@ +--- +title: Syncing +weight: 10 +--- + +Syncing is the feature for EKS and GKE clusters that causes Rancher to update the clusters' values so they are up to date with their corresponding cluster object in the hosted Kubernetes provider. This enables Rancher to not be the sole owner of a hosted cluster’s state. 
Its largest limitation is that processing an update from Rancher and another source at the same time or within 5 minutes of one finishing may cause the state from one source to completely overwrite the other. + +### How it works + +There are two fields on the Rancher Cluster object that must be understood to understand how syncing works: + +1. The config object for the cluster, located on the Spec of the Cluster: + + * For EKS, the field is called EKSConfig + * For GKE, the field is called GKEConfig + +2. The UpstreamSpec object + + * For EKS, this is located on the EKSStatus field on the Status of the Cluster. + * For GKE, this is located on the GKEStatus field on the Status of the Cluster. + +The struct types that define these objects can be found in their corresponding operator projects: + + * [eks-operator](https://github.com/rancher/eks-operator/blob/master/pkg/apis/eks.cattle.io/v1/types.go) + * [gke-operator](https://github.com/rancher/gke-operator/blob/master/pkg/apis/gke.cattle.io/v1/types.go) + +All fields with the exception of the cluster name, the location (region or zone), Imported, and the cloud credential reference, are nillable on this Spec object. + +The EKSConfig or GKEConfig represents desired state for its non-nil values. Fields that are non-nil in the config object can be thought of as “managed". When a cluster is created in Rancher, all fields are non-nil and therefore “managed”. When a pre-existing cluster is registered in rancher all nillable fields are nil and are not “managed”. Those fields become managed once their value has been changed by Rancher. + +UpstreamSpec represents the cluster as it is in the hosted Kubernetes provider and is refreshed on an interval of 5 minutes. After the UpstreamSpec has been refreshed, Rancher checks if the cluster has an update in progress. If it is updating, nothing further is done. If it is not currently updating, any “managed” fields on EKSConfig or GKEConfig are overwritten with their corresponding value from the recently updated UpstreamSpec. + +The effective desired state can be thought of as the UpstreamSpec + all non-nil fields in the EKSConfig or GKEConfig. This is what is displayed in the UI. + +If Rancher and another source attempt to update a cluster at the same time or within the 5 minute refresh window of an update finishing, then it is likely any “managed” fields can be caught in a race condition. To use EKS as an example, a cluster may have PrivateAccess as a managed field. If PrivateAccess is false and then enabled in EKS console, then finishes at 11:01, and then tags are updated from Rancher before 11:05 the value will likely be overwritten. This would also occur if tags were updated while the cluster was processing the update. If the cluster was registered and the PrivateAccess fields was nil then this issue should not occur in the aforementioned case. 
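
As a rough sketch of how this looks on the Cluster object (the field names below are illustrative and should be checked against the eks-operator and gke-operator types linked above), a registered EKS cluster where only the tags have been edited through Rancher might carry a spec similar to:

```yaml
# Hypothetical excerpt of a Rancher Cluster object for a registered EKS cluster.
# Non-nil fields in eksConfig (here: tags) are "managed" and enforced by Rancher;
# nil/omitted fields such as privateAccess remain owned by EKS and are refreshed
# from the upstream spec on the 5-minute interval described above.
spec:
  eksConfig:
    displayName: my-cluster      # never nil
    region: us-west-2            # never nil
    imported: true               # registered, not provisioned by Rancher
    tags:                        # managed: Rancher reconciles this value
      team: platform
    # privateAccess: null        # unmanaged: follows whatever is set in EKS
status:
  eksStatus:
    upstreamSpec:                # refreshed from the hosted provider every 5 minutes
      privateAccess: false
      publicAccess: true
      tags:
        team: platform
```
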
diff --git a/content/rancher/v2.6/en/cluster-admin/nodes/_index.md b/versioned_docs/version-2.6/cluster-admin/nodes/nodes.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/nodes/_index.md rename to versioned_docs/version-2.6/cluster-admin/nodes/nodes.md diff --git a/content/rancher/v2.6/en/cluster-admin/pod-security-policies/_index.md b/versioned_docs/version-2.6/cluster-admin/pod-security-policies/pod-security-policies.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/pod-security-policies/_index.md rename to versioned_docs/version-2.6/cluster-admin/pod-security-policies/pod-security-policies.md diff --git a/content/rancher/v2.6/en/cluster-admin/pod-security-policy/_index.md b/versioned_docs/version-2.6/cluster-admin/pod-security-policy/pod-security-policy.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/pod-security-policy/_index.md rename to versioned_docs/version-2.6/cluster-admin/pod-security-policy/pod-security-policy.md diff --git a/content/rancher/v2.6/en/cluster-admin/projects-and-namespaces/_index.md b/versioned_docs/version-2.6/cluster-admin/projects-and-namespaces/projects-and-namespaces.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/projects-and-namespaces/_index.md rename to versioned_docs/version-2.6/cluster-admin/projects-and-namespaces/projects-and-namespaces.md diff --git a/content/rancher/v2.6/en/cluster-admin/restoring-etcd/_index.md b/versioned_docs/version-2.6/cluster-admin/restoring-etcd/restoring-etcd.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/restoring-etcd/_index.md rename to versioned_docs/version-2.6/cluster-admin/restoring-etcd/restoring-etcd.md diff --git a/content/rancher/v2.6/en/cluster-admin/tools/_index.md b/versioned_docs/version-2.6/cluster-admin/tools/tools.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/tools/_index.md rename to versioned_docs/version-2.6/cluster-admin/tools/tools.md diff --git a/content/rancher/v2.6/en/cluster-admin/upgrading-kubernetes/_index.md b/versioned_docs/version-2.6/cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/upgrading-kubernetes/_index.md rename to versioned_docs/version-2.6/cluster-admin/upgrading-kubernetes/upgrading-kubernetes.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/attaching-existing-storage/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/attaching-existing-storage/attaching-existing-storage.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/ceph/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/ceph/ceph.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/ceph/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/ceph/ceph.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/ebs/ebs.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/ebs/_index.md rename to 
versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/ebs/ebs.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/examples.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/examples.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/nfs/nfs.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/nfs/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/nfs/nfs.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/examples/vsphere/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/examples/vsphere/vsphere.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/glusterfs-volumes/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/glusterfs-volumes/glusterfs-volumes.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/how-storage-works/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/how-storage-works/how-storage-works.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/iscsi-volumes/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/iscsi-volumes/iscsi-volumes.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/provisioning-new-storage/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/provisioning-new-storage/provisioning-new-storage.md diff --git a/content/rancher/v2.6/en/cluster-admin/volumes-and-storage/_index.md b/versioned_docs/version-2.6/cluster-admin/volumes-and-storage/volumes-and-storage.md similarity index 100% rename from content/rancher/v2.6/en/cluster-admin/volumes-and-storage/_index.md rename to versioned_docs/version-2.6/cluster-admin/volumes-and-storage/volumes-and-storage.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table/index.md b/versioned_docs/version-2.6/cluster-provisioning/cluster-capabilities-table/index.md similarity index 100% 
rename from content/rancher/v2.6/en/cluster-provisioning/cluster-capabilities-table/index.md rename to versioned_docs/version-2.6/cluster-provisioning/cluster-capabilities-table/index.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/cluster-provisioning.md b/versioned_docs/version-2.6/cluster-provisioning/cluster-provisioning.md new file mode 100644 index 0000000000..b3e16047f4 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/cluster-provisioning.md @@ -0,0 +1,89 @@ +--- +title: Setting up Kubernetes Clusters in Rancher +description: Provisioning Kubernetes Clusters +weight: 7 +--- + +Rancher simplifies the creation of clusters by allowing you to create them through the Rancher UI rather than more complex alternatives. Rancher provides multiple options for launching a cluster. Use the option that best fits your use case. + +This section assumes a basic familiarity with Docker and Kubernetes. For a brief explanation of how Kubernetes components work together, refer to the [concepts]({{}}/rancher/v2.6/en/overview/concepts) page. + +For a conceptual overview of how the Rancher server provisions clusters and what tools it uses to provision them, refer to the [architecture]({{}}/rancher/v2.6/en/overview/architecture/) page. + +This section covers the following topics: + + + +- [Cluster Management Capabilities by Cluster Type](#cluster-management-capabilities-by-cluster-type) +- [Setting up clusters in a hosted Kubernetes provider](#setting-up-clusters-in-a-hosted-kubernetes-provider) +- [Launching Kubernetes with Rancher](#launching-kubernetes-with-rancher) + - [Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider](#launching-kubernetes-and-provisioning-nodes-in-an-infrastructure-provider) + - [Launching Kubernetes on Existing Custom Nodes](#launching-kubernetes-on-existing-custom-nodes) +- [Registering Existing Clusters](#registering-existing-clusters) +- [Programmatically Creating Clusters](#programmatically-creating-clusters) + + + +### Cluster Management Capabilities by Cluster Type + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '/rancher/v2.6/en/shared-files/_cluster-capabilities-table.md'; + + + +# Setting up Clusters in a Hosted Kubernetes Provider + +In this scenario, Rancher does not provision Kubernetes because it is installed by providers such as Google Kubernetes Engine (GKE), Amazon Elastic Container Service for Kubernetes, or Azure Kubernetes Service. + +If you use a Kubernetes provider such as Google GKE, Rancher integrates with its cloud APIs, allowing you to create and manage role-based access control for the hosted cluster from the Rancher UI. + +For more information, refer to the section on [hosted Kubernetes clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters) + +# Launching Kubernetes with Rancher + +Rancher uses the [Rancher Kubernetes Engine (RKE)]({{}}/rke/latest/en/) as a library when provisioning Kubernetes on your own nodes. RKE is Rancher’s own lightweight Kubernetes installer. + +In RKE clusters, Rancher manages the deployment of Kubernetes. These clusters can be deployed on any bare metal server, cloud provider, or virtualization platform. + +These nodes can be dynamically provisioned through Rancher's UI, which calls [Docker Machine](https://docs.docker.com/machine/) to launch nodes on various cloud providers. 
+ +If you already have a node that you want to add to an RKE cluster, you can add it to the cluster by running a Rancher agent container on it. + +For more information, refer to the section on [RKE clusters.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) + +### Launching Kubernetes and Provisioning Nodes in an Infrastructure Provider + +Rancher can dynamically provision nodes in infrastructure providers such as Amazon EC2, DigitalOcean, Azure, or vSphere, then install Kubernetes on them. + +Using Rancher, you can create pools of nodes based on a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates). This template defines the parameters used to launch nodes in your cloud providers. + +One benefit of using nodes hosted by an infrastructure provider is that if a node loses connectivity with the cluster, Rancher can automatically replace it, thus maintaining the expected cluster configuration. + +The cloud providers available for creating a node template are decided based on the [node drivers]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-drivers) active in the Rancher UI. + +For more information, refer to the section on [nodes hosted by an infrastructure provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/) + +### Launching Kubernetes on Existing Custom Nodes + +When setting up this type of cluster, Rancher installs Kubernetes on existing [custom nodes,]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/) which creates a custom cluster. + +You can bring any nodes you want to Rancher and use them to create a cluster. + +These nodes include on-prem bare metal servers, cloud-hosted virtual machines, or on-prem virtual machines. + +# Registering Existing Clusters + +The cluster registration feature replaces the feature to import clusters. + +Registering EKS clusters now provides additional benefits. For the most part, registered EKS clusters and EKS clusters created in Rancher are treated the same way in the Rancher UI, except for deletion. + +When you delete an EKS cluster that was created in Rancher, the cluster is destroyed. When you delete an EKS cluster that was registered in Rancher, it is disconnected from the Rancher server, but it still exists and you can still access it in the same way you did before it was registered in Rancher. + +For more information, see [this page.](./registered-clusters) + +# Programmatically Creating Clusters + +The most common way to programmatically deploy Kubernetes clusters through Rancher is by using the Rancher2 Terraform provider. The documentation for creating clusters with Terraform is [here.](https://registry.terraform.io/providers/rancher/rancher2/latest/docs/resources/cluster) + +EKS, GKE, AKS clusters and RKE clusters can be created or imported with Terraform. 
\ No newline at end of file diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/ack/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/ack/ack.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/aks/aks.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/aks/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/aks/aks.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/cce/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/cce/cce.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/eks/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/eks/eks.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/permissions.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/eks/permissions/permissions.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/gke/gke.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/gke/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/gke/gke.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/hosted-kubernetes-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/hosted-kubernetes-clusters.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md b/versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters/tke/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/hosted-kubernetes-clusters/tke/tke.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/node-requirements/node-requirements.md b/versioned_docs/version-2.6/cluster-provisioning/node-requirements/node-requirements.md new file mode 
100644 index 0000000000..ba600ef51a --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/node-requirements/node-requirements.md @@ -0,0 +1,126 @@ +--- +title: Node Requirements for Rancher Managed Clusters +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This page describes the requirements for the Rancher managed Kubernetes clusters where your apps and services will be installed. These downstream clusters should be separate from the three-node cluster running Rancher. + +> If Rancher is installed on a high-availability Kubernetes cluster, the Rancher server three-node cluster and downstream clusters have different requirements. For Rancher installation requirements, refer to the node requirements in the [installation section.]({{}}/rancher/v2.6/en/installation/requirements/) + +Make sure the nodes for the Rancher server fulfill the following requirements: + +- [Operating systems and container runtime requirements](#operating-systems-and-container-runtime-requirements) +- [Hardware Requirements](#hardware-requirements) +- [Networking Requirements](#networking-requirements) +- [Optional: Security Considerations](#optional-security-considerations) + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution and any modern Docker version. Linux is required for the etcd and controlplane nodes of all downstream clusters. Worker nodes may run Linux or [Windows Server.](#windows-nodes) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +If you plan to use ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.6/en/installation/resources/advanced/arm64-platform/) + +For information on how to install Docker, refer to the official [Docker documentation.](https://docs.docker.com/) + +### Oracle Linux and RHEL Derived Linux Nodes + +Some distributions of Linux derived from RHEL, including Oracle Linux, may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes 1.19, firewalld must be turned off. + +>**Note:** In RHEL 8.4, two extra services are included on the NetworkManager: `nm-cloud-setup.service` and `nm-cloud-setup.timer`. These services add a routing table that interferes with the CNI plugin's configuration. If these services are enabled, you must disable them using the command below, and then reboot the node to restore connectivity: +> +> ``` + systemctl disable nm-cloud-setup.service nm-cloud-setup.timer + reboot + ``` + +### SUSE Linux Nodes + +SUSE Linux may have a firewall that blocks all ports by default. In that situation, follow [these steps]({{}}/rancher/v2.6/en/installation/requirements/ports/#opening-suse-linux-ports) to open the ports needed for adding a host to a custom cluster. 
+ +### Flatcar Container Linux Nodes + +When [Launching Kubernetes with Rancher]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) using Flatcar Container Linux nodes, it is required to use the following configuration in the [Cluster Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: canal + options: + canal_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +```yaml +rancher_kubernetes_engine_config: + network: + plugin: calico + options: + calico_flex_volume_plugin_dir: /opt/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds + flannel_backend_type: vxlan + + services: + kube-controller: + extra_args: + flex-volume-plugin-dir: /opt/kubernetes/kubelet-plugins/volume/exec/ +``` + + + + +It is also required to enable the Docker service, you can enable the Docker service using the following command: + +``` +systemctl enable docker.service +``` + +The Docker service is enabled automatically when using [Node Drivers]({{}}/rancher/v2.6/en/admin-settings/drivers/#node-drivers). + +### Windows Nodes + +Nodes with Windows Server must run Docker Enterprise Edition. + +Windows nodes can be used for worker nodes only. See [Configuring Custom Clusters for Windows]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/) + +# Hardware Requirements + +The hardware requirements for nodes with the `worker` role mostly depend on your workloads. The minimum to run the Kubernetes node components is 1 CPU (core) and 1GB of memory. + +Regarding CPU and memory, it is recommended that the different planes of Kubernetes clusters (etcd, controlplane, and workers) should be hosted on different nodes so that they can scale separately from each other. + +For hardware recommendations for large Kubernetes clusters, refer to the official Kubernetes documentation on [building large clusters.](https://kubernetes.io/docs/setup/best-practices/cluster-large/) + +For hardware recommendations for etcd clusters in production, refer to the official [etcd documentation.](https://etcd.io/docs/v3.4.0/op-guide/hardware/) + +# Networking Requirements + +For a production cluster, we recommend that you restrict traffic by opening only the ports defined in the port requirements below. + +The ports required to be open are different depending on how the user cluster is launched. Each of the sections below list the ports that need to be opened for different [cluster creation options]({{}}/rancher/v2.6/en/cluster-provisioning/). + +For a breakdown of the port requirements for etcd nodes, controlplane nodes, and worker nodes in a Kubernetes cluster, refer to the [port requirements for the Rancher Kubernetes Engine.]({{}}/rke/latest/en/os/#ports) + +Details on which ports are used in each situation are found under [Downstream Cluster Port Requirements]({{}}/rancher/v2.6/en/installation/requirements/ports#downstream-kubernetes-cluster-nodes). + +# Optional: Security Considerations + +If you want to provision a Kubernetes cluster that is compliant with the CIS (Center for Internet Security) Kubernetes Benchmark, we recommend to following our hardening guide to configure your nodes before installing Kubernetes. 
+ +For more information on the hardening guide and details on which version of the guide corresponds to your Rancher and Kubernetes versions, refer to the [security section.]({{}}/rancher/v2.6/en/security/#rancher-hardening-guide) diff --git a/content/rancher/v2.6/en/cluster-provisioning/production/nodes-and-roles/_index.md b/versioned_docs/version-2.6/cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/production/nodes-and-roles/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/production/nodes-and-roles/nodes-and-roles.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/production/_index.md b/versioned_docs/version-2.6/cluster-provisioning/production/production.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/production/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/production/production.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/production/recommended-architecture/_index.md b/versioned_docs/version-2.6/cluster-provisioning/production/recommended-architecture/recommended-architecture.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/production/recommended-architecture/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/production/recommended-architecture/recommended-architecture.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/registered-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/registered-clusters/registered-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/registered-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/registered-clusters/registered-clusters.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/behavior-differences-between-rke1-and-rke2.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/behavior-differences-between-rke1-and-rke2.md new file mode 100644 index 0000000000..99a3f18f37 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/behavior-differences-between-rke1-and-rke2/behavior-differences-between-rke1-and-rke2.md @@ -0,0 +1,34 @@ +--- +title: Behavior Differences Between RKE1 and RKE2 +weight: 2450 +--- + +RKE2, also known as RKE Government, is a Kubernetes distribution that focuses on security and compliance for U.S. Federal Government entities. It is considered the next iteration of the Rancher Kubernetes Engine, now known as RKE1. + +RKE1 and RKE2 have several slight behavioral differences to note, and this page will highlight some of these at a high level. + +### Control Plane Components + +RKE1 uses Docker for deploying and managing control plane components, and it also uses Docker as the container runtime for Kubernetes. By contrast, RKE2 launches control plane components as static pods that are managed by the kubelet. RKE2's container runtime is containerd, which allows things such as container registry mirroring (RKE1 with Docker does not). + +### Cluster API + +RKE2/K3s provisioning is built on top of the Cluster API (CAPI) upstream framework which often makes RKE2-provisioned clusters behave differently than RKE1-provisioned clusters. + +When you make changes to your cluster configuration in RKE2, this **may** result in nodes reprovisioning. 
This is controlled by CAPI controllers and not by Rancher itself. Note that for etcd nodes, the same behavior does not apply. + +The following are some specific example configuration changes that may cause the described behavior: + +- When editing the cluster and enabling `drain before delete`, the existing control plane nodes and worker are deleted and new nodes are created. + +- When nodes are being provisioned and a scale down operation is performed, rather than scaling down the desired number of nodes, it is possible that the currently provisioning nodes get deleted and new nodes are provisioned to reach the desired node count. Please note that this is a bug in Cluster API, and it will be fixed in an upcoming release. Once fixed, Rancher will update the documentation. + +Users who are used to RKE1 provisioning should take note of this new RKE2 behavior which may be unexpected. + +### Terminology + +You will notice that some terms have changed or gone away going from RKE1 to RKE2. For example, in RKE1 provisioning, you use **node templates**; in RKE2 provisioning, you can configure your cluster node pools when creating or editing the cluster. Another example is that the term **node pool** in RKE1 is now known as **machine pool** in RKE2. + + + + diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/amazon/amazon.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/amazon/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/amazon/amazon.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/azure/azure.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/azure/azure.md new file mode 100644 index 0000000000..a6e80db75d --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/azure/azure.md @@ -0,0 +1,112 @@ +--- +title: Setting up the Azure Cloud Provider +weight: 2 +--- + +When using the `Azure` cloud provider, you can leverage the following capabilities: + +- **Load Balancers:** Launches an Azure Load Balancer within a specific Network Security Group. + +- **Persistent Volumes:** Supports using Azure Blob disks and Azure Managed Disks with standard and premium storage accounts. + +- **Network Storage:** Support Azure Files via CIFS mounts. + +The following account types are not supported for Azure Subscriptions: + +- Single tenant accounts (i.e. accounts with no subscriptions). +- Multi-subscription accounts. + +# Prerequisites for RKE and RKE2 + +To set up the Azure cloud provider for both RKE and RKE2, the following credentials need to be configured: + +1. [Set up the Azure Tenant ID](#1-set-up-the-azure-tenant-id) +2. [Set up the Azure Client ID and Azure Client Secret](#2-set-up-the-azure-client-id-and-azure-client-secret) +3. [Configure App Registration Permissions](#3-configure-app-registration-permissions) +4. [Set up Azure Network Security Group Name](#4-set-up-azure-network-security-group-name) + +### 1. Set up the Azure Tenant ID + +Visit [Azure portal](https://portal.azure.com), login and go to **Azure Active Directory** and select **Properties**. Your **Directory ID** is your **Tenant ID** (tenantID). + +If you want to use the Azure CLI, you can run the command `az account show` to get the information. 
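For example, a minimal sketch that prints only the tenant ID, assuming you are already logged in with `az login`:

```
az account show --query tenantId --output tsv
```

The printed value is what you enter as the **Azure Tenant ID** (tenantID).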
+ +### 2. Set up the Azure Client ID and Azure Client Secret + +Visit [Azure portal](https://portal.azure.com), login and follow the steps below to create an **App Registration** and the corresponding **Azure Client ID** (aadClientId) and **Azure Client Secret** (aadClientSecret). + +1. Select **Azure Active Directory**. +1. Select **App registrations**. +1. Select **New application registration**. +1. Choose a **Name**, select `Web app / API` as **Application Type** and a **Sign-on URL** which can be anything in this case. +1. Select **Create**. + +In the **App registrations** view, you should see your created App registration. The value shown in the column **APPLICATION ID** is what you need to use as **Azure Client ID**. + +The next step is to generate the **Azure Client Secret**: + +1. Open your created App registration. +1. In the **Settings** view, open **Keys**. +1. Enter a **Key description**, select an expiration time and select **Save**. +1. The generated value shown in the column **Value** is what you need to use as **Azure Client Secret**. This value will only be shown once. + +### 3. Configure App Registration Permissions + +The last thing you will need to do, is assign the appropriate permissions to your App registration. + +1. Go to **More services**, search for **Subscriptions** and open it. +1. Open **Access control (IAM)**. +1. Select **Add**. +1. For **Role**, select `Contributor`. +1. For **Select**, select your created App registration name. +1. Select **Save**. + +### 4. Set up Azure Network Security Group Name + +A custom Azure Network Security Group (securityGroupName) is needed to allow Azure Load Balancers to work. + +If you provision hosts using Rancher Machine Azure driver, you will need to edit them manually to assign them to this Network Security Group. + +You should already assign custom hosts to this Network Security Group during provisioning. + +Only hosts expected to be load balancer back ends need to be in this group. + +# RKE2 Cluster Set-up in Rancher + +1. Choose "Azure" from the Cloud Provider drop-down in the Cluster Configuration section. + +1. * Supply the Cloud Provider Configuration. Note that Rancher will automatically create a new Network Security Group, Resource Group, Availability Set, Subnet, and Virtual Network. If you already have some or all of these created, you will need to specify them before creating the cluster. + * You can click on "Show Advanced" to see more of these automatically generated names and update them if + necessary. Your Cloud Provider Configuration **must** match the fields in the Machine Pools section. If you have multiple pools, they must all use the same Resource Group, Availability Set, Subnet, Virtual Network, and Network Security Group. + * An example is provided below. You will modify it as needed. + +
    + Example Cloud Provider Config + + ```yaml + { + "cloud":"AzurePublicCloud", + "tenantId": "YOUR TENANTID HERE", + "aadClientId": "YOUR AADCLIENTID HERE", + "aadClientSecret": "YOUR AADCLIENTSECRET HERE", + "subscriptionId": "YOUR SUBSCRIPTIONID HERE", + "resourceGroup": "docker-machine", + "location": "westus", + "subnetName": "docker-machine", + "securityGroupName": "rancher-managed-KA4jV9V2", + "securityGroupResourceGroup": "docker-machine", + "vnetName": "docker-machine-vnet", + "vnetResourceGroup": "docker-machine", + "primaryAvailabilitySetName": "docker-machine", + "routeTableResourceGroup": "docker-machine", + "cloudProviderBackoff": false, + "useManagedIdentityExtension": false, + "useInstanceMetadata": true + } + ``` + +
    + +1. Under the **Cluster Configuration > Advanced** section, click **Add** under **Additional Controller Manager Args** and add this flag: `--configure-cloud-routes=false` + +1. Click the **Create** button to submit the form and create the cluster. diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/cloud-providers.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/gce/gce.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/gce/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/gce/gce.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/in-tree.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/in-tree/in-tree.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/out-of-tree.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/out-of-tree.md new file mode 100644 index 0000000000..d9b95fc9b6 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/out-of-tree.md @@ -0,0 +1,66 @@ +--- +title: How to Configure Out-of-tree vSphere Cloud Provider +shortTitle: Out-of-tree Cloud Provider +weight: 10 +--- + +Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. + +This page covers how to install the Cloud Provider Interface (CPI) and Cloud Storage Interface (CSI) plugins after bringing up a cluster. + +# Prerequisites + +The vSphere versions supported: + +* 6.7u3 +* 7.0u1 or higher. + +The Kubernetes version must be 1.19 or higher. + +Using the vSphere out-of-tree cloud provider requires Linux nodes and is not supported on Windows. + +# Installation + +The Cloud Provider Interface (CPI) should be installed first before installing the Cloud Storage Interface (CSI). + +### 1. Create a vSphere cluster + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **VMware vSphere** or **Custom**. +1. On the **Basics** tab in the **Cluster Configuration** section, set the **Cloud Provider** to **vSphere**. +1. In the **Add-On Config** tab, the vSphere Cloud Provider (CPI) and Storage Provider (CSI) options. +1. Finish creating your cluster. + +### 2. Install the CPI plugin + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where the vSphere CPI plugin will be installed and click **Explore**. +1. Click **Apps & Marketplace > Charts**. +1. Click **vSphere CPI**. +1. Fill out the required vCenter details. +1. 
vSphere CPI initializes all nodes with ProviderID which is needed by the vSphere CSI driver. Check if all nodes are initialized with the ProviderID before installing CSI driver with the following command: + + ``` + kubectl describe nodes | grep "ProviderID" + ``` + +### 3. Installing the CSI plugin + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where the vSphere CSI plugin will be installed and click **Explore**. +1. Click **Apps & Marketplace > Charts**. +1. Click **vSphere CSI**. +1. Click **Install**. +1. Fill out the required vCenter details. On the **Features** tab, set **Enable CSI Migration** to **false**. +3. On the **Storage** tab, fill out the details for the StorageClass. This chart creates a StorageClass with the `csi.vsphere.vmware.com` as the provisioner. +1. Click **Install**. + + +# Using the CSI driver for provisioning volumes + +The CSI chart by default creates a storageClass. + +If that option was not selected while launching the chart, create a storageClass with the `csi.vsphere.vmware.com` as the provisioner. + +All volumes provisioned using this StorageClass will get provisioned by the CSI driver. diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/vsphere-volume-migration.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/vsphere-volume-migration.md new file mode 100644 index 0000000000..41b9d5d254 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/out-of-tree/vsphere-volume-migration/vsphere-volume-migration.md @@ -0,0 +1,108 @@ +--- +title: Migrating vSphere In-tree Volumes to CSI +weight: 5 +--- +Kubernetes is moving away from maintaining cloud providers in-tree. vSphere has an out-of-tree cloud provider that can be used by installing the vSphere cloud provider and cloud storage plugins. + +This page covers how to migrate from the in-tree vSphere cloud provider to out-of-tree, and manage the existing VMs post migration. + +It follows the steps provided in the official [vSphere migration documentation](https://vsphere-csi-driver.sigs.k8s.io/features/vsphere_csi_migration.html) and provides the steps to be performed in Rancher. + +### Cloud-config Format Limitation + +Existing volumes that were provisioned using the following cloud-config format will NOT get migrated due to an existing bug in vsphere CSI. + +If the cloud-config has this format for datastore and resource pool path, vsphere CSI driver cannot recognize it: + +```yaml +default-datastore: /datastore/ +resourcepool-path: "/host//Resources/" +``` + +Volumes provisioned with the in-tree provider using the following format will get migrated correctly: + +```yaml +default-datastore: +resourcepool-path: "/Resources/" +``` + +Upstream bug: https://github.com/kubernetes-sigs/vsphere-csi-driver/issues/628 + +Rancher issue tracking this bug: https://github.com/rancher/rancher/issues/31105 + +# Prerequisites + +- vSphere CSI Migration requires vSphere 7.0u1. In order to be able to manage existing in-tree vSphere volumes, upgrade vSphere to 7.0u1. +- The Kubernetes version must be 1.19 or higher. + +# Migration + +### 1. Install the CPI plugin + +Before installing CPI, we need to taint all nodes with `node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule`. 
+ +This can be done by running the following commands: + +``` +curl -O https://raw.githubusercontent.com/rancher/helm3-charts/56b622f519728378abeddfe95074f1b87ab73b1e/charts/vsphere-cpi/taints.sh +``` + +Or: + +``` +wget https://raw.githubusercontent.com/rancher/helm3-charts/56b622f519728378abeddfe95074f1b87ab73b1e/charts/vsphere-cpi/taints.sh +chmod +x taints.sh +./taints.sh +``` + +Once all nodes are tainted by the running the script, launch the Helm vSphere CPI chart. + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where the vSphere CPI chart will be installed and click **Explore**. +1. Click **Apps & Marketplace > Charts**. +1. Click **vSphere CPI**.. +1. Click **Install**. +1. Fill out the required vCenter details and click **Install**. + +vSphere CPI initializes all nodes with ProviderID, which is needed by the vSphere CSI driver. + +Check if all nodes are initialized with the ProviderID with the following command: + +``` +kubectl describe nodes | grep "ProviderID" +``` + +### 2. Install the CSI driver + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where the vSphere CSI chart will be installed and click **Explore**. +1. Click **Apps & Marketplace > Charts**. +1. Click **vSphere CSI**.. +1. Click **Install**. +1. Fill out the required vCenter details and click **Install**. +1. Check **Customize Helm options before install** and click **Next**. +1. On the **Features** tab, check **Enable CSI Migration**. +1. Optionally, go to the **Storage** tab and set up a datastore. This chart creates a StorageClass with the `csi.vsphere.vmware.com` as the provisioner. You can provide the URL of the datastore to be used for CSI volume provisioning while creating this StorageClass. The datastore URL can be found in the vSphere client by selecting the datastore and going to the Summary tab. Fill out the details for the StorageClass. +1. Click **Install**. + +### 3. Edit the cluster to enable CSI migration feature flags + +1. While editing the cluster, if the Kubernetes version is less than 1.19, select Kubernetes version 1.19 or higher from the **Kubernetes Version** dropdown. +2. For enabling feature flags, click on "Edit as YAML", and add the following under kube-controller and kubelet: + + ```yaml + extra_args: + feature-gates: "CSIMigration=true,CSIMigrationvSphere=true" + ``` + +### 4. Drain worker nodes + +Worker nodes must be drained during the upgrade before changing the kubelet and kube-controller-manager args. + + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you will drain worker nodes and click **⋮ > Edit Config**. +1. In the **Advanced Options** section, set the field **Maximum Worker Nodes Unavailable** to 1. +1. To drain the nodes during upgrade, select **Drain Nodes > Yes**. +1. Set **Force** and **Delete Local Data** to **true**. +1. Click **Save** to upgrade the cluster. 
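After the upgrade completes, one way to spot-check the migration is to look for the annotation that upstream Kubernetes CSI migration adds to PersistentVolumes. This is only a sketch and assumes the standard `pv.kubernetes.io/migrated-to` annotation is present in your Kubernetes version:

```
kubectl describe pv | grep "pv.kubernetes.io/migrated-to"
```

Migrated in-tree volumes should report `csi.vsphere.vmware.com` as the value.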
diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/vsphere.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/vsphere/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/cloud-providers/vsphere/vsphere.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/custom-nodes/agent-options/agent-options.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/custom-nodes/custom-nodes.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/azure-machine-config.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/azure-machine-config.md new file mode 100644 index 0000000000..4c5304eb1e --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/azure-machine-config.md @@ -0,0 +1,121 @@ +--- +title: Azure Machine Configuration +weight: 2 +--- + +For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) + +### Environment + +Microsoft provides multiple [clouds](https://docs.microsoft.com/en-us/cli/azure/cloud?view=azure-cli-latest) for compliance with regional laws, which are available for your use: + +- AzurePublicCloud +- AzureGermanCloud +- AzureChinaCloud +- AzureUSGovernmentCloud + +### Location + +Configure the cluster and node [location](https://docs.microsoft.com/en-us/azure/virtual-machines/regions). + +### Resource Group + +A resource group is a container that holds related resources for an Azure solution. The resource group can include all the resources for the solution, or only those resources that you want to manage as a group. You decide how you want to allocate resources to resource groups based on what makes the most sense for your organization. Generally, add resources that share the same lifecycle to the same resource group so you can easily deploy, update, and delete them as a group. + +Use an existing resource group or enter a resource group name and one will be created for you. + +For information on managing resource groups, see the [Azure documentation.](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) + +### Availability Set (unmanaged) + +Name or ID of an existing [availability set](https://docs.microsoft.com/en-us/azure/virtual-machines/availability-set-overview) to add the VM to. 
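If you prefer to create the resource group and availability set ahead of time rather than letting them be generated, a sketch using the Azure CLI (the `docker-machine` names and `westus` location are placeholders):

```
az group create --name docker-machine --location westus
az vm availability-set create --resource-group docker-machine --name docker-machine
```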
+ +### Image + +The name of the operating system image provided as an ARM resource identifier. Requires using managed disk. + +### VM Size + +Choose a size for each VM in the node pool. For details about each VM size, see [this page.](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) + +## Advanced Options + +### Fault Domain Count + +Fault domains define the group of virtual machines that share a common power source and network switch. If the availability set has already been created, the fault domain count will be ignored. + +For more information on fault domains, see [refer here](https://docs.microsoft.com/en-us/azure/virtual-machines/availability-set-overview#how-do-availability-sets-work). + +### Update Domain Count + +Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time. If the availability set has already been created, the update domain count will be ignored. + +For more information on update domains, see [refer here](https://docs.microsoft.com/en-us/azure/virtual-machines/availability-set-overview#how-do-availability-sets-work). + +### Purchase Plan + +Some VM images in the Azure Marketplace require a plan. If applicable, select a purchase plan, formatted as `publisher:product:plan`, to use with your chosen image. + +### Subnet + +The name of the subnet when creating a new VNet or referencing an existing one. + +Default: `docker-machine` + +### Subnet Prefix + +The subnet IP address prefix to use when creating a new VNet in CIDR format. + +Default: `192.168.0.0/16` + +### Virtual Network + +The [virtual network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview) to use or create if one does not exist. Formatted as `[resourcegroup:]name`. + +### Public IP Options + +#### No Public IP + +Do not allocate a public IP address. + +#### Static Public IP + +Allocate a static public IP address. + +### Use Private IP + +Use a static private IP address. + +### Private IP Address + +Configure a static private IP address to use. + +### Network Security Group + +The [network security group](https://docs.microsoft.com/en-us/azure/virtual-network/network-security-groups-overview) to use. All nodes using this template will use the supplied network security group. If no network security group is provided, a new one will be created for each node. + +### DNS Label + +A unique DNS name label for the public IP address. + +### Storage Type + +The [storage account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview) type to use with your VMs. Options include Standard LRS, Standard ZRS, Standard GRS, Standard RAGRS, and Premium LRS. + +### Use Managed Disks + +[Azure managed disks](https://docs.microsoft.com/en-us/azure/virtual-machines/managed-disks-overview) are block-level storage volumes that are managed by Azure and used with Azure Virtual Machines. Managed disks are designed for 99.999% availability. Managed disks achieve this by providing you with three replicas of your data, allowing for high durability. + +### Managed Disk Size + +The size in GB for the disk for each node. + +### SSH Username + +The username used to create an SSH connection to your nodes. 
+ +### Open Port + +Opens inbound traffic on specified ports. When using an existing Network Security Group, Open Ports are ignored. + +Default: `2379/tcp, 2380/tcp, 6443/tcp, 9796/tcp, 10250/tcp, 10251/tcp, 10252/tcp, 10256/tcp` and `8472/udp, 4789/udp` \ No newline at end of file diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md new file mode 100644 index 0000000000..e7b00c9363 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure-node-template-config/azure-node-template-config.md @@ -0,0 +1,19 @@ +--- +title: Azure Node Template Configuration +weight: 1 +--- + +For more information about Azure, refer to the official [Azure documentation.](https://docs.microsoft.com/en-us/azure/?product=featured) + +Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. + +- **Placement** sets the geographical region where your cluster is hosted and other location metadata. +- **Network** configures the networking used in your cluster. +- **Instance** customizes your VM configuration. + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. +- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) \ No newline at end of file diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure.md new file mode 100644 index 0000000000..37fda12e71 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/azure/azure.md @@ -0,0 +1,142 @@ +--- +title: Creating an Azure Cluster +shortTitle: Azure +weight: 2220 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install an [RKE]({{}}/rke/latest/en/) Kubernetes cluster in Azure through Rancher. + +First, you will set up your Azure cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in Azure. + +Then you will create an Azure cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +>**Warning:** When the Rancher RKE cluster is running in Azure and has an Azure load balancer in front, the outbound flow will fail. 
The workaround for this problem is as follows: + +> - Terminate the SSL/TLS on the internal load balancer +> - Use the L7 load balancer + +> For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +For more information on configuring the Kubernetes cluster that Rancher will install on the Azure nodes, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) + +For more information on configuring Azure node templates, refer to the [Azure node template configuration reference.](./azure-node-template-config) + +- [Preparation in Azure](#preparation-in-azure) +- [Creating an Azure Cluster](#creating-an-azure-cluster) + +# Preparation in Azure + +Before creating a node template in Rancher using a cloud infrastructure such as Azure, we must configure Rancher to allow the manipulation of resources in an Azure subscription. + +To do this, we will first create a new Azure **service principal (SP)** in Azure **Active Directory (AD)**, which, in Azure, is an application user who has permission to manage Azure resources. + +The following is a template `az cli` script that you have to run for creating an service principal, where you have to enter your SP name, role, and scope: + +``` +az ad sp create-for-rbac \ + --name="" \ + --role="Contributor" \ + --scopes="/subscriptions/" +``` + +The creation of this service principal returns three pieces of identification information, *The application ID, also called the client ID*, and *The client secret*. This information will be used when you create a node template for Azure. + +# Creating an Azure Cluster + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Azure**. +1. Enter your Azure credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for Azure will allow Rancher to provision new nodes in Azure. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Add Template**. +1. Click **Azure**. +1. Fill out a node template for Azure. For help filling out the form, refer to [Azure Node Template Configuration.](./azure-node-template-config) + +### 3. Create a cluster with node pools using the node template + +Use Rancher to create a Kubernetes cluster in Azure. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Azure**. +1. Enter a **Cluster Name**. +1. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. 
For more information about node pools, including best practices, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) +1. In the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Azure**. +1. Enter your Azure credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create your cluster + +Use Rancher to create a Kubernetes cluster in Azure. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **Azure**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. For each machine pool, define the machine configuration. Refer to the [Azure machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/azure/azure-machine-config/) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md new file mode 100644 index 0000000000..28d7e799f3 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/digital-ocean.md @@ -0,0 +1,103 @@ +--- +title: Creating a DigitalOcean Cluster +shortTitle: DigitalOcean +weight: 2215 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in DigitalOcean. + +First, you will set up your DigitalOcean cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in DigitalOcean. + +Then you will create a DigitalOcean cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials](#2-create-a-node-template-with-your-cloud-credentials) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **DigitalOcean**. +1. Enter your Digital Ocean credentials. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials + +Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for DigitalOcean will allow Rancher to provision new nodes in DigitalOcean. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates**. +1. Click **Add Template**. +1. Click **DigitalOcean**. +1. Fill out a node template for DigitalOcean. For help filling out the form, refer to [DigitalOcean Node Template Configuration.](./do-node-template-config) + +### 3. Create a cluster with node pools using the node template + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **DigitalOcean**. +1. Enter a **Cluster Name**. +1. 
Add one or more node pools to your cluster. Add one or more node pools to your cluster. Each node pool uses a node template to provision new nodes. For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) +1. **In the Cluster Configuration** section, choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. To see more cluster options, click on **Show advanced options**. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **DigitalOcean**. +1. Enter your Digital Ocean credentials. +1. Click **Create**. + +### 2. Create your cluster + +Use Rancher to create a Kubernetes cluster in DigitalOcean. + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **DigitalOcean**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. For each machine pool, define the machine configuration. Refer to the [DigitalOcean machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. + +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces +# Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. 
In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/do-machine-config.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/do-machine-config.md new file mode 100644 index 0000000000..0ae2f8d831 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-machine-config/do-machine-config.md @@ -0,0 +1,34 @@ +--- +title: DigitalOcean Machine Configuration +weight: 2 +--- + +For more details about DigitalOcean, Droplets, refer to the [official documentation](https://docs.digitalocean.com/products/compute/). + +### Region + +Configure the [region](https://docs.digitalocean.com/products/app-platform/concepts/region/) where Droplets are created. + +### Size + +Configure the [size](https://docs.digitalocean.com/products/droplets/resources/choose-plan/) of Droplets. + +### OS Image + +Configure the operating system [image](https://docs.digitalocean.com/products/images/) Droplets are created from. + +### Monitoring + +Enable the DigitalOcean agent for additional [monitoring](https://docs.digitalocean.com/products/monitoring/). + +### IPv6 + +Enable IPv6 for Droplets. + +### Private Networking + +Enable private networking for Droplets. + +### Droplet Tags + +Apply a tag (label) to a Droplet. Tags may only contain letters, numbers, colons, dashes, and underscores. For example, `my_server`. diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md new file mode 100644 index 0000000000..84171f2729 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/digital-ocean/do-node-template-config/do-node-template-config.md @@ -0,0 +1,19 @@ +--- +title: DigitalOcean Node Template Configuration +weight: 1 +---- + +Account access information is stored as a cloud credential. Cloud credentials are stored as Kubernetes secrets. Multiple node templates can use the same cloud credential. You can use an existing cloud credential or create a new one. + +### Droplet Options + +The **Droplet Options** provision your cluster's geographical region and specifications. 
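If you are unsure which values are valid, the DigitalOcean CLI (`doctl`, which Rancher does not require) can list the available region and size slugs. A sketch, assuming `doctl` is installed and authenticated:

```
doctl compute region list
doctl compute size list
```

The slugs shown in these listings correspond to the region and size choices in the **Droplet Options** section.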
+ +### Docker Daemon + +The [Docker daemon](https://docs.docker.com/engine/docker-overview/#the-docker-daemon) configuration options include: + +- **Labels:** For information on labels, refer to the [Docker object label documentation.](https://docs.docker.com/config/labels-custom-metadata/) +- **Docker Engine Install URL:** Determines what Docker version will be installed on the instance. +- **Registry mirrors:** Docker Registry mirror to be used by the Docker daemon +- **Other advanced options:** Refer to the [Docker daemon option reference](https://docs.docker.com/engine/reference/commandline/dockerd/) \ No newline at end of file diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/ec2-machine-config.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/ec2-machine-config.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-node-template-config/ec2-node-template-config.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md new file mode 100644 index 0000000000..ee0fcf1169 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/ec2/ec2.md @@ -0,0 +1,279 @@ +--- +title: Creating an Amazon EC2 Cluster +shortTitle: Amazon EC2 +description: Learn the prerequisites and steps required in order for you to create an Amazon EC2 cluster using Rancher +weight: 2210 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to use Rancher to install an [RKE](https://rancher.com/docs/rke/latest/en/) Kubernetes cluster in Amazon EC2. + +First, you will set up your EC2 cloud credentials in Rancher. Then you will use your cloud credentials to create a node template, which Rancher will use to provision new nodes in EC2. + +Then you will create an EC2 cluster in Rancher, and when configuring the new cluster, you will define node pools for it. Each node pool will have a Kubernetes role of etcd, controlplane, or worker. Rancher will install RKE Kubernetes on the new nodes, and it will set up each node with the Kubernetes role defined by the node pool. + +### Prerequisites + +- **AWS EC2 Access Key and Secret Key** that will be used to create the instances. See [Amazon Documentation: Creating Access Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey) how to create an Access Key and Secret Key. +- **IAM Policy created** to add to the user of the Access Key And Secret Key. 
See [Amazon Documentation: Creating IAM Policies (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start) how to create an IAM policy. See our three example JSON policies below: + - [Example IAM Policy](#example-iam-policy) + - [Example IAM Policy with PassRole](#example-iam-policy-with-passrole) (needed if you want to use [Kubernetes Cloud Provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers) or want to pass an IAM Profile to an instance) + - [Example IAM Policy to allow encrypted EBS volumes](#example-iam-policy-to-allow-encrypted-ebs-volumes) +- **IAM Policy added as Permission** to the user. See [Amazon Documentation: Adding Permissions to a User (Console)](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_change-permissions.html#users_change_permissions-add-console) how to attach it to an user. + +# Creating an EC2 Cluster + +The steps to create a cluster differ based on your Rancher version. + + + + +1. [Create your cloud credentials](#1-create-your-cloud-credentials) +2. [Create a node template with your cloud credentials and information from EC2](#2-create-a-node-template-with-your-cloud-credentials-and-information-from-ec2) +3. [Create a cluster with node pools using the node template](#3-create-a-cluster-with-node-pools-using-the-node-template) + +### 1. Create your cloud credentials + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Amazon**. +1. Enter a name for the cloud credential. +1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key**. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create a node template with your cloud credentials and information from EC2 + +Creating a [node template]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/#node-templates) for EC2 will allow Rancher to provision new nodes in EC2. Node templates can be reused for other clusters. + +1. Click **☰ > Cluster Management**. +1. Click **RKE1 Configuration > Node Templates** +1. Click **Add Template**. +1. Fill out a node template for EC2. For help filling out the form, refer to [EC2 Node Template Configuration.](./ec2-node-template-config) +1. Click **Create**. + + >**Note:** If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements]({{}}/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. + +### 3. Create a cluster with node pools using the node template + +Add one or more node pools to your cluster. For more information about node pools, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Click **Amazon EC2**. +1. Create a node pool for each Kubernetes role. For each node pool, choose a node template that you created. 
For more information about node pools, including best practices for assigning Kubernetes roles to them, see [this section.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools) +1. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Use **Cluster Options** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. Refer to [Selecting Cloud Providers]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/cloud-providers/) to configure the Kubernetes Cloud Provider. For help configuring the cluster, refer to the [RKE cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options) + + >**Note:** If you want to use the [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) feature, there are additional [requirements]({{}}/rke//latest/en/config-options/dual-stack#requirements) that must be taken into consideration. +1. Click **Create**. + + + + +### 1. Create your cloud credentials + +If you already have a set of cloud credentials to use, skip this section. + +1. Click **☰ > Cluster Management**. +1. Click **Cloud Credentials**. +1. Click **Create**. +1. Click **Amazon**. +1. Enter a name for the cloud credential. +1. In the **Default Region** field, select the AWS region where your cluster nodes will be located. +1. Enter your AWS EC2 **Access Key** and **Secret Key**. +1. Click **Create**. + +**Result:** You have created the cloud credentials that will be used to provision nodes in your cluster. You can reuse these credentials for other node templates, or in other clusters. + +### 2. Create your cluster + +1. Click **☰ > Cluster Management**. +1. On the **Clusters** page, click **Create**. +1. Toggle the switch to **RKE2/K3s**. +1. Click **Amazon EC2**. +1. Select a **Cloud Credential**, if more than one exists. Otherwise, it's preselected. +1. Enter a **Cluster Name**. +1. Create a machine pool for each Kubernetes role. Refer to the [best practices]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools#node-roles-in-rke2) for recommendations on role assignments and counts. + 1. For each machine pool, define the machine configuration. Refer to [the EC2 machine configuration reference]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/ec2-machine-config/) for information on configuration options. +1. Use the **Cluster Configuration** to choose the version of Kubernetes that will be installed, what network provider will be used and if you want to enable project network isolation. For help configuring the cluster, refer to the [RKE2 cluster configuration reference.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/rke2-config-reference/) +1. Use **Member Roles** to configure user authorization for the cluster. Click **Add Member** to add users that can access the cluster. Use the **Role** drop-down to set permissions for each user. +1. Click **Create**. + + + + +**Result:** + +Your cluster is created and assigned a state of **Provisioning**. Rancher is standing up your cluster. + +You can access your cluster after its state is updated to **Active**. 
+ +**Active** clusters are assigned two Projects: + +- `Default`, containing the `default` namespace +- `System`, containing the `cattle-system`, `ingress-nginx`, `kube-public`, and `kube-system` namespaces + +### Optional Next Steps + +After creating your cluster, you can access it through the Rancher UI. As a best practice, we recommend setting up these alternate ways of accessing your cluster: + +- **Access your cluster with the kubectl CLI:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#accessing-clusters-with-kubectl-on-your-workstation) to access clusters with kubectl on your workstation. In this case, you will be authenticated through the Rancher server’s authentication proxy, then Rancher will connect you to the downstream cluster. This method lets you manage the cluster without the Rancher UI. +- **Access your cluster with the kubectl CLI, using the authorized cluster endpoint:** Follow [these steps]({{}}/rancher/v2.6/en/cluster-admin/cluster-access/kubectl/#authenticating-directly-with-a-downstream-cluster) to access your cluster with kubectl directly, without authenticating through Rancher. We recommend setting up this alternative method to access your cluster so that in case you can’t connect to Rancher, you can still access the cluster. + +# IAM Policies + +### Example IAM Policy + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + "ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` + +### Example IAM Policy with PassRole + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:Describe*", + "ec2:ImportKeyPair", + "ec2:CreateKeyPair", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteKeyPair", + "ec2:ModifyInstanceMetadataOptions" + ], + "Resource": "*" + }, + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": [ + "iam:PassRole", + "ec2:RunInstances" + ], + "Resource": [ + "arn:aws:ec2:REGION::image/ami-*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:placement-group/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:subnet/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:key-pair/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:network-interface/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:security-group/*", + "arn:aws:iam::AWS_ACCOUNT_ID:role/YOUR_ROLE_NAME" + ] + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": [ + 
"ec2:RebootInstances", + "ec2:TerminateInstances", + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Resource": "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*" + } + ] +} +``` +### Example IAM Policy to allow encrypted EBS volumes +``` json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "kms:Decrypt", + "kms:GenerateDataKeyWithoutPlaintext", + "kms:Encrypt", + "kms:DescribeKey", + "kms:CreateGrant", + "ec2:DetachVolume", + "ec2:AttachVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:CreateSnapshot" + ], + "Resource": [ + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:volume/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:instance/*", + "arn:aws:ec2:REGION:AWS_ACCOUNT_ID:snapshot/*", + "arn:aws:kms:REGION:AWS_ACCOUNT_ID:key/KMS_KEY_ID" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Resource": "*" + } + ] +} +``` diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/node-pools.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/node-pools.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix-node-template-config/nutanix-node-template-config.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix-node-template-config/nutanix-node-template-config.md new file mode 100644 index 0000000000..a4ae10d8ec --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix-node-template-config/nutanix-node-template-config.md @@ -0,0 +1,64 @@ +--- +title: Nutanix Node Template Configuration +weight: 2 +--- + +- [Account Access](#account-access) +- [Scheduling](#scheduling) +- [Instance Options](#instance-options) +- [Networks](#networks) +- [VM Categories](#vm-categories) +- [cloud-init](#cloud-init) + +# Account Access + +| Parameter | Required | Description | Default +|:-----------------------------|:--------:|:-----------------------------------------------------------------|:----- +| Management Endpoint | ✓ | Hostname/IP address of Prism Central | +| Username | ✓ | Username of the Prism Central user | +| Password | ✓ | Password of the Prism Central user | +| Allow insecure communication | | Set to true to allow insecure SSL communication to Prism Central | False + +# Scheduling + +Choose what Nutanix cluster the virtual machine will be scheduled to. + +| Parameter | Required | Description +|:----------|:--------:|:---------------------------------------------------------------------------- +| Cluster | ✓ | Name of the Nutanix cluster where the VM should be deployed (case sensitive) + +# Instance Options + +In the **Instance Options** section, configure the number of vCPUs, memory, and disk size for the VMs created by this template. 
+ +| Parameter | Required | Description | Default +|:---------------------|:--------:|:--------------------------------------------------------------------------------------------|:------- +| CPUs | | Number of vCPUs allocated to the VM (cores) | 2 +| Memory | | Amount of RAM allocated to the VM (MB) | 2 GB +| Template Image | ✓ | Name of the Disk Image template to clone as the VM's primary disk (must support cloud-init) | +| VM Disk Size | | New size of the VM's primary disk (in GiB) | +| Additional Disk Size | | Size of an additional disk to add to the VM (in GiB) | +| Storage Container | | Storage container _UUID_ in which to provision an additional disk | +| Cloud Config YAML | | Cloud-init to provide to the VM (will be patched with Rancher root user) | +| Network | ✓ | Name(s) of the network(s) to attach to the VM | +| VM Categories | | Name(s) of any categories to be applied to the VM | + +The VM may use any modern Linux operating system that is configured with support for [cloud-init](https://cloudinit.readthedocs.io/en/latest/) using the [Config Drive v2 datasource](https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html). + +# Networks + +The node template allows a VM to be provisioned with multiple networks. In the **Network** field, you can click **Add** to add any networks available to you in AOS. + +# VM Categories + +A category is a grouping of entities into a key value pair. Typically, VMs are assigned to a category based on some criteria. Policies can then be tied to those entities that are assigned (grouped by) a specific category value. + +# cloud-init + +[Cloud-init](https://cloudinit.readthedocs.io/en/latest/) allows you to initialize your nodes by applying configuration on the first boot. This may involve things such as creating users or authorizing SSH keys. + +To make use of cloud-init initialization, paste a cloud config using valid YAML syntax into the **Cloud Config YAML** field. Refer to the [cloud-init documentation](https://cloudinit.readthedocs.io/en/latest/topics/examples.html) for a commented set of examples of supported cloud config directives. + +Note that cloud-init based network configuration is not recommended and only supported via user data `runcmd` rather than by NoCloud or other network configuration datasources. + +Nutanix IP Address Management (IPAM) or another DHCP service is recommended. 
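For illustration only, here is a minimal sketch of the kind of cloud config that could be pasted into the **Cloud Config YAML** field. The user name, SSH key, and first-boot command below are placeholders, not values required by the node template, and Rancher will still patch in its own root user as noted above.

```yaml
#cloud-config
# Minimal sketch of a cloud config for the Cloud Config YAML field.
# The user name, key, and command below are placeholders.
users:
  - name: demo-admin                      # hypothetical additional user
    groups: sudo
    shell: /bin/bash
    sudo: ["ALL=(ALL) NOPASSWD:ALL"]
    ssh_authorized_keys:
      - ssh-ed25519 AAAA...replace-with-your-public-key...
write_files:
  - path: /etc/motd
    content: |
      Provisioned by Rancher from a Nutanix node template.
runcmd:
  - timedatectl set-timezone UTC          # example first-boot command
```

Because cloud-init network configuration is only supported here through `runcmd`, keep any network-related commands in that section and rely on IPAM or DHCP for addressing, as recommended above.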
diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/nutanix.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/provisioning-nutanix-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/provisioning-nutanix-clusters/provisioning-nutanix-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/nutanix/provisioning-nutanix-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/nutanix/provisioning-nutanix-clusters/provisioning-nutanix-clusters.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-a-vm-template/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-a-vm-template/creating-a-vm-template.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-a-vm-template/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-a-vm-template/creating-a-vm-template.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/creating-credentials/creating-credentials.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/provisioning-vsphere-clusters/provisioning-vsphere-clusters.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere-node-template-config/vsphere-node-template-config.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/vsphere/_index.md rename to 
versioned_docs/version-2.6/cluster-provisioning/rke-clusters/node-pools/vsphere/vsphere.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/options/options.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/rancher-agents/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/rancher-agents/rancher-agents.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/rke-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/rke-clusters.md diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/azure-storageclass/azure-storageclass.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md new file mode 100644 index 0000000000..ee075c394d --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/host-gateway-requirements/host-gateway-requirements.md @@ -0,0 +1,37 @@ +--- +title: Networking Requirements for Host Gateway (L2bridge) +weight: 1000 +--- + +This section describes how to configure custom Windows clusters that are using *Host Gateway (L2bridge)* mode. + +### Disabling Private IP Address Checks + +If you are using *Host Gateway (L2bridge)* mode and hosting your nodes on any of the cloud services listed below, you must disable the private IP address checks for both your Linux or Windows hosts on startup. To disable this check for each node, follow the directions provided by each service below. 
+ +Service | Directions to disable private IP address checks +--------|------------------------------------------------ +Amazon EC2 | [Disabling Source/Destination Checks](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) +Google GCE | [Enabling IP Forwarding for Instances](https://cloud.google.com/vpc/docs/using-routes#canipforward) (By default, a VM cannot forward a packet originated by another VM) +Azure VM | [Enable or Disable IP Forwarding](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-network-interface#enable-or-disable-ip-forwarding) + +### Cloud-hosted VM Routes Configuration + +If you are using the [**Host Gateway (L2bridge)**](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#host-gw) backend of Flannel, all containers on the same node belong to a private subnet, and traffic routes from a subnet on one node to a subnet on another node through the host network. + +- When worker nodes are provisioned on AWS, virtualization clusters, or bare metal servers, make sure they belong to the same layer 2 subnet. If the nodes don't belong to the same layer 2 subnet, `host-gw` networking will not work. + +- When worker nodes are provisioned on GCE or Azure, they are not on the same layer 2 subnet. Nodes on GCE and Azure belong to a routable layer 3 network. Follow the instructions below to configure GCE and Azure so that the cloud network knows how to route the host subnets on each node. + +To configure host subnet routing on GCE or Azure, first run the following command to find out the host subnets on each worker node: + +```bash +kubectl get nodes -o custom-columns=nodeName:.metadata.name,nodeIP:status.addresses[0].address,routeDestination:.spec.podCIDR +``` + +Then follow the instructions for each cloud provider to configure routing rules for each node: + +Service | Instructions +--------|------------- +Google GCE | For GCE, add a static route for each node: [Adding a Static Route](https://cloud.google.com/vpc/docs/using-routes#addingroute). +Azure VM | For Azure, create a routing table: [Custom Routes: User-defined](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-udr-overview#user-defined). diff --git a/content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md similarity index 100% rename from content/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/_index.md rename to versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/windows-clusters.md diff --git a/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/windows-parity.md b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/windows-parity.md new file mode 100644 index 0000000000..658b48e8b1 --- /dev/null +++ b/versioned_docs/version-2.6/cluster-provisioning/rke-clusters/windows-clusters/windows-parity/windows-parity.md @@ -0,0 +1,47 @@ +--- +title: Windows and Linux Cluster Feature Parity +weight: 3 +--- + +Windows clusters do not share the same feature support as Linux clusters. 
+ +The following chart describes the feature parity between Windows and Linux on Rancher: + +**Component** | **Linux** | **Windows** +--- | --- | --- +**Distributions** | | +RKE | Supported | Supported +RKE2 | Supported | Supported +K3S | Supported | Not Supported +EKS | Supported | Not Supported +GKE | Supported | Not Supported +AKS | Supported | Not Supported +**Rancher Components** | | +Server | Supported | Not Supported +Agent | Supported | Supported +Fleet | Supported | Supported +EKS Operator | Supported | Not Supported +AKS Operator | Not Supported | Not Supported +GKE Operator | Not Supported | Not Supported +Alerting v1 | Supported | Supported +Monitoring v1 | Supported | Supported +Logging v1 | Supported | Supported +Monitoring/Alerting v2 | Supported | Supported +Logging v2 | Supported | Supported +Istio | Supported | Not Supported +Catalog v1 | Supported | Not Supported +Catalog v2 | Supported | Not Supported +OPA | Supported | Not Supported +Longhorn | Supported | Not Supported +CIS Scans | Supported | Not Supported +Backup/Restore Operator | Supported | Not Supported +**CNI / Add-ons** | | +Flannel | Supported | Supported +Canal | Supported | Not Supported +Calico | Supported | Supported (RKE2 Only) +Cilium | Supported | Not Supported +Multus | Supported | Not Supported +Traefik | Supported | Not Supported +NGINX Ingress | Supported | Not Supported + +For updated information on feature support, you may visit [rancher/windows](https://github.com/rancher/windows) on GitHub. diff --git a/content/rancher/v2.6/en/contributing/_index.md b/versioned_docs/version-2.6/contributing/contributing.md similarity index 100% rename from content/rancher/v2.6/en/contributing/_index.md rename to versioned_docs/version-2.6/contributing/contributing.md diff --git a/content/rancher/v2.6/en/deploy-across-clusters/_index.md b/versioned_docs/version-2.6/deploy-across-clusters/deploy-across-clusters.md similarity index 100% rename from content/rancher/v2.6/en/deploy-across-clusters/_index.md rename to versioned_docs/version-2.6/deploy-across-clusters/deploy-across-clusters.md diff --git a/content/rancher/v2.5/en/deploy-across-clusters/fleet/architecture/_index.md b/versioned_docs/version-2.6/deploy-across-clusters/fleet/architecture/architecture.md similarity index 100% rename from content/rancher/v2.5/en/deploy-across-clusters/fleet/architecture/_index.md rename to versioned_docs/version-2.6/deploy-across-clusters/fleet/architecture/architecture.md diff --git a/versioned_docs/version-2.6/deploy-across-clusters/fleet/fleet.md b/versioned_docs/version-2.6/deploy-across-clusters/fleet/fleet.md new file mode 100644 index 0000000000..c1502c7166 --- /dev/null +++ b/versioned_docs/version-2.6/deploy-across-clusters/fleet/fleet.md @@ -0,0 +1,76 @@ +--- +title: Fleet - GitOps at Scale +weight: 1 +--- + +Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight enough that it works great for a [single cluster](https://fleet.rancher.io/single-cluster-install/) too, but it really shines when you get to a [large scale.](https://fleet.rancher.io/multi-cluster-install/) By large scale we mean either a lot of clusters, a lot of deployments, or a lot of teams in a single organization. + +Fleet is a separate project from Rancher, and can be installed on any Kubernetes cluster with Helm. 
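Under the hood, each deployment that Fleet manages is described by a `GitRepo` custom resource in the cluster where Fleet runs. The snippet below is only a rough sketch of what such a resource looks like; the repository URL, branch, path, and cluster labels are placeholder values, and the Fleet documentation linked above remains the authoritative reference for the schema.

```yaml
# Sketch of a Fleet GitRepo resource (placeholder values throughout).
apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: sample-apps             # hypothetical name
  namespace: fleet-default      # workspace that contains the target clusters
spec:
  repo: https://github.com/example-org/fleet-examples   # placeholder repository
  branch: main
  paths:
    - simple                    # directory containing manifests or a Helm chart
  targets:
    - name: dev-clusters
      clusterSelector:
        matchLabels:
          env: dev              # placeholder label on the target clusters
```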
+ +- [Architecture](#architecture) +- [Accessing Fleet in the Rancher UI](#accessing-fleet-in-the-rancher-ui) +- [Windows Support](#windows-support) +- [GitHub Repository](#github-repository) +- [Using Fleet Behind a Proxy](#using-fleet-behind-a-proxy) +- [Helm Chart Dependencies](#helm-chart-dependencies) +- [Troubleshooting](#troubleshooting) +- [Documentation](#documentation) + +# Architecture + +For information about how Fleet works, see [this page.](./architecture) + +# Accessing Fleet in the Rancher UI + +Fleet comes preinstalled in Rancher and is managed by the **Continuous Delivery** option in the Rancher UI. For additional information on Continuous Delivery and other Fleet troubleshooting tips, refer [here](https://fleet.rancher.io/troubleshooting/). + +Users can leverage Continuous Delivery to deploy the applications defined in a Git repository to their Kubernetes clusters without any manual operation, following the **GitOps** practice. + +Follow the steps below to access Continuous Delivery in the Rancher UI: + +1. Click **☰ > Continuous Delivery**. + +1. Select your namespace at the top of the menu, noting the following: + - By default, `fleet-default` is selected, which includes all downstream clusters that are registered through Rancher. + - You may switch to `fleet-local`, which only contains the `local` cluster, or you may create your own workspace to which you may assign and move clusters. + - You can then manage clusters by clicking on **Clusters** on the left navigation bar. + +1. Click on **Gitrepos** on the left navigation bar to deploy the gitrepo into your clusters in the current workspace. + +1. Select your [git repository](https://fleet.rancher.io/gitrepo-add/) and [target clusters/cluster group](https://fleet.rancher.io/gitrepo-structure/). You can also create the cluster group in the UI by clicking on **Cluster Groups** from the left navigation bar. + +1. Once the gitrepo is deployed, you can monitor the application through the Rancher UI. + +# Windows Support + +For details on support for clusters with Windows nodes, see [this page.](./windows) + + +# GitHub Repository + +The Fleet Helm charts are available [here.](https://github.com/rancher/fleet/releases/latest) + + +# Using Fleet Behind a Proxy + +For details on using Fleet behind a proxy, see [this page.](./proxy) + +# Helm Chart Dependencies + +In order for Helm charts with dependencies to deploy successfully, you must run a manual command (as listed below), because it is up to the user to fulfill the dependency list. If you do not do this and proceed to clone your repository and run `helm install`, your installation will fail because the dependencies will be missing. + +The Helm chart in the git repository must include its dependencies in the charts subdirectory. You must either manually run `helm dependencies update $chart` OR run `helm dependencies build $chart` locally, then commit the complete charts directory to your git repository. Note that you should replace `$chart` with the applicable chart path. + +# Troubleshooting + +--- +* **Known Issue:** clientSecretName and helmSecretName secrets for Fleet gitrepos are not included in the backup or restore created by the [backup-restore-operator]({{}}/rancher/v2.6/en/backups/back-up-rancher/#1-install-the-rancher-backups-operator). We will update the community once a permanent solution is in place.
+ +* **Temporary Workaround:**
    +By default, user-defined secrets are not backed up in Fleet. It is necessary to recreate secrets if performing a disaster recovery restore or migration of Rancher into a fresh cluster. To modify the resourceSet to include extra resources you want to back up, refer to the docs [here](https://github.com/rancher/backup-restore-operator#user-flow). + +--- + +# Documentation + +The Fleet documentation is at [https://fleet.rancher.io/.](https://fleet.rancher.io/) diff --git a/versioned_docs/version-2.6/deploy-across-clusters/fleet/proxy/proxy.md b/versioned_docs/version-2.6/deploy-across-clusters/fleet/proxy/proxy.md new file mode 100644 index 0000000000..4e41e1115f --- /dev/null +++ b/versioned_docs/version-2.6/deploy-across-clusters/fleet/proxy/proxy.md @@ -0,0 +1,55 @@ +--- +title: Using Fleet Behind a Proxy +weight: 3 +--- + +In this section, you'll learn how to enable Fleet in a setup that has a Rancher server with a public IP and a Kubernetes cluster that has no public IP, but is configured to use a proxy. + +Rancher does not establish connections with registered downstream clusters. The Rancher agent deployed on the downstream cluster must be able to establish the connection with Rancher. + +To set up Fleet to work behind a proxy, you will need to set the **Agent Environment Variables** for the downstream cluster. These are cluster-level configuration options. + +Through the Rancher UI, you can configure these environment variables for any cluster type, including registered and custom clusters. The variables can be added while editing an existing cluster or while provisioning a new cluster. + +For public downstream clusters, it is sufficient to [set the required environment variables in the Rancher UI.](#setting-environment-variables-in-the-rancher-ui) + +For private nodes or private clusters, the environment variables need to be set on the nodes themselves. Then the environment variables are configured from the Rancher UI, typically when provisioning a custom cluster or when registering the private cluster. For an example of how to set the environment variables on an Ubuntu node in a K3s Kubernetes cluster, see [this section.](#setting-environment-variables-on-private-nodes) + +# Required Environment Variables + +When adding the Fleet agent environment variables for the proxy, replace `PROXY_PRIVATE_IP` with your private proxy IP. + +| Variable Name | Value | +|------------------|--------| +| `HTTP_PROXY` | http://PROXY_PRIVATE_IP:8888 | +| `HTTPS_PROXY` | http://PROXY_PRIVATE_IP:8888 | +| `NO_PROXY` | 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local | + +# Setting Environment Variables in the Rancher UI + +To add the environment variables to an existing cluster: + +1. Click **☰ > Cluster Management**. +1. Go to the cluster where you want to add environment variables and click **⋮ > Edit Config**. +1. Click **Advanced Options**. +1. Click **Add Environment Variable**. +1. Enter the [required environment variables](#required-environment-variables). +1. Click **Save**. + +**Result:** The Fleet agent works behind a proxy. + +# Setting Environment Variables on Private Nodes + +For private nodes and private clusters, the proxy environment variables need to be set on the nodes themselves, as well as configured from the Rancher UI.
+ +This example shows how the environment variables would be set up on an Ubuntu node in a K3s Kubernetes cluster: + +``` +ssh -o ForwardAgent=yes ubuntu@ +ssh +export proxy_private_ip= +export HTTP_PROXY=http://${proxy_private_ip}:8888 +export HTTPS_PROXY=http://${proxy_private_ip}:8888 +export NO_PROXY=127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local +export KUBECONFIG=/etc/rancher/k3s/k3s.yaml +``` \ No newline at end of file diff --git a/versioned_docs/version-2.6/deploy-across-clusters/fleet/windows/windows.md b/versioned_docs/version-2.6/deploy-across-clusters/fleet/windows/windows.md new file mode 100644 index 0000000000..a2cb842fc0 --- /dev/null +++ b/versioned_docs/version-2.6/deploy-across-clusters/fleet/windows/windows.md @@ -0,0 +1,23 @@ +--- +title: Windows Support +weight: 2 +--- + + +Prior to Rancher v2.5.6, the `agent` did not have native Windows manifests on downstream clusters with Windows nodes. This would result in a failing `agent` pod for the cluster. + +If you are upgrading from an older version of Rancher to v2.5.6+, you can deploy a working `agent` with the following workflow *in the downstream cluster*: + +1. Cordon all Windows nodes. +1. Apply the below toleration to the `agent` workload. +1. Uncordon all Windows nodes. +1. Delete all `agent` pods. New pods should be created with the new toleration. +1. Once the `agent` pods are running, and auto-update is enabled for Fleet, they should be updated to a Windows-compatible `agent` version. + +```yaml +tolerations: +- effect: NoSchedule + key: cattle.io/os + operator: Equal + value: linux +``` \ No newline at end of file diff --git a/content/rancher/v2.6/en/deploy-across-clusters/multi-cluster-apps/_index.md b/versioned_docs/version-2.6/deploy-across-clusters/multi-cluster-apps/multi-cluster-apps.md similarity index 100% rename from content/rancher/v2.6/en/deploy-across-clusters/multi-cluster-apps/_index.md rename to versioned_docs/version-2.6/deploy-across-clusters/multi-cluster-apps/multi-cluster-apps.md diff --git a/content/rancher/v2.6/en/faq/deprecated-features/_index.md b/versioned_docs/version-2.6/faq/deprecated-features/deprecated-features.md similarity index 100% rename from content/rancher/v2.6/en/faq/deprecated-features/_index.md rename to versioned_docs/version-2.6/faq/deprecated-features/deprecated-features.md diff --git a/versioned_docs/version-2.6/faq/dockershim/dockershim.md b/versioned_docs/version-2.6/faq/dockershim/dockershim.md new file mode 100644 index 0000000000..d9ce2fe643 --- /dev/null +++ b/versioned_docs/version-2.6/faq/dockershim/dockershim.md @@ -0,0 +1,46 @@ +--- +title: Dockershim +weight: 300 +--- + +The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). Removal is currently scheduled for Kubernetes 1.24. For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +RKE clusters, starting with Kubernetes 1.21, now support the external Dockershim to continue leveraging Docker as the CRI runtime. 
We now implement the upstream open source community Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. + +To enable the external Dockershim, configure the following option. + +``` +enable_cri_dockerd: true +``` + +For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher even after the removal of in-tree Dockershim in Kubernetes 1.24. + +### FAQ + +
    + +Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? + +A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on Rancher 2.6 or above to have support for RKE with Kubernetes 1.21. See our [support matrix](https://rancher.com/support-maintenance-terms/all-supported-versions/rancher-v2.6.0/) for details. + +
    + +Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? + +A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and is not scheduled for removal upstream until Kubernetes 1.24. It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to Kubernetes 1.21 as you would normally, but should consider enabling the external Dockershim by Kubernetes 1.22. The external Dockershim will need to be enabled before upgrading to Kubernetes 1.24, at which point the existing implementation will be removed. + +For more information on the deprecation and its timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +
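For RKE clusters managed through a `cluster.yml`, the option shown earlier is a top-level key, so an upgrade that also enables the external Dockershim can be sketched roughly as follows. The version string below is a placeholder; use a Kubernetes 1.21 or later version supported by your RKE release.

```yaml
# cluster.yml (RKE) — sketch only; the version below is a placeholder.
kubernetes_version: "v1.21.x-rancher1-1"
enable_cri_dockerd: true
```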
    + +Q: What are my other options if I don’t want to depend on the Dockershim? + +A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 or K3s are two options for doing this. + +
    + +Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? + +A: Rancher is exploring the possibility of an in-place upgrade path. Alternatively, you can always migrate workloads from one cluster to another using kubectl. + +
    diff --git a/versioned_docs/version-2.6/faq/faq.md b/versioned_docs/version-2.6/faq/faq.md new file mode 100644 index 0000000000..92b3894af2 --- /dev/null +++ b/versioned_docs/version-2.6/faq/faq.md @@ -0,0 +1,70 @@ +--- +title: FAQ +weight: 25 +--- + +This FAQ is a work in progress designed to answer the questions our users most frequently ask about Rancher v2.x. + +See the [Technical FAQ]({{}}/rancher/v2.6/en/faq/technical/) for frequently asked technical questions. +
    + +**Does Rancher v2.x support Docker Swarm and Mesos as environment types?** + +When creating an environment in Rancher v2.x, Swarm and Mesos will no longer be standard options you can select. However, both Swarm and Mesos will continue to be available as Catalog applications you can deploy. It was a tough decision to make but, in the end, it came down to adoption. For example, out of more than 15,000 clusters, only about 200 or so are running Swarm. + +
    + +**Is it possible to manage Azure Kubernetes Services with Rancher v2.x?** + +Yes. + +
    + +**Does Rancher support Windows?** + +As of Rancher 2.3.0, we support Windows Server 1809 containers. For details on how to set up a cluster with Windows worker nodes, refer to the section on [configuring custom clusters for Windows.]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/) + +
    + +**Does Rancher support Istio?** + +As of Rancher 2.3.0, we support [Istio.]({{}}/rancher/v2.6/en/istio/) + +Furthermore, Istio is implemented in our micro-PaaS "Rio", which works on Rancher 2.x along with any CNCF-compliant Kubernetes cluster. You can read more about it [here](https://rio.io/). + +
    + +**Will Rancher v2.x support Hashicorp's Vault for storing secrets?** + +Secrets management is on our roadmap but we haven't assigned it to a specific release yet. + +
    + +**Does Rancher v2.x support RKT containers as well?** + +At this time, we only support Docker. + +
    + +**Does Rancher v2.x support Calico, Contiv, Contrail, Flannel, Weave net, etc., for embedded and registered Kubernetes?** + +Out-of-the-box, Rancher provides the following CNI network providers for Kubernetes clusters: Canal, Flannel, Calico and Weave. Always refer to the [Rancher Support Matrix](https://rancher.com/support-maintenance-terms/) for details about what is officially supported. + +
    + +**Are you planning on supporting Traefik for existing setups?** + +We don't currently plan on providing embedded Traefik support, but we're still exploring load-balancing approaches. + +
    + +**Can I import OpenShift Kubernetes clusters into v2.x?** + +Our goal is to run any upstream Kubernetes clusters. Therefore, Rancher v2.x should work with OpenShift, but we haven't tested it yet. + +
    + +**Are you going to integrate Longhorn?** + +Yes. Longhorn was integrated into Rancher v2.5+. diff --git a/versioned_docs/version-2.6/faq/kubectl/kubectl.md b/versioned_docs/version-2.6/faq/kubectl/kubectl.md new file mode 100644 index 0000000000..cd74d7c059 --- /dev/null +++ b/versioned_docs/version-2.6/faq/kubectl/kubectl.md @@ -0,0 +1,30 @@ +--- +title: Installing and Configuring kubectl +weight: 100 +--- + +`kubectl` is a CLI utility for running commands against Kubernetes clusters. It's required for many maintenance and administrative tasks in Rancher 2.x. + +### Installation + +See [kubectl Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/) for installation on your operating system. + +### Configuration + +When you create a Kubernetes cluster with RKE, RKE creates a `kube_config_cluster.yml` in the local directory that contains credentials to connect to your new cluster with tools like `kubectl` or `helm`. + +You can copy this file as `$HOME/.kube/config` or if you are working with multiple Kubernetes clusters, set the `KUBECONFIG` environmental variable to the path of `kube_config_cluster.yml`. + +``` +export KUBECONFIG=$(pwd)/kube_config_cluster.yml +``` + +Test your connectivity with `kubectl` and see if you can get the list of nodes back. + +``` +kubectl get nodes + NAME STATUS ROLES AGE VERSION +165.227.114.63 Ready controlplane,etcd,worker 11m v1.10.1 +165.227.116.167 Ready controlplane,etcd,worker 11m v1.10.1 +165.227.127.226 Ready controlplane,etcd,worker 11m v1.10.1 +``` diff --git a/versioned_docs/version-2.6/faq/networking/cni-providers/cni-providers.md b/versioned_docs/version-2.6/faq/networking/cni-providers/cni-providers.md new file mode 100644 index 0000000000..a7104cc5df --- /dev/null +++ b/versioned_docs/version-2.6/faq/networking/cni-providers/cni-providers.md @@ -0,0 +1,201 @@ +--- +title: Container Network Interface (CNI) Providers +description: Learn about Container Network Interface (CNI), the CNI providers Rancher provides, the features they offer, and how to choose a provider for you +weight: 2300 +--- + +## What is CNI? + +CNI (Container Network Interface), a [Cloud Native Computing Foundation project](https://cncf.io/), consists of a specification and libraries for writing plugins to configure network interfaces in Linux containers, along with a number of plugins. CNI concerns itself only with network connectivity of containers and removing allocated resources when the container is deleted. + +Kubernetes uses CNI as an interface between network providers and Kubernetes pod networking. + +![CNI Logo]({{}}/img/rancher/cni-logo.png) + +For more information visit [CNI GitHub project](https://github.com/containernetworking/cni). + +## What Network Models are Used in CNI? + +CNI network providers implement their network fabric using either an encapsulated network model such as Virtual Extensible Lan ([VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan)) or an unencapsulated network model such as Border Gateway Protocol ([BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol)). + +### What is an Encapsulated Network? + +This network model provides a logical Layer 2 (L2) network encapsulated over the existing Layer 3 (L3) network topology that spans the Kubernetes cluster nodes. 
With this model, you have an isolated L2 network for containers without needing routing distribution, all at the cost of minimal overhead in terms of processing and increased IP packet size, which comes from an IP header generated by overlay encapsulation. Encapsulation information is distributed by UDP ports between Kubernetes workers, interchanging network control plane information about how MAC addresses can be reached. Common encapsulations used in this kind of network model are VXLAN, Internet Protocol Security (IPSec), and IP-in-IP. + +In simple terms, this network model generates a kind of network bridge extended between Kubernetes workers, where pods are connected. + +This network model is used when an extended L2 bridge is preferred. This network model is sensitive to L3 network latencies of the Kubernetes workers. If datacenters are in distinct geolocations, be sure to have low latencies between them to avoid eventual network segmentation. + +CNI network providers using this network model include Flannel, Canal, Weave, and Cilium. By default, Calico is not using this model, but it can be configured to do so. + +![Encapsulated Network]({{}}/img/rancher/encapsulated-network.png) + +### What is an Unencapsulated Network? + +This network model provides an L3 network to route packets between containers. This model doesn't generate an isolated L2 network, nor does it generate overhead. These benefits come at the cost of Kubernetes workers having to manage any route distribution that's needed. Instead of using IP headers for encapsulation, this network model uses a network protocol between Kubernetes workers to distribute routing information to reach pods, such as [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). + +In simple terms, this network model generates a kind of network router extended between Kubernetes workers, which provides information about how to reach pods. + +This network model is used when a routed L3 network is preferred. This mode dynamically updates routes at the OS level for Kubernetes workers. It's less sensitive to latency. + +CNI network providers using this network model include Calico and Cilium. Cilium may be configured with this model although it is not the default mode. + +![Unencapsulated Network]({{}}/img/rancher/unencapsulated-network.png) + +## What CNI Providers are Provided by Rancher? + +### RKE Kubernetes clusters + +Out-of-the-box, Rancher provides the following CNI network providers for RKE Kubernetes clusters: Canal, Flannel, and Weave. + +You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Canal + +![Canal Logo]({{}}/img/rancher/canal-logo.png) + +Canal is a CNI network provider that gives you the best of Flannel and Calico. It allows users to easily deploy Calico and Flannel networking together as a unified networking solution, combining Calico’s network policy enforcement with the rich superset of Calico (unencapsulated) and/or Flannel (encapsulated) network connectivity options. + +In Rancher, Canal is the default CNI network provider combined with Flannel and VXLAN encapsulation. + +Kubernetes workers should open UDP port `8472` (VXLAN) and TCP port `9099` (health checks). If using WireGuard, you should open UDP ports `51820` and `51821`. For more details, refer to [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/).
{{< img "/img/rancher/canal-diagram.png" "Canal Diagram">}} + +For more information, see the [Canal GitHub Page.](https://github.com/projectcalico/canal) + +#### Flannel + +![Flannel Logo]({{}}/img/rancher/flannel-logo.png) + +Flannel is a simple and easy way to configure an L3 network fabric designed for Kubernetes. Flannel runs a single binary agent named flanneld on each host, which is responsible for allocating a subnet lease to each host out of a larger, preconfigured address space. Flannel uses either the Kubernetes API or etcd directly to store the network configuration, the allocated subnets, and any auxiliary data (such as the host's public IP). Packets are forwarded using one of several backend mechanisms, with the default encapsulation being [VXLAN](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#vxlan). + +Encapsulated traffic is unencrypted by default. Flannel provides two solutions for encryption: + +* [IPSec](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#ipsec), which makes use of [strongSwan](https://www.strongswan.org/) to establish encrypted IPSec tunnels between Kubernetes workers. It is an experimental backend for encryption. +* [WireGuard](https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md#wireguard), which is a faster-performing alternative to strongSwan. + +Kubernetes workers should open UDP port `8472` (VXLAN). See [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. + +![Flannel Diagram]({{}}/img/rancher/flannel-diagram.png) + +For more information, see the [Flannel GitHub Page](https://github.com/flannel-io/flannel). + +#### Weave + +![Weave Logo]({{}}/img/rancher/weave-logo.png) + +Weave enables networking and network policy in Kubernetes clusters across the cloud. Additionally, it supports encrypting traffic between peers. + +Kubernetes workers should open TCP port `6783` (control port), UDP port `6783`, and UDP port `6784` (data ports). See the [port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. + +For more information, see the following pages: + +- [Weave Net Official Site](https://www.weave.works/) + +### RKE2 Kubernetes clusters + +Out-of-the-box, Rancher provides the following CNI network providers for RKE2 Kubernetes clusters: [Canal](#canal) (see above section), Calico, and Cilium. + +You can choose your CNI network provider when you create new Kubernetes clusters from Rancher. + +#### Calico + +![Calico Logo]({{}}/img/rancher/calico-logo.png) + +Calico enables networking and network policy in Kubernetes clusters across the cloud. By default, Calico uses a pure, unencapsulated IP network fabric and policy engine to provide networking for your Kubernetes workloads. Workloads are able to communicate over both cloud infrastructure and on-prem using BGP. + +Calico also provides a stateless IP-in-IP or VXLAN encapsulation mode that can be used, if necessary. In addition, Calico offers policy isolation, allowing you to secure and govern your Kubernetes workloads using advanced ingress and egress policies.
+ +Kubernetes workers should open TCP port `179` if using BGP or UDP port `4789` if using VXLAN encapsulation. In addition, TCP port `5473` is needed when using Typha. See [the port requirements for user clusters]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/#networking-requirements) for more details. + +>**Important:** In Rancher v2.6.3, Calico probes fail on Windows nodes upon RKE2 installation. Note that this issue is resolved in v2.6.4. +> +>- To work around this issue, first navigate to `https:///v3/settings/windows-rke2-install-script`. +> +>- There, change the current setting: `https://raw.githubusercontent.com/rancher/wins/v0.1.3/install.ps1` to this new setting: `https://raw.githubusercontent.com/rancher/rke2/master/windows/rke2-install.ps1`. + +![Calico Diagram]({{}}/img/rancher/calico-diagram.svg) + +For more information, see the following pages: + +- [Project Calico Official Site](https://www.projectcalico.org/) +- [Project Calico GitHub Page](https://github.com/projectcalico/calico) + +#### Cilium + +![Cilium Logo]({{}}/img/rancher/cilium-logo.png) + +Cilium enables networking and network policies (L3, L4, and L7) in Kubernetes. By default, Cilium uses eBPF technologies to route packets inside the node and VXLAN to send packets to other nodes. Unencapsulated techniques can also be configured. + +Cilium recommends kernel versions greater than 5.2 to be able to leverage the full potential of eBPF. Kubernetes workers should open TCP port `8472` for VXLAN and TCP port `4240` for health checks. In addition, ICMP 8/0 must be enabled for health checks. For more information, check [Cilium System Requirements](https://docs.cilium.io/en/latest/operations/system_requirements/#firewall-requirements). + +##### Ingress Routing Across Nodes in Cilium +
    +By default, Cilium does not allow pods to contact pods on other nodes. To work around this, enable the ingress controller to route requests across nodes with a `CiliumNetworkPolicy`. + +After selecting the Cilium CNI and enabling Project Network Isolation for your new cluster, configure as follows: + +``` +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + name: hn-nodes + namespace: default +spec: + endpointSelector: {} + ingress: + - fromEntities: + - remote-node +``` + +## CNI Features by Provider + +The following table summarizes the different features available for each CNI network provider provided by Rancher. + +| Provider | Network Model | Route Distribution | Network Policies | Mesh | External Datastore | Encryption | Ingress/Egress Policies | +| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | +| Canal | Encapsulated (VXLAN) | No | Yes | No | K8s API | Yes | Yes | +| Flannel | Encapsulated (VXLAN) | No | No | No | K8s API | Yes | No | +| Calico | Encapsulated (VXLAN,IPIP) OR Unencapsulated | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | +| Weave | Encapsulated | Yes | Yes | Yes | No | Yes | Yes | +| Cilium | Encapsulated (VXLAN) | Yes | Yes | Yes | Etcd and K8s API | Yes | Yes | + +- Network Model: Encapsulated or unencapsulated. For more information, see [What Network Models are Used in CNI?](#what-network-models-are-used-in-cni) + +- Route Distribution: An exterior gateway protocol designed to exchange routing and reachability information on the Internet. BGP can assist with pod-to-pod networking between clusters. This feature is a must on unencapsulated CNI network providers, and it is typically done by BGP. If you plan to build clusters split across network segments, route distribution is a feature that's nice-to-have. + +- Network Policies: Kubernetes offers functionality to enforce rules about which services can communicate with each other using network policies. This feature is stable as of Kubernetes v1.7 and is ready to use with certain networking plugins. + +- Mesh: This feature allows service-to-service networking communication between distinct Kubernetes clusters. + +- External Datastore: CNI network providers with this feature need an external datastore for its data. + +- Encryption: This feature allows cyphered and secure network control and data planes. + +- Ingress/Egress Policies: This feature allows you to manage routing control for both Kubernetes and non-Kubernetes communications. + + +## CNI Community Popularity + +The following table summarizes different GitHub metrics to give you an idea of each project's popularity and activity. This data was collected in January 2022. + +| Provider | Project | Stars | Forks | Contributors | +| ---- | ---- | ---- | ---- | ---- | +| Canal | https://github.com/projectcalico/canal | 679 | 100 | 21 | +| Flannel | https://github.com/flannel-io/flannel | 7k | 2.5k | 185 | +| Calico | https://github.com/projectcalico/calico | 3.1k | 741 | 224 | +| Weave | https://github.com/weaveworks/weave/ | 6.2k | 635 | 84 | +| Cilium | https://github.com/cilium/cilium | 10.6k | 1.3k | 352 | + +
    + +## Which CNI Provider Should I Use? + +It depends on your project needs. There are many different providers, which each have various features and options. There isn't one provider that meets everyone's needs. + +Canal is the default CNI network provider. We recommend it for most use cases. It provides encapsulated networking for containers with Flannel, while adding Calico network policies that can provide project/namespace isolation in terms of networking. + +## How can I configure a CNI network provider? + +Please see [Cluster Options]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/) on how to configure a network provider for your cluster. For more advanced configuration options, please see how to configure your cluster using a [Config File]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and the options for [Network Plug-ins]({{}}/rke/latest/en/config-options/add-ons/network-plugins/). diff --git a/content/rancher/v2.6/en/faq/networking/_index.md b/versioned_docs/version-2.6/faq/networking/networking.md similarity index 100% rename from content/rancher/v2.6/en/faq/networking/_index.md rename to versioned_docs/version-2.6/faq/networking/networking.md diff --git a/content/rancher/v2.6/en/faq/removing-rancher/_index.md b/versioned_docs/version-2.6/faq/removing-rancher/removing-rancher.md similarity index 100% rename from content/rancher/v2.6/en/faq/removing-rancher/_index.md rename to versioned_docs/version-2.6/faq/removing-rancher/removing-rancher.md diff --git a/versioned_docs/version-2.6/faq/security/security.md b/versioned_docs/version-2.6/faq/security/security.md new file mode 100644 index 0000000000..6759f6d294 --- /dev/null +++ b/versioned_docs/version-2.6/faq/security/security.md @@ -0,0 +1,15 @@ +--- +title: Security +weight: 8007 + +--- + +**Is there a Hardening Guide?** + +The Hardening Guide is now located in the main [Security]({{}}/rancher/v2.6/en/security/) section. + +
    + +**What are the results of Rancher's Kubernetes cluster when it is CIS benchmarked?** + +We have run the CIS Kubernetes benchmark against a hardened Rancher Kubernetes cluster. The results of that assessment can be found in the main [Security]({{}}/rancher/v2.6/en/security/) section. diff --git a/content/rancher/v2.6/en/faq/technical/_index.md b/versioned_docs/version-2.6/faq/technical/technical.md similarity index 100% rename from content/rancher/v2.6/en/faq/technical/_index.md rename to versioned_docs/version-2.6/faq/technical/technical.md diff --git a/versioned_docs/version-2.6/faq/telemetry/telemetry.md b/versioned_docs/version-2.6/faq/telemetry/telemetry.md new file mode 100644 index 0000000000..6ab582667e --- /dev/null +++ b/versioned_docs/version-2.6/faq/telemetry/telemetry.md @@ -0,0 +1,32 @@ +--- +title: Telemetry +weight: 8008 +--- + +### What is Telemetry? + +Telemetry collects aggregate information about the size of Rancher installations, versions of components used, and which features are used. This information is used by Rancher Labs to help make the product better and is not shared with third-parties. + +### What information is collected? + +No specific identifying information like usernames, passwords, or the names or addresses of user resources will ever be collected. + +The primary things collected include: + + - Aggregate counts (smallest, average, largest, total) of nodes per-cluster and their size (e.g. CPU cores & RAM). + - Aggregate counts of logical resources like Clusters, Projects, Namespaces, and Pods. + - Counts of what driver was used to deploy clusters and nodes (e.g. GKE vs EC2 vs Imported vs Custom). + - Versions of Kubernetes components, Operating Systems and Docker that are deployed on nodes. + - Whether some optional components are enabled or not (e.g. which auth providers are used). + - The image name & version of Rancher that is running. + - A unique randomly-generated identifier for this installation. + +### Can I see the information that is being sent? + +If Telemetry is enabled, you can go to `https:///v1-telemetry` in your installation to see the current data. + +If Telemetry is not enabled, the process that collects the data is not running, so there is nothing being collected to look at. + +### How do I turn it on or off? + +After initial setup, an administrator can go to the `Settings` page in the `Global` section of the UI and click Edit to change the `telemetry-opt` setting to either `in` or `out`. diff --git a/versioned_docs/version-2.6/helm-charts/creating-apps/creating-apps.md b/versioned_docs/version-2.6/helm-charts/creating-apps/creating-apps.md new file mode 100644 index 0000000000..3e8555a0f8 --- /dev/null +++ b/versioned_docs/version-2.6/helm-charts/creating-apps/creating-apps.md @@ -0,0 +1,128 @@ +--- +title: Creating Apps +weight: 400 +--- + +Rancher's App Marketplace is based on Helm Repositories and Helm Charts. You can add HTTP based standard Helm Repositories as well as any Git Repository which contains charts. + +> For a complete walkthrough of developing charts, see the [Chart Template Developer's Guide](https://helm.sh/docs/chart_template_guide/) in the official Helm documentation. 
+ +- [Chart types](#chart-types) + - [Helm charts](#helm-charts) + - [Rancher charts](#rancher-charts) +- [Chart directory structure](#chart-directory-structure) +- [Additional Files for Rancher Charts](#additional-files-for-rancher-charts) + - [questions.yml](#questions-yml) + - [Min/Max Rancher versions](#min-max-rancher-versions) + - [Question variable reference](#question-variable-reference) +- [Tutorial: Example Custom Chart Creation](#tutorial-example-custom-chart-creation) + +# Chart Types + +Rancher supports two different types of charts: Helm charts and Rancher charts. + +### Helm Charts + +Native Helm charts include an application along with other software required to run it. When deploying native Helm charts, you can provide the chart's parameter values in a YAML editor. + +### Rancher Charts + +Rancher charts are native Helm charts with two files that enhance user experience: `app-readme.md` and `questions.yaml`. Read more about them in [Additional Files for Rancher Charts.](#additional-files-for-rancher-charts) + +Rancher charts add simplified chart descriptions and configuration forms to make the application deployment easy. Rancher users do not need to read through the entire list of Helm variables to understand how to launch an application. + +# Chart Directory Structure + +You can provide Helm Charts in a standard, HTTP-based Helm Repository. For more information, see the [Chart Repository Guide](https://helm.sh/docs/topics/chart_repository) in the official Helm documentation. + +Alternatively, you can organize your charts in a Git Repository and directly add this to the App Marketplace. + +The following example demonstrates the directory structure for a Git repository. The `charts` directory is the top-level directory under the repository base. Adding the repository to Rancher will expose all charts contained within it. The `questions.yaml`, `README.md`, and `requirements.yml` files are specific to Rancher charts, but are optional for chart customization. + +``` +/ + │ + ├── charts/ + │ ├── / # This directory name will be surfaced in the Rancher UI as the chart name + │ │ ├── / # Each directory at this level provides different app versions that will be selectable within the chart in the Rancher UI + │ │ │ ├── Chart.yaml # Required Helm chart information file. + │ │ │ ├── questions.yaml # Form questions displayed within the Rancher UI. Questions display in Configuration Options.* + │ │ │ ├── README.md # Optional: Helm Readme file displayed within Rancher UI. This text displays in Detailed Descriptions. + │ │ │ ├── requirements.yml # Optional: YAML file listing dependencies for the chart. + │ │ │ ├── values.yml # Default configuration values for the chart. + │ │ │ ├── templates/ # Directory containing templates that, when combined with values.yml, generate Kubernetes YAML. +``` + +# Additional Files for Rancher Charts + +Before you create your own custom catalog, you should have a basic understanding of how a Rancher chart differs from a native Helm chart. Rancher charts differ slightly from Helm charts in their directory structures. Rancher charts include two files that Helm charts do not. + +- `app-readme.md` + + A file that provides descriptive text in the chart's UI header. + +- `questions.yml` + + A file that contains questions for a form. These form questions simplify deployment of a chart. Without it, you must configure the deployment using a values YAML config, which is more difficult.
The following image displays the difference between a Rancher chart (which includes `questions.yml`) and a native Helm chart (which does not). + + +
    Rancher Chart with questions.yml (top) vs. Helm Chart without (bottom)
    + + ![questions.yml]({{}}/img/rancher/rancher-app-2.6.png) + ![values.yaml]({{}}/img/rancher/helm-app-2.6.png) + + +### Chart.yaml annotations + +Rancher supports additional annotations that you can add to the `Chart.yaml` file. These annotations allow you to define application dependencies or configure additional UI defaults: + +| Annotation | Description | Example | +| --------------------------------- | ----------- | ------- | +| catalog.cattle.io/auto-install | If set, will install the specified chart in the specified version before installing this chart | other-chart-name=1.0.0 | +| catalog.cattle.io/display-name | A display name that should be displayed in the App Marketplace instead of the chart name | Display Name of Chart | +| catalog.cattle.io/namespace | A fixed namespace where the chart should be deployed in. If set, this can't be changed by the user | fixed-namespace | +| catalog.cattle.io/release-name | A fixed release name for the Helm installation. If set, this can't be changed by the user | fixed-release-name | +| catalog.cattle.io/requests-cpu | Total amount of CPU that should be unreserverd in the cluster. If less CPU is available, a warning will be shown | 2000m | +| catalog.cattle.io/requests-memory | Total amount of memory that should be unreserverd in the cluster. If less memory is available, a warning will be shown | 2Gi | +| catalog.cattle.io/os | Restricts the OS where this chart can be installed. Possible values: `linux`, `windows`. Default: no restriction | linux | + +### questions.yml + +Inside the `questions.yml`, most of the content will be around the questions to ask the end user, but there are some additional fields that can be set in this file. + +### Min/Max Rancher versions + +For each chart, you can add the minimum and/or maximum Rancher version, which determines whether or not this chart is available to be deployed from Rancher. + +> **Note:** Even though Rancher release versions are prefixed with a `v`, there is *no* prefix for the release version when using this option. + +``` +rancher_min_version: 2.3.0 +rancher_max_version: 2.3.99 +``` + +### Question Variable Reference + +This reference contains variables that you can use in `questions.yml` nested under `questions:`. + +| Variable | Type | Required | Description | +| ------------- | ------------- | --- |------------- | +| variable | string | true | Define the variable name specified in the `values.yml` file, using `foo.bar` for nested objects. | +| label | string | true | Define the UI label. | +| description | string | false | Specify the description of the variable.| +| type | string | false | Default to `string` if not specified (current supported types are string, multiline, boolean, int, enum, password, storageclass, hostname, pvc, and secret).| +| required | bool | false | Define if the variable is required or not (true \| false)| +| default | string | false | Specify the default value. | +| group | string | false | Group questions by input value. | +| min_length | int | false | Min character length.| +| max_length | int | false | Max character length.| +| min | int | false | Min integer length. | +| max | int | false | Max integer length. | +| options | []string | false | Specify the options when the variable type is `enum`, for example: options:
    - "ClusterIP"
    - "NodePort"
    - "LoadBalancer"| +| valid_chars | string | false | Regular expression for input chars validation. | +| invalid_chars | string | false | Regular expression for invalid input chars validation.| +| subquestions | []subquestion | false| Add an array of subquestions.| +| show_if | string | false | Show current variable if conditional variable is true. For example `show_if: "serviceType=Nodeport"` | +| show\_subquestion_if | string | false | Show subquestions if is true or equal to one of the options. for example `show_subquestion_if: "true"`| + +>**Note:** `subquestions[]` cannot contain `subquestions` or `show_subquestions_if` keys, but all other keys in the above table are supported. diff --git a/content/rancher/v2.6/en/helm-charts/_index.md b/versioned_docs/version-2.6/helm-charts/helm-charts.md similarity index 100% rename from content/rancher/v2.6/en/helm-charts/_index.md rename to versioned_docs/version-2.6/helm-charts/helm-charts.md diff --git a/versioned_docs/version-2.6/installation/install-rancher-on-k8s/aks/aks.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/aks/aks.md new file mode 100644 index 0000000000..b48aca076d --- /dev/null +++ b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/aks/aks.md @@ -0,0 +1,119 @@ +--- +title: Installing Rancher on Azure Kubernetes Service +shortTitle: AKS +weight: 4 +--- + +This page covers how to install Rancher on Microsoft's Azure Kubernetes Service (AKS). + +The guide uses command line tools to provision an AKS cluster with an ingress. If you prefer to provision your cluster using the Azure portal, refer to the [official documentation](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough-portal). + +If you already have an AKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) + +# Prerequisites + +>**Note** +>Deploying to Microsoft Azure will incur charges. + +- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. +- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. +- [Micsoroft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. +- Your subscription has sufficient quota for at least 2 vCPUs. For details on Rancher server resource requirements, refer to [this section]({{}}/rancher/v2.6/en/installation/requirements/#rke-and-hosted-kubernetes) +- When installing Rancher with Helm in Azure, use the L7 load balancer to avoid networking issues. For more information, refer to the documentation on [Azure load balancer limitations](https://docs.microsoft.com/en-us/azure/load-balancer/components#limitations). + +# 1. 
Prepare your Workstation + +Install the following command line tools on your workstation: + +- The Azure CLI, **az:** For help, refer to these [installation steps.](https://docs.microsoft.com/en-us/cli/azure/) +- **kubectl:** For help, refer to these [installation steps.](https://kubernetes.io/docs/tasks/tools/#kubectl) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +# 2. Create a Resource Group + +After installing the CLI, you will need to log in with your Azure account. + +``` +az login +``` + +Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) to hold all relevant resources for your cluster. Use a location that applies to your use case. + +``` +az group create --name rancher-rg --location eastus +``` + +# 3. Create the AKS Cluster + +To create an AKS cluster, run the following command. Use a VM size that applies to your use case. Refer to [this article](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes) for available sizes and options. When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +az aks create \ + --resource-group rancher-rg \ + --name rancher-server \ + --kubernetes-version 1.20.7 \ + --node-count 3 \ + --node-vm-size Standard_D2_v3 +``` + +The cluster will take some time to be deployed. + +# 4. Get Access Credentials + +After the cluster is deployed, get the access credentials. + +``` +az aks get-credentials --resource-group rancher-rg --name rancher-server +``` + +This command merges your cluster's credentials into the existing kubeconfig and allows `kubectl` to interact with the cluster. + +# 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. Installing an Ingress requires allocating a public IP address. Ensure you have sufficient quota, otherwise it will fail to assign the IP address. Limits for public IP addresses are applicable at a regional level per subscription. + +The following command installs an `nginx-ingress-controller` with a Kubernetes load balancer service. + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 6. Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.0.116.18 40.31.180.83 80:31229/TCP,443:31050/TCP + 67s +``` + +Save the `EXTERNAL-IP`. + +# 7. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the `EXTERNAL-IP` that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. 
For help, refer to the [Azure DNS documentation](https://docs.microsoft.com/en-us/azure/dns/) + +# 8. Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/versioned_docs/version-2.6/installation/install-rancher-on-k8s/amazon-eks/amazon-eks.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/amazon-eks/amazon-eks.md new file mode 100644 index 0000000000..85e2f93bd5 --- /dev/null +++ b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/amazon-eks/amazon-eks.md @@ -0,0 +1,164 @@ +--- +title: Installing Rancher on Amazon EKS +shortTitle: Amazon EKS +weight: 4 +--- + +This page covers two ways to install Rancher on EKS. + +The first is a guide for deploying the Rancher server on an EKS cluster using CloudFormation. This guide was created in collaboration with Amazon Web Services to show how to deploy Rancher following best practices. + +The second is a guide for installing an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +If you already have an EKS Kubernetes cluster, skip to the step about [installing an ingress.](#5-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) + +- [Automated Quickstart using AWS Best Practices](#automated-quickstart-using-aws-best-practices) +- [Creating an EKS Cluster for the Rancher Server](#creating-an-eks-cluster-for-the-rancher-server) + +# Automated Quickstart using AWS Best Practices + +Rancher and Amazon Web Services collaborated on a quick start guide for deploying Rancher on an EKS cluster following AWS best practices. The deployment guide is [here.](https://aws-quickstart.github.io/quickstart-eks-rancher/) + +The quick start guide provides three options for deploying Rancher on EKS: + +- **Deploy Rancher into a new VPC and new Amazon EKS cluster**. This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, Amazon EKS cluster, and other infrastructure components. It then deploys Rancher into this new EKS cluster. +- **Deploy Rancher into an existing VPC and a new Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. +- **Deploy Rancher into an existing VPC and existing Amazon EKS cluster**. This option provisions Rancher in your existing AWS infrastructure. 
+ +Deploying this Quick Start for a new virtual private cloud (VPC) and new Amazon EKS cluster using default parameters builds the following Rancher environment in the AWS Cloud: + +- A highly available architecture that spans three Availability Zones.* +- A VPC configured with public and private subnets, according to AWS best practices, to provide you with your own virtual network on AWS.* +- In the public subnets: + - Managed network address translation (NAT) gateways to allow outbound internet access for resources.* + - Linux bastion hosts in an Auto Scaling group to allow inbound Secure Shell (SSH) access to Amazon Elastic Compute Cloud (Amazon EC2) instances in public and private subnets.* +- In the private subnets: + - Kubernetes nodes in an Auto Scaling group.* + - A Network Load Balancer (not shown) for accessing the Rancher console. +- Rancher deployment using AWS Systems Manager automation. +- Amazon EKS service for the EKS cluster, which provides the Kubernetes control plane.* +- An Amazon Route 53 DNS record for accessing the Rancher deployment. + +\* The CloudFormation template that deploys the Quick Start into an existing Amazon EKS cluster skips the components marked by asterisks and prompts you for your existing VPC configuration. + +# Creating an EKS Cluster for the Rancher Server + +In this section, you'll install an EKS cluster with an ingress by using command line tools. This guide may be useful if you want to use fewer resources while trying out Rancher on EKS. + +> **Prerequisites:** +> +> - You should already have an AWS account. +> - It is recommended to use an IAM user instead of the root AWS account. You will need the IAM user's access key and secret key to configure the AWS command line interface. +> - The IAM user needs the minimum IAM policies described in the official [eksctl documentation.](https://eksctl.io/usage/minimum-iam-policies/) + +### 1. Prepare your Workstation + +Install the following command line tools on your workstation: + +- **The AWS CLI v2:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) +- **eksctl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) +- **kubectl:** For help, refer to these [installation steps.](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html) +- **helm:** For help, refer to these [installation steps.](https://helm.sh/docs/intro/install/) + +### 2. Configure the AWS CLI + +To configure the AWS CLI, run the following command: + +``` +aws configure +``` + +Then enter the following values: + +| Value | Description | +|-------|-------------| +| AWS Access Key ID | The access key credential for the IAM user with EKS permissions. | +| AWS Secret Access Key | The secret key credential for the IAM user with EKS permissions. | +| Default region name | An [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html#Concepts.RegionsAndAvailabilityZones.Regions) where the cluster nodes will be located. | +| Default output format | Enter `json`. | + +### 3. Create the EKS Cluster + +To create an EKS cluster, run the following command. Use the AWS region that applies to your use case. 
When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +eksctl create cluster \ + --name rancher-server \ + --version 1.20 \ + --region us-west-2 \ + --nodegroup-name ranchernodes \ + --nodes 3 \ + --nodes-min 1 \ + --nodes-max 4 \ + --managed +``` + +The cluster will take some time to be deployed with CloudFormation. + +### 4. Test the Cluster + +To test the cluster, run: + +``` +eksctl get cluster +``` + +The result should look like the following: + +``` +eksctl get cluster +2021-03-18 15:09:35 [ℹ] eksctl version 0.40.0 +2021-03-18 15:09:35 [ℹ] using region us-west-2 +NAME REGION EKSCTL CREATED +rancher-server-cluster us-west-2 True +``` + +### 5. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. + +The following command installs an `nginx-ingress-controller` with a LoadBalancer service. This will result in an ELB (Elastic Load Balancer) in front of NGINX: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +### 6. Get Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) + AGE +ingress-nginx-controller LoadBalancer 10.100.90.18 a904a952c73bf4f668a17c46ac7c56ab-962521486.us-west-2.elb.amazonaws.com 80:31229/TCP,443:31050/TCP + 27m +``` + +Save the `EXTERNAL-IP`. + +### 7. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the AWS documentation on [routing traffic to an ELB load balancer.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html) + +### 8. Install the Rancher Helm Chart + +Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use that DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. 
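+
+As a rough end-to-end sketch of that final step (assuming the `rancher-stable` Helm repository and the example DNS name above; the linked page also covers the cert-manager prerequisite and the TLS options in detail):
+
+```
+# Add the Rancher chart repository, create the namespace Rancher expects,
+# and install the chart with the DNS name that points at the load balancer.
+# "admin" is only the placeholder bootstrap password used in these docs.
+helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+helm repo update
+kubectl create namespace cattle-system
+helm install rancher rancher-stable/rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.my.org \
+  --set bootstrapPassword=admin
+```
+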
diff --git a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/_index.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/chart-options/chart-options.md similarity index 100% rename from content/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/_index.md rename to versioned_docs/version-2.6/installation/install-rancher-on-k8s/chart-options/chart-options.md diff --git a/versioned_docs/version-2.6/installation/install-rancher-on-k8s/gke/gke.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/gke/gke.md new file mode 100644 index 0000000000..347c3c8ec9 --- /dev/null +++ b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/gke/gke.md @@ -0,0 +1,186 @@ +--- +title: Installing Rancher on a Google Kubernetes Engine Cluster +shortTitle: GKE +weight: 5 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to install Rancher using Google Kubernetes Engine. + +If you already have a GKE Kubernetes cluster, skip to the step about [installing an ingress.](#7-install-an-ingress) Then install the Rancher Helm chart following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) + +# Prerequisites + +- You will need a Google account. +- You will need a Google Cloud billing account. You can manage your Cloud Billing accounts using the Google Cloud Console. For more information about the Cloud Console, visit [General guide to the console.](https://support.google.com/cloud/answer/3465889?hl=en&ref_topic=3340599) +- You will need a cloud quota for at least one in-use IP address and at least 2 CPUs. For more details about hardware requirements for the Rancher server, refer to [this section.]({{}}/rancher/v2.6/en/installation/requirements/#rke-and-hosted-kubernetes) + +# 1. Enable the Kubernetes Engine API + +Take the following steps to enable the Kubernetes Engine API: + +1. Visit the [Kubernetes Engine page](https://console.cloud.google.com/projectselector/kubernetes?_ga=2.169595943.767329331.1617810440-856599067.1617343886) in the Google Cloud Console. +1. Create or select a project. +1. Open the project and enable the Kubernetes Engine API for the project. Wait for the API and related services to be enabled. This can take several minutes. +1. Make sure that billing is enabled for your Cloud project. For information on how to enable billing for your project, refer to the [Google Cloud documentation.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project) + +# 2. Open the Cloud Shell + +Cloud Shell is a shell environment for managing resources hosted on Google Cloud. Cloud Shell comes preinstalled with the `gcloud` command-line tool and kubectl command-line tool. The `gcloud` tool provides the primary command-line interface for Google Cloud, and `kubectl` provides the primary command-line interface for running commands against Kubernetes clusters. + +The following sections describe how to launch the cloud shell from the Google Cloud Console or from your local workstation. + +### Cloud Shell + +To launch the shell from the [Google Cloud Console,](https://console.cloud.google.com) go to the upper-right corner of the console and click the terminal button. 
When hovering over the button, it is labeled **Activate Cloud Shell**. + +### Local Shell + +To install `gcloud` and `kubectl`, perform the following steps: + +1. Install the Cloud SDK by following [these steps.](https://cloud.google.com/sdk/docs/install) The Cloud SDK includes the `gcloud` command-line tool. The steps vary based on your OS. +1. After installing Cloud SDK, install the `kubectl` command-line tool by running the following command: + + ``` + gcloud components install kubectl + ``` + In a later step, `kubectl` will be configured to use the new GKE cluster. +1. [Install Helm 3](https://helm.sh/docs/intro/install/) if it is not already installed. +1. Enable Helm experimental [support for OCI images](https://github.com/helm/community/blob/master/hips/hip-0006.md) with the `HELM_EXPERIMENTAL_OCI` variable. Add the following line to `~/.bashrc` (or `~/.bash_profile` in macOS, or wherever your shell stores environment variables): + + ``` + export HELM_EXPERIMENTAL_OCI=1 + ``` +1. Run the following command to load your updated `.bashrc` file: + + ``` + source ~/.bashrc + ``` + If you are running macOS, use this command: + ``` + source ~/.bash_profile + ``` + + + +# 3. Configure the gcloud CLI + + Set up default gcloud settings using one of the following methods: + +- Using gcloud init, if you want to be walked through setting defaults. +- Using gcloud config, to individually set your project ID, zone, and region. + + + + +1. Run gcloud init and follow the directions: + + ``` + gcloud init + ``` + If you are using SSH on a remote server, use the --console-only flag to prevent the command from launching a browser: + + ``` + gcloud init --console-only + ``` +2. Follow the instructions to authorize gcloud to use your Google Cloud account and select the new project that you created. + + + + + + + +# 4. Confirm that gcloud is configured correctly + +Run: + +``` +gcloud config list +``` + +The output should resemble the following: + +``` +[compute] +region = us-west1 # Your chosen region +zone = us-west1-b # Your chosen zone +[core] +account = +disable_usage_reporting = True +project = + +Your active configuration is: [default] +``` + +# 5. Create a GKE Cluster + +The following command creates a three-node cluster. + +Replace `cluster-name` with the name of your new cluster. + +When choosing a Kubernetes version, be sure to first consult the [support matrix](https://rancher.com/support-matrix/) to find the highest version of Kubernetes that has been validated for your Rancher version. + +``` +gcloud container clusters create cluster-name --num-nodes=3 --cluster-version=1.20.8-gke.900 +``` + +# 6. Get Authentication Credentials + +After creating your cluster, you need to get authentication credentials to interact with the cluster: + +``` +gcloud container clusters get-credentials cluster-name +``` + +This command configures `kubectl` to use the cluster you created. + +# 7. Install an Ingress + +The cluster needs an Ingress so that Rancher can be accessed from outside the cluster. 
+ +The following command installs an `nginx-ingress-controller` with a LoadBalancer service: + +``` +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm upgrade --install \ + ingress-nginx ingress-nginx/ingress-nginx \ + --namespace ingress-nginx \ + --set controller.service.type=LoadBalancer \ + --version 3.12.0 \ + --create-namespace +``` + +# 8. Get the Load Balancer IP + +To get the address of the load balancer, run: + +``` +kubectl get service ingress-nginx-controller --namespace=ingress-nginx +``` + +The result should look similar to the following: + +``` +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +ingress-nginx-controller LoadBalancer 10.3.244.156 35.233.206.34 80:31876/TCP,443:32497/TCP 81s +``` + +Save the `EXTERNAL-IP`. + +# 9. Set up DNS + +External traffic to the Rancher server will need to be directed at the load balancer you created. + +Set up a DNS to point at the external IP that you saved. This DNS will be used as the Rancher server URL. + +There are many valid ways to set up the DNS. For help, refer to the Google Cloud documentation about [managing DNS records.](https://cloud.google.com/dns/docs/records) + +# 10. Install the Rancher Helm chart + +Next, install the Rancher Helm chart by following the instructions on [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#install-the-rancher-helm-chart) The Helm instructions are the same for installing Rancher on any Kubernetes distribution. + +Use the DNS name from the previous step as the Rancher server URL when you install Rancher. It can be passed in as a Helm option. For example, if the DNS name is `rancher.my.org`, you could run the Helm installation command with the option `--set hostname=rancher.my.org`. diff --git a/versioned_docs/version-2.6/installation/install-rancher-on-k8s/install-rancher-on-k8s.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/install-rancher-on-k8s.md new file mode 100644 index 0000000000..d16d51e655 --- /dev/null +++ b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/install-rancher-on-k8s.md @@ -0,0 +1,301 @@ +--- +title: Install/Upgrade Rancher on a Kubernetes Cluster +description: Learn how to install Rancher in development and production environments. Read about single node and high availability installation +weight: 2 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you'll learn how to deploy Rancher on a Kubernetes cluster using the Helm CLI. + +- [Prerequisites](#prerequisites) +- [Install the Rancher Helm Chart](#install-the-rancher-helm-chart) + +# Prerequisites + +- [Kubernetes Cluster](#kubernetes-cluster) +- [CLI Tools](#cli-tools) +- [Ingress Controller (Only for Hosted Kubernetes)](#ingress-controller-for-hosted-kubernetes) + +### Kubernetes Cluster + +Set up the Rancher server's local Kubernetes cluster. + +Rancher can be installed on any Kubernetes cluster. This cluster can use upstream Kubernetes, or it can use one of Rancher's Kubernetes distributions, or it can be a managed Kubernetes cluster from a provider such as Amazon EKS. 
+ +For help setting up a Kubernetes cluster, we provide these tutorials: + +- **RKE:** For the tutorial to install an RKE Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke/) For help setting up the infrastructure for a high-availability RKE cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha) +- **K3s:** For the tutorial to install a K3s Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-with-external-db) For help setting up the infrastructure for a high-availability K3s cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db) +- **RKE2:** For the tutorial to install an RKE2 Kubernetes cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke2) For help setting up the infrastructure for a high-availability RKE2 cluster, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha) +- **Amazon EKS:** For details on how to install Rancher on Amazon EKS, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/amazon-eks) +- **AKS:** For details on how to install Rancher with Azure Kubernetes Service, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/aks) +- **GKE:** For details on how to install Rancher with Google Kubernetes Engine, including how to install an ingress so that the Rancher server can be accessed, refer to [this page.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/gke) + +### CLI Tools + +The following CLI tools are required for setting up the Kubernetes cluster. Please make sure these tools are installed and available in your `$PATH`. + +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) - Kubernetes command-line tool. +- [helm](https://docs.helm.sh/using_helm/#installing-helm) - Package management for Kubernetes. Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. Refer to the [instructions provided by the Helm project](https://helm.sh/docs/intro/install/) for your specific platform. + +### Ingress Controller (For Hosted Kubernetes) + +To deploy Rancher on a hosted Kubernetes cluster such as EKS, GKE, or AKS, you should deploy a compatible Ingress controller first to configure [SSL termination on Rancher.](#3-choose-your-ssl-configuration) + +For an example of how to deploy an ingress on EKS, refer to [this section.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/amazon-eks/#5-install-an-ingress) + +# Install the Rancher Helm Chart + +Rancher is installed using the [Helm](https://helm.sh/) package manager for Kubernetes. Helm charts provide templating syntax for Kubernetes YAML manifest documents. With Helm, we can create configurable deployments instead of just using static files. 
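+
+As a small illustration of that configurability (a sketch only; it assumes the chart repository has already been added as described in step 1 below, with `rancher-stable` used as an example repository name), you can render the chart locally with an overridden value and inspect the manifests Helm would generate:
+
+```
+# Render the Rancher chart to plain Kubernetes YAML with a custom hostname,
+# without installing anything, and page through the generated manifests.
+helm template rancher rancher-stable/rancher \
+  --namespace cattle-system \
+  --set hostname=rancher.my.org | less
+```
+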
+ +For systems without direct internet access, see [Air Gap: Kubernetes install]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/). + +To choose a Rancher version to install, refer to [Choosing a Rancher Version.]({{}}/rancher/v2.6/en/installation/resources/choosing-version) + +To choose a version of Helm to install Rancher with, refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) + +> **Note:** The installation instructions assume you are using Helm 3. + +To set up Rancher, + +1. [Add the Helm chart repository](#1-add-the-helm-chart-repository) +2. [Create a namespace for Rancher](#2-create-a-namespace-for-rancher) +3. [Choose your SSL configuration](#3-choose-your-ssl-configuration) +4. [Install cert-manager](#4-install-cert-manager) (unless you are bringing your own certificates, or TLS will be terminated on a load balancer) +5. [Install Rancher with Helm and your chosen certificate option](#5-install-rancher-with-helm-and-your-chosen-certificate-option) +6. [Verify that the Rancher server is successfully deployed](#6-verify-that-the-rancher-server-is-successfully-deployed) +7. [Save your options](#7-save-your-options) + +### 1. Add the Helm Chart Repository + +Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). + +{{< release-channel >}} + +``` +helm repo add rancher- https://releases.rancher.com/server-charts/ +``` + +### 2. Create a Namespace for Rancher + +We'll need to define a Kubernetes namespace where the resources created by the Chart should be installed. This should always be `cattle-system`: + +``` +kubectl create namespace cattle-system +``` + +### 3. Choose your SSL Configuration + +The Rancher management server is designed to be secure by default and requires SSL/TLS configuration. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). + +There are three recommended options for the source of the certificate used for TLS termination at the Rancher server: + +- **Rancher-generated TLS certificate:** In this case, you will need to install `cert-manager` into the cluster. Rancher utilizes `cert-manager` to issue and maintain its certificates. Rancher will generate a CA certificate of its own, and sign a cert using that CA. `cert-manager` is then responsible for managing that certificate. +- **Let's Encrypt:** The Let's Encrypt option also uses `cert-manager`. However, in this case, cert-manager is combined with a special Issuer for Let's Encrypt that performs all actions (including request and validation) necessary for getting a Let's Encrypt issued cert. This configuration uses HTTP validation (`HTTP-01`), so the load balancer must have a public DNS record and be accessible from the internet. +- **Bring your own certificate:** This option allows you to bring your own public- or private-CA signed certificate. Rancher will use that certificate to secure websocket and HTTPS traffic. In this case, you must upload this certificate (and associated key) as PEM-encoded files with the name `tls.crt` and `tls.key`. 
If you are using a private CA, you must also upload that certificate. This is due to the fact that this private CA may not be trusted by your nodes. Rancher will take that CA certificate, and generate a checksum from it, which the various Rancher components will use to validate their connection to Rancher. + + +| Configuration | Helm Chart Option | Requires cert-manager | +| ------------------------------ | ----------------------- | ------------------------------------- | +| Rancher Generated Certificates (Default) | `ingress.tls.source=rancher` | [yes](#4-install-cert-manager) | +| Let’s Encrypt | `ingress.tls.source=letsEncrypt` | [yes](#4-install-cert-manager) | +| Certificates from Files | `ingress.tls.source=secret` | no | + +### 4. Install cert-manager + +**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +> You should skip this step if you are bringing your own certificate files (option `ingress.tls.source=secret`), or if you use [TLS termination on an external load balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). + +This step is only required to use certificates issued by Rancher's generated CA (`ingress.tls.source=rancher`) or to request Let's Encrypt issued certificates (`ingress.tls.source=letsEncrypt`). + +
    + Click to Expand + +> **Important:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). + +These instructions are adapted from the [official cert-manager documentation](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). + +``` +# If you have installed the CRDs manually instead of with the `--set installCRDs=true` option added to your Helm install command, you should upgrade your CRD resources before upgrading the Helm chart: +kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml + +# Add the Jetstack Helm repository +helm repo add jetstack https://charts.jetstack.io + +# Update your local Helm chart repository cache +helm repo update + +# Install the cert-manager Helm chart +helm install cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.7.1 +``` + +Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the cert-manager namespace for running pods: + +``` +kubectl get pods --namespace cert-manager + +NAME READY STATUS RESTARTS AGE +cert-manager-5c6866597-zw7kh 1/1 Running 0 2m +cert-manager-cainjector-577f6d9fd7-tr77l 1/1 Running 0 2m +cert-manager-webhook-787858fcdb-nlzsq 1/1 Running 0 2m +``` + +
    + +### 5. Install Rancher with Helm and Your Chosen Certificate Option + +The exact command to install Rancher differs depending on the certificate configuration. + +However, irrespective of the certificate configuration, the name of the Rancher installation in the `cattle-system` namespace should always be `rancher`. + +> **Tip for testing and development:** This final command to install Rancher requires a domain name that forwards traffic to Rancher. If you are using the Helm CLI to set up a proof-of-concept, you can use a fake domain name when passing the `hostname` option. An example of a fake domain name would be `.sslip.io`, which would expose Rancher on an IP where it is running. Production installs would require a real domain name. + + + + +The default is for Rancher to generate a CA and uses `cert-manager` to issue the certificate for access to the Rancher server interface. + +Because `rancher` is the default option for `ingress.tls.source`, we are not specifying `ingress.tls.source` when running the `helm install` command. + +- Set the `hostname` to the DNS name you pointed at your load balancer. +- Set the `bootstrapPassword` to something unique for the `admin` user. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. +- To install a specific Rancher version, use the `--version` flag, example: `--version 2.3.6` + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +This option uses `cert-manager` to automatically request and renew [Let's Encrypt](https://letsencrypt.org/) certificates. This is a free service that provides you with a valid certificate as Let's Encrypt is a trusted CA. + +>**Note:** You need to have port 80 open as the HTTP-01 challenge can only be done on port 80. + +In the following command, + +- `hostname` is set to the public DNS record, +- Set the `bootstrapPassword` to something unique for the `admin` user. +- `ingress.tls.source` is set to `letsEncrypt` +- `letsEncrypt.email` is set to the email address used for communication about your certificate (for example, expiry notices) +- Set `letsEncrypt.ingress.class` to whatever your ingress controller is, e.g., `traefik`, `nginx`, `haproxy`, etc. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=letsEncrypt \ + --set letsEncrypt.email=me@example.org \ + --set letsEncrypt.ingress.class=nginx +``` + +Wait for Rancher to be rolled out: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + + + + +In this option, Kubernetes secrets are created from your own certificates for Rancher to use. + +When you run this command, the `hostname` option must match the `Common Name` or a `Subject Alternative Names` entry in the server certificate or the Ingress controller will fail to configure correctly. 
+ +Although an entry in the `Subject Alternative Names` is technically required, having a matching `Common Name` maximizes compatibility with older browsers and applications. + +> If you want to check if your certificates are correct, see [How do I check Common Name and Subject Alternative Names in my server certificate?]({{}}/rancher/v2.6/en/faq/technical/#how-do-i-check-common-name-and-subject-alternative-names-in-my-server-certificate) + +- Set the `hostname`. +- Set the `bootstrapPassword` to something unique for the `admin` user. +- Set `ingress.tls.source` to `secret`. +- If you are installing an alpha version, Helm requires adding the `--devel` option to the command. + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=secret +``` + +If you are using a Private CA signed certificate , add `--set privateCA=true` to the command: + +``` +helm install rancher rancher-/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin \ + --set ingress.tls.source=secret \ + --set privateCA=true +``` + +Now that Rancher is deployed, see [Adding TLS Secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the Ingress controller can use them. + + + + +The Rancher chart configuration has many options for customizing the installation to suit your specific environment. Here are some common advanced scenarios. + +- [HTTP Proxy]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#http-proxy) +- [Private container image Registry]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#private-registry-and-air-gap-installs) +- [TLS Termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) + +See the [Chart Options]({{}}/rancher/v2.6/en/installation/resources/chart-options/) for the full list of options. + + +### 6. Verify that the Rancher Server is Successfully Deployed + +After adding the secrets, check if Rancher was rolled out successfully: + +``` +kubectl -n cattle-system rollout status deploy/rancher +Waiting for deployment "rancher" rollout to finish: 0 of 3 updated replicas are available... +deployment "rancher" successfully rolled out +``` + +If you see the following error: `error: deployment "rancher" exceeded its progress deadline`, you can check the status of the deployment by running the following command: + +``` +kubectl -n cattle-system get deploy rancher +NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE +rancher 3 3 3 3 3m +``` + +It should show the same count for `DESIRED` and `AVAILABLE`. + +### 7. Save Your Options + +Make sure you save the `--set` options you used. You will need to use the same options when you upgrade Rancher to new versions with Helm. + +### Finishing Up + +That's it. You should have a functional Rancher server. + +In a web browser, go to the DNS name that forwards traffic to your load balancer. Then you should be greeted by the colorful login page. + +Doesn't work? 
Take a look at the [Troubleshooting]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) Page diff --git a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/rollbacks/_index.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/rollbacks/rollbacks.md similarity index 100% rename from content/rancher/v2.6/en/installation/install-rancher-on-k8s/rollbacks/_index.md rename to versioned_docs/version-2.6/installation/install-rancher-on-k8s/rollbacks/rollbacks.md diff --git a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/air-gap-upgrade.md similarity index 100% rename from content/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/_index.md rename to versioned_docs/version-2.6/installation/install-rancher-on-k8s/upgrades/air-gap-upgrade/air-gap-upgrade.md diff --git a/content/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/_index.md b/versioned_docs/version-2.6/installation/install-rancher-on-k8s/upgrades/upgrades.md similarity index 100% rename from content/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/_index.md rename to versioned_docs/version-2.6/installation/install-rancher-on-k8s/upgrades/upgrades.md diff --git a/content/rancher/v2.6/en/installation/_index.md b/versioned_docs/version-2.6/installation/installation.md similarity index 100% rename from content/rancher/v2.6/en/installation/_index.md rename to versioned_docs/version-2.6/installation/installation.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/air-gap/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/air-gap.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/air-gap/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/air-gap/air-gap.md diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/docker-install-commands.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/docker-install-commands.md new file mode 100644 index 0000000000..8c065fb3db --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/docker-install-commands/docker-install-commands.md @@ -0,0 +1,133 @@ +--- +title: Docker Install Commands +weight: 1 +--- + +The Docker installation is for Rancher users who want to test out Rancher. + +Instead of running on a Kubernetes cluster, you install the Rancher server component on a single node using a `docker run` command. Since there is only one node and a single Docker container, if the node goes down, there is no copy of the etcd data available on other nodes and you will lose all the data of your Rancher server. + +The backup application can be used to migrate the Rancher server from a Docker install to a Kubernetes install using [these steps.]({{}}/rancher/v2.6/en/backups/migrating-rancher) + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. 
+ +| Environment Variable Key | Environment Variable Value | Description | +| -------------------------------- | -------------------------------- | ---- | +| `CATTLE_SYSTEM_DEFAULT_REGISTRY` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `CATTLE_SYSTEM_CATALOG` | `bundled` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +> **Do you want to..**. +> +> - Configure custom CA root certificate to access your services? See [Custom CA root certificate]({{}}/rancher/v2.6/en/installation/resources/custom-ca-root-certificate/). +> - Record all transactions with the Rancher API? See [API Auditing]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log). + +Choose from the following options: + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you are installing Rancher in a development or testing environment where identity verification isn't a concern, install Rancher using the self-signed certificate that it generates. This installation option omits the hassle of generating a certificate yourself. + +Log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | + +Privileged access is [required.](#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
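+
+After the command returns, a quick way to confirm the container actually started (plain Docker commands, nothing Rancher-specific) is:
+
+```
+# Show the most recently created container, then follow its logs; replace
+# <container-id> with the ID printed by the first command.
+docker ps --latest
+docker logs --follow <container-id>
+```
+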
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +In development or testing environments where your team will access your Rancher server, create a self-signed certificate for use with your install so that your team can verify they're connecting to your instance of Rancher. + +> **Prerequisites:** +> From a computer with an internet connection, create a self-signed certificate using [OpenSSL](https://www.openssl.org/) or another method of your choice. +> +> - The certificate files must be in PEM format. +> - In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) + +After creating your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Use the `-v` flag and provide the path to your certificates to mount them in your container. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | The path to the certificate authority's certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | + +Privileged access is [required.](#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +
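+
+The prerequisites above leave the certificate creation method up to you. As one illustration, assuming OpenSSL 1.1.1 or newer (for the `-addext` flag) and `rancher.example.com` as a stand-in hostname, a self-signed certificate and key in PEM format could be generated like this:
+
+```
+# Create a private key and a one-year self-signed certificate whose subject
+# and SAN match the hostname Rancher will be reached on. key.pem and cert.pem
+# are the kind of files the -v flags above expect to mount.
+openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+  -keyout key.pem -out cert.pem \
+  -subj "/CN=rancher.example.com" \
+  -addext "subjectAltName=DNS:rancher.example.com"
+```
+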
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +In development or testing environments where you're exposing an app publicly, use a certificate signed by a recognized CA so that your user base doesn't encounter security warnings. + +> **Prerequisite:** The certificate files must be in PEM format. + +After obtaining your certificate, log into your Linux host, and then run the installation command below. When entering the command, use the table below to replace each placeholder. Because your certificate is signed by a recognized CA, mounting an additional CA certificate file is unnecessary. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `` | The path to the directory containing your certificate files. | +| `` | The path to your full certificate chain. | +| `` | The path to the private key for your certificate. | +| `` | Your private registry URL and port. | +| `` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to install. | + +> **Note:** Use the `--no-cacerts` as argument to the container to disable the default CA certificate generated by Rancher. + +Privileged access is [required.](#privileged-access-for-rancher) + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --no-cacerts \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged + /rancher/rancher: +``` + +
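+
+Before mounting the files, it can save a round of troubleshooting to confirm the PEM files contain what you expect. A quick check with standard OpenSSL commands (the file names here are placeholders for your own certificate chain and key):
+
+```
+# Print the subject, issuer and validity window of the server certificate,
+# then confirm the private key matches it by comparing public-key digests.
+openssl x509 -in fullchain.pem -noout -subject -issuer -dates
+openssl x509 -in fullchain.pem -noout -pubkey | openssl sha256
+openssl pkey -in privkey.pem -pubout | openssl sha256
+```
+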
    + + + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.6/en/faq/telemetry/) during the initial login. + diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md new file mode 100644 index 0000000000..6ee7535d61 --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/install-rancher/install-rancher.md @@ -0,0 +1,245 @@ +--- +title: 4. Install Rancher +weight: 400 +--- + +This section is about how to deploy Rancher for your air gapped environment in a high-availability Kubernetes installation. An air gapped environment could be where Rancher server will be installed offline, behind a firewall, or behind a proxy. + +### Privileged Access for Rancher + +When the Rancher server is deployed in the Docker container, a local Kubernetes cluster is installed within the container for Rancher to use. Because many features of Rancher run as deployments, and privileged mode is required to run containers within containers, you will need to install Rancher with the `--privileged` option. + +# Docker Instructions + +If you want to continue the air gapped installation using Docker commands, skip the rest of this page and follow the instructions on [this page.](./docker-install-commands) + +# Kubernetes Instructions + +Rancher recommends installing Rancher on a Kubernetes cluster. A highly available Kubernetes install is comprised of three nodes running the Rancher server components on a Kubernetes cluster. The persistence layer (etcd) is also replicated on these three nodes, providing redundancy and data duplication in case one of the nodes fails. + +This section describes installing Rancher: + +- [1. Add the Helm Chart Repository](#1-add-the-helm-chart-repository) +- [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration) +- [3. Render the Rancher Helm Template](#3-render-the-rancher-helm-template) +- [4. Install Rancher](#4-install-rancher) + +# 1. Add the Helm Chart Repository + +From a system that has access to the internet, fetch the latest Helm chart and copy the resulting manifests to a system that has access to the Rancher server cluster. + +1. If you haven't already, install `helm` locally on a workstation that has internet access. Note: Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. + +2. Use `helm repo add` command to add the Helm chart repository that contains charts to install Rancher. For more information about the repository choices and which is best for your use case, see [Choosing a Version of Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories). + {{< release-channel >}} + ``` + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +3. Fetch the latest Rancher chart. This will pull down the chart and save it in the current directory as a `.tgz` file. + ```plain + helm fetch rancher-/rancher + ``` + + If you require a specific version of Rancher, you can fetch this with the Helm `--version` parameter like in the following example: + ```plain + helm fetch rancher-stable/rancher --version=v2.4.8 + ``` + +# 2. 
Choose your SSL Configuration + +Rancher Server is designed to be secure by default and requires SSL/TLS configuration. + +When Rancher is installed on an air gapped Kubernetes cluster, there are two recommended options for the source of the certificate. + +> **Note:** If you want terminate SSL/TLS externally, see [TLS termination on an External Load Balancer]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination). + +| Configuration | Chart option | Description | Requires cert-manager | +| ------------------------------------------ | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Rancher Generated Self-Signed Certificates | `ingress.tls.source=rancher` | Use certificates issued by Rancher's generated CA (self signed)
    This is the **default** and does not need to be added when rendering the Helm template. | yes | +| Certificates from Files | `ingress.tls.source=secret` | Use your own certificate files by creating Kubernetes Secret(s).
    This option must be passed when rendering the Rancher Helm template. | no | + +# Helm Chart Options for Air Gap Installations + +When setting up the Rancher Helm template, there are several options in the Helm chart that are designed specifically for air gap installations. + +| Chart Option | Chart Value | Description | +| ----------------------- | -------------------------------- | ---- | +| `certmanager.version` | `` | Configure proper Rancher TLS issuer depending of running cert-manager version. | +| `systemDefaultRegistry` | `` | Configure Rancher server to always pull from your private registry when provisioning clusters. | +| `useBundledSystemChart` | `true` | Configure Rancher server to use the packaged copy of Helm system charts. The [system charts](https://github.com/rancher/system-charts) repository contains all the catalog items required for features such as monitoring, logging, alerting and global DNS. These [Helm charts](https://github.com/rancher/system-charts) are located in GitHub, but since you are in an air gapped environment, using the charts that are bundled within Rancher is much easier than setting up a Git mirror. | + +# 3. Render the Rancher Helm Template + +Based on the choice your made in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), complete one of the procedures below. + +# Option A: Default Self-Signed Certificate + + +By default, Rancher generates a CA and uses cert-manager to issue the certificate for access to the Rancher server interface. + +> **Note:** +> Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.11.0, please see our [upgrade cert-manager documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). + +### 1. Add the cert-manager repo + +From a system connected to the internet, add the cert-manager repo to Helm: + +```plain +helm repo add jetstack https://charts.jetstack.io +helm repo update +``` + +### 2. Fetch the cert-manager chart + +Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + +**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +```plain +helm fetch jetstack/cert-manager --version v1.7.1 +``` + +### 3. Render the cert-manager template + +Render the cert-manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + +```plain +helm template cert-manager ./cert-manager-v1.7.1.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller \ + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook \ + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector \ + --set startupapicheck.image.repository=/quay.io/jetstack/cert-manager-ctl +``` + +### 4. 
Download the cert-manager CRD + +Download the required CRD file for cert-manager: + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.crds.yaml + ``` + +### 5. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + + +Placeholder | Description +------------|------------- +`` | The version number of the output tarball. +`` | The DNS name you pointed at your load balancer. +`` | The DNS name for your private registry. +`` | Cert-manager version running on k8s cluster. + +```plain +helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set certmanager.version= \ + --set rancherImage=/rancher/rancher \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, example: `--set rancherImageTag=v2.5.8` + +# Option B: Certificates From Files using Kubernetes Secrets + + +### 1. Create secrets + +Create Kubernetes secrets from your own certificates for Rancher to use. The common name for the cert will need to match the `hostname` option in the command below, or the ingress controller will fail to provision the site for Rancher. + +### 2. Render the Rancher template + +Render the Rancher template, declaring your chosen options. Use the reference table below to replace each placeholder. Rancher needs to be configured to use the private registry in order to provision any Rancher launched Kubernetes clusters or Rancher tools. + +| Placeholder | Description | +| -------------------------------- | ----------------------------------------------- | +| `` | The version number of the output tarball. | +| `` | The DNS name you pointed at your load balancer. | +| `` | The DNS name for your private registry. | + +```plain + helm template rancher ./rancher-.tgz --output-dir . \ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +If you are using a Private CA signed cert, add `--set privateCA=true` following `--set ingress.tls.source=secret`: + +```plain + helm template rancher ./rancher-.tgz --output-dir . 
\ + --no-hooks \ # prevent files for Helm hooks from being generated + --namespace cattle-system \ + --set hostname= \ + --set rancherImage=/rancher/rancher \ + --set ingress.tls.source=secret \ + --set privateCA=true \ + --set systemDefaultRegistry= \ # Set a default private registry to be used in Rancher + --set useBundledSystemChart=true # Use the packaged Rancher system charts +``` + +**Optional**: To install a specific Rancher version, set the `rancherImageTag` value, for example: `--set rancherImageTag=v2.3.6` + +Then refer to [Adding TLS Secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) to publish the certificate files so Rancher and the ingress controller can use them. + +# 4. Install Rancher + +Copy the rendered manifest directories to a system that has access to the Rancher server cluster to complete installation. + +Use `kubectl` to create namespaces and apply the rendered manifests. + +If you chose to use self-signed certificates in [2. Choose your SSL Configuration](#2-choose-your-ssl-configuration), install cert-manager. + +### For Self-Signed Certificate Installs, Install Cert-manager + +
    + Click to expand + +If you are using self-signed certificates, install cert-manager: + +1. Create the namespace for cert-manager. +```plain +kubectl create namespace cert-manager +``` + +1. Create the cert-manager CustomResourceDefinitions (CRDs). +```plain +kubectl apply -f cert-manager/cert-manager-crd.yaml +``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above, or else you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Launch cert-manager. +```plain +kubectl apply -R -f ./cert-manager +``` + +
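Before applying the Rancher manifests, it can help to confirm that cert-manager finished rolling out. This quick check is not part of the official procedure and assumes the chart's default deployment name of `cert-manager`:

```plain
kubectl -n cert-manager rollout status deploy/cert-manager
kubectl -n cert-manager get pods
```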
    + +### Install Rancher with kubectl + +```plain +kubectl create namespace cattle-system +kubectl -n cattle-system apply -R -f ./rancher +``` +The installation is complete. + +> **Note:** If you don't intend to send telemetry data, opt out [telemetry]({{}}/rancher/v2.6/en/faq/telemetry/) during the initial login. Leaving this active in an air-gapped environment can cause issues if the sockets cannot be opened successfully. + +# Additional Resources + +These resources could be helpful when installing Rancher: + +- [Rancher Helm chart options]({{}}/rancher/v2.6/en/installation/resources/chart-options/) +- [Adding TLS secrets]({{}}/rancher/v2.6/en/installation/resources/tls-secrets/) +- [Troubleshooting Rancher Kubernetes Installations]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md new file mode 100644 index 0000000000..02959acaaa --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/launch-kubernetes/launch-kubernetes.md @@ -0,0 +1,226 @@ +--- +title: '3. Install Kubernetes (Skip for Docker Installs)' +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> Skip this section if you are installing Rancher on a single node with Docker. + +This section describes how to install a Kubernetes cluster according to our [best practices for the Rancher server environment.]({{}}/rancher/v2.6/en/overview/architecture-recommendations/#environment-for-kubernetes-installations) This cluster should be dedicated to run only the Rancher server. + +Rancher can be installed on any Kubernetes cluster, including hosted Kubernetes providers. + +The steps to set up an air-gapped Kubernetes cluster on RKE or K3s are shown below. + + + + +In this guide, we are assuming you have created your nodes in your air gapped environment and have a secure Docker private registry on your bastion server. + +### Installation Outline + +1. [Prepare Images Directory](#1-prepare-images-directory) +2. [Create Registry YAML](#2-create-registry-yaml) +3. [Install K3s](#3-install-k3s) +4. [Save and Start Using the kubeconfig File](#4-save-and-start-using-the-kubeconfig-file) + +### 1. Prepare Images Directory +Obtain the images tar file for your architecture from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be running. + +Place the tar file in the `images` directory before starting K3s on each node, for example: + +```sh +sudo mkdir -p /var/lib/rancher/k3s/agent/images/ +sudo cp ./k3s-airgap-images-$ARCH.tar /var/lib/rancher/k3s/agent/images/ +``` + +### 2. Create Registry YAML +Create the registries.yaml file at `/etc/rancher/k3s/registries.yaml`. This will tell K3s the necessary details to connect to your private registry. + +The registries.yaml file should look like this before plugging in the necessary information: + +``` +--- +mirrors: + customreg: + endpoint: + - "https://ip-to-server:5000" +configs: + customreg: + auth: + username: xxxxxx # this is the registry username + password: xxxxxx # this is the registry password + tls: + cert_file: + key_file: + ca_file: +``` + +Note, at this time only secure registries are supported with K3s (SSL with custom CA). 
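As a concrete illustration, a `registries.yaml` for a hypothetical registry reachable at `https://registry.example.com:5000` might look like the sketch below; the credentials and CA path are placeholders to replace with your own, and `cert_file`/`key_file` are only needed if the registry requires client certificates:

```
---
mirrors:
  customreg:
    endpoint:
      - "https://registry.example.com:5000"
configs:
  customreg:
    auth:
      username: registryuser     # your registry username
      password: registrypassword # your registry password
    tls:
      ca_file: /etc/ssl/certs/registry-ca.crt # CA that signed the registry certificate
```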
+ +For more information on private registries configuration file for K3s, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/private-registry/) + +### 3. Install K3s + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +Obtain the K3s binary from the [releases](https://github.com/rancher/k3s/releases) page, matching the same version used to get the airgap images tar. +Also obtain the K3s install script at https://get.k3s.io + +Place the binary in `/usr/local/bin` on each node. +Place the install script anywhere on each node, and name it `install.sh`. + +Install K3s on each server: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true ./install.sh +``` + +Install K3s on each agent: + +``` +INSTALL_K3S_SKIP_DOWNLOAD=true K3S_URL=https://myserver:6443 K3S_TOKEN=mynodetoken ./install.sh +``` + +Note, take care to ensure you replace `myserver` with the IP or valid DNS of the server and replace `mynodetoken` with the node-token from the server. +The node-token is on the server at `/var/lib/rancher/k3s/server/node-token` + +>**Note:** K3s additionally provides a `--resolv-conf` flag for kubelets, which may help with configuring DNS in air-gap networks. + +### 4. Save and Start Using the kubeconfig File + +When you installed K3s on each Rancher server node, a `kubeconfig` file was created on the node at `/etc/rancher/k3s/k3s.yaml`. This file contains credentials for full access to the cluster, and you should save this file in a secure location. + +To use this `kubeconfig` file, + +1. Install [kubectl,](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) a Kubernetes command-line tool. +2. Copy the file at `/etc/rancher/k3s/k3s.yaml` and save it to the directory `~/.kube/config` on your local machine. +3. In the kubeconfig file, the `server` directive is defined as localhost. Configure the server as the DNS of your load balancer, referring to port 6443. (The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443.) Here is an example `k3s.yaml`: + +``` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: [CERTIFICATE-DATA] + server: [LOAD-BALANCER-DNS]:6443 # Edit this line + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + password: [PASSWORD] + username: admin +``` + +**Result:** You can now use `kubectl` to manage your K3s cluster. 
If you have more than one kubeconfig file, you can specify which one you want to use by passing in the path to the file when using `kubectl`: + +``` +kubectl --kubeconfig ~/.kube/config/k3s.yaml get pods --all-namespaces +``` + +For more information about the `kubeconfig` file, refer to the [K3s documentation]({{}}/k3s/latest/en/cluster-access/) or the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) about organizing cluster access using `kubeconfig` files. + +### Note on Upgrading + +Upgrading an air-gap environment can be accomplished in the following manner: + +1. Download the new air-gap images (tar file) from the [releases](https://github.com/rancher/k3s/releases) page for the version of K3s you will be upgrading to. Place the tar in the `/var/lib/rancher/k3s/agent/images/` directory on each node. Delete the old tar file. +2. Copy and replace the old K3s binary in `/usr/local/bin` on each node. Copy over the install script at https://get.k3s.io (as it is possible it has changed since the last release). Run the script again just as you had done in the past with the same environment variables. +3. Restart the K3s service (if not restarted automatically by installer). + + + + +We will create a Kubernetes cluster using Rancher Kubernetes Engine (RKE). Before being able to start your Kubernetes cluster, you’ll need to install RKE and create a RKE config file. + +### 1. Install RKE + +Install RKE by following the instructions in the [RKE documentation.]({{}}/rke/latest/en/installation/) + +### 2. Create an RKE Config File + +From a system that can access ports 22/TCP and 6443/TCP on the Linux host node(s) that you set up in a previous step, use the sample below to create a new file named `rancher-cluster.yml`. + +This file is an RKE configuration file, which is a configuration for the cluster you're deploying Rancher to. + +Replace values in the code sample below with help of the _RKE Options_ table. Use the IP address or DNS names of the three nodes you created. + +> **Tip:** For more details on the options available, see the RKE [Config Options]({{}}/rke/latest/en/config-options/). + +
    RKE Options
    + +| Option | Required | Description | +| ------------------ | -------------------- | --------------------------------------------------------------------------------------- | +| `address` | ✓ | The DNS or IP address for the node within the air gapped network. | +| `user` | ✓ | A user that can run Docker commands. | +| `role` | ✓ | List of Kubernetes roles assigned to the node. | +| `internal_address` | optional1 | The DNS or IP address used for internal cluster traffic. | +| `ssh_key_path` | | Path to the SSH private key used to authenticate to the node (defaults to `~/.ssh/id_rsa`). | + +> 1 Some services like AWS EC2 require setting the `internal_address` if you want to use self-referencing security groups or firewalls. + +```yaml +nodes: + - address: 10.10.3.187 # node air gap network IP + internal_address: 172.31.7.22 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.254 # node air gap network IP + internal_address: 172.31.13.132 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + - address: 10.10.3.89 # node air gap network IP + internal_address: 172.31.3.216 # node intra-cluster IP + user: rancher + role: ['controlplane', 'etcd', 'worker'] + ssh_key_path: /home/user/.ssh/id_rsa + +private_registries: + - url: # private registry url + user: rancher + password: '*********' + is_default: true +``` + +### 3. Run RKE + +After configuring `rancher-cluster.yml`, bring up your Kubernetes cluster: + +``` +rke up --config ./rancher-cluster.yml +``` + +### 4. Save Your Files + +> **Important** +> The files mentioned below are needed to maintain, troubleshoot and upgrade your cluster. + +Save a copy of the following files in a secure location: + +- `rancher-cluster.yml`: The RKE cluster configuration file. +- `kube_config_cluster.yml`: The [Kubeconfig file]({{}}/rke/latest/en/kubeconfig/) for the cluster, this file contains credentials for full access to the cluster. +- `rancher-cluster.rkestate`: The [Kubernetes Cluster State file]({{}}/rke/latest/en/installation/#kubernetes-cluster-state), this file contains the current state of the cluster including the RKE configuration and the certificates.

    _The Kubernetes Cluster State file is only created when using RKE v0.2.0 or higher._ + +
    +
    + +> **Note:** The "rancher-cluster" parts of the two latter file names are dependent on how you name the RKE cluster configuration file. + +### Issues or errors? + +See the [Troubleshooting]({{}}/rancher/v2.6/en/installation/resources/troubleshooting/) page. + +### [Next: Install Rancher](../install-rancher) diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md new file mode 100644 index 0000000000..d5565aff80 --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/populate-private-registry/populate-private-registry.md @@ -0,0 +1,294 @@ +--- +title: '2. Collect and Publish Images to your Private Registry' +weight: 200 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to set up your private registry so that when you install Rancher, Rancher will pull all the required images from this registry. + +By default, all images used to [provision Kubernetes clusters]({{}}/rancher/v2.6/en/cluster-provisioning/) or launch any tools in Rancher, e.g. monitoring, pipelines, alerts, are pulled from Docker Hub. In an air gapped installation of Rancher, you will need a private registry that is located somewhere accessible by your Rancher server. Then, you will load the registry with all the images. + +Populating the private registry with images is the same process for installing Rancher with Docker and for installing Rancher on a Kubernetes cluster. + +The steps in this section differ depending on whether or not you are planning to use Rancher to provision a downstream cluster with Windows nodes or not. By default, we provide the steps of how to populate your private registry assuming that Rancher will provision downstream Kubernetes clusters with only Linux nodes. But if you plan on provisioning any [downstream Kubernetes clusters using Windows nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/windows-clusters/), there are separate instructions to support the images needed. + +> **Prerequisites:** +> +> You must have a [private registry](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) available to use. +> +> If the registry has certs, follow [this K3s documentation](https://rancher.com/docs/k3s/latest/en/installation/private-registry/) about adding a private registry. The certs and registry configuration files need to be mounted into the Rancher container. + + + + +For Rancher servers that will only provision Linux clusters, these are the steps to populate your private registry. + +1. [Find the required assets for your Rancher version](#1-find-the-required-assets-for-your-rancher-version) +2. [Collect the cert-manager image](#2-collect-the-cert-manager-image) (unless you are bringing your own certificates or terminating TLS on a load balancer) +3. [Save the images to your workstation](#3-save-the-images-to-your-workstation) +4. [Populate the private registry](#4-populate-the-private-registry) + +### Prerequisites + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +If you will use ARM64 hosts, the registry must support manifests. 
As of April 2020, Amazon Elastic Container Registry does not support manifests. + +### 1. Find the required assets for your Rancher version + +1. Go to our [releases page,](https://github.com/rancher/rancher/releases) find the Rancher v2.x.x release that you want to install, and click **Assets**. Note: Don't use releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's **Assets** section, download the following files, which are required to install Rancher in an air gap environment: + +| Release File | Description | +| ---------------- | -------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + +### 2. Collect the cert-manager image + +> Skip this step if you are using your own certificates, or if you are terminating TLS on an external load balancer. + +In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. + +**Note:** New in v2.6.4, cert-manager versions 1.6.2 and 1.7.1 are compatible. We recommend v1.7.x because v 1.6.x will reach end-of-life on March 30, 2022. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + + > **Note:** Recent changes to cert-manager require an upgrade. If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v1.7.1 + helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + +### 4. Populate the private registry + +Next, you will move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +Move the images in the `rancher-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. 
The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push `rancher-images.txt` and `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.sh --image-list ./rancher-images.txt --registry + ``` + + + + +For Rancher servers that will provision Linux and Windows clusters, there are distinctive steps to populate your private registry for the Windows images and the Linux images. Since a Windows cluster is a mix of Linux and Windows nodes, the Linux images pushed into the private registry are manifests. + +# Windows Steps + +The Windows images need to be collected and pushed from a Windows server workstation. + +1. Find the required assets for your Rancher version +2. Save the images to your Windows Server workstation +3. Prepare the Docker daemon +4. Populate the private registry + +### Prerequisites + +These steps expect you to use a Windows Server 1809 workstation that has internet access, access to your private registry, and at least 50 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + +Your registry must support manifests. As of April 2020, Amazon Elastic Container Registry does not support manifests. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. + +2. From the release's "Assets" section, download the following files: + +| Release File | Description | +|----------------------------|------------------| +| `rancher-windows-images.txt` | This file contains a list of Windows images needed to provision Windows clusters. | +| `rancher-save-images.ps1` | This script pulls all the images in the `rancher-windows-images.txt` from Docker Hub and saves all of the images as `rancher-windows-images.tar.gz`. | +| `rancher-load-images.ps1` | This script loads the images from the `rancher-windows-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Save the images to your Windows Server workstation + +1. Using `powershell`, go to the directory that has the files that were downloaded in the previous step. + +1. Run `rancher-save-images.ps1` to create a tarball of all the required images: + ```plain + ./rancher-save-images.ps1 + ``` + + **Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-windows-images.tar.gz`. Check that the output is in the directory. + + + +### 3. Prepare the Docker daemon + +Append your private registry address to the `allow-nondistributable-artifacts` config field in the Docker daemon (`C:\ProgramData\Docker\config\daemon.json`). Since the base image of Windows images are maintained by the `mcr.microsoft.com` registry, this step is required as the layers in the Microsoft registry are missing from Docker Hub and need to be pulled into the private registry. + + ``` + { + ... + "allow-nondistributable-artifacts": [ + ... + "" + ] + ... 
+ } + ``` + + + +### 4. Populate the private registry + +Move the images in the `rancher-windows-images.tar.gz` to your private registry using the scripts to load the images. + +The `rancher-windows-images.txt` is expected to be on the workstation in the same directory that you are running the `rancher-load-images.ps1` script. The `rancher-windows-images.tar.gz` should also be in the same directory. + +1. Using `powershell`, log into your private registry if required: + ```plain + docker login + ``` + +1. Using `powershell`, use `rancher-load-images.ps1` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + ```plain + ./rancher-load-images.ps1 --registry + ``` + +# Linux Steps + +The Linux images need to be collected and pushed from a Linux host, but _must be done after_ populating the Windows images into the private registry. These step are different from the Linux only steps as the Linux images that are pushed will actually manifests that support Windows and Linux images. + +1. Find the required assets for your Rancher version +2. Collect all the required images +3. Save the images to your Linux workstation +4. Populate the private registry + +### Prerequisites + +You must populate the private registry with the Windows images before populating the private registry with Linux images. If you have already populated the registry with Linux images, you will need to follow these instructions again as they will publish manifests that support Windows and Linux images. + +These steps expect you to use a Linux workstation that has internet access, access to your private registry, and at least 20 GB of disk space. + +The workstation must have Docker 18.02+ in order to support manifests, which are required when provisioning Windows clusters. + + + +### 1. Find the required assets for your Rancher version + +1. Browse to our [releases page](https://github.com/rancher/rancher/releases) and find the Rancher v2.x.x release that you want to install. Don't download releases marked `rc` or `Pre-release`, as they are not stable for production environments. Click **Assets**. + +2. From the release's **Assets** section, download the following files: + +| Release File | Description | +|----------------------------| -------------------------- | +| `rancher-images.txt` | This file contains a list of images needed to install Rancher, provision clusters and user Rancher tools. | +| `rancher-windows-images.txt` | This file contains a list of images needed to provision Windows clusters. | +| `rancher-save-images.sh` | This script pulls all the images in the `rancher-images.txt` from Docker Hub and saves all of the images as `rancher-images.tar.gz`. | +| `rancher-load-images.sh` | This script loads images from the `rancher-images.tar.gz` file and pushes them to your private registry. | + + + +### 2. Collect all the required images + +**For Kubernetes Installs using Rancher Generated Self-Signed Certificate:** In a Kubernetes Install, if you elect to use the Rancher default self-signed TLS certificates, you must add the [`cert-manager`](https://hub.helm.sh/charts/jetstack/cert-manager) image to `rancher-images.txt` as well. You skip this step if you are using you using your own certificates. + +1. Fetch the latest `cert-manager` Helm chart and parse the template for image details: + > **Note:** Recent changes to cert-manager require an upgrade. 
If you are upgrading Rancher and using a version of cert-manager older than v0.12.0, please see our [upgrade documentation]({{}}/rancher/v2.6/en/installation/resources/upgrading-cert-manager/). + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm fetch jetstack/cert-manager --version v0.12.0 + helm template ./cert-manager-.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g >> ./rancher-images.txt + ``` + +2. Sort and unique the images list to remove any overlap between the sources: + ```plain + sort -u rancher-images.txt -o rancher-images.txt + ``` + + + +### 3. Save the images to your workstation + +1. Make `rancher-save-images.sh` an executable: + ``` + chmod +x rancher-save-images.sh + ``` + +1. Run `rancher-save-images.sh` with the `rancher-images.txt` image list to create a tarball of all the required images: + ```plain + ./rancher-save-images.sh --image-list ./rancher-images.txt + ``` + +**Result:** Docker begins pulling the images used for an air gap install. Be patient. This process takes a few minutes. When the process completes, your current directory will output a tarball named `rancher-images.tar.gz`. Check that the output is in the directory. + + + +### 4. Populate the private registry + +Move the images in the `rancher-images.tar.gz` to your private registry using the `rancher-load-images.sh script` to load the images. + +The image list, `rancher-images.txt` or `rancher-windows-images.txt`, is expected to be on the workstation in the same directory that you are running the `rancher-load-images.sh` script. The `rancher-images.tar.gz` should also be in the same directory. + +1. Log into your private registry if required: + ```plain + docker login + ``` + +1. Make `rancher-load-images.sh` an executable: + ``` + chmod +x rancher-load-images.sh + ``` + +1. Use `rancher-load-images.sh` to extract, tag and push the images from `rancher-images.tar.gz` to your private registry: + +```plain +./rancher-load-images.sh --image-list ./rancher-images.txt \ + --windows-image-list ./rancher-windows-images.txt \ + --registry +``` + + + + +### [Next step for Kubernetes Installs - Launch a Kubernetes Cluster]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/launch-kubernetes/) + +### [Next step for Docker Installs - Install Rancher]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/install-rancher/) diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md new file mode 100644 index 0000000000..4b181dfaee --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/air-gap/prepare-nodes/prepare-nodes.md @@ -0,0 +1,178 @@ +--- +title: '1. Set up Infrastructure and Private Registry' +weight: 100 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this section, you will provision the underlying infrastructure for your Rancher management server in an air gapped environment. You will also set up the private Docker registry that must be available to your Rancher node(s). + +An air gapped environment is an environment where the Rancher server is installed offline or behind a firewall. 
+ +The infrastructure depends on whether you are installing Rancher on a K3s Kubernetes cluster, an RKE Kubernetes cluster, or a single Docker container. For more information on each installation option, refer to [this page.]({{}}/rancher/v2.6/en/installation/) + +Rancher can be installed on any Kubernetes cluster. The RKE and K3s Kubernetes infrastructure tutorials below are still included for convenience. + + + + +We recommend setting up the following infrastructure for a high-availability installation: + +- **Two Linux nodes,** typically virtual machines, in the infrastructure provider of your choice. +- **An external database** to store the cluster data. PostgreSQL, MySQL, and etcd are supported. +- **A load balancer** to direct traffic to the two nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) + +For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node) for setting up nodes as instances in Amazon EC2. + +### 2. Set up External Datastore + +The ability to run Kubernetes using a datastore other than etcd sets K3s apart from other Kubernetes distributions. This feature provides flexibility to Kubernetes operators. The available options allow you to select a datastore that best fits your use case. + +For a high-availability K3s installation, you will need to set up one of the following external databases: + +* [PostgreSQL](https://www.postgresql.org/) (certified against versions 10.7 and 11.5) +* [MySQL](https://www.mysql.com/) (certified against version 5.7) +* [etcd](https://etcd.io/) (certified against version 3.3.15) + +When you install Kubernetes, you will pass in details for K3s to connect to the database. + +For an example of one way to set up the database, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds) for setting up a MySQL database on Amazon's RDS service. + +For the complete list of options that are available for configuring a K3s cluster datastore, refer to the [K3s documentation.]({{}}/k3s/latest/en/installation/datastore/) + +### 3. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the K3s tool will deploy a Traefik Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the Traefik Ingress controller to listen for traffic destined for the Rancher hostname. 
The Traefik Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 4. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the load balancer IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 5. Set up a Private Docker Registry + +Rancher supports air gap installs using a private registry. You must have your own private registry or other means of distributing Docker images to your machines. 
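If you do not already have a registry available, one minimal way to stand one up is the upstream `registry:2` image with TLS enabled, using its documented `REGISTRY_HTTP_TLS_*` environment variables. The host paths, port, and file names below are examples only; the official Docker documentation linked at the end of this section covers hardened, production-grade setups:

```plain
docker run -d --restart=always --name private-registry \
  -p 5000:5000 \
  -v /opt/registry/certs:/certs \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
  registry:2
```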
+ +In a later step, when you set up your K3s Kubernetes cluster, you will create a [private registries configuration file]({{}}/k3s/latest/en/installation/private-registry/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +To install the Rancher management server on a high-availability RKE cluster, we recommend setting up the following infrastructure: + +- **Three Linux nodes,** typically virtual machines, in an infrastructure provider such as Amazon's EC2, Google Compute Engine, or vSphere. +- **A load balancer** to direct front-end traffic to the three nodes. +- **A DNS record** to map a URL to the load balancer. This will become the Rancher server URL, and downstream Kubernetes clusters will need to reach it. +- **A private Docker registry** to distribute Docker images to your machines. + +These nodes must be in the same region/data center. You may place these servers in separate availability zones. + +### Why three nodes? + +In an RKE cluster, Rancher server data is stored on etcd. This etcd database runs on all three nodes. + +The etcd database requires an odd number of nodes so that it can always elect a leader with a majority of the etcd cluster. If the etcd database cannot elect a leader, etcd can suffer from [split brain](https://www.quora.com/What-is-split-brain-in-distributed-systems), requiring the cluster to be restored from backup. If one of the three etcd nodes fails, the two remaining nodes can elect a leader because they have the majority of the total number of etcd nodes. + +### 1. Set up Linux Nodes + +These hosts will be disconnected from the internet, but require being able to connect with your private registry. + +Make sure that your nodes fulfill the general installation requirements for [OS, container runtime, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) + +For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/) for setting up nodes as instances in Amazon EC2. + +### 2. Set up the Load Balancer + +You will also need to set up a load balancer to direct traffic to the Rancher replica on both nodes. That will prevent an outage of any single node from taking down communications to the Rancher management server. + +When Kubernetes gets set up in a later step, the RKE tool will deploy an NGINX Ingress controller. This controller will listen on ports 80 and 443 of the worker nodes, answering traffic destined for specific hostnames. + +When Rancher is installed (also in a later step), the Rancher system creates an Ingress resource. That Ingress tells the NGINX Ingress controller to listen for traffic destined for the Rancher hostname. The NGINX Ingress controller, when receiving traffic destined for the Rancher hostname, will forward that traffic to the running Rancher pods in the cluster. + +For your implementation, consider if you want or need to use a Layer-4 or Layer-7 load balancer: + +- **A layer-4 load balancer** is the simpler of the two choices, in which you are forwarding TCP traffic to your nodes. We recommend configuring your load balancer as a Layer 4 balancer, forwarding traffic to ports TCP/80 and TCP/443 to the Rancher management cluster nodes. 
The Ingress controller on the cluster will redirect HTTP traffic to HTTPS and terminate SSL/TLS on port TCP/443. The Ingress controller will forward traffic to port TCP/80 to the Ingress pod in the Rancher deployment. +- **A layer-7 load balancer** is a bit more complicated but can offer features that you may want. For instance, a layer-7 load balancer is capable of handling TLS termination at the load balancer, as opposed to Rancher doing TLS termination itself. This can be beneficial if you want to centralize your TLS termination in your infrastructure. Layer-7 load balancing also offers the capability for your load balancer to make decisions based on HTTP attributes such as cookies, etc. that a layer-4 load balancer is not able to concern itself with. If you decide to terminate the SSL/TLS traffic on a layer-7 load balancer, you will need to use the `--set tls=external` option when installing Rancher in a later step. For more information, refer to the [Rancher Helm chart options.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#external-tls-termination) + +For an example showing how to set up an NGINX load balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/) + +For a how-to guide for setting up an Amazon ELB Network Load Balancer, refer to [this page.]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/) + +> **Important:** +> Do not use this load balancer (i.e, the `local` cluster Ingress) to load balance applications other than Rancher following installation. Sharing this Ingress with other applications may result in websocket errors to Rancher following Ingress configuration reloads for other apps. We recommend dedicating the `local` cluster to Rancher and no other applications. + +### 3. Set up the DNS Record + +Once you have set up your load balancer, you will need to create a DNS record to send traffic to this load balancer. + +Depending on your environment, this may be an A record pointing to the LB IP, or it may be a CNAME pointing to the load balancer hostname. In either case, make sure this record is the hostname that you intend Rancher to respond on. + +You will need to specify this hostname in a later step when you install Rancher, and it is not possible to change it later. Make sure that your decision is a final one. + +For a how-to guide for setting up a DNS record to route domain traffic to an Amazon ELB load balancer, refer to the [official AWS documentation.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer) + +### 4. Set up a Private Docker Registry + +Rancher supports air gap installs using a secure Docker private registry. You must have your own private registry or other means of distributing Docker images to your machines. + +In a later step, when you set up your RKE Kubernetes cluster, you will create a [private registries configuration file]({{}}/rke/latest/en/config-options/private-registries/) with details from this registry. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/deploying/#run-an-externally-accessible-registry) + + + + +> The Docker installation is for Rancher users that are wanting to test out Rancher. 
Since there is only one node and a single Docker container, if the node goes down, you will lose all the data of your Rancher server. +> +> The Rancher backup operator can be used to migrate Rancher from the single Docker container install to an installation on a high-availability Kubernetes cluster. For details, refer to the documentation on [migrating Rancher to a new cluster.]({{}}/rancher/v2.6/en/backups/migrating-rancher) + +### 1. Set up a Linux Node + +This host will be disconnected from the Internet, but needs to be able to connect to your private registry. + +Make sure that your node fulfills the general installation requirements for [OS, Docker, hardware, and networking.]({{}}/rancher/v2.6/en/installation/requirements/) + +For an example of one way to set up Linux nodes, refer to this [tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/) for setting up nodes as instances in Amazon EC2. + +### 2. Set up a Private Docker Registry + +Rancher supports air gap installs using a Docker private registry on your bastion server. You must have your own private registry or other means of distributing Docker images to your machines. + +If you need help with creating a private registry, please refer to the [official Docker documentation.](https://docs.docker.com/registry/) + + + + +### [Next: Collect and Publish Images to your Private Registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/) diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/behind-proxy.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/behind-proxy.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/install-rancher/install-rancher.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/install-rancher/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/install-rancher/install-rancher.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/launch-kubernetes/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/launch-kubernetes/launch-kubernetes.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/behind-proxy/prepare-nodes/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/behind-proxy/prepare-nodes/prepare-nodes.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/_index.md 
b/versioned_docs/version-2.6/installation/other-installation-methods/other-installation-methods.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/other-installation-methods.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/advanced/advanced.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/advanced/advanced.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/proxy/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/proxy/proxy.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/proxy/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/proxy/proxy.md diff --git a/content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/_index.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-docker.md similarity index 100% rename from content/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/_index.md rename to versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-docker.md diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md new file mode 100644 index 0000000000..00e4e1bd25 --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-rollbacks/single-node-rollbacks.md @@ -0,0 +1,84 @@ +--- +title: Rolling Back Rancher Installed with Docker +weight: 1015 +--- + +If a Rancher upgrade does not complete successfully, you'll have to roll back to your Rancher setup that you were using before [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades). Rolling back restores: + +- Your previous version of Rancher. +- Your data backup created before upgrade. + +## Before You Start + +During rollback to a prior version of Rancher, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). Here's an example of a command with a placeholder: + +``` +docker pull rancher/rancher: +``` + +In this command, `` is the version of Rancher you were running before your unsuccessful upgrade. `v2.0.5` for example. + +Cross reference the image and reference table below to learn how to obtain this placeholder data. Write down or copy this information before starting the procedure below. 
+ +Terminal docker ps Command, Displaying Where to Find <PRIOR_RANCHER_VERSION> and <RANCHER_CONTAINER_NAME>![Placeholder Reference]({{}}/img/rancher/placeholder-ref-2.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | ------------------------------------------------------- | +| `` | `v2.0.5` | The rancher/rancher image you used before upgrade. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.0.5` | The version of Rancher that the backup is for. | +| `` | `9-27-18` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher Server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +## Rolling Back Rancher + +If you have issues upgrading Rancher, roll it back to its latest known healthy state by pulling the last version you used and then restoring the backup you made before upgrade. + +>**Warning!** Rolling back to a previous version of Rancher destroys any changes made to Rancher following the upgrade. Unrecoverable data loss may occur. + +1. Using a remote Terminal connection, log into the node running your Rancher Server. + +1. Pull the version of Rancher that you were running before upgrade. Replace the `` with that version. + + For example, if you were running Rancher v2.0.5 before upgrade, pull v2.0.5. + + ``` + docker pull rancher/rancher: + ``` + +1. Stop the container currently running Rancher Server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + You can obtain the name for your Rancher container by entering `docker ps`. + +1. Move the backup tarball that you created during completion of [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades) onto your Rancher Server. Change to the directory that you moved it to. Enter `dir` to confirm that it's there. + + If you followed the naming convention we suggested in [Docker Upgrade]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-upgrades), it will have a name similar to (`rancher-data-backup--.tar.gz`). + +1. Run the following command to replace the data in the `rancher-data` container with the data in the backup tarball, replacing the placeholder. Don't forget to close the quotes. + + ``` + docker run --volumes-from rancher-data \ + -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \ + && tar zxvf /backup/rancher-data-backup--.tar.gz" + ``` + +1. Start a new Rancher Server container with the `` tag placeholder pointing to the data container. + ``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: + ``` + Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + + >**Note:** _Do not_ stop the rollback after initiating it, even if the rollback process seems longer than expected. Stopping the rollback may result in database issues during future upgrades. + +1. Wait a few moments and then open Rancher in a web browser. Confirm that the rollback succeeded and that your data is restored. + +**Result:** Rancher is rolled back to its version and data state before upgrade. 
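For illustration only, here is the full rollback sequence with the example values from the placeholder table above filled in (`v2.0.5`, `festive_mestorf`, and a backup dated `9-27-18`); substitute the values from your own environment, and note that the backup tarball is assumed to already be in the current working directory:

```
docker pull rancher/rancher:v2.0.5
docker stop festive_mestorf
docker run --volumes-from rancher-data \
  -v $PWD:/backup busybox sh -c "rm /var/lib/rancher/* -rf \
  && tar zxvf /backup/rancher-data-backup-v2.0.5-9-27-18.tar.gz"
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  --privileged \
  rancher/rancher:v2.0.5
```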
diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md new file mode 100644 index 0000000000..dcd9d02587 --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/single-node-upgrades/single-node-upgrades.md @@ -0,0 +1,375 @@ +--- +title: Upgrading Rancher Installed with Docker +weight: 1010 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The following instructions will guide you through upgrading a Rancher server that was installed with Docker. + +> **Docker installs are not supported in production environments.** These instructions are provided for testing and development purposes only. If you have already deployed a Docker install in production and need to upgrade to a new Rancher version, we recommend [migrating to the Helm chart install]({{}}/rancher/v2.6/en/backups/migrating-rancher/) before upgrading. + +# Prerequisites + +- **Review the [known upgrade issues]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/#known-upgrade-issues)** section in the Rancher documentation for the most noteworthy issues to consider when upgrading Rancher. A more complete list of known issues for each Rancher version can be found in the release notes on [GitHub](https://github.com/rancher/rancher/releases) and on the [Rancher forums](https://forums.rancher.com/c/announcements/12). Note that upgrades to or from any chart in the [rancher-alpha repository]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/chart-options/#helm-chart-repositories/) aren’t supported. +- **For [air gap installs only,]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap) collect and populate images for the new Rancher server version**. Follow the guide to [populate your private registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry/) with the images for the Rancher version that you want to upgrade to. + +# Placeholder Review + +During upgrade, you'll enter a series of commands, filling placeholders with data from your environment. These placeholders are denoted with angled brackets and all capital letters (``). + +Here's an **example** of a command with a placeholder: + +``` +docker stop +``` + +In this command, `` is the name of your Rancher container. + +# Get Data for Upgrade Commands + +To obtain the data to replace the placeholders, run: + +``` +docker ps +``` + +Write down or copy this information before starting the upgrade. + +Terminal docker ps Command, Displaying Where to Find <RANCHER_CONTAINER_TAG> and <RANCHER_CONTAINER_NAME> + +![Placeholder Reference]({{}}/img/rancher/placeholder-ref.png) + +| Placeholder | Example | Description | +| -------------------------- | -------------------------- | --------------------------------------------------------- | +| `` | `v2.1.3` | The rancher/rancher image you pulled for initial install. | +| `` | `festive_mestorf` | The name of your Rancher container. | +| `` | `v2.1.3` | The version of Rancher that you're creating a backup for. | +| `` | `2018-12-19` | The date that the data container or backup was created. | +
    + +You can obtain `` and `` by logging into your Rancher server by remote connection and entering the command to view the containers that are running: `docker ps`. You can also view containers that are stopped using a different command: `docker ps -a`. Use these commands for help anytime during while creating backups. + +# Upgrade Outline + +During upgrade, you create a copy of the data from your current Rancher container and a backup in case something goes wrong. Then you deploy the new version of Rancher in a new container using your existing data. Follow the steps to upgrade Rancher server: + +- [1. Create a copy of the data from your Rancher server container](#1-create-a-copy-of-the-data-from-your-rancher-server-container) +- [2. Create a backup tarball](#2-create-a-backup-tarball) +- [3. Pull the new Docker image](#3-pull-the-new-docker-image) +- [4. Start the new Rancher server container](#4-start-the-new-rancher-server-container) +- [5. Verify the Upgrade](#5-verify-the-upgrade) +- [6. Clean up your old Rancher server container](#6-clean-up-your-old-rancher-server-container) + +# 1. Create a copy of the data from your Rancher server container + +1. Using a remote Terminal connection, log into the node running your Rancher server. + +1. Stop the container currently running Rancher server. Replace `` with the name of your Rancher container. + + ``` + docker stop + ``` + +1. Use the command below, replacing each placeholder, to create a data container from the Rancher container that you just stopped. + + ``` + docker create --volumes-from --name rancher-data rancher/rancher: + ``` + +# 2. Create a backup tarball + +1. From the data container that you just created (rancher-data), create a backup tarball (rancher-data-backup-<RANCHER_VERSION>-<DATE>.tar.gz). + + This tarball will serve as a rollback point if something goes wrong during upgrade. Use the following command, replacing each placeholder. + + + ``` + docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox tar zcvf /backup/rancher-data-backup--.tar.gz /var/lib/rancher + ``` + + **Step Result:** When you enter this command, a series of commands should run. + +1. Enter the `ls` command to confirm that the backup tarball was created. It will have a name similar to `rancher-data-backup--.tar.gz`. + + ``` + [rancher@ip-10-0-0-50 ~]$ ls + rancher-data-backup-v2.1.3-20181219.tar.gz + ``` + +1. Move your backup tarball to a safe location external from your Rancher server. + +# 3. Pull the New Docker Image + +Pull the image of the Rancher version that you want to upgrade to. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +``` +docker pull rancher/rancher: +``` + +# 4. Start the New Rancher Server Container + +Start a new Rancher server container using the data from the `rancher-data` container. Remember to pass in all the environment variables that you had used when you started the original container. + +>**Important:** _Do not_ stop the upgrade after initiating it, even if the upgrade process seems longer than expected. Stopping the upgrade may result in database migration errors during future upgrades. 
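Before picking one of the start commands below, it may help to see steps 1–3 written out with the example values from the placeholder table above; the values, the abbreviated `docker ps` output, and the `v2.6.0` target tag are illustrative only:

```
# Identify the image tag and container name (output abbreviated)
docker ps
# CONTAINER ID   IMAGE                    ...   NAMES
# 1a2b3c4d5e6f   rancher/rancher:v2.1.3   ...   festive_mestorf

# 1. Stop the container and create a data container from it
docker stop festive_mestorf
docker create --volumes-from festive_mestorf --name rancher-data rancher/rancher:v2.1.3

# 2. Create the backup tarball and confirm it exists
docker run --volumes-from rancher-data -v "$PWD:/backup" --rm busybox \
  tar zcvf /backup/rancher-data-backup-v2.1.3-2018-12-19.tar.gz /var/lib/rancher
ls rancher-data-backup-v2.1.3-2018-12-19.tar.gz

# 3. Pull the image you are upgrading to
docker pull rancher/rancher:v2.6.0
```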
+ +If you used a proxy, see [HTTP Proxy Configuration.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/proxy/) + +If you configured a custom CA root certificate to access your services, see [Custom CA root certificate.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#custom-ca-certificate) + +If you are recording all transactions with the Rancher API, see [API Auditing]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/advanced/#api-audit-log) + +To see the command to use when starting the new Rancher server container, choose from the following options: + +- Docker Upgrade +- Docker Upgrade for Air Gap Installs + + + + +Select which option you had installed Rancher server + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: +``` + +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
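As a hypothetical example, with the version tag filled in as `v2.6.0`, the Option A command might look like the following; the `HTTP_PROXY` variable and its value are placeholders included only to show how an environment variable from the original container would be carried over:

```
# -e HTTP_PROXY is shown only as an example of repeating an
# environment variable that the original container was started with
docker run -d --volumes-from rancher-data \
  --restart=unless-stopped \
  -p 80:80 -p 443:443 \
  -e HTTP_PROXY=http://proxy.example.com:8080 \
  --privileged \
  rancher/rancher:v2.6.0
```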
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. + +Placeholder | Description +------------|------------- + `` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + --privileged \ + rancher/rancher: +``` + +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. Remember to include `--no-cacerts` as an argument to the container to disable the default CA certificate generated by Rancher. + +>**Reminder of the Cert Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + --privileged \ + rancher/rancher: \ + --no-cacerts +``` + +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) +
    + +### Option D: Let's Encrypt Certificate + +
    + Click to expand + +>**Remember:** Let's Encrypt provides rate limits for requesting new certificates. Therefore, limit how often you create or destroy the container. For more information, see [Let's Encrypt documentation on rate limits](https://letsencrypt.org/docs/rate-limits/). + +If you have selected to use [Let's Encrypt](https://letsencrypt.org/) certificates, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to provide the domain that you had used when you originally installed Rancher. + +>**Reminder of the Cert Prerequisites:** +> +>- Create a record in your DNS that binds your Linux host IP address to the hostname that you want to use for Rancher access (`rancher.mydomain.com` for example). +>- Open port `TCP/80` on your Linux host. The Let's Encrypt http-01 challenge can come from any source IP address, so port `TCP/80` must be open to all IP addresses. + +Placeholder | Description +------------|------------- +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. +`` | The domain address that you had originally started with + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + --privileged \ + rancher/rancher: \ + --acme-domain +``` + +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
    + +
    + + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, like when you login or interact with a cluster. + +When starting the new Rancher server container, choose from the following options: + +### Option A: Default Self-Signed Certificate + +
    + Click to expand + +If you have selected to use the Rancher generated self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container. + +Placeholder | Description +------------|------------- +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to to upgrade to. + +``` + docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` + +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
    + +### Option B: Bring Your Own Certificate: Self-Signed + +
    + Click to expand + +If you have selected to bring your own self-signed certificate, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificate that you had originally installed with. + +>**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates in the chain. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | The path to the certificate authority's certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -v //:/etc/rancher/ssl/cacerts.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: +``` +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
    + +### Option C: Bring Your Own Certificate: Signed by Recognized CA + +
    + Click to expand + +If you have selected to use a certificate signed by a recognized CA, you add the `--volumes-from rancher-data` to the command that you had started your original Rancher server container and need to have access to the same certificates that you had originally installed with. + + >**Reminder of the Prerequisite:** The certificate files must be in PEM format. In your certificate file, include all intermediate certificates provided by the recognized CA. Order your certificates with your certificate first, followed by the intermediates. For an example, see [Certificate Troubleshooting.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) + +Placeholder | Description +------------|------------- +`` | The path to the directory containing your certificate files. +`` | The path to your full certificate chain. +`` | The path to the private key for your certificate. +`` | Your private registry URL and port. +`` | The release tag of the [Rancher version]({{}}/rancher/v2.6/en/installation/resources/chart-options/) that you want to upgrade to. + +> **Note:** Use the `--no-cacerts` argument to the container to disable the default CA certificate generated by Rancher. + +``` +docker run -d --volumes-from rancher-data \ + --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v //:/etc/rancher/ssl/cert.pem \ + -v //:/etc/rancher/ssl/key.pem \ + -e CATTLE_SYSTEM_DEFAULT_REGISTRY= \ # Set a default private registry to be used in Rancher + -e CATTLE_SYSTEM_CATALOG=bundled \ # Use the packaged Rancher system charts + --privileged \ + /rancher/rancher: \ + --no-cacerts +``` +Privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher) + +
    + +
    +
    + +**Result:** You have upgraded Rancher. Data from your upgraded server is now saved to the `rancher-data` container for use in future upgrades. + +# 5. Verify the Upgrade + +Log into Rancher. Confirm that the upgrade succeeded by checking the version displayed in the bottom-left corner of the browser window. + +>**Having network issues in your user clusters following upgrade?** +> +> See [Restoring Cluster Networking]({{}}/rancher/v2.0-v2.4/en/installation/install-rancher-on-k8s/upgrades/namespace-migration). + + +# 6. Clean up Your Old Rancher Server Container + +Remove the previous Rancher server container. If you only stop the previous Rancher server container (and don't remove it), the container may restart after the next server reboot. + +# Rolling Back + +If your upgrade does not complete successfully, you can roll back Rancher server and its data back to its last healthy state. For more information, see [Docker Rollback]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/single-node-rollbacks/). diff --git a/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md new file mode 100644 index 0000000000..9a1fc02ee8 --- /dev/null +++ b/versioned_docs/version-2.6/installation/other-installation-methods/single-node-docker/troubleshooting/troubleshooting.md @@ -0,0 +1,88 @@ +--- +title: Certificate Troubleshooting +weight: 4 +--- +### How Do I Know if My Certificates are in PEM Format? + +You can recognize the PEM format by the following traits: + +- The file begins with the following header: + ``` + -----BEGIN CERTIFICATE----- + ``` +- The header is followed by a long string of characters. +- The file ends with a footer: + -----END CERTIFICATE----- + +PEM Certificate Example: + +``` +----BEGIN CERTIFICATE----- +MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV +... more lines +VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== +-----END CERTIFICATE----- +``` + +PEM Certificate Key Example: + +``` +-----BEGIN RSA PRIVATE KEY----- +MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV +... more lines +VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== +-----END RSA PRIVATE KEY----- +``` + +If your key looks like the example below, see [Converting a Certificate Key From PKCS8 to PKCS1.](#converting-a-certificate-key-from-pkcs8-to-pkcs1) + +``` +-----BEGIN PRIVATE KEY----- +MIIGVDCCBDygAwIBAgIJAMiIrEm29kRLMA0GCSqGSIb3DQEBCwUAMHkxCzAJBgNV +... more lines +VWQqljhfacYPgp8KJUJENQ9h5hZ2nSCrI+W00Jcw4QcEdCI8HL5wmg== +-----END PRIVATE KEY----- +``` + +### Converting a Certificate Key From PKCS8 to PKCS1 + +If you are using a PKCS8 certificate key file, Rancher will log the following line: + +``` +ListenConfigController cli-config [listener] failed with : failed to read private key: asn1: structure error: tags don't match (2 vs {class:0 tag:16 length:13 isCompound:true}) +``` + +To make this work, you will need to convert the key from PKCS8 to PKCS1 using the command below: + +``` +openssl rsa -in key.pem -out convertedkey.pem +``` + +You can now use `convertedkey.pem` as certificate key file for Rancher. + +### What is the Order of Certificates if I Want to Add My Intermediate(s)? 
+ +The order of adding certificates is as follows: + +``` +-----BEGIN CERTIFICATE----- +%YOUR_CERTIFICATE% +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +%YOUR_INTERMEDIATE_CERTIFICATE% +-----END CERTIFICATE----- +``` + +### How Do I Validate My Certificate Chain? + +You can validate the certificate chain by using the `openssl` binary. If the output of the command (see the command example below) ends with `Verify return code: 0 (ok)`, your certificate chain is valid. The `ca.pem` file must be the same as you added to the `rancher/rancher` container. + +When using a certificate signed by a recognized Certificate Authority, you can omit the `-CAfile` parameter. + +Command: + +``` +openssl s_client -CAfile ca.pem -connect rancher.yourdomain.com:443 +... + Verify return code: 0 (ok) +``` \ No newline at end of file diff --git a/versioned_docs/version-2.6/installation/requirements/dockershim/dockershim.md b/versioned_docs/version-2.6/installation/requirements/dockershim/dockershim.md new file mode 100644 index 0000000000..610eb3233e --- /dev/null +++ b/versioned_docs/version-2.6/installation/requirements/dockershim/dockershim.md @@ -0,0 +1,44 @@ +--- +title: Dockershim +weight: 300 +--- + +The Dockershim is the CRI compliant layer between the Kubelet and the Docker daemon. As part of the Kubernetes 1.20 release, the [deprecation of the in-tree Dockershim was announced](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/). For more information on the deprecation and its timelines, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). + +RKE clusters now support the external Dockershim to continue leveraging Docker as the CRI runtime. We now implement the upstream open source community external Dockershim announced by [Mirantis and Docker](https://www.mirantis.com/blog/mirantis-to-take-over-support-of-kubernetes-dockershim-2/) to ensure RKE clusters can continue to leverage Docker. + +To enable the external Dockershim, configure the following option. + +``` +enable_cri_dockerd: true +``` + +For users looking to use another container runtime, Rancher has the edge-focused K3s and datacenter-focused RKE2 Kubernetes distributions that use containerd as the default runtime. Imported RKE2 and K3s Kubernetes clusters can then be upgraded and managed through Rancher going forward. + +### FAQ + +
    + +Q. Do I have to upgrade Rancher to get Rancher’s support of the upstream Dockershim? + +A. The upstream support of Dockershim begins for RKE in Kubernetes 1.21. You will need to be on a version of Rancher that supports RKE 1.21. See our support matrix for details. + +
    + +Q. I am currently on RKE with Kubernetes 1.20. Do I need to upgrade to RKE with Kubernetes 1.21 sooner to avoid being out of support for Dockershim? + +A. The version of Dockershim in RKE with Kubernetes 1.20 will continue to work and it is not deprecated until a later release. For information on the timeline, see the [Kubernetes Dockershim Deprecation FAQ](https://kubernetes.io/blog/2020/12/02/dockershim-faq/#when-will-dockershim-be-removed). It will only emit a warning of its future deprecation, which Rancher has mitigated in RKE with Kubernetes 1.21. You can plan your upgrade to 1.21 as you would normally. + +
    + +Q: What are my other options if I don’t want to depend on the Dockershim? + +A: You can use a runtime like containerd with Kubernetes that does not require Dockershim support. RKE2 or K3s are two options for doing this. + +
    + +Q: If I am already using RKE1 and want to switch to RKE2, what are my migration options? + +A: Today, you can stand up a new cluster and migrate workloads to a new RKE2 cluster that uses containerd. Rancher is exploring the possibility of an in-place upgrade path. + +
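For reference, when provisioning an RKE cluster with the RKE CLI, the `enable_cri_dockerd` option shown above is set as a top-level key in `cluster.yml`. The fragment below is only a sketch; nodes, networking, the Kubernetes version, and all other required settings are omitted:

```yaml
# cluster.yml (fragment) – all other required settings omitted
enable_cri_dockerd: true
```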
    diff --git a/versioned_docs/version-2.6/installation/requirements/installing-docker/installing-docker.md b/versioned_docs/version-2.6/installation/requirements/installing-docker/installing-docker.md new file mode 100644 index 0000000000..8b16d15750 --- /dev/null +++ b/versioned_docs/version-2.6/installation/requirements/installing-docker/installing-docker.md @@ -0,0 +1,18 @@ +--- +title: Installing Docker +weight: 1 +--- + +Docker is required to be installed on nodes where the Rancher server will be installed with Helm or Docker. + +There are a couple of options for installing Docker. One option is to refer to the [official Docker documentation](https://docs.docker.com/install/) about how to install Docker on Linux. The steps will vary based on the Linux distribution. + +Another option is to use one of Rancher's Docker installation scripts, which are available for most recent versions of Docker. + +For example, this command could be used to install Docker 20.10 on Ubuntu: + +``` +curl https://releases.rancher.com/install-docker/20.10.sh | sh +``` + +Rancher has installation scripts for every version of upstream Docker that Kubernetes supports. To find out whether a script is available for installing a certain Docker version, refer to this [GitHub repository,](https://github.com/rancher/install-docker) which contains all of Rancher's Docker installation scripts. diff --git a/versioned_docs/version-2.6/installation/requirements/ports/common-ports-table/index.md b/versioned_docs/version-2.6/installation/requirements/ports/common-ports-table/index.md new file mode 100644 index 0000000000..d8cce1a548 --- /dev/null +++ b/versioned_docs/version-2.6/installation/requirements/ports/common-ports-table/index.md @@ -0,0 +1,22 @@ +--- +headless: true +--- +| Protocol | Port | Description | +|:--------: |:----------------: |---------------------------------------------------------------------------------- | +| TCP | 22 | Node driver SSH provisioning | +| TCP | 179 | Calico BGP Port | +| TCP | 2376 | Node driver Docker daemon TLS port | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| UDP | 4789 | Flannel VXLAN overlay networking on Windows cluster | +| TCP | 8443 | Rancher webhook | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 9100 | Default port required by Monitoring to scrape metrics from Linux node-exporters | +| TCP | 9443 | Rancher webhook | +| TCP | 9796 | Default port required by Monitoring to scrape metrics from Windows node-exporters | +| TCP | 6783 | Weave Port | +| UDP | 6783-6784 | Weave UDP Ports | +| TCP | 10250 | Metrics server communication with all nodes API | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | +| TCP/UDP | 30000-32767 | NodePort port range | diff --git a/versioned_docs/version-2.6/installation/requirements/ports/ports.md b/versioned_docs/version-2.6/installation/requirements/ports/ports.md new file mode 100644 index 0000000000..efc5c827e9 --- /dev/null +++ b/versioned_docs/version-2.6/installation/requirements/ports/ports.md @@ -0,0 +1,335 @@ +--- +title: Port Requirements +description: Read about port requirements needed in order for Rancher to operate properly, both for Rancher nodes and downstream Kubernetes cluster nodes +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To 
operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. + +- [Rancher Nodes](#rancher-nodes) + - [Ports for Rancher Server Nodes on K3s](#ports-for-rancher-server-nodes-on-k3s) + - [Ports for Rancher Server Nodes on RKE](#ports-for-rancher-server-nodes-on-rke) + - [Ports for Rancher Server Nodes on RKE2](#ports-for-rancher-server-nodes-on-rke2) + - [Ports for Rancher Server in Docker](#ports-for-rancher-server-in-docker) +- [Downstream Kubernetes Cluster Nodes](#downstream-kubernetes-cluster-nodes) + - [Ports for Rancher Launched Kubernetes Clusters using Node Pools](#ports-for-rancher-launched-kubernetes-clusters-using-node-pools) + - [Ports for Rancher Launched Kubernetes Clusters using Custom Nodes](#ports-for-rancher-launched-kubernetes-clusters-using-custom-nodes) + - [Ports for Hosted Kubernetes Clusters](#ports-for-hosted-kubernetes-clusters) + - [Ports for Registered Clusters](#ports-for-registered-clusters) +- [Other Port Considerations](#other-port-considerations) + - [Commonly Used Ports](#commonly-used-ports) + - [Local Node Traffic](#local-node-traffic) + - [Rancher AWS EC2 Security Group](#rancher-aws-ec2-security-group) + - [Opening SUSE Linux Ports](#opening-suse-linux-ports) + +# Rancher Nodes + +The following table lists the ports that need to be open to and from nodes that are running the Rancher server. + +The port requirements differ based on the Rancher server architecture. + +Rancher can be installed on any Kubernetes cluster. For Rancher installs on a K3s, RKE, or RKE2 Kubernetes cluster, refer to the tabs below. For other Kubernetes distributions, refer to the distribution's documentation for the port requirements for cluster nodes. + +> **Notes:** +> +> - Rancher nodes may also require additional outbound access for any external authentication provider which is configured (LDAP for example). +> - Kubernetes recommends TCP 30000-32767 for node port services. +> - For firewalls, traffic may need to be enabled within the cluster and pod CIDR. +> - Rancher nodes may also need outbound access to an external S3 location which is used for storing cluster backups (Minio for example). + +### Ports for Rancher Server Nodes on K3s + +
    + Click to expand + +The K3s server needs port 6443 to be accessible by the nodes. + +The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +> **Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Server Nodes
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| TCP | 443 |
    • server nodes • agent nodes • hosted/registered Kubernetes • any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl | +| TCP | 6443 | K3s server nodes | Kubernetes API +| UDP | 8472 | K3s server and agent nodes | Required only for Flannel VXLAN. +| TCP | 10250 | K3s server and agent nodes | kubelet + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +| -------- | ---- | -------------------------------------------------------- | --------------------------------------------- | +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using Node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
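As a rough way to sanity-check the inbound rules above from another node, the TCP ports can be probed with a tool such as `nc`; `rancher.example.com` is a placeholder hostname, and UDP 8472 cannot be reliably verified this way:

```
# Probe the K3s server's TCP ports from an agent node or API client
for port in 80 443 6443 10250; do
  nc -zv -w 3 rancher.example.com "$port"
done
```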
    + +### Ports for Rancher Server Nodes on RKE + +
    + Click to expand + +Typically Rancher is installed on three RKE nodes that all have the etcd, control plane and worker roles. + +The following tables break down the port requirements for traffic between the Rancher nodes: + +
    Rules for traffic between Rancher nodes
    + +| Protocol | Port | Description | +|-----|-----|----------------| +| TCP | 443 | Rancher agents | +| TCP | 2379 | etcd client requests | +| TCP | 2380 | etcd peer communication | +| TCP | 6443 | Kubernetes apiserver | +| TCP | 8443 | Nginx Ingress's Validating Webhook | +| UDP | 8472 | Canal/Flannel VXLAN overlay networking | +| TCP | 9099 | Canal/Flannel livenessProbe/readinessProbe | +| TCP | 10250 | Metrics server communication with all nodes | +| TCP | 10254 | Ingress controller livenessProbe/readinessProbe | + +The following tables break down the port requirements for inbound and outbound traffic: + +
    Inbound Rules for Rancher Nodes
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | RKE CLI | SSH provisioning of node by RKE | +| TCP | 80 | Load Balancer/Reverse Proxy | HTTP traffic to Rancher UI/API | +| TCP | 443 |
    • Load Balancer/Reverse Proxy • IPs of all cluster nodes and other API/UI clients | HTTPS traffic to Rancher UI/API | +| TCP | 6443 | Kubernetes API clients | HTTPS traffic to Kubernetes API | + +
    Outbound Rules for Rancher Nodes
    + +| Protocol | Port | Destination | Description | +|-----|-----|----------------|---| +| TCP | 443 | `35.160.43.145`,`35.167.242.46`,`52.33.59.17` | Rancher catalog (git.rancher.io) | +| TCP | 22 | Any node created using a node driver | SSH provisioning of node by node driver | +| TCP | 2376 | Any node created using a node driver | Docker daemon TLS port used by node driver | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | +| TCP | Provider dependent | Port of the Kubernetes API endpoint in hosted cluster | Kubernetes API | + +
    + +### Ports for Rancher Server Nodes on RKE2 + +
    + Click to expand + +The RKE2 server needs port 6443 and 9345 to be accessible by other nodes in the cluster. + +All nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN is used. + +If you wish to utilize the metrics server, you will need to open port 10250 on each node. + +**Important:** The VXLAN port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472. + +
    Inbound Rules for RKE2 Server Nodes
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 9345 | RKE2 agent nodes | Kubernetes API +| TCP | 6443 | RKE2 agent nodes | Kubernetes API +| UDP | 8472 | RKE2 server and agent nodes | Required only for Flannel VXLAN +| TCP | 10250 | RKE2 server and agent nodes | kubelet +| TCP | 2379 | RKE2 server nodes | etcd client port +| TCP | 2380 | RKE2 server nodes | etcd peer port +| TCP | 30000-32767 | RKE2 server and agent nodes | NodePort port range +| TCP | 5473 | Calico-node pod connecting to typha pod | Required when deploying with Calico +| HTTP | 8080 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used | +| HTTPS | 8443 |
    • hosted/registered Kubernetes • any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl. Not needed if you have LB doing TLS termination. | + +Typically all outbound traffic is allowed. + +
    + +### Ports for Rancher Server in Docker + +
    + Click to expand + +The following tables break down the port requirements for Rancher nodes, for inbound and outbound traffic: + +
    Inbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description +|-----|-----|----------------|---| +| TCP | 80 | Load balancer/proxy that does external SSL termination | Rancher UI/API when external SSL termination is used +| TCP | 443 |
    • hosted/registered Kubernetes • any source that needs to be able to use the Rancher UI or API | Rancher agent, Rancher UI/API, kubectl + +
    Outbound Rules for Rancher Node
    + +| Protocol | Port | Source | Description | +|-----|-----|----------------|---| +| TCP | 22 | Any node IP from a node created using Node Driver | SSH provisioning of nodes using Node Driver | +| TCP | 443 | git.rancher.io | Rancher catalog | +| TCP | 2376 | Any node IP from a node created using a node driver | Docker daemon TLS port used by Docker Machine | +| TCP | 6443 | Hosted/Imported Kubernetes API | Kubernetes API server | + +
    + +# Downstream Kubernetes Cluster Nodes + +Downstream Kubernetes clusters run your apps and services. This section describes what ports need to be opened on the nodes in downstream clusters so that Rancher can communicate with them. + +The port requirements differ depending on how the downstream cluster was launched. Each of the tabs below list the ports that need to be opened for different [cluster types]({{}}/rancher/v2.6/en/cluster-provisioning/). + +The following diagram depicts the ports that are opened for each [cluster type]({{}}/rancher/v2.6/en/cluster-provisioning). + +
    Port Requirements for the Rancher Management Plane
    + +![Basic Port Requirements]({{}}/img/rancher/port-communications.svg) + +>**Tip:** +> +>If security isn't a large concern and you're okay with opening a few additional ports, you can use the table in [Commonly Used Ports](#commonly-used-ports) as your port reference instead of the comprehensive tables below. + +### Ports for Rancher Launched Kubernetes Clusters using Node Pools + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) with nodes created in an [Infrastructure Provider]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/). + +>**Note:** +>The required ports are automatically opened by Rancher during creation of clusters in cloud providers like Amazon EC2 or DigitalOcean. + +{{< ports-iaas-nodes >}} + +
    + +### Ports for Rancher Launched Kubernetes Clusters using Custom Nodes + +
    + Click to expand + +The following table depicts the port requirements for [Rancher Launched Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) with [Custom Nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/custom-nodes/). + +{{< ports-custom-nodes >}} + +
    + +### Ports for Hosted Kubernetes Clusters + +
    + Click to expand + +The following table depicts the port requirements for [hosted clusters]({{}}/rancher/v2.6/en/cluster-provisioning/hosted-kubernetes-clusters). + +{{< ports-imported-hosted >}} + +
    + +### Ports for Registered Clusters + +Note: Registered clusters were called imported clusters before Rancher v2.5. + +
    + Click to expand + +The following table depicts the port requirements for [registered clusters]({{}}/rancher/v2.6/en/cluster-provisioning/registered-clusters/). + +{{< ports-imported-hosted >}} + +
    + + +# Other Port Considerations + +### Commonly Used Ports + +These ports are typically opened on your Kubernetes nodes, regardless of what type of cluster it is. + +import CommonPortsTable from '/rancher/v2.6/en/shared-files/_common-ports-table.md'; + + + +---- + +### Local Node Traffic + +Ports marked as `local traffic` (i.e., `9099 TCP`) in the above requirements are used for Kubernetes healthchecks (`livenessProbe` and`readinessProbe`). +These healthchecks are executed on the node itself. In most cloud environments, this local traffic is allowed by default. + +However, this traffic may be blocked when: + +- You have applied strict host firewall policies on the node. +- You are using nodes that have multiple interfaces (multihomed). + +In these cases, you have to explicitly allow this traffic in your host firewall, or in case of public/private cloud hosted machines (i.e. AWS or OpenStack), in your security group configuration. Keep in mind that when using a security group as source or destination in your security group, explicitly opening ports only applies to the private interface of the nodes / instances. + +### Rancher AWS EC2 Security Group + +When using the [AWS EC2 node driver]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/ec2/) to provision cluster nodes in Rancher, you can choose to let Rancher create a security group called `rancher-nodes`. The following rules are automatically added to this security group. + +| Type | Protocol | Port Range | Source/Destination | Rule Type | +|-----------------|:--------:|:-----------:|------------------------|:---------:| +| SSH | TCP | 22 | 0.0.0.0/0 | Inbound | +| HTTP | TCP | 80 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 443 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2376 | 0.0.0.0/0 | Inbound | +| Custom TCP Rule | TCP | 2379-2380 | sg-xxx (rancher-nodes) | Inbound | +| Custom UDP Rule | UDP | 4789 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 6443 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 8472 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10250-10252 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 10256 | sg-xxx (rancher-nodes) | Inbound | +| Custom TCP Rule | TCP | 30000-32767 | 0.0.0.0/0 | Inbound | +| Custom UDP Rule | UDP | 30000-32767 | 0.0.0.0/0 | Inbound | +| All traffic | All | All | 0.0.0.0/0 | Outbound | + +### Opening SUSE Linux Ports + +SUSE Linux may have a firewall that blocks all ports by default. To open the ports needed for adding the host to a custom cluster, + + + + +1. SSH into the instance. +1. Start YaST in text mode: +``` +sudo yast2 +``` + +1. Navigate to **Security and Users** > **Firewall** > **Zones:public** > **Ports**. To navigate within the interface, follow the instructions [here](https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha-yast-text.html#sec-yast-cli-navigate). +1. To open the required ports, enter them into the **TCP Ports** and **UDP Ports** fields. In this example, ports 9796 and 10250 are also opened for monitoring. The resulting fields should look similar to the following: +```yaml +TCP Ports +22, 80, 443, 2376, 2379, 2380, 6443, 9099, 9796, 10250, 10254, 30000-32767 +UDP Ports +8472, 30000-32767 +``` + +1. When all required ports are enter, select **Accept**. + + + + + +1. SSH into the instance. +1. Edit /`etc/sysconfig/SuSEfirewall2` and open the required ports. 
In this example, ports 9796 and 10250 are also opened for monitoring: + ``` + FW_SERVICES_EXT_TCP="22 80 443 2376 2379 2380 6443 9099 9796 10250 10254 30000:32767" + FW_SERVICES_EXT_UDP="8472 30000:32767" + FW_ROUTE=yes + ``` +1. Restart the firewall with the new ports: + ``` + SuSEfirewall2 + ``` + + + + +**Result:** The node has the open ports required to be added to a custom cluster. diff --git a/versioned_docs/version-2.6/installation/requirements/requirements.md b/versioned_docs/version-2.6/installation/requirements/requirements.md new file mode 100644 index 0000000000..affe086b06 --- /dev/null +++ b/versioned_docs/version-2.6/installation/requirements/requirements.md @@ -0,0 +1,179 @@ +--- +title: Installation Requirements +description: Learn the node requirements for each node running Rancher server when you’re configuring Rancher to run either in a Docker or Kubernetes setup +weight: 1 +--- + +This page describes the software, hardware, and networking requirements for the nodes where the Rancher server will be installed. The Rancher server can be installed on a single node or a high-availability Kubernetes cluster. + +> It is important to note that if you install Rancher on a Kubernetes cluster, requirements are different from the [node requirements for downstream user clusters,]({{}}/rancher/v2.6/en/cluster-provisioning/node-requirements/) which will run your apps and services. + +Make sure the node(s) for the Rancher server fulfill the following requirements: + +- [Operating Systems and Container Runtime Requirements](#operating-systems-and-container-runtime-requirements) + - [RKE Specific Requirements](#rke-specific-requirements) + - [K3s Specific Requirements](#k3s-specific-requirements) + - [RKE2 Specific Requirements](#rke2-specific-requirements) + - [Installing Docker](#installing-docker) +- [Hardware Requirements](#hardware-requirements) +- [CPU and Memory](#cpu-and-memory) + - [RKE and Hosted Kubernetes](#rke-and-hosted-kubernetes) + - [K3s Kubernetes](#k3s-kubernetes) + - [RKE2 Kubernetes](#rke2-kubernetes) + - [Docker](#docker) +- [Ingress](#ingress) + - [Ingress for RKE2](#ingress-for-rke2) + - [Ingress for EKS](#ingress-for-eks) +- [Disks](#disks) +- [Networking Requirements](#networking-requirements) + - [Node IP Addresses](#node-ip-addresses) + - [Port Requirements](#port-requirements) +- [Dockershim Support](#dockershim-support) + +For a list of best practices that we recommend for running the Rancher server in production, refer to the [best practices section.]({{}}/rancher/v2.6/en/best-practices/rancher-server/deployment-types/) + +The Rancher UI works best in Firefox or Chrome. + +# Operating Systems and Container Runtime Requirements + +Rancher should work with any modern Linux distribution. + +Docker is required for nodes that will run RKE Kubernetes clusters. It is not required for Kubernetes installs. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +For details on which OS and Docker versions were tested with each Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +All supported operating systems are 64-bit x86. + +The `ntp` (Network Time Protocol) package should be installed. 
This prevents errors with certificate validation that can occur when the time is not synchronized between the client and server. + +Some distributions of Linux may have default firewall rules that block communication with Helm. We recommend disabling firewalld. For Kubernetes v1.19, v1.20 and v1.21, firewalld must be turned off. + +If you don't feel comfortable doing so you might check suggestions in the [respective issue](https://github.com/rancher/rancher/issues/28840). Some users were successful [creating a separate firewalld zone with a policy of ACCEPT for the Pod CIDR](https://github.com/rancher/rancher/issues/28840#issuecomment-787404822). + +If you plan to run Rancher on ARM64, see [Running on ARM64 (Experimental).]({{}}/rancher/v2.6/en/installation/resources/advanced/arm64-platform/) + +### RKE Specific Requirements + +For the container runtime, RKE should work with any modern Docker version. + +Note that the following sysctl setting must be applied: + +``` +net.bridge.bridge-nf-call-iptables=1 +``` + +### K3s Specific Requirements + +For the container runtime, K3s should work with any modern version of Docker or containerd. + +Rancher needs to be installed on a supported Kubernetes version. To find out which versions of Kubernetes are supported for your Rancher version, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) To specify the K3s version, use the INSTALL_K3S_VERSION environment variable when running the K3s installation script. + +If you are installing Rancher on a K3s cluster with **Raspbian Buster**, follow [these steps]({{}}/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster) to switch to legacy iptables. + +If you are installing Rancher on a K3s cluster with Alpine Linux, follow [these steps]({{}}/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup) for additional setup. + + + +### RKE2 Specific Requirements + +For details on which OS versions were tested with RKE2, refer to the [support maintenance terms.](https://rancher.com/support-maintenance-terms/) + +Docker is not required for RKE2 installs. + +### Installing Docker + +Docker is required for Helm chart installs, and it can be installed by following the steps in the official [Docker documentation.](https://docs.docker.com/) Rancher also provides [scripts]({{}}/rancher/v2.6/en/installation/requirements/installing-docker) to install Docker with one command. + +# Hardware Requirements + +The following sections describe the CPU, memory, and disk requirements for the nodes where the Rancher server is installed. + +# CPU and Memory + +Hardware requirements scale based on the size of your Rancher deployment. Provision each individual node according to the requirements. The requirements are different depending on if you are installing Rancher in a single container with Docker, or if you are installing Rancher on a Kubernetes cluster. + +### RKE and Hosted Kubernetes + +These CPU and memory requirements apply to each host in the Kubernetes cluster where the Rancher server is installed. + +These requirements apply to RKE Kubernetes clusters, as well as to hosted Kubernetes clusters such as EKS. 
+ +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | ---------- | ------------ | -------| ------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + +### K3s Kubernetes + +These CPU and memory requirements apply to each host in a [K3s Kubernetes cluster where the Rancher server is installed.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/) + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | Database Size | +| --------------- | ---------- | ------------ | -------| ---------| ------------------------- | +| Small | Up to 150 | Up to 1500 | 2 | 8 GB | 2 cores, 4 GB + 1000 IOPS | +| Medium | Up to 300 | Up to 3000 | 4 | 16 GB | 2 cores, 4 GB + 1000 IOPS | +| Large | Up to 500 | Up to 5000 | 8 | 32 GB | 2 cores, 4 GB + 1000 IOPS | +| X-Large | Up to 1000 | Up to 10,000 | 16 | 64 GB | 2 cores, 4 GB + 1000 IOPS | +| XX-Large | Up to 2000 | Up to 20,000 | 32 | 128 GB | 2 cores, 4 GB + 1000 IOPS | + +Every use case and environment is different. Please [contact Rancher](https://rancher.com/contact/) to review yours. + + +### RKE2 Kubernetes + +These CPU and memory requirements apply to each instance with RKE2 installed. Minimum recommendations are outlined here. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 2 | 5 GB | +| Medium | Up to 15 | Up to 200 | 3 | 9 GB | + +### Docker + +These CPU and memory requirements apply to a host with a [single-node]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker) installation of Rancher. + +| Deployment Size | Clusters | Nodes | vCPUs | RAM | +| --------------- | -------- | --------- | ----- | ---- | +| Small | Up to 5 | Up to 50 | 1 | 4 GB | +| Medium | Up to 15 | Up to 200 | 2 | 8 GB | + +# Ingress + +Each node in the Kubernetes cluster that Rancher is installed on should run an Ingress. + +The Ingress should be deployed as DaemonSet to ensure your load balancer can successfully route traffic to all nodes. + +For RKE and K3s installations, you don't have to install the Ingress manually because it is installed by default. + +For hosted Kubernetes clusters (EKS, GKE, AKS) and RKE2 Kubernetes installations, you will need to set up the ingress. + +### Ingress for EKS +For an example of how to deploy an nginx-ingress-controller with a LoadBalancer service, refer to [this section.]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/amazon-eks/#5-install-an-ingress) + +# Disks + +Rancher performance depends on etcd in the cluster performance. To ensure optimal speed, we recommend always using SSD disks to back your Rancher management Kubernetes cluster. On cloud providers, you will also want to use the minimum size that allows the maximum IOPS. In larger clusters, consider using dedicated storage devices for etcd data and wal directories. + +# Networking Requirements + +This section describes the networking requirements for the node(s) where the Rancher server is installed. 
+ +> If a server containing Rancher has the `X-Frame-Options=DENY` header, some pages in the new Rancher UI will not be able to render after upgrading from the legacy UI. This is because some legacy pages are embedded as iFrames in the new UI. + +### Node IP Addresses + +Each node used should have a static IP configured, regardless of whether you are installing Rancher on a single node or on an HA cluster. In case of DHCP, each node should have a DHCP reservation to make sure the node gets the same IP allocated. + +### Port Requirements + +To operate properly, Rancher requires a number of ports to be open on Rancher nodes and on downstream Kubernetes cluster nodes. [Port Requirements]({{}}/rancher/v2.6/en/installation/requirements/ports) lists all the necessary ports for Rancher and Downstream Clusters for the different cluster types. + +# Dockershim Support + +For more information on Dockershim support, refer to [this page]({{}}/rancher/v2.6/en/installation/requirements/dockershim/). diff --git a/versioned_docs/version-2.6/installation/resources/advanced/advanced.md b/versioned_docs/version-2.6/installation/resources/advanced/advanced.md new file mode 100644 index 0000000000..f5e4219553 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/advanced/advanced.md @@ -0,0 +1,6 @@ +--- +title: Advanced +weight: 1000 +--- + +The documents in this section contain resources for less common use cases. \ No newline at end of file diff --git a/content/rancher/v2.6/en/installation/resources/advanced/api-audit-log/_index.md b/versioned_docs/version-2.6/installation/resources/advanced/api-audit-log/api-audit-log.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/advanced/api-audit-log/_index.md rename to versioned_docs/version-2.6/installation/resources/advanced/api-audit-log/api-audit-log.md diff --git a/content/rancher/v2.6/en/installation/resources/advanced/arm64-platform/_index.md b/versioned_docs/version-2.6/installation/resources/advanced/arm64-platform/arm64-platform.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/advanced/arm64-platform/_index.md rename to versioned_docs/version-2.6/installation/resources/advanced/arm64-platform/arm64-platform.md diff --git a/versioned_docs/version-2.6/installation/resources/advanced/etcd/etcd.md b/versioned_docs/version-2.6/installation/resources/advanced/etcd/etcd.md new file mode 100644 index 0000000000..a605c7343a --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/advanced/etcd/etcd.md @@ -0,0 +1,40 @@ +--- +title: Tuning etcd for Large Installations +weight: 2 +--- + +When running larger Rancher installations with 15 or more clusters it is recommended to increase the default keyspace for etcd from the default 2GB. The maximum setting is 8GB and the host should have enough RAM to keep the entire dataset in memory. When increasing this value you should also increase the size of the host. The keyspace size can also be adjusted in smaller installations if you anticipate a high rate of change of pods during the garbage collection interval. + +The etcd data set is automatically cleaned up on a five minute interval by Kubernetes. There are situations, e.g. deployment thrashing, where enough events could be written to etcd and deleted before garbage collection occurs and cleans things up causing the keyspace to fill up. 
If you see `mvcc: database space exceeded` errors in the etcd logs or Kubernetes API server logs, you should consider increasing the keyspace size. This can be accomplished by setting the [quota-backend-bytes](https://etcd.io/docs/v3.4.0/op-guide/maintenance/#space-quota) setting on the etcd servers. + +### Example: This snippet of the RKE cluster.yml file increases the keyspace size to 5GB + +```yaml +# RKE cluster.yml +--- +services: + etcd: + extra_args: + quota-backend-bytes: 5368709120 +``` + +## Scaling etcd disk performance + +You can follow the recommendations from [the etcd docs](https://etcd.io/docs/v3.4.0/tuning/#disk) on how to tune the disk priority on the host. + +Additionally, to reduce IO contention on the disks for etcd, you can use a dedicated device for the data and wal directory. Based on etcd best practices, mirrored RAID configurations are unnecessary because etcd replicates data between the nodes in the cluster. You can use striped RAID configurations to increase available IOPS. + +To implement this solution in an RKE cluster, the `/var/lib/etcd/data` and `/var/lib/etcd/wal` directories will need to have disks mounted and formatted on the underlying host. In the `extra_args` directive of the `etcd` service, you must include the `wal_dir` directory. Without specifying the `wal_dir`, the etcd process will try to manipulate the underlying `wal` mount with insufficient permissions. + +```yaml +# RKE cluster.yml +--- +services: + etcd: + extra_args: + data-dir: '/var/lib/rancher/etcd/data/' + wal-dir: '/var/lib/rancher/etcd/wal/wal_dir' + extra_binds: + - '/var/lib/etcd/data:/var/lib/rancher/etcd/data' + - '/var/lib/etcd/wal:/var/lib/rancher/etcd/wal' +``` diff --git a/content/rancher/v2.6/en/installation/resources/advanced/firewall/_index.md b/versioned_docs/version-2.6/installation/resources/advanced/firewall/firewall.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/advanced/firewall/_index.md rename to versioned_docs/version-2.6/installation/resources/advanced/firewall/firewall.md diff --git a/versioned_docs/version-2.6/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md b/versioned_docs/version-2.6/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md new file mode 100644 index 0000000000..fc60748648 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/advanced/single-node-install-external-lb/single-node-install-external-lb.md @@ -0,0 +1,253 @@ +--- +title: Docker Install with TLS Termination at Layer-7 NGINX Load Balancer +weight: 252 +--- + +For development and testing environments that have a special requirement to terminate TLS/SSL at a load balancer instead of your Rancher Server container, deploy Rancher and configure a load balancer to work in conjunction with it. + +A layer-7 load balancer can be beneficial if you want to centralize TLS termination in your infrastructure. Layer-7 load balancing also lets your load balancer make decisions based on HTTP attributes, such as cookies, that a layer-4 load balancer cannot inspect. + +This install procedure walks you through deployment of Rancher using a single container, and then provides a sample configuration for a layer-7 NGINX load balancer.
+ +## Requirements for OS, Docker, Hardware, and Networking + +Make sure that your node fulfills the general [installation requirements.]({{}}/rancher/v2.6/en/installation/requirements/) + +## Installation Outline + + + +- [1. Provision Linux Host](#1-provision-linux-host) +- [2. Choose an SSL Option and Install Rancher](#2-choose-an-ssl-option-and-install-rancher) +- [3. Configure Load Balancer](#3-configure-load-balancer) + + + +## 1. Provision Linux Host + +Provision a single Linux host according to our [Requirements]({{}}/rancher/v2.6/en/installation/requirements) to launch your Rancher Server. + +## 2. Choose an SSL Option and Install Rancher + +For security purposes, SSL (Secure Sockets Layer) is required when using Rancher. SSL secures all Rancher network communication, such as when you log in or interact with a cluster. + +> **Do you want to...** +> +> - Complete an Air Gap Installation? +> - Record all transactions with the Rancher API? +> +> See [Advanced Options](#advanced-options) below before continuing. + +Choose from the following options: + +
    + Option A-Bring Your Own Certificate: Self-Signed + +If you elect to use a self-signed certificate to encrypt communication, you must install the certificate on your load balancer (which you'll do later) and your Rancher container. Run the Docker command to deploy Rancher, pointing it toward your certificate. + +> **Prerequisites:** +> Create a self-signed certificate. +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Self-Signed Cert:** + +1. While running the Docker command to deploy Rancher, point Docker toward your CA certificate file. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /etc/your_certificate_directory/cacerts.pem:/etc/rancher/ssl/cacerts.pem \ + rancher/rancher:latest + ``` + +
    +
+ Option B-Bring Your Own Certificate: Signed by Recognized CA + +If your cluster is public-facing, it's best to use a certificate signed by a recognized CA. + +> **Prerequisites:** +> +> - The certificate files must be in PEM format. + +**To Install Rancher Using a Cert Signed by a Recognized CA:** + +If you use a certificate signed by a recognized CA, installing your certificate in the Rancher container isn't necessary. You do, however, need to make sure that no default CA certificate is generated and stored; you can do this by passing the `--no-cacerts` parameter to the container. + +1. Enter the following command. + + ``` + docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + rancher/rancher:latest --no-cacerts + ``` + +
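+Whichever option you chose, you may want to confirm which certificate the Rancher container is serving before placing the load balancer in front of it. The following is only a quick sketch; it assumes `openssl` is available on the host, and `<RANCHER_NODE_IP>` is a placeholder for the address of the node running the container.
+
+```
+# Print the subject, issuer and validity dates of the certificate served on port 443
+openssl s_client -connect <RANCHER_NODE_IP>:443 </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer -dates
+```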
+ +## 3. Configure Load Balancer + +When using a load balancer in front of your Rancher container, there's no need for the container to redirect communication from port 80 to port 443. Passing the `X-Forwarded-Proto: https` header disables this redirect. + +The load balancer or proxy has to be configured to support the following: + +- **WebSocket** connections +- **SPDY** / **HTTP/2** protocols +- Passing / setting the following headers: + + | Header | Value | Description | + |--------|-------|-------------| + | `Host` | Hostname used to reach Rancher. | To identify the server requested by the client. + | `X-Forwarded-Proto` | `https` | To identify the protocol that a client used to connect to the load balancer or proxy.

    **Note:** If this header is present, `rancher/rancher` does not redirect HTTP to HTTPS. + | `X-Forwarded-Port` | Port used to reach Rancher. | To identify the protocol that client used to connect to the load balancer or proxy. + | `X-Forwarded-For` | IP of the client connection. | To identify the originating IP address of a client. +### Example NGINX configuration + +This NGINX configuration is tested on NGINX 1.14. + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - HTTP Load Balancing](https://docs.nginx.com/nginx/admin-guide/load-balancer/http-load-balancer/). + +- Replace `rancher-server` with the IP address or hostname of the node running the Rancher container. +- Replace both occurrences of `FQDN` to the DNS name for Rancher. +- Replace `/certs/fullchain.pem` and `/certs/privkey.pem` to the location of the server certificate and the server certificate key respectively. + +``` +worker_processes 4; +worker_rlimit_nofile 40000; + +events { + worker_connections 8192; +} + +http { + upstream rancher { + server rancher-server:80; + } + + map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; + } + + server { + listen 443 ssl http2; + server_name FQDN; + ssl_certificate /certs/fullchain.pem; + ssl_certificate_key /certs/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } + } + + server { + listen 80; + server_name FQDN; + return 301 https://$server_name$request_uri; + } +} +``` + +
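+If you prefer not to install NGINX directly on the host, one way to try the configuration above is to run it with the official `nginx` image. This is a sketch only: the host paths (`/etc/nginx.conf`, `/certs`) and the image tag are illustrative and should be adapted to wherever you keep the configuration and certificates.
+
+```
+# Check the configuration, then run NGINX with the config and certificates mounted read-only
+docker run --rm \
+  -v /etc/nginx.conf:/etc/nginx/nginx.conf:ro \
+  -v /certs:/certs:ro \
+  nginx:1.14 nginx -t
+
+docker run -d --restart=unless-stopped \
+  -p 80:80 -p 443:443 \
+  -v /etc/nginx.conf:/etc/nginx/nginx.conf:ro \
+  -v /certs:/certs:ro \
+  nginx:1.14
+```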
    + +## What's Next? + +- **Recommended:** Review [Single Node Backup and Restore]({{}}/rancher/v2.6/en/backups/docker-installs/). Although you don't have any data you need to back up right now, we recommend creating backups after regular Rancher use. +- Create a Kubernetes cluster: [Provisioning Kubernetes Clusters]({{}}/rancher/v2.6/en/cluster-provisioning/). + +
    + +## FAQ and Troubleshooting + +For help troubleshooting certificates, see [this section.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/troubleshooting) + +## Advanced Options + +### API Auditing + +If you want to record all transactions with the Rancher API, enable the [API Auditing]({{}}/rancher/v2.6/en/installation/resources/advanced/api-audit-log) feature by adding the flags below into your install command. + + -e AUDIT_LEVEL=1 \ + -e AUDIT_LOG_PATH=/var/log/auditlog/rancher-api-audit.log \ + -e AUDIT_LOG_MAXAGE=20 \ + -e AUDIT_LOG_MAXBACKUP=20 \ + -e AUDIT_LOG_MAXSIZE=100 \ + +### Air Gap + +If you are visiting this page to complete an [Air Gap Installation]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap), you must pre-pend your private registry URL to the server tag when running the installation command in the option that you choose. Add `` with your private registry URL in front of `rancher/rancher:latest`. + +**Example:** + + /rancher/rancher:latest + +### Persistent Data + +Rancher uses etcd as a datastore. When Rancher is installed with Docker, the embedded etcd is being used. The persistent data is at the following path in the container: `/var/lib/rancher`. + +You can bind mount a host volume to this location to preserve data on the host it is running on: + +``` +docker run -d --restart=unless-stopped \ + -p 80:80 -p 443:443 \ + -v /opt/rancher:/var/lib/rancher \ + --privileged \ + rancher/rancher:latest +``` + +As of Rancher v2.5, privileged access is [required.]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker/#privileged-access-for-rancher-v2-5) + +This layer 7 NGINX configuration is tested on NGINX version 1.13 (mainline) and 1.14 (stable). + +> **Note:** This NGINX configuration is only an example and may not suit your environment. For complete documentation, see [NGINX Load Balancing - TCP and UDP Load Balancer](https://docs.nginx.com/nginx/admin-guide/load-balancer/tcp-udp-load-balancer/). + +``` +upstream rancher { + server rancher-server:80; +} + +map $http_upgrade $connection_upgrade { + default Upgrade; + '' close; +} + +server { + listen 443 ssl http2; + server_name rancher.yourdomain.com; + ssl_certificate /etc/your_certificate_directory/fullchain.pem; + ssl_certificate_key /etc/your_certificate_directory/privkey.pem; + + location / { + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://rancher; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. + proxy_read_timeout 900s; + proxy_buffering off; + } +} + +server { + listen 80; + server_name rancher.yourdomain.com; + return 301 https://$server_name$request_uri; +} +``` + +
    + diff --git a/versioned_docs/version-2.6/installation/resources/bootstrap-password/bootstrap-password.md b/versioned_docs/version-2.6/installation/resources/bootstrap-password/bootstrap-password.md new file mode 100644 index 0000000000..f0875b5e8a --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/bootstrap-password/bootstrap-password.md @@ -0,0 +1,28 @@ +--- +title: Bootstrap Password +weight: 800 +--- + +When Rancher starts for the first time, a password is randomly generated for the first admin user. When the admin first logs in to Rancher, the UI shows commands that can be used to retrieve the bootstrap password. The admin needs to run those commands and log in with the bootstrap password. Then Rancher gives the admin an opportunity to reset the password. + +The bootstrap password is randomly generated if it is not set during installation with a variable. For details on how to set the bootstrap password using a variable, see below. + +### Specifying the Bootstrap Password in Helm Installs + +For a Helm install, users can specify the bootstrap password variable by configuring it in the Helm chart values with `.Values.bootstrapPassword`. + +The password will be stored in a Kubernetes secret. After Rancher is installed, the UI will show instructions for how to retrieve the password using kubectl: + +``` +kubectl get secret --namespace cattle-system bootstrap-secret -o go-template='{{ .data.bootstrapPassword|base64decode}}{{ "\n" }}' +``` + +### Specifying the Bootstrap Password in Docker Installs + +For a Docker install, you can specify the bootstrap password by passing `-e CATTLE_BOOTSTRAP_PASSWORD=password` to the Docker install command. + +The password will be stored in the Docker container logs. After Rancher is installed, the UI will show instructions for how to retrieve the password using the Docker container ID: + +``` +docker logs container-id 2>&1 | grep "Bootstrap Password:" +``` \ No newline at end of file diff --git a/versioned_docs/version-2.6/installation/resources/choosing-version/choosing-version.md b/versioned_docs/version-2.6/installation/resources/choosing-version/choosing-version.md new file mode 100644 index 0000000000..9c6f2c279d --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/choosing-version/choosing-version.md @@ -0,0 +1,101 @@ +--- +title: Choosing a Rancher Version +weight: 1 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This section describes how to choose a Rancher version. + +For a high-availability installation of Rancher, which is recommended for production, the Rancher server is installed using a **Helm chart** on a Kubernetes cluster. Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. + +For Docker installations of Rancher, which is used for development and testing, you will install Rancher as a **Docker image**. + + + + +When installing, upgrading, or rolling back Rancher Server when it is [installed on a Kubernetes cluster]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/), Rancher server is installed using a Helm chart on a Kubernetes cluster. Therefore, as you prepare to install or upgrade a high availability Rancher configuration, you must add a Helm chart repository that contains the charts for installing Rancher. + +Refer to the [Helm version requirements]({{}}/rancher/v2.6/en/installation/resources/helm-version) to choose a version of Helm to install Rancher. 
+ +### Helm Chart Repositories + +Rancher provides several different Helm chart repositories to choose from. We align our latest and stable Helm chart repositories with the Docker tags that are used for a Docker installation. Therefore, the `rancher-latest` repository will contain charts for all the Rancher versions that have been tagged as `rancher/rancher:latest`. When a Rancher version has been promoted to the `rancher/rancher:stable`, it will get added to the `rancher-stable` repository. + +| Type | Command to Add the Repo | Description of the Repo | +| -------------- | ------------ | ----------------- | +| rancher-latest | `helm repo add rancher-latest https://releases.rancher.com/server-charts/latest` | Adds a repository of Helm charts for the latest versions of Rancher. We recommend using this repo for testing out new Rancher builds. | +| rancher-stable | `helm repo add rancher-stable https://releases.rancher.com/server-charts/stable` | Adds a repository of Helm charts for older, stable versions of Rancher. We recommend using this repo for production environments. | +| rancher-alpha | `helm repo add rancher-alpha https://releases.rancher.com/server-charts/alpha` | Adds a repository of Helm charts for alpha versions of Rancher for previewing upcoming releases. These releases are discouraged in production environments. Upgrades _to_ or _from_ charts in the rancher-alpha repository to any other chart, regardless or repository, aren't supported. | + +
    +Instructions on when to select these repos are available below in [Switching to a Different Helm Chart Repository](#switching-to-a-different-helm-chart-repository). + +> **Note:** All charts in the `rancher-stable` repository will correspond with any Rancher version tagged as `stable`. + +### Helm Chart Versions + +Rancher Helm chart versions match the Rancher version (i.e `appVersion`). Once you've added the repo you can search it to show available versions with the following command:
+    `helm search repo --versions` + +If you have several repos, you can specify the repo name, e.g. `helm search repo rancher-stable/rancher --versions`.
+For more information, see https://helm.sh/docs/helm/helm_search_repo/ + +To fetch a specific version of the chart from your chosen repo, pass the `--version` parameter, as in the following example:
    +    `helm fetch rancher-stable/rancher --version=2.4.8` + +### Switching to a Different Helm Chart Repository + +After installing Rancher, if you want to change which Helm chart repository to install Rancher from, you will need to follow these steps. + +> **Note:** Because the rancher-alpha repository contains only alpha charts, switching between the rancher-alpha repository and the rancher-stable or rancher-latest repository for upgrades is not supported. + +{{< release-channel >}} + +1. List the current Helm chart repositories. + + ```plain + helm repo list + + NAME URL + stable https://charts.helm.sh/stable + rancher- https://releases.rancher.com/server-charts/ + ``` + +2. Remove the existing Helm Chart repository that contains your charts to install Rancher, which will either be `rancher-stable` or `rancher-latest` depending on what you had initially added. + + ```plain + helm repo remove rancher- + ``` + +3. Add the Helm chart repository that you want to start installing Rancher from. + + ```plain + helm repo add rancher- https://releases.rancher.com/server-charts/ + ``` + +4. Continue to follow the steps to [upgrade Rancher]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades) from the new Helm chart repository. + +
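+For example, if you originally installed Rancher from the `rancher-latest` repository and want to switch to `rancher-stable`, the sequence might look like the following (the repository names and URLs are the ones listed in the table above):
+
+```plain
+helm repo remove rancher-latest
+helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+helm repo update
+```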
+ + +When performing [Docker installs]({{}}/rancher/v2.6/en/installation/other-installation-methods/single-node-docker), upgrades, or rollbacks, you can use _tags_ to install a specific version of Rancher. + +### Server Tags + +Rancher Server is distributed as a Docker image, which has tags attached to it. You can specify this tag when entering the command to deploy Rancher. Remember that if you use a tag without an explicit version (like `latest` or `stable`), you must explicitly pull a new version of that image tag. Otherwise, any image cached on the host will be used. + +| Tag | Description | +| -------------------------- | ------ | +| `rancher/rancher:latest` | Our latest development release. These builds are validated through our CI automation framework. These releases are not recommended for production environments. | +| `rancher/rancher:stable` | Our newest stable release. This tag is recommended for production. | +| `rancher/rancher:` | You can install specific versions of Rancher by using the tag from a previous release. See what's available at DockerHub. | + +> **Notes:** +> +> - The `master` tag or any tag with `-rc` or another suffix is meant for the Rancher testing team to validate. You should not use these tags, as these builds are not officially supported. +> - Want to install an alpha release for preview? Install using one of the alpha tags listed on our [announcements page](https://forums.rancher.com/c/announcements) (e.g., `v2.2.0-alpha1`). Caveat: Alpha releases cannot be upgraded to or from any other release. + +
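+For instance, if you deployed Rancher with the `stable` tag and want to pick up the newest stable image on the same host, pull the tag again before re-creating the container; otherwise the cached image is reused. This is only the image pull, not the full upgrade procedure:
+
+```plain
+docker pull rancher/rancher:stable
+```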
    diff --git a/content/rancher/v2.6/en/installation/resources/custom-ca-root-certificate/_index.md b/versioned_docs/version-2.6/installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/custom-ca-root-certificate/_index.md rename to versioned_docs/version-2.6/installation/resources/custom-ca-root-certificate/custom-ca-root-certificate.md diff --git a/content/rancher/v2.6/en/installation/resources/feature-flags/continuous-delivery/_index.md b/versioned_docs/version-2.6/installation/resources/feature-flags/continuous-delivery/continuous-delivery.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/feature-flags/continuous-delivery/_index.md rename to versioned_docs/version-2.6/installation/resources/feature-flags/continuous-delivery/continuous-delivery.md diff --git a/content/rancher/v2.6/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md b/versioned_docs/version-2.6/installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/feature-flags/enable-not-default-storage-drivers/_index.md rename to versioned_docs/version-2.6/installation/resources/feature-flags/enable-not-default-storage-drivers/enable-not-default-storage-drivers.md diff --git a/content/rancher/v2.6/en/installation/resources/feature-flags/_index.md b/versioned_docs/version-2.6/installation/resources/feature-flags/feature-flags.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/feature-flags/_index.md rename to versioned_docs/version-2.6/installation/resources/feature-flags/feature-flags.md diff --git a/content/rancher/v2.6/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md b/versioned_docs/version-2.6/installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/feature-flags/istio-virtual-service-ui/_index.md rename to versioned_docs/version-2.6/installation/resources/feature-flags/istio-virtual-service-ui/istio-virtual-service-ui.md diff --git a/versioned_docs/version-2.6/installation/resources/helm-version/helm-version.md b/versioned_docs/version-2.6/installation/resources/helm-version/helm-version.md new file mode 100644 index 0000000000..0fd69cfb62 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/helm-version/helm-version.md @@ -0,0 +1,13 @@ +--- +title: Helm Version Requirements +weight: 3 +--- + +This section contains the requirements for Helm, which is the tool used to install Rancher on a high-availability Kubernetes cluster. + +> The installation instructions have been updated for Helm 3. For migration of installs started with Helm 2, refer to the official [Helm 2 to 3 Migration Docs.](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/) [This section]({{}}/rancher/v2.0-v2.4/en/installation/resources/advanced/helm2/) provides a copy of the older high-availability Rancher installation instructions that used Helm 2, and it is intended to be used if upgrading to Helm 3 is not feasible. + +- Helm v3.2.x or higher is required to install or upgrade Rancher v2.5. +- Helm v2.16.0 or higher is required for Kubernetes v1.16. 
For the default Kubernetes version, refer to the [release notes](https://github.com/rancher/rke/releases) for the version of RKE that you are using. +- Helm v2.15.0 should not be used, because of an issue with converting/comparing numbers. +- Helm v2.12.0 should not be used, because of an issue with `cert-manager`. diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-RKE/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-RKE/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-RKE/ha-RKE.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke2/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-rke2/ha-rke2.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-rke2/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-rke2/ha-rke2.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/ha-with-external-db/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/ha-with-external-db/ha-with-external-db.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/how-ha-works/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/how-ha-works/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/how-ha-works/how-ha-works.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/ec2-node.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha-with-external-db/infra-for-ha-with-external-db.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/_index.md rename to 
versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-ha/infra-for-ha.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/infra-for-rke2-ha.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infra-for-rke2-ha/infra-for-rke2-ha.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/infrastructure-tutorials.md diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/nginx.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/nginx/nginx.md diff --git a/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md new file mode 100644 index 0000000000..be1f6102b7 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/nlb/nlb.md @@ -0,0 +1,179 @@ +--- +title: Setting up Amazon ELB Network Load Balancer +weight: 5 +--- + +This how-to guide describes how to set up a Network Load Balancer (NLB) in Amazon's EC2 service that will direct traffic to multiple instances on EC2. + +These examples show the load balancer being configured to direct traffic to three Rancher server nodes. If Rancher is installed on an RKE Kubernetes cluster, three nodes are required. If Rancher is installed on a K3s Kubernetes cluster, only two nodes are required. + +This tutorial is about one possible way to set up your load balancer, not the only way. Other types of load balancers, such as a Classic Load Balancer or Application Load Balancer, could also direct traffic to the Rancher server nodes. + +Rancher only supports using the Amazon NLB when terminating traffic in `tcp` mode for port 443 rather than `tls` mode. This is due to the fact that the NLB does not inject the correct headers into requests when terminated at the NLB. This means that if you want to use certificates managed by the Amazon Certificate Manager (ACM), you should use an ALB. + +# Setting up the Load Balancer + +Configuring an Amazon NLB is a multistage process: + +1. [Create Target Groups](#1-create-target-groups) +2. [Register Targets](#2-register-targets) +3. [Create Your NLB](#3-create-your-nlb) +4. [Add listener to NLB for TCP port 80](#4-add-listener-to-nlb-for-tcp-port-80) + +# Requirements + +These instructions assume you have already created Linux instances in EC2. 
The load balancer will direct traffic to these nodes. + +# 1. Create Target Groups + +Begin by creating two target groups for the **TCP** protocol, one with TCP port 443 and one regarding TCP port 80 (providing redirect to TCP port 443). You'll add your Linux nodes to these groups. + +Your first NLB configuration step is to create two target groups. Technically, only port 443 is needed to access Rancher, but it's convenient to add a listener for port 80, because traffic to port 80 will be automatically redirected to port 443. + +Regardless of whether an NGINX Ingress or Traefik Ingress controller is used, the Ingress should redirect traffic from port 80 to port 443. + +1. Log into the [Amazon AWS Console](https://console.aws.amazon.com/ec2/) to get started. Make sure to select the **Region** where your EC2 instances (Linux nodes) are created. +1. Select **Services** and choose **EC2**, find the section **Load Balancing** and open **Target Groups**. +1. Click **Create target group** to create the first target group, regarding TCP port 443. + +> **Note:** Health checks are handled differently based on the Ingress. For details, refer to [this section.](#health-check-paths-for-nginx-ingress-and-traefik-ingresses) + +### Target Group (TCP port 443) + +Configure the first target group according to the table below. + +| Option | Setting | +|-------------------|-------------------| +| Target Group Name | `rancher-tcp-443` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `443` | +| VPC | Choose your VPC | + +Health check settings: + +| Option | Setting | +|---------------------|-----------------| +| Protocol | TCP | +| Port | `override`,`80` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +Click **Create target group** to create the second target group, regarding TCP port 80. + +### Target Group (TCP port 80) + +Configure the second target group according to the table below. + +| Option | Setting | +|-------------------|------------------| +| Target Group Name | `rancher-tcp-80` | +| Target type | `instance` | +| Protocol | `TCP` | +| Port | `80` | +| VPC | Choose your VPC | + + +Health check settings: + +| Option |Setting | +|---------------------|----------------| +| Protocol | TCP | +| Port | `traffic port` | +| Healthy threshold | `3` | +| Unhealthy threshold | `3` | +| Timeout | `6 seconds` | +| Interval | `10 seconds` | + +# 2. Register Targets + +Next, add your Linux nodes to both target groups. + +Select the target group named **rancher-tcp-443**, click the tab **Targets** and choose **Edit**. + +{{< img "/img/rancher/ha/nlb/edit-targetgroup-443.png" "Edit target group 443">}} + +Select the instances (Linux nodes) you want to add, and click **Add to registered**. + +*** +**Screenshot Add targets to target group TCP port 443**
    + +{{< img "/img/rancher/ha/nlb/add-targets-targetgroup-443.png" "Add targets to target group 443">}} + +*** +**Screenshot Added targets to target group TCP port 443**
    + +{{< img "/img/rancher/ha/nlb/added-targets-targetgroup-443.png" "Added targets to target group 443">}} + +When the instances are added, click **Save** on the bottom right of the screen. + +Repeat those steps, replacing **rancher-tcp-443** with **rancher-tcp-80**. The same instances need to be added as targets to this target group. + +# 3. Create Your NLB + +Use Amazon's Wizard to create a Network Load Balancer. As part of this process, you'll add the target groups you created in [1. Create Target Groups](#1-create-target-groups). + +1. From your web browser, navigate to the [Amazon EC2 Console](https://console.aws.amazon.com/ec2/). + +2. From the navigation pane, choose **LOAD BALANCING** > **Load Balancers**. + +3. Click **Create Load Balancer**. + +4. Choose **Network Load Balancer** and click **Create**. Then complete each form. + +- [Step 1: Configure Load Balancer](#step-1-configure-load-balancer) +- [Step 2: Configure Routing](#step-2-configure-routing) +- [Step 3: Register Targets](#step-3-register-targets) +- [Step 4: Review](#step-4-review) + +### Step 1: Configure Load Balancer + +Set the following fields in the form: + +- **Name:** `rancher` +- **Scheme:** `internal` or `internet-facing`. The scheme that you choose for your NLB is dependent on the configuration of your instances and VPC. If your instances do not have public IPs associated with them, or you will only be accessing Rancher internally, you should set your NLB Scheme to `internal` rather than `internet-facing`. +- **Listeners:** The Load Balancer Protocol should be `TCP` and the corresponding Load Balancer Port should be set to `443`. +- **Availability Zones:** Select Your **VPC** and **Availability Zones**. + +### Step 2: Configure Routing + +1. From the **Target Group** drop-down, choose **Existing target group**. +1. From the **Name** drop-down, choose `rancher-tcp-443`. +1. Open **Advanced health check settings**, and configure **Interval** to `10 seconds`. + +### Step 3: Register Targets + +Since you registered your targets earlier, all you have to do is click **Next: Review**. + +### Step 4: Review + +Look over the load balancer details and click **Create** when you're satisfied. + +After AWS creates the NLB, click **Close**. + +# 4. Add listener to NLB for TCP port 80 + +1. Select your newly created NLB and select the **Listeners** tab. + +2. Click **Add listener**. + +3. Use `TCP`:`80` as **Protocol** : **Port** + +4. Click **Add action** and choose **Forward to..**. + +5. From the **Forward to** drop-down, choose `rancher-tcp-80`. + +6. Click **Save** in the top right of the screen. + +# Health Check Paths for NGINX Ingress and Traefik Ingresses + +K3s and RKE Kubernetes clusters handle health checks differently because they use different Ingresses by default. + +For RKE Kubernetes clusters, NGINX Ingress is used by default, whereas for K3s Kubernetes clusters, Traefik is the default Ingress. + +- **Traefik:** The health check path is `/ping`. By default `/ping` is always matched (regardless of Host), and a response from [Traefik itself](https://docs.traefik.io/operations/ping/) is always served. +- **NGINX Ingress:** The default backend of the NGINX Ingress controller has a `/healthz` endpoint. 
By default `/healthz` is always matched (regardless of Host), and a response from [`ingress-nginx` itself](https://github.com/kubernetes/ingress-nginx/blob/0cbe783f43a9313c9c26136e888324b1ee91a72f/charts/ingress-nginx/values.yaml#L212) is always served. + +To simulate an accurate health check, it is a best practice to use the Host header (Rancher hostname) combined with `/ping` or `/healthz` (for K3s or for RKE clusters, respectively) wherever possible, to get a response from the Rancher Pods, not the Ingress. diff --git a/content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/_index.md rename to versioned_docs/version-2.6/installation/resources/k8s-tutorials/infrastructure-tutorials/rds/rds.md diff --git a/versioned_docs/version-2.6/installation/resources/k8s-tutorials/k8s-tutorials.md b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/k8s-tutorials.md new file mode 100644 index 0000000000..d9947f5ace --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/k8s-tutorials/k8s-tutorials.md @@ -0,0 +1,8 @@ +--- +title: "Don't have a Kubernetes cluster? Try one of these tutorials." +weight: 4 +--- + +This section contains information on how to install a Kubernetes cluster that the Rancher server can be installed on. + +Rancher can run on any Kubernetes cluster. diff --git a/content/rancher/v2.6/en/installation/resources/local-system-charts/_index.md b/versioned_docs/version-2.6/installation/resources/local-system-charts/local-system-charts.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/local-system-charts/_index.md rename to versioned_docs/version-2.6/installation/resources/local-system-charts/local-system-charts.md diff --git a/content/rancher/v2.6/en/installation/resources/_index.md b/versioned_docs/version-2.6/installation/resources/resources.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/_index.md rename to versioned_docs/version-2.6/installation/resources/resources.md diff --git a/content/rancher/v2.6/en/installation/resources/tls-secrets/_index.md b/versioned_docs/version-2.6/installation/resources/tls-secrets/tls-secrets.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/tls-secrets/_index.md rename to versioned_docs/version-2.6/installation/resources/tls-secrets/tls-secrets.md diff --git a/versioned_docs/version-2.6/installation/resources/tls-settings/tls-settings.md b/versioned_docs/version-2.6/installation/resources/tls-settings/tls-settings.md new file mode 100644 index 0000000000..7f3c128157 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/tls-settings/tls-settings.md @@ -0,0 +1,22 @@ +--- +title: TLS Settings +weight: 3 +--- + +Changing the default TLS settings depends on the chosen installation method. + +# Running Rancher in a highly available Kubernetes cluster + +When you install Rancher inside of a Kubernetes cluster, TLS is offloaded at the cluster's ingress controller. 
The possible TLS settings depend on the ingress controller in use: + +* nginx-ingress-controller (default for RKE1 and RKE2): [Default TLS Version and Ciphers](https://kubernetes.github.io/ingress-nginx/user-guide/tls/#default-tls-version-and-ciphers). +* traefik (default for K3s): [TLS Options](https://doc.traefik.io/traefik/https/tls/#tls-options). + +# Running Rancher in a single Docker container + +The default TLS configuration only accepts TLS 1.2 and secure TLS cipher suites. You can change this by setting the following environment variables: + +| Parameter | Description | Default | Available options | +|-----|-----|-----|-----| +| `CATTLE_TLS_MIN_VERSION` | Minimum TLS version | `1.2` | `1.0`, `1.1`, `1.2`, `1.3` | +| `CATTLE_TLS_CIPHERS` | Allowed TLS cipher suites | `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`,
    `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`,
    `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`,
    `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`,
    `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`,
    `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` | See [Golang tls constants](https://golang.org/pkg/crypto/tls/#pkg-constants) | diff --git a/content/rancher/v2.6/en/installation/resources/troubleshooting/_index.md b/versioned_docs/version-2.6/installation/resources/troubleshooting/troubleshooting.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/troubleshooting/_index.md rename to versioned_docs/version-2.6/installation/resources/troubleshooting/troubleshooting.md diff --git a/content/rancher/v2.6/en/installation/resources/update-rancher-cert/_index.md b/versioned_docs/version-2.6/installation/resources/update-rancher-cert/update-rancher-cert.md similarity index 100% rename from content/rancher/v2.6/en/installation/resources/update-rancher-cert/_index.md rename to versioned_docs/version-2.6/installation/resources/update-rancher-cert/update-rancher-cert.md diff --git a/versioned_docs/version-2.6/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md b/versioned_docs/version-2.6/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md new file mode 100644 index 0000000000..393152b315 --- /dev/null +++ b/versioned_docs/version-2.6/installation/resources/upgrading-cert-manager/upgrading-cert-manager.md @@ -0,0 +1,246 @@ +--- +title: Upgrading Cert-Manager +weight: 4 +--- + +Rancher uses cert-manager to automatically generate and renew TLS certificates for HA deployments of Rancher. As of Fall 2019, three important changes to cert-manager are set to occur that you need to take action on if you have an HA deployment of Rancher: + +1. [Let's Encrypt will be blocking cert-manager instances older than 0.8.0 starting November 1st 2019.](https://community.letsencrypt.org/t/blocking-old-cert-manager-versions/98753) +1. [Cert-manager is deprecating and replacing the certificate.spec.acme.solvers field](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/). This change has no exact deadline. +1. [Cert-manager is deprecating `v1alpha1` API and replacing its API group](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/) + +To address these changes, this guide will do two things: + +1. Document the procedure for upgrading cert-manager +1. Explain the cert-manager API changes and link to cert-manager's official documentation for migrating your data + +> **Important:** +> If you are currently running the cert-manager whose version is 1.5 or below, and want to upgrade both Rancher and cert-manager to a new version (1.6+ in the case of cert-manager), then you need to re-install both Rancher and cert-manager due to the API change in cert-manager 1.6. This will also be necessary if you are upgrading from a version of cert manager below 0.11 to a version of cert-manager above 0.11. Follow the steps below: + +> 1. Take a one-time snapshot of your Kubernetes cluster running Rancher server +> 2. Uninstall Rancher, cert-manager, and the CustomResourceDefinition for cert-manager +> 3. Install the newer version of Rancher and cert-manager + +> The reason is that when Helm upgrades Rancher, it will reject the upgrade and show error messages if the running Rancher app does not match the chart template used to install it. 
Because cert-manager changed its API group and we cannot modify released charts for Rancher, there will always be a mismatch in cert-manager's API version, and the upgrade will therefore be rejected. + +# Upgrade Cert-Manager + +The namespace used in these instructions depends on the namespace cert-manager is currently installed in. If it is in `kube-system`, use that namespace in the instructions below. You can verify by running `kubectl get pods --all-namespaces` and checking which namespace the cert-manager-\* pods are listed in. Do not change the namespace cert-manager is running in, as this can cause issues. + +In order to upgrade cert-manager, follow these instructions: + +### Option A: Upgrade cert-manager with Internet Access + +
    + Click to expand + +1. [Back up existing resources](https://cert-manager.io/docs/tutorials/backup/) as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. [Uninstall existing deployment](https://cert-manager.io/docs/installation/uninstall/kubernetes/#uninstalling-with-helm) + + ```plain + helm uninstall cert-manager + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y.Z you installed + + ```plain + kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + +1. Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/vX.Y.Z/cert-manager.crds.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager if needed + + ```plain + kubectl create namespace cert-manager + ``` + +1. Add the Jetstack Helm repository + + ```plain + helm repo add jetstack https://charts.jetstack.io + ``` + +1. Update your local Helm chart repository cache + + ```plain + helm repo update + ``` + +1. Install the new version of cert-manager + + ```plain + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v0.12.0 + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
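+After restoring the backup, it can be useful to confirm that the new `cert-manager.io` API group is being served before moving on. A quick check, assuming `kubectl` access to the cluster:
+
+```plain
+kubectl api-resources --api-group=cert-manager.io
+```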
    + +### Option B: Upgrade cert-manager in an Air Gap Environment + +
    + Click to expand + +### Prerequisites + +Before you can perform the upgrade, you must prepare your air gapped environment by adding the necessary container images to your private registry and downloading or rendering the required Kubernetes manifest files. + +1. Follow the guide to [Prepare your Private Registry]({{}}/rancher/v2.6/en/installation/other-installation-methods/air-gap/populate-private-registry) with the images needed for the upgrade. + +1. From a system connected to the internet, add the cert-manager repo to Helm + + ```plain + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Fetch the latest cert-manager chart available from the [Helm chart repository](https://hub.helm.sh/charts/jetstack/cert-manager). + + ```plain + helm fetch jetstack/cert-manager --version v0.12.0 + ``` + +1. Render the cert manager template with the options you would like to use to install the chart. Remember to set the `image.repository` option to pull the image from your private registry. This will create a `cert-manager` directory with the Kubernetes manifest files. + + The Helm 3 command is as follows: + + ```plain + helm template cert-manager ./cert-manager-v0.12.0.tgz --output-dir . \ + --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + + The Helm 2 command is as follows: + + ```plain + helm template ./cert-manager-v0.12.0.tgz --output-dir . \ + --name cert-manager --namespace cert-manager \ + --set image.repository=/quay.io/jetstack/cert-manager-controller + --set webhook.image.repository=/quay.io/jetstack/cert-manager-webhook + --set cainjector.image.repository=/quay.io/jetstack/cert-manager-cainjector + ``` + +1. Download the required CRD file for cert-manager (old and new) + + ```plain + curl -L -o cert-manager/cert-manager-crd.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml + curl -L -o cert-manager/cert-manager-crd-old.yaml https://raw.githubusercontent.com/jetstack/cert-manager/release-X.Y/deploy/manifests/00-crds.yaml + ``` + +### Install cert-manager + +1. Back up existing resources as a precaution + + ```plain + kubectl get -o yaml --all-namespaces \ + issuer,clusterissuer,certificates,certificaterequests > cert-manager-backup.yaml + ``` + + > **Important:** + > If you are upgrading from a version older than 0.11.0, Update the apiVersion on all your backed up resources from `certmanager.k8s.io/v1alpha1` to `cert-manager.io/v1alpha2`. If you use any cert-manager annotations on any of your other resources, you will need to update them to reflect the new API group. For details, refer to the documentation on [additional annotation changes.](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/#additional-annotation-changes) + +1. Delete the existing cert-manager installation + + ```plain + kubectl -n cert-manager \ + delete deployment,sa,clusterrole,clusterrolebinding \ + -l 'app=cert-manager' -l 'chart=cert-manager-v0.5.2' + ``` + + Delete the CustomResourceDefinition using the link to the version vX.Y you installed + + ```plain + kubectl delete -f cert-manager/cert-manager-crd-old.yaml + ``` + +1. 
Install the CustomResourceDefinition resources separately + + ```plain + kubectl apply -f cert-manager/cert-manager-crd.yaml + ``` + + > **Note:** + > If you are running Kubernetes v1.15 or below, you will need to add the `--validate=false` flag to your `kubectl apply` command above. Otherwise, you will receive a validation error relating to the `x-kubernetes-preserve-unknown-fields` field in cert-manager’s CustomResourceDefinition resources. This is a benign error and occurs due to the way kubectl performs resource validation. + +1. Create the namespace for cert-manager + + ```plain + kubectl create namespace cert-manager + ``` + +1. Install cert-manager + + ```plain + kubectl -n cert-manager apply -R -f ./cert-manager + ``` + +1. [Restore back up resources](https://cert-manager.io/docs/tutorials/backup/#restoring-resources) + + ```plain + kubectl apply -f cert-manager-backup.yaml + ``` + +
### Verify the Deployment

Once you’ve installed cert-manager, you can verify it is deployed correctly by checking the `cert-manager` namespace for running pods:

```
kubectl get pods --namespace cert-manager

NAME                                       READY   STATUS    RESTARTS   AGE
cert-manager-5c6866597-zw7kh               1/1     Running   0          2m
cert-manager-cainjector-577f6d9fd7-tr77l   1/1     Running   0          2m
cert-manager-webhook-787858fcdb-nlzsq      1/1     Running   0          2m
```

## Cert-Manager API change and data migration

---
_New in v2.6.4_

Rancher now supports cert-manager versions 1.6.2 and 1.7.1. We recommend v1.7.x because v1.6.x will reach end-of-life on March 30, 2022. To read more, see the [cert-manager docs]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/#4-install-cert-manager). For instructions on upgrading cert-manager from version 1.5 to 1.6, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.5-1.6/). For instructions on upgrading cert-manager from version 1.6 to 1.7, see the upstream cert-manager documentation [here](https://cert-manager.io/docs/installation/upgrading/upgrading-1.6-1.7/).

---

Cert-manager has deprecated the use of the `certificate.spec.acme.solvers` field and will drop support for it completely in an upcoming release.

Per the cert-manager documentation, a new format for configuring ACME certificate resources was introduced in v0.8. Specifically, the challenge solver configuration field was moved. Both the old and new formats are supported as of v0.9, but support for the old format will be dropped in an upcoming release of cert-manager. The cert-manager documentation strongly recommends that after upgrading you update your ACME Issuer and Certificate resources to the new format.

Details about the change and migration instructions can be found in the [cert-manager v0.7 to v0.8 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/).

The v0.11 release marks the removal of the v1alpha1 API that was used in previous versions of cert-manager, as well as the API group changing from certmanager.k8s.io to cert-manager.io.

Cert-manager has also removed support for the old configuration format that was deprecated in the v0.8 release. This means you must transition to the new solvers-style configuration format for your ACME issuers before upgrading to v0.11. For more information, see the [upgrading to v0.8 guide](https://cert-manager.io/docs/installation/upgrading/upgrading-0.7-0.8/).

Details about the change and migration instructions can be found in the [cert-manager v0.10 to v0.11 upgrade instructions](https://cert-manager.io/docs/installation/upgrading/upgrading-0.10-0.11/).

For more information, see the [cert-manager upgrade documentation](https://cert-manager.io/docs/installation/upgrading/).
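As a rough illustration of the target state (not taken from this guide), an ACME issuer written in the new solvers style against the post-v0.11 API group looks roughly like the following; the issuer name, email address, secret name, and ingress class are placeholders:

```yaml
# Hypothetical ACME issuer using the new challenge solver format introduced in
# cert-manager v0.8 and required from v0.11 on (API group cert-manager.io).
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
  name: letsencrypt-example           # placeholder name
  namespace: default
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com          # placeholder contact address
    privateKeySecretRef:
      name: letsencrypt-example-key   # placeholder secret name
    solvers:
    - http01:
        ingress:
          class: nginx                # placeholder ingress class
```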
+ diff --git a/versioned_docs/version-2.6/istio/configuration-reference/canal-and-project-network/canal-and-project-network.md b/versioned_docs/version-2.6/istio/configuration-reference/canal-and-project-network/canal-and-project-network.md new file mode 100644 index 0000000000..a1718986b0 --- /dev/null +++ b/versioned_docs/version-2.6/istio/configuration-reference/canal-and-project-network/canal-and-project-network.md @@ -0,0 +1,22 @@ +--- +title: Additional Steps for Project Network Isolation +weight: 4 +--- + +In clusters where: + +- You are using the Canal network plugin with Rancher before v2.5.8, or you are using Rancher v2.5.8+ with an any RKE network plug-in that supports the enforcement of Kubernetes network policies, such as Canal or the Cisco ACI plugin +- The Project Network Isolation option is enabled +- You install the Istio Ingress module + +The Istio Ingress Gateway pod won't be able to redirect ingress traffic to the workloads by default. This is because all the namespaces will be inaccessible from the namespace where Istio is installed. You have two options. + +The first option is to add a new Network Policy in each of the namespaces where you intend to have ingress controlled by Istio. Your policy should include the following lines: + +``` +- podSelector: + matchLabels: + app: istio-ingressgateway +``` + +The second option is to move the `istio-system` namespace to the `system` project, which by default is excluded from the network isolation. \ No newline at end of file diff --git a/content/rancher/v2.6/en/istio/configuration-reference/_index.md b/versioned_docs/version-2.6/istio/configuration-reference/configuration-reference.md similarity index 100% rename from content/rancher/v2.6/en/istio/configuration-reference/_index.md rename to versioned_docs/version-2.6/istio/configuration-reference/configuration-reference.md diff --git a/content/rancher/v2.6/en/istio/configuration-reference/enable-istio-with-psp/_index.md b/versioned_docs/version-2.6/istio/configuration-reference/enable-istio-with-psp/enable-istio-with-psp.md similarity index 100% rename from content/rancher/v2.6/en/istio/configuration-reference/enable-istio-with-psp/_index.md rename to versioned_docs/version-2.6/istio/configuration-reference/enable-istio-with-psp/enable-istio-with-psp.md diff --git a/versioned_docs/version-2.6/istio/configuration-reference/rke2/rke2.md b/versioned_docs/version-2.6/istio/configuration-reference/rke2/rke2.md new file mode 100644 index 0000000000..03615c85b6 --- /dev/null +++ b/versioned_docs/version-2.6/istio/configuration-reference/rke2/rke2.md @@ -0,0 +1,38 @@ +--- +title: Additional Steps for Installing Istio on an RKE2 Cluster +weight: 3 +--- + +When installing or upgrading the Istio Helm chart through **Apps & Marketplace,** + +1. If you are installing the chart, click **Customize Helm options before install** and click **Next**. +1. You will see options for configuring the Istio Helm chart. On the **Components** tab, check the box next to **Enabled CNI**. +1. Add a custom overlay file specifying `cniBinDir` and `cniConfDir`. 
For more information on these options, refer to the [Istio documentation.](https://istio.io/latest/docs/setup/additional-setup/cni/#helm-chart-parameters) An example is below: + + ```yaml + apiVersion: install.istio.io/v1alpha1 + kind: IstioOperator + spec: + components: + cni: + enabled: true + k8s: + overlays: + - apiVersion: "apps/v1" + kind: "DaemonSet" + name: "istio-cni-node" + patches: + - path: spec.template.spec.containers.[name:install-cni].securityContext.privileged + value: true + values: + cni: + image: rancher/mirrored-istio-install-cni:1.9.3 + excludeNamespaces: + - istio-system + - kube-system + logLevel: info + cniBinDir: /opt/cni/bin + cniConfDir: /etc/cni/net.d + ``` + +**Result:** Now you should be able to utilize Istio as desired, including sidecar injection and monitoring via Kiali. diff --git a/content/rancher/v2.6/en/istio/configuration-reference/selectors-and-scrape/_index.md b/versioned_docs/version-2.6/istio/configuration-reference/selectors-and-scrape/selectors-and-scrape.md similarity index 100% rename from content/rancher/v2.6/en/istio/configuration-reference/selectors-and-scrape/_index.md rename to versioned_docs/version-2.6/istio/configuration-reference/selectors-and-scrape/selectors-and-scrape.md diff --git a/content/rancher/v2.6/en/istio/disabling-istio/_index.md b/versioned_docs/version-2.6/istio/disabling-istio/disabling-istio.md similarity index 100% rename from content/rancher/v2.6/en/istio/disabling-istio/_index.md rename to versioned_docs/version-2.6/istio/disabling-istio/disabling-istio.md diff --git a/content/rancher/v2.6/en/istio/_index.md b/versioned_docs/version-2.6/istio/istio.md similarity index 100% rename from content/rancher/v2.6/en/istio/_index.md rename to versioned_docs/version-2.6/istio/istio.md diff --git a/content/rancher/v2.6/en/istio/rbac/_index.md b/versioned_docs/version-2.6/istio/rbac/rbac.md similarity index 100% rename from content/rancher/v2.6/en/istio/rbac/_index.md rename to versioned_docs/version-2.6/istio/rbac/rbac.md diff --git a/content/rancher/v2.6/en/istio/resources/_index.md b/versioned_docs/version-2.6/istio/resources/resources.md similarity index 100% rename from content/rancher/v2.6/en/istio/resources/_index.md rename to versioned_docs/version-2.6/istio/resources/resources.md diff --git a/content/rancher/v2.6/en/istio/setup/deploy-workloads/_index.md b/versioned_docs/version-2.6/istio/setup/deploy-workloads/deploy-workloads.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/deploy-workloads/_index.md rename to versioned_docs/version-2.6/istio/setup/deploy-workloads/deploy-workloads.md diff --git a/content/rancher/v2.6/en/istio/setup/enable-istio-in-cluster/_index.md b/versioned_docs/version-2.6/istio/setup/enable-istio-in-cluster/enable-istio-in-cluster.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/enable-istio-in-cluster/_index.md rename to versioned_docs/version-2.6/istio/setup/enable-istio-in-cluster/enable-istio-in-cluster.md diff --git a/content/rancher/v2.6/en/istio/setup/enable-istio-in-namespace/_index.md b/versioned_docs/version-2.6/istio/setup/enable-istio-in-namespace/enable-istio-in-namespace.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/enable-istio-in-namespace/_index.md rename to versioned_docs/version-2.6/istio/setup/enable-istio-in-namespace/enable-istio-in-namespace.md diff --git a/content/rancher/v2.6/en/istio/setup/gateway/_index.md 
b/versioned_docs/version-2.6/istio/setup/gateway/gateway.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/gateway/_index.md rename to versioned_docs/version-2.6/istio/setup/gateway/gateway.md diff --git a/content/rancher/v2.6/en/istio/setup/set-up-traffic-management/_index.md b/versioned_docs/version-2.6/istio/setup/set-up-traffic-management/set-up-traffic-management.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/set-up-traffic-management/_index.md rename to versioned_docs/version-2.6/istio/setup/set-up-traffic-management/set-up-traffic-management.md diff --git a/content/rancher/v2.6/en/istio/setup/_index.md b/versioned_docs/version-2.6/istio/setup/setup.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/_index.md rename to versioned_docs/version-2.6/istio/setup/setup.md diff --git a/content/rancher/v2.6/en/istio/setup/view-traffic/_index.md b/versioned_docs/version-2.6/istio/setup/view-traffic/view-traffic.md similarity index 100% rename from content/rancher/v2.6/en/istio/setup/view-traffic/_index.md rename to versioned_docs/version-2.6/istio/setup/view-traffic/view-traffic.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/certificates/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/certificates/certificates.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/certificates/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/certificates/certificates.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/configmaps/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/configmaps/configmaps.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/configmaps/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/configmaps/configmaps.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/horitzontal-pod-autoscaler.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/hpa-background/hpa-background.md diff --git a/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md new file mode 100644 index 0000000000..54cb32432b --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-kubectl/manage-hpa-with-kubectl.md @@ -0,0 +1,208 @@ +--- +title: Managing HPAs with kubectl +weight: 3029 +--- + +This section describes HPA management with `kubectl`. 
This document has instructions for how to:

- Create an HPA
- Get information on HPAs
- Delete an HPA
- Configure your HPAs to scale with CPU or memory utilization
- Configure your HPAs to scale using custom metrics, if you use a third-party tool such as Prometheus for metrics

You can create, view, and delete HPAs from the Rancher UI. You can also configure them to scale based on CPU or memory usage from the Rancher UI. For more information, refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui). For scaling HPAs based on metrics other than CPU or memory, you still need `kubectl`.

##### Basic kubectl Commands for Managing HPAs

If you have an HPA manifest file, you can create, manage, and delete HPAs using `kubectl`:

- Creating an HPA

    - With a manifest: `kubectl create -f `

    - Without a manifest (CPU only): `kubectl autoscale deployment hello-world --min=2 --max=5 --cpu-percent=50`

- Getting HPA info

    - Basic: `kubectl get hpa hello-world`

    - Detailed description: `kubectl describe hpa hello-world`

- Deleting an HPA

    - `kubectl delete hpa hello-world`

##### HPA Manifest Definition Example

The HPA manifest is the config file used for managing an HPA with `kubectl`.

The following snippet demonstrates the use of different directives in an HPA manifest. See the list below the sample to understand the purpose of each directive.

```yml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: 50
  - type: Resource
    resource:
      name: memory
      targetAverageValue: 100Mi
```

Directive | Description
---------|----------
`apiVersion: autoscaling/v2beta1` | The version of the Kubernetes `autoscaling` API group in use. This example manifest uses the beta version, so scaling by CPU and memory is enabled.
`name: hello-world` | Indicates that the HPA is performing autoscaling for the `hello-world` deployment.
`minReplicas: 1` | Indicates that the minimum number of replicas running can't go below 1.
`maxReplicas: 10` | Indicates that the maximum number of replicas in the deployment can't go above 10.
`targetAverageUtilization: 50` | Indicates the deployment will scale pods up when the average running pod uses more than 50% of its requested CPU.
`targetAverageValue: 100Mi` | Indicates the deployment will scale pods up when the average running pod uses more than 100Mi of memory.
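For reference only (this is not part of the original example), the same policy expressed against the stable `autoscaling/v2` API, assuming your cluster already serves that API version, would look roughly like this:

```yml
# Sketch of the equivalent HPA on the stable autoscaling/v2 API. The v2 API
# nests the target under each metric instead of using targetAverage* fields.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hello-world
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: hello-world
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50
  - type: Resource
    resource:
      name: memory
      target:
        type: AverageValue
        averageValue: 100Mi
```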
    + +##### Configuring HPA to Scale Using Resource Metrics (CPU and Memory) + +Clusters created in Rancher v2.0.7 and higher have all the requirements needed (metrics-server and Kubernetes cluster configuration) to use Horizontal Pod Autoscaler. + +Run the following commands to check if metrics are available in your installation: + +``` +$ kubectl top nodes +NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% +node-controlplane 196m 9% 1623Mi 42% +node-etcd 80m 4% 1090Mi 28% +node-worker 64m 3% 1146Mi 29% +$ kubectl -n kube-system top pods +NAME CPU(cores) MEMORY(bytes) +canal-pgldr 18m 46Mi +canal-vhkgr 20m 45Mi +canal-x5q5v 17m 37Mi +canal-xknnz 20m 37Mi +kube-dns-7588d5b5f5-298j2 0m 22Mi +kube-dns-autoscaler-5db9bbb766-t24hw 0m 5Mi +metrics-server-97bc649d5-jxrlt 0m 12Mi +$ kubectl -n kube-system logs -l k8s-app=metrics-server +I1002 12:55:32.172841 1 heapster.go:71] /metrics-server --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true +I1002 12:55:32.172994 1 heapster.go:72] Metrics Server version v0.2.1 +I1002 12:55:32.173378 1 configs.go:61] Using Kubernetes client with master "https://kubernetes.default.svc" and version +I1002 12:55:32.173401 1 configs.go:62] Using kubelet port 10250 +I1002 12:55:32.173946 1 heapster.go:128] Starting with Metric Sink +I1002 12:55:32.592703 1 serving.go:308] Generated self-signed cert (apiserver.local.config/certificates/apiserver.crt, apiserver.local.config/certificates/apiserver.key) +I1002 12:55:32.925630 1 heapster.go:101] Starting Heapster API server... +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] listing is available at https:///swaggerapi +[restful] 2018/10/02 12:55:32 log.go:33: [restful/swagger] https:///swaggerui/ is mapped to folder /swagger-ui/ +I1002 12:55:32.928597 1 serve.go:85] Serving securely on 0.0.0.0:443 +``` + + +##### Configuring HPA to Scale Using Custom Metrics with Prometheus + +You can configure HPA to autoscale based on custom metrics provided by third-party software. The most common use case for autoscaling using third-party software is based on application-level metrics (i.e., HTTP requests per second). HPA uses the `custom.metrics.k8s.io` API to consume these metrics. This API is enabled by deploying a custom metrics adapter for the metrics collection solution. + +For this example, we are going to use [Prometheus](https://prometheus.io/). We are beginning with the following assumptions: + +- Prometheus is deployed in the cluster. +- Prometheus is configured correctly and collecting proper metrics from pods, nodes, namespaces, etc. +- Prometheus is exposed at the following URL and port: `http://prometheus.mycompany.io:80` + +Prometheus is available for deployment in the Rancher v2.0 catalog. Deploy it from Rancher catalog if it isn't already running in your cluster. + +For HPA to use custom metrics from Prometheus, package [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) is required in the `kube-system` namespace of your cluster. To install `k8s-prometheus-adapter`, we are using the Helm chart available at [banzai-charts](https://github.com/banzaicloud/banzai-charts). + +1. Initialize Helm in your cluster. 
+ ``` + # kubectl -n kube-system create serviceaccount tiller + kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + helm init --service-account tiller + ``` + +1. Clone the `banzai-charts` repo from GitHub: + ``` + # git clone https://github.com/banzaicloud/banzai-charts + ``` + +1. Install the `prometheus-adapter` chart, specifying the Prometheus URL and port number. + ``` + # helm install --name prometheus-adapter banzai-charts/prometheus-adapter --set prometheus.url="http://prometheus.mycompany.io",prometheus.port="80" --namespace kube-system + ``` + +1. Check that `prometheus-adapter` is running properly. Check the service pod and logs in the `kube-system` namespace. + + 1. Check that the service pod is `Running`. Enter the following command. + ``` + # kubectl get pods -n kube-system + ``` + From the resulting output, look for a status of `Running`. + ``` + NAME READY STATUS RESTARTS AGE + ... + prometheus-adapter-prometheus-adapter-568674d97f-hbzfx 1/1 Running 0 7h + ... + ``` + 1. Check the service logs to make sure the service is running correctly by entering the command that follows. + ``` + # kubectl logs prometheus-adapter-prometheus-adapter-568674d97f-hbzfx -n kube-system + ``` + Then review the log output to confirm the service is running. + +
    + Prometheus Adaptor Logs + + ... + I0724 10:18:45.696679 1 round_trippers.go:436] GET https://10.43.0.1:443/api/v1/namespaces/default/pods?labelSelector=app%3Dhello-world 200 OK in 2 milliseconds + I0724 10:18:45.696695 1 round_trippers.go:442] Response Headers: + I0724 10:18:45.696699 1 round_trippers.go:445] Date: Tue, 24 Jul 2018 10:18:45 GMT + I0724 10:18:45.696703 1 round_trippers.go:445] Content-Type: application/json + I0724 10:18:45.696706 1 round_trippers.go:445] Content-Length: 2581 + I0724 10:18:45.696766 1 request.go:836] Response Body: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/default/pods","resourceVersion":"6237"},"items":[{"metadata":{"name":"hello-world-54764dfbf8-q6l82","generateName":"hello-world-54764dfbf8-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/hello-world-54764dfbf8-q6l82","uid":"484cb929-8f29-11e8-99d2-067cac34e79c","resourceVersion":"4066","creationTimestamp":"2018-07-24T10:06:50Z","labels":{"app":"hello-world","pod-template-hash":"1032089694"},"annotations":{"cni.projectcalico.org/podIP":"10.42.0.7/32"},"ownerReferences":[{"apiVersion":"extensions/v1beta1","kind":"ReplicaSet","name":"hello-world-54764dfbf8","uid":"4849b9b1-8f29-11e8-99d2-067cac34e79c","controller":true,"blockOwnerDeletion":true}]},"spec":{"volumes":[{"name":"default-token-ncvts","secret":{"secretName":"default-token-ncvts","defaultMode":420}}],"containers":[{"name":"hello-world","image":"rancher/hello-world","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{"requests":{"cpu":"500m","memory":"64Mi"}},"volumeMounts":[{"name":"default-token-ncvts","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"34.220.18.140","securityContext":{},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":300},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":300}]},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:54Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2018-07-24T10:06:50Z"}],"hostIP":"34.220.18.140","podIP":"10.42.0.7","startTime":"2018-07-24T10:06:50Z","containerStatuses":[{"name":"hello-world","state":{"running":{"startedAt":"2018-07-24T10:06:54Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"rancher/hello-world:latest","imageID":"docker-pullable://rancher/hello-world@sha256:4b1559cb4b57ca36fa2b313a3c7dde774801aa3a2047930d94e11a45168bc053","containerID":"docker://cce4df5fc0408f03d4adf82c90de222f64c302bf7a04be1c82d584ec31530773"}],"qosClass":"Burstable"}}]} + I0724 10:18:45.699525 1 api.go:74] GET http://prometheus-server.prometheus.34.220.18.140.xip.io/api/v1/query?query=sum%28rate%28container_fs_read_seconds_total%7Bpod_name%3D%22hello-world-54764dfbf8-q6l82%22%2Ccontainer_name%21%3D%22POD%22%2Cnamespace%3D%22default%22%7D%5B5m%5D%29%29+by+%28pod_name%29&time=1532427525.697 200 OK + 
I0724 10:18:45.699620 1 api.go:93] Response Body: {"status":"success","data":{"resultType":"vector","result":[{"metric":{"pod_name":"hello-world-54764dfbf8-q6l82"},"value":[1532427525.697,"0"]}]}} + I0724 10:18:45.699939 1 wrap.go:42] GET /apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/%2A/fs_read?labelSelector=app%3Dhello-world: (12.431262ms) 200 [[kube-controller-manager/v1.10.1 (linux/amd64) kubernetes/d4ab475/system:serviceaccount:kube-system:horizontal-pod-autoscaler] 10.42.0.0:24268] + I0724 10:18:51.727845 1 request.go:836] Request Body: {"kind":"SubjectAccessReview","apiVersion":"authorization.k8s.io/v1beta1","metadata":{"creationTimestamp":null},"spec":{"nonResourceAttributes":{"path":"/","verb":"get"},"user":"system:anonymous","group":["system:unauthenticated"]},"status":{"allowed":false}} + ... + +
    + + + +1. Check that the metrics API is accessible from kubectl. + + - If you are accessing the cluster directly, enter your Server URL in the kubectl config in the following format: `https://:6443`. + ``` + # kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} + +
    + + - If you are accessing the cluster through Rancher, enter your Server URL in the kubectl config in the following format: `https:///k8s/clusters/`. Add the suffix `/k8s/clusters/` to API path. + ``` + # kubectl get --raw /k8s/clusters//apis/custom.metrics.k8s.io/v1beta1 + ``` + If the API is accessible, you should receive output that's similar to what follows. + +
    + API Response + + {"kind":"APIResourceList","apiVersion":"v1","groupVersion":"custom.metrics.k8s.io/v1beta1","resources":[{"name":"pods/fs_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_rss","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_period","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_read","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_user","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/last_seen","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/tasks_state","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_quota","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/start_time_seconds","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_write","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_cache","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_cfs_throttled_periods","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_working_set_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_udp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes_free","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_inodes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_time_weighted","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failures","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_swap","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_cpu_shares","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_swap_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_io_current","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_failcnt","singularName":"","namespaced":true,"kind":"M
etricValueList","verbs":["get"]},{"name":"pods/fs_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_writes_merged","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/network_tcp_usage","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/memory_max_usage_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/spec_memory_reservation_limit_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_load_average_10s","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/cpu_system","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_reads_bytes","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]},{"name":"pods/fs_sector_reads","singularName":"","namespaced":true,"kind":"MetricValueList","verbs":["get"]}]} + +
diff --git a/content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md
similarity index 100%
rename from content/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/_index.md
rename to versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/manage-hpa-with-rancher-ui.md
diff --git a/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md
new file mode 100644
index 0000000000..d4990e8516
--- /dev/null
+++ b/versioned_docs/version-2.6/k8s-in-rancher/horitzontal-pod-autoscaler/testing-hpa/testing-hpa.md
@@ -0,0 +1,531 @@
---
title: Testing HPAs with kubectl
weight: 3031
---

This document describes how to check the status of your HPAs after scaling them up or down with your load testing tool. For information on how to check the status from the Rancher UI (at least version 2.3.x), refer to [Managing HPAs with the Rancher UI]({{}}/rancher/v2.6/en/k8s-in-rancher/horitzontal-pod-autoscaler/manage-hpa-with-rancher-ui/).

For HPA to work correctly, service deployments should have resource request definitions for their containers. Follow this hello-world example to test if HPA is working correctly.

1. Configure `kubectl` to connect to your Kubernetes cluster.

1. Copy the `hello-world` deployment manifest below.

    + Hello World Manifest + + ``` + apiVersion: apps/v1beta2 + kind: Deployment + metadata: + labels: + app: hello-world + name: hello-world + namespace: default + spec: + replicas: 1 + selector: + matchLabels: + app: hello-world + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app: hello-world + spec: + containers: + - image: rancher/hello-world + imagePullPolicy: Always + name: hello-world + resources: + requests: + cpu: 500m + memory: 64Mi + ports: + - containerPort: 80 + protocol: TCP + restartPolicy: Always + --- + apiVersion: v1 + kind: Service + metadata: + name: hello-world + namespace: default + spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: hello-world + ``` + +
    + +1. Deploy it to your cluster. + + ``` + # kubectl create -f + ``` + +1. Copy one of the HPAs below based on the metric type you're using: + +
    + Hello World HPA: Resource Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 1000Mi + ``` + +
    +
    + Hello World HPA: Custom Metrics + + ``` + apiVersion: autoscaling/v2beta1 + kind: HorizontalPodAutoscaler + metadata: + name: hello-world + namespace: default + spec: + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: hello-world + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 50 + - type: Resource + resource: + name: memory + targetAverageValue: 100Mi + - type: Pods + pods: + metricName: cpu_system + targetAverageValue: 20m + ``` + +
    + +1. View the HPA info and description. Confirm that metric data is shown. + +
    + Resource Metrics + + 1. Enter the following commands. + ``` + # kubectl get hpa + NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE + hello-world Deployment/hello-world 1253376 / 100Mi, 0% / 50% 1 10 1 6m + # kubectl describe hpa + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 20:21:16 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 1253376 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    +
    + Custom Metrics + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive the output that follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:36:28 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 3514368 / 100Mi + "cpu_system" on pods: 0 / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True ReadyForNewScale the last scale time was sufficiently old as to warrant a new scale + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + ``` + +
    + +1. Generate a load for the service to test that your pods autoscale as intended. You can use any load-testing tool (Hey, Gatling, etc.), but we're using [Hey](https://github.com/rakyll/hey). + +1. Test that pod autoscaling works as intended.

    + **To Test Autoscaling Using Resource Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to two pods based on CPU Usage. + + 1. View your HPA. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10928128 / 100Mi + resource cpu on pods (as a percentage of request): 56% (280m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm you've scaled to two pods. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-k8ph2 1/1 Running 0 1m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Upscale to 3 pods: CPU Usage Up to Target + + Use your load testing tool to upscale to 3 pods based on CPU usage with `horizontal-pod-autoscaler-upscale-delay` set to 3 minutes. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 9424896 / 100Mi + resource cpu on pods (as a percentage of request): 66% (333m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 4m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + ``` + 2. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-f46kh 0/1 Running 0 1m + hello-world-54764dfbf8-k8ph2 1/1 Running 0 5m + hello-world-54764dfbf8-q6l4v 1/1 Running 0 3h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing to scale down to 1 pod when all metrics are below target for `horizontal-pod-autoscaler-downscale-delay` (5 minutes by default). + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Mon, 23 Jul 2018 22:22:04 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 10070016 / 100Mi + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 6m horizontal-pod-autoscaler New size: 3; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 1s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + +
    + + **To Test Autoscaling Using Custom Metrics:** + +
    + Upscale to 2 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale two pods based on CPU usage. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8159232 / 100Mi + "cpu_system" on pods: 7m / 20m + resource cpu on pods (as a percentage of request): 64% (321m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 2 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 16s horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm two pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 3 Pods: CPU Usage Up to Target + + Use your load testing tool to scale up to three pods when the cpu_system usage limit is up to target. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows: + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3s horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + ``` + 1. Enter the following command to confirm three pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows: + ``` + # kubectl get pods + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-5pfdr 1/1 Running 0 3m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Upscale to 4 Pods: CPU Usage Up to Target + + Use your load testing tool to upscale to four pods based on CPU usage. `horizontal-pod-autoscaler-upscale-delay` is set to three minutes by default. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive output similar to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8374272 / 100Mi + "cpu_system" on pods: 27m / 20m + resource cpu on pods (as a percentage of request): 71% (357m) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 3 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request) + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 3m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 4s horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + ``` + 1. Enter the following command to confirm four pods are running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-2p9xb 1/1 Running 0 5m + hello-world-54764dfbf8-5pfdr 1/1 Running 0 2m + hello-world-54764dfbf8-m2hrl 1/1 Running 0 1s + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
    +
    + Downscale to 1 Pod: All Metrics Below Target + + Use your load testing tool to scale down to one pod when all metrics below target for `horizontal-pod-autoscaler-downscale-delay`. + + 1. Enter the following command. + ``` + # kubectl describe hpa + ``` + You should receive similar output to what follows. + ``` + Name: hello-world + Namespace: default + Labels: + Annotations: + CreationTimestamp: Tue, 24 Jul 2018 18:01:11 +0200 + Reference: Deployment/hello-world + Metrics: ( current / target ) + resource memory on pods: 8101888 / 100Mi + "cpu_system" on pods: 8m / 20m + resource cpu on pods (as a percentage of request): 0% (0) / 50% + Min replicas: 1 + Max replicas: 10 + Conditions: + Type Status Reason Message + ---- ------ ------ ------- + AbleToScale True SucceededRescale the HPA controller was able to update the target scale to 1 + ScalingActive True ValidMetricFound the HPA was able to successfully calculate a replica count from memory resource + ScalingLimited False DesiredWithinRange the desired count is within the acceptable range + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulRescale 10m horizontal-pod-autoscaler New size: 2; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 8m horizontal-pod-autoscaler New size: 3; reason: pods metric cpu_system above target + Normal SuccessfulRescale 5m horizontal-pod-autoscaler New size: 4; reason: cpu resource utilization (percentage of request) above target + Normal SuccessfulRescale 13s horizontal-pod-autoscaler New size: 1; reason: All metrics below target + ``` + 1. Enter the following command to confirm a single pods is running. + ``` + # kubectl get pods + ``` + You should receive output similar to what follows. + ``` + NAME READY STATUS RESTARTS AGE + hello-world-54764dfbf8-q6l82 1/1 Running 0 6h + ``` + +
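The load used in the scenarios above can come from any HTTP benchmarking tool. A minimal sketch with Hey, assuming the hello-world service is reachable at a placeholder URL through an ingress or node port:

```
# Keep 50 concurrent workers hitting the service for 5 minutes so CPU usage
# climbs past the 50% target; replace the URL with your own entrypoint.
hey -z 5m -c 50 http://hello-world.example.com/
```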
    diff --git a/content/rancher/v2.6/en/k8s-in-rancher/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/k8s-in-rancher.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/k8s-in-rancher.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/ingress-config/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/ingress-config/ingress-config.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/ingress-config/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/ingress-config/ingress-config.md diff --git a/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md new file mode 100644 index 0000000000..bb69c19826 --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/ingress/ingress.md @@ -0,0 +1,20 @@ +--- +title: Adding Ingresses +description: Ingresses can be added for workloads to provide load balancing, SSL termination and host/path-based routing. Learn how to add Rancher ingress +weight: 3042 +--- + +Ingresses can be added for workloads to provide load balancing, SSL termination and host/path based routing. When using ingresses in a project, you can program the ingress hostname to an external DNS by setting up a Global DNS entry. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster that you want to add an ingress to and click **Explore**. +1. Click **Service Discovery > Ingresses**. +1. Click **Create**. +1. Select an existing **Namespace** from the drop-down list. +1. Enter a **Name** for the ingress. +1. Create ingress forwarding **Rules**. For help configuring the rules, refer to [this section.](#ingress-rule-configuration) If any of your ingress rules handle requests for encrypted ports, add a certificate to encrypt/decrypt communications. +1. **Optional:** click **Add Rule** to create additional ingress rules. For example, after you create ingress rules to direct requests for your hostname, you'll likely want to create a default backend to handle 404s. +1. Click **Create** at the bottom right. + +**Result:** Your ingress is added to the project. The ingress begins enforcing your ingress rules. + diff --git a/content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/load-balancers-and-ingress/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/load-balancers-and-ingress.md diff --git a/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md new file mode 100644 index 0000000000..0e594f7efd --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/load-balancers-and-ingress/load-balancers/load-balancers.md @@ -0,0 +1,64 @@ +--- +title: "Layer 4 and Layer 7 Load Balancing" +description: "Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing. 
Learn about the support for each way in different deployments"
weight: 3041
---
Kubernetes supports load balancing in two ways: Layer-4 Load Balancing and Layer-7 Load Balancing.

## Layer-4 Load Balancer

The Layer-4 load balancer (or external load balancer) forwards traffic to NodePorts. A Layer-4 load balancer allows you to forward both HTTP and TCP traffic.

The Layer-4 load balancer is usually provided by the underlying cloud provider, so it is not supported when you deploy RKE clusters on bare-metal servers or in vSphere clusters. However, a single [globally managed config-map](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) can be used to expose services on NGINX or a third-party ingress.

> **Note:** It is possible to deploy a cluster with a non-cloud load balancer, such as [MetalLB.](https://metallb.universe.tf/) However, that use case is more advanced than the Layer-4 load balancer supported by a cloud provider, and it is not configurable in Rancher or RKE.

### Support for Layer-4 Load Balancing

Support for the Layer-4 load balancer varies based on the underlying cloud provider.

Cluster Deployment | Layer-4 Load Balancer Support
----------------------------------------------|--------------------------------
Amazon EKS | Supported by AWS cloud provider
Google GKE | Supported by GCE cloud provider
Azure AKS | Supported by Azure cloud provider
RKE on EC2 | Supported by AWS cloud provider
RKE on DigitalOcean | Limited NGINX or third-party Ingress*
RKE on vSphere | Limited NGINX or third-party Ingress*
RKE on Custom Hosts
(e.g. bare-metal servers) | Limited NGINX or third-party Ingress* +Third-party MetalLB | Limited NGINX or third-party Ingress* + +\* Services can be exposed through a single [globally managed config-map.](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) + +## Layer-7 Load Balancer + +The layer-7 load balancer (or ingress controller) supports host- and path-based load balancing and SSL termination. It forwards only HTTP and HTTPS traffic and therefore listens on ports 80 and 443 only. Cloud providers such as Amazon and Google support layer-7 load balancers. In addition, RKE clusters deploy the Nginx Ingress Controller. + +### Support for Layer-7 Load Balancing + +Support for layer-7 load balancing varies based on the underlying cloud provider. + +Cluster Deployment | Layer-7 Load Balancer Support +----------------------------------------------|-------------------------------- +Amazon EKS | Supported by AWS cloud provider +Google GKE | Supported by GKE cloud provider +Azure AKS | Not Supported +RKE on EC2 | Nginx Ingress Controller +RKE on DigitalOcean | Nginx Ingress Controller +RKE on vSphere | Nginx Ingress Controller +RKE on Custom Hosts
    (e.g. bare-metal servers) | Nginx Ingress Controller + +### Host Names in Layer-7 Load Balancer + +Some cloud-managed layer-7 load balancers (such as the ALB ingress controller on AWS) expose DNS addresses for ingress rules. You need to map (via CNAME) your domain name to the DNS address generated by the layer-7 load balancer. + +Other layer-7 load balancers, such as the Google Load Balancer or Nginx Ingress Controller, directly expose one or more IP addresses. Google Load Balancer provides a single routable IP address. Nginx Ingress Controller exposes the external IP of all nodes that run the Nginx Ingress Controller. You can do either of the following: + +1. Configure your own DNS to map (via A records) your domain name to the IP addresses exposes by the Layer-7 load balancer. +2. Ask Rancher to generate an xip.io host name for your ingress rule. Rancher will take one of your exposed IPs, say `a.b.c.d`, and generate a host name `..a.b.c.d.xip.io`. + +The benefit of using xip.io is that you obtain a working entrypoint URL immediately after you create the ingress rule. Setting up your own domain name, on the other hand, requires you to configure DNS servers and wait for DNS to propagate. + +## Related Links + +- [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) diff --git a/content/rancher/v2.6/en/k8s-in-rancher/registries/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/registries/registries.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/registries/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/registries/registries.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/secrets/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/secrets/secrets.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/secrets/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/secrets/secrets.md diff --git a/versioned_docs/version-2.6/k8s-in-rancher/service-discovery/service-discovery.md b/versioned_docs/version-2.6/k8s-in-rancher/service-discovery/service-discovery.md new file mode 100644 index 0000000000..106bce3715 --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/service-discovery/service-discovery.md @@ -0,0 +1,30 @@ +--- +title: Services +weight: 3045 +--- + +Pod configuration is managed by Deployments, StatefulSets and Daemonsets, whereas services direct traffic to pods using selectors. + +For every workload (with at least one port configured) created, a complementing Service Discovery entry is created. This Service Discovery entry enables DNS resolution for the workload's pods using the following naming convention: +`..svc.cluster.local`. + +You can create additional services so that a given namespace resolves with one or more external IP addresses, an external hostname, an alias to another DNS record, other workloads, or a set of pods that match a selector that you create. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to add a service and click **Explore**. +1. Click **Service Discovery > Services**. +1. Click **Create**. +1. Choose the type of service you want to create. +1. Select a **Namespace** from the drop-down list. +1. Enter a **Name** for the service. This name is used for DNS resolution. +1. Fill out the rest of the form. 
For help, refer to the upstream Kubernetes documentation about [services.](https://kubernetes.io/docs/concepts/services-networking/service/) +1. Click **Create**. + +**Result:** A new service is created. + +- You can view the record from the project's **Service Discovery** tab. +- When you visit the new DNS name for the new record that you created (`..svc.cluster.local`), it resolves to the chosen namespace. + +## Related Links + +- [Adding entries to Pod /etc/hosts with HostAliases](https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/) diff --git a/content/rancher/v2.6/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/workloads/add-a-sidecar/add-a-sidecar.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/workloads/add-a-sidecar/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/workloads/add-a-sidecar/add-a-sidecar.md diff --git a/content/rancher/v2.6/en/k8s-in-rancher/workloads/deploy-workloads/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/workloads/deploy-workloads/deploy-workloads.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/workloads/deploy-workloads/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/workloads/deploy-workloads/deploy-workloads.md diff --git a/versioned_docs/version-2.6/k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md b/versioned_docs/version-2.6/k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md new file mode 100644 index 0000000000..1d8662a3d8 --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/workloads/rollback-workloads/rollback-workloads.md @@ -0,0 +1,15 @@ +--- +title: Rolling Back Workloads +weight: 3027 +--- + +Sometimes there is a need to roll back to the previous version of the application, either for debugging purposes or because an upgrade did not go as planned. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to roll back a workload and click **Explore**. +1. In the left navigation bar, click **Workload**. +1. Find the workload that you want to roll back and select **⋮ > Rollback**. + +1. Choose the revision that you want to roll back to. Click **Rollback**. + +**Result:** Your workload reverts to the previous version that you chose. Wait a few minutes for the action to complete. diff --git a/versioned_docs/version-2.6/k8s-in-rancher/workloads/upgrade-workloads/upgrade-workloads.md b/versioned_docs/version-2.6/k8s-in-rancher/workloads/upgrade-workloads/upgrade-workloads.md new file mode 100644 index 0000000000..f6804adb2e --- /dev/null +++ b/versioned_docs/version-2.6/k8s-in-rancher/workloads/upgrade-workloads/upgrade-workloads.md @@ -0,0 +1,21 @@ +--- +title: Upgrading Workloads +weight: 3028 +--- +When a new version of an application image is released on Docker Hub, you can upgrade any workloads running a previous version of the application to the new one. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to upgrade a workload and click **Explore**. +1. In the left navigation bar, click **Workload**. + +1. Find the workload that you want to upgrade and select **⋮ > Edit Config**. + +1. Update the **Container Image** and any options that you want to change. + +1. Review and edit the workload's **Scaling and Upgrade Policy**.
+ + These options control how the upgrade rolls out to containers that are currently running. For example, for scalable deployments, you can choose whether you want to stop old pods before deploying new ones, or vice versa, as well as the upgrade batch size. + +1. Click **Save**. + +**Result:** The workload begins upgrading its containers, per your specifications. Note that scaling up the deployment or updating the upgrade/scaling policy won't result in the pods recreation. diff --git a/content/rancher/v2.6/en/k8s-in-rancher/workloads/_index.md b/versioned_docs/version-2.6/k8s-in-rancher/workloads/workloads.md similarity index 100% rename from content/rancher/v2.6/en/k8s-in-rancher/workloads/_index.md rename to versioned_docs/version-2.6/k8s-in-rancher/workloads/workloads.md diff --git a/content/rancher/v2.6/en/logging/architecture/_index.md b/versioned_docs/version-2.6/logging/architecture/architecture.md similarity index 100% rename from content/rancher/v2.6/en/logging/architecture/_index.md rename to versioned_docs/version-2.6/logging/architecture/architecture.md diff --git a/content/rancher/v2.5/en/logging/custom-resource-config/_index.md b/versioned_docs/version-2.6/logging/custom-resource-config/custom-resource-config.md similarity index 100% rename from content/rancher/v2.5/en/logging/custom-resource-config/_index.md rename to versioned_docs/version-2.6/logging/custom-resource-config/custom-resource-config.md diff --git a/versioned_docs/version-2.6/logging/custom-resource-config/flows/flows.md b/versioned_docs/version-2.6/logging/custom-resource-config/flows/flows.md new file mode 100644 index 0000000000..2ad4991dc8 --- /dev/null +++ b/versioned_docs/version-2.6/logging/custom-resource-config/flows/flows.md @@ -0,0 +1,85 @@ +--- +title: Flows and ClusterFlows +weight: 1 +--- + +For the full details on configuring `Flows` and `ClusterFlows`, see the [Banzai Cloud Logging operator documentation.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/flow/) + +- [Configuration](#configuration) +- [YAML Example](#yaml-example) + +# Configuration + +- [Flows](#flows) + - [Matches](#matches) + - [Filters](#filters) + - [Outputs](#outputs) +- [ClusterFlows](#clusterflows) + +# Flows + +A `Flow` defines which logs to collect and filter and which output to send the logs to. + +The `Flow` is a namespaced resource, which means logs will only be collected from the namespace that the `Flow` is deployed in. + +`Flows` can be configured by filling out forms in the Rancher UI. + +For more details about the `Flow` custom resource, see [FlowSpec.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/crds/v1beta1/flow_types/) + +### Matches + +Match statements are used to select which containers to pull logs from. + +You can specify match statements to select or exclude logs according to Kubernetes labels, container and host names. Match statements are evaluated in the order they are defined and processed only until the first matching select or exclude rule applies. + +Matches can be configured by filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. + +For detailed examples on using the match statement, see the [official documentation on log routing.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/log-routing/) + +### Filters + +You can define one or more filters within a `Flow`. 
Filters can perform various actions on the logs, for example, add additional data, transform the logs, or parse values from the records. The filters in the `Flow` are applied in the order in the definition. + +For a list of filters supported by the Banzai Cloud Logging operator, see [this page.](https://banzaicloud.com/docs/one-eye/logging-operator/configuration/plugins/filters/) + +Filters need to be configured in YAML. + +### Outputs + +This `Output` will receive logs from the `Flow`. Because the `Flow` is a namespaced resource, the `Output` must reside in same namespace as the `Flow`. + +`Outputs` can be referenced when filling out the `Flow` or `ClusterFlow` forms in the Rancher UI. + +# ClusterFlows + +Matches, filters and `Outputs` are configured for `ClusterFlows` in the same way that they are configured for `Flows`. The key difference is that the `ClusterFlow` is scoped at the cluster level and can configure log collection across all namespaces. + +`ClusterFlows` can be configured by filling out forms in the Rancher UI. + +After `ClusterFlow` selects logs from all namespaces in the cluster, logs from the cluster will be collected and logged to the selected `ClusterOutput`. + +# YAML Example + +The following example `Flow` transforms the log messages from the default namespace and sends them to an S3 `Output`: + +```yaml +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Flow +metadata: + name: flow-sample + namespace: default +spec: + filters: + - parser: + remove_key_name_field: true + parse: + type: nginx + - tag_normaliser: + format: ${namespace_name}.${pod_name}.${container_name} + localOutputRefs: + - s3-output + match: + - select: + labels: + app: nginx +``` diff --git a/content/rancher/v2.6/en/logging/custom-resource-config/outputs/_index.md b/versioned_docs/version-2.6/logging/custom-resource-config/outputs/outputs.md similarity index 100% rename from content/rancher/v2.6/en/logging/custom-resource-config/outputs/_index.md rename to versioned_docs/version-2.6/logging/custom-resource-config/outputs/outputs.md diff --git a/content/rancher/v2.6/en/logging/helm-chart-options/_index.md b/versioned_docs/version-2.6/logging/helm-chart-options/helm-chart-options.md similarity index 100% rename from content/rancher/v2.6/en/logging/helm-chart-options/_index.md rename to versioned_docs/version-2.6/logging/helm-chart-options/helm-chart-options.md diff --git a/content/rancher/v2.6/en/logging/_index.md b/versioned_docs/version-2.6/logging/logging.md similarity index 100% rename from content/rancher/v2.6/en/logging/_index.md rename to versioned_docs/version-2.6/logging/logging.md diff --git a/content/rancher/v2.6/en/logging/migrating/_index.md b/versioned_docs/version-2.6/logging/migrating/migrating.md similarity index 100% rename from content/rancher/v2.6/en/logging/migrating/_index.md rename to versioned_docs/version-2.6/logging/migrating/migrating.md diff --git a/content/rancher/v2.6/en/logging/rbac/_index.md b/versioned_docs/version-2.6/logging/rbac/rbac.md similarity index 100% rename from content/rancher/v2.6/en/logging/rbac/_index.md rename to versioned_docs/version-2.6/logging/rbac/rbac.md diff --git a/versioned_docs/version-2.6/logging/taints-tolerations/taints-tolerations.md b/versioned_docs/version-2.6/logging/taints-tolerations/taints-tolerations.md new file mode 100644 index 0000000000..00cee550a8 --- /dev/null +++ b/versioned_docs/version-2.6/logging/taints-tolerations/taints-tolerations.md @@ -0,0 +1,66 @@ +--- +title: Working with 
Taints and Tolerations +weight: 6 +--- + +"Tainting" a Kubernetes node repels pods from running on that node. + +Unless the pods have a `toleration` for that node's taint, they will run on other nodes in the cluster. + +[Taints and tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) can work in conjunction with the `nodeSelector` [field](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) within the `PodSpec`, which enables the *opposite* effect of a taint. + +Using `nodeSelector` gives pods an affinity towards certain nodes. + +Both provide a way to control which node(s) a pod will run on. + +- [Default Implementation in Rancher's Logging Stack](#default-implementation-in-rancher-s-logging-stack) +- [Adding NodeSelector Settings and Tolerations for Custom Taints](#adding-nodeselector-settings-and-tolerations-for-custom-taints) + + +### Default Implementation in Rancher's Logging Stack + +By default, Rancher taints all Linux nodes with `cattle.io/os=linux`, and does not taint Windows nodes. +The logging stack pods have `tolerations` for this taint, which enables them to run on Linux nodes. +Moreover, most logging stack pods run on Linux only and have a `nodeSelector` added to ensure they run on Linux nodes. + +This example Pod YAML file shows a nodeSelector being used with a toleration: + +```yaml +apiVersion: v1 +kind: Pod +# metadata... +spec: + # containers... + tolerations: + - key: cattle.io/os + operator: "Equal" + value: "linux" + effect: NoSchedule + nodeSelector: + kubernetes.io/os: linux +``` + +In the above example, we ensure that our pod only runs on Linux nodes, and we add a `toleration` for the taint we have on all of our Linux nodes. + +You can do the same with Rancher's existing taints, or with your own custom ones. + +### Adding NodeSelector Settings and Tolerations for Custom Taints + +If you would like to add your own `nodeSelector` settings, or if you would like to add `tolerations` for additional taints, you can pass the following to the chart's values. + +```yaml +tolerations: + # insert tolerations... +nodeSelector: + # insert nodeSelector... +``` + +These values will add both settings to the `fluentd`, `fluentbit`, and `logging-operator` containers. +Essentially, these are global settings for all pods in the logging stack. + +However, if you would like to add tolerations for *only* the `fluentbit` container, you can add the following to the chart's values. + +```yaml +fluentbit_tolerations: + # insert tolerations list for fluentbit containers only...
+``` diff --git a/content/rancher/v2.6/en/longhorn/_index.md b/versioned_docs/version-2.6/longhorn/longhorn.md similarity index 100% rename from content/rancher/v2.6/en/longhorn/_index.md rename to versioned_docs/version-2.6/longhorn/longhorn.md diff --git a/content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/advanced.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/configuration/advanced/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/advanced.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/alertmanager/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/alertmanager/alertmanager.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/alertmanager/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/alertmanager/alertmanager.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/prometheus/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/prometheus/prometheus.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/prometheus/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/prometheus/prometheus.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/prometheusrules/prometheusrules.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/advanced/prometheusrules/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/advanced/prometheusrules/prometheusrules.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/configuration.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/configuration.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/examples/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/examples/examples.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/examples/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/examples/examples.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/helm-chart-options/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/helm-chart-options/helm-chart-options.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/helm-chart-options/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/helm-chart-options/helm-chart-options.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/configuration/receiver/receiver.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/receiver/receiver.md new file mode 100644 index 0000000000..8653088990 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/configuration/receiver/receiver.md @@ -0,0 +1,313 @@ +--- +title: Receiver Configuration +shortTitle: Receivers +weight: 1 +--- + +import Tabs from 
'@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The [Alertmanager Config](https://prometheus.io/docs/alerting/latest/configuration/#configuration-file) Secret contains the configuration of an Alertmanager instance that sends out notifications based on alerts it receives from Prometheus. + +> This section assumes familiarity with how monitoring components work together. For more information about Alertmanager, see [this section.](../../how-monitoring-works/#3-how-alertmanager-works) + +- [Creating Receivers in the Rancher UI](#creating-receivers-in-the-rancher-ui) +- [Receiver Configuration](#receiver-configuration) + - [Slack](#slack) + - [Email](#email) + - [PagerDuty](#pagerduty) + - [Opsgenie](#opsgenie) + - [Webhook](#webhook) + - [Custom](#custom) + - [Teams](#teams) + - [SMS](#sms) +- [Configuring Multiple Receivers](#configuring-multiple-receivers) +- [Example Alertmanager Config](../examples/#example-alertmanager-config) +- [Example Route Config for CIS Scan Alerts](#example-route-config-for-cis-scan-alerts) +- [Trusted CA for Notifiers](#trusted-ca-for-notifiers) + +# Creating Receivers in the Rancher UI + +> **Prerequisites:** +> +>- The monitoring application needs to be installed. +>- If you configured monitoring with an existing Alertmanager Secret, it must have a format that is supported by Rancher's UI. Otherwise you will only be able to make changes based on modifying the Alertmanager Secret directly. Note: We are continuing to make enhancements to what kinds of Alertmanager Configurations we can support using the Routes and Receivers UI, so please [file an issue](https://github.com/rancher/rancher/issues/new) if you have a request for a feature enhancement. + +To create notification receivers in the Rancher UI, + + + + +1. Go to the cluster where you want to create receivers. Click **Monitoring -> Alerting -> AlertManagerConfigs**. +1. Click **Create**. +1. Click **Add Receiver**. +1. Enter a **Name** for the receiver. +1. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. +1. Click **Create**. + + + + +1. Go to the cluster where you want to create receivers. Click **Monitoring** and click **Receiver**. +2. Enter a name for the receiver. +3. Configure one or more providers for the receiver. For help filling out the forms, refer to the configuration options below. +4. Click **Create**. + + + + +**Result:** Alerts can be configured to send notifications to the receiver(s). + +# Receiver Configuration + +The notification integrations are configured with the `receiver`, which is explained in the [Prometheus documentation.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) + +### Native vs. Non-native Receivers + +By default, AlertManager provides native integration with some receivers, which are listed in [this section.](https://prometheus.io/docs/alerting/latest/configuration/#receiver) All natively supported receivers are configurable through the Rancher UI.
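+
+For reference, a natively supported receiver ultimately corresponds to an entry in the underlying Alertmanager configuration. The snippet below is a minimal sketch of what a native Slack receiver looks like in that configuration; the receiver name, webhook URL, and channel are placeholders, and in practice the Rancher UI forms generate this entry for you:
+
+```yaml
+receivers:
+- name: slack-example            # placeholder receiver name
+  slack_configs:
+  - api_url: https://hooks.slack.com/services/XXX/YYY/ZZZ  # placeholder webhook URL
+    channel: '#alerts'           # placeholder channel
+    send_resolved: true
+```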
+ +For notification mechanisms not natively supported by AlertManager, integration is achieved using the [webhook receiver.](https://prometheus.io/docs/alerting/latest/configuration/#webhook_config) A list of third-party drivers providing such integrations can be found [here.](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) Access to these drivers, and their associated integrations, is provided through the Alerting Drivers app. Once enabled, configuring non-native receivers can also be done through the Rancher UI. + +Currently the Rancher Alerting Drivers app provides access to the following integrations: +- Microsoft Teams, based on the [prom2teams](https://github.com/idealista/prom2teams) driver +- SMS, based on the [Sachet](https://github.com/messagebird/sachet) driver + +The following types of receivers can be configured in the Rancher UI: + +- Slack +- Email +- PagerDuty +- Opsgenie +- Webhook +- Custom +- Teams +- SMS + +The custom receiver option can be used to configure any receiver in YAML that cannot be configured by filling out the other forms in the Rancher UI. + +# Slack + +| Field | Type | Description | +|------|--------------|------| +| URL | String | Enter your Slack webhook URL. For instructions to create a Slack webhook, see the [Slack documentation.](https://get.slack.help/hc/en-us/articles/115005265063-Incoming-WebHooks-for-Slack) | +| Default Channel | String | Enter the name of the channel that you want to send alert notifications in the following format: `#`. | +| Proxy URL | String | Proxy for the webhook notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +# Email + +| Field | Type | Description | +|------|--------------|------| +| Default Recipient Address | String | The email address that will receive notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +SMTP options: + +| Field | Type | Description | +|------|--------------|------| +| Sender | String | Enter an email address available on your SMTP mail server that you want to send the notification from. | +| Host | String | Enter the IP address or hostname for your SMTP server. Example: `smtp.email.com`. | +| Use TLS | Bool | Use TLS for encryption. | +| Username | String | Enter a username to authenticate with the SMTP server. | +| Password | String | Enter a password to authenticate with the SMTP server. | + +# PagerDuty + +| Field | Type | Description | +|------|------|-------| +| Integration Type | String | `Events API v2` or `Prometheus`. | +| Default Integration Key | String | For instructions to get an integration key, see the [PagerDuty documentation.](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) | +| Proxy URL | String | Proxy for the PagerDuty notifications. | +| Enable Send Resolved Alerts | Bool | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). 
| + +# Opsgenie + +| Field | Description | +|------|-------------| +| API Key | For instructions to get an API key, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/api-key-management) | +| Proxy URL | Proxy for the Opsgenie notifications. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + +Opsgenie Responders: + +| Field | Type | Description | +|-------|------|--------| +| Type | String | Schedule, Team, User, or Escalation. For more information on alert responders, refer to the [Opsgenie documentation.](https://docs.opsgenie.com/docs/alert-recipients-and-teams) | +| Send To | String | Id, Name, or Username of the Opsgenie recipient. | + +# Webhook + +| Field | Description | +|-------|--------------| +| URL | Webhook URL for the app of your choice. | +| Proxy URL | Proxy for the webhook notification. | +| Enable Send Resolved Alerts | Whether to send a follow-up notification if an alert has been resolved (e.g. [Resolved] High CPU Usage). | + + + +# Custom + +The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. + +# Teams + +### Enabling the Teams Receiver for Rancher Managed Clusters + +The Teams receiver is not a native receiver and must be enabled before it can be used. You can enable the Teams receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the Teams option selected. + +1. In the Rancher UI, go to the cluster where you want to install rancher-alerting-drivers and click **Apps & Marketplace**. +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab. +1. Select the **Teams** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the Teams Receiver + +The Teams receiver can be configured by updating its ConfigMap. For example, the following is a minimal Teams receiver configuration. + +```yaml +[Microsoft Teams] +teams-instance-1: https://your-teams-webhook-url +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). + +Use the example below as the URL where: + +- `ns-1` is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +url: http://rancher-alerting-drivers-prom2teams.ns-1.svc:8089/v2/teams-instance-1 +``` + + + +# SMS + +### Enabling the SMS Receiver for Rancher Managed Clusters + +The SMS receiver is not a native receiver and must be enabled before it can be used. You can enable the SMS receiver for a Rancher managed cluster by going to the Apps page and installing the rancher-alerting-drivers app with the SMS option selected. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to install `rancher-alerting-drivers` and click **Explore**. +1. In the left navigation bar, click +1. Click the **Alerting Drivers** app. +1. Click the **Helm Deploy Options** tab +1. Select the **SMS** option and click **Install**. +1. Take note of the namespace used as it will be required in a later step. + +### Configure the SMS Receiver + +The SMS receiver can be configured by updating its ConfigMap. 
For example, the following is a minimal SMS receiver configuration. + +```yaml +providers: + telegram: + token: 'your-token-from-telegram' + +receivers: +- name: 'telegram-receiver-1' + provider: 'telegram' + to: + - '123456789' +``` + +When configuration is complete, add the receiver using the steps in [this section](#creating-receivers-in-the-rancher-ui). + +Use the example below as the name and URL, where: + +- the name assigned to the receiver, e.g. `telegram-receiver-1`, must match the name in the `receivers.name` field in the ConfigMap, e.g. `telegram-receiver-1` +- `ns-1` in the URL is replaced with the namespace where the `rancher-alerting-drivers` app is installed + +```yaml +name: telegram-receiver-1 +url: http://rancher-alerting-drivers-sachet.ns-1.svc:9876/alert +``` + + + + +# Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +It is also possible to send alerts to multiple notification systems. One way is to configure the Receiver using custom YAML, in which case you can add the configuration for multiple notification systems, as long as you are sure that both systems should receive the same messages. + +You can also set up multiple receivers by using the `continue` option for a route, so that the alerts sent to a receiver continue being evaluated in the next level of the routing tree, which could contain another receiver. + + +# Example Alertmanager Configs + +### Slack +To set up notifications via Slack, the following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret, where the `api_url` should be updated to use your Webhook URL from Slack: + +```yaml +route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 3h + receiver: 'slack-notifications' +receivers: +- name: 'slack-notifications' + slack_configs: + - send_resolved: true + text: '{{ template "slack.rancher.text" . }}' + api_url: +templates: +- /etc/alertmanager/config/*.tmpl +``` + +### PagerDuty +To set up notifications via PagerDuty, use the example below from the [PagerDuty documentation](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/) as a guideline. This example sets up a route that captures alerts for a database service and sends them to a receiver linked to a service that will directly notify the DBAs in PagerDuty, while all other alerts will be directed to a default receiver with a different PagerDuty integration key. + +The following Alertmanager Config YAML can be placed into the `alertmanager.yaml` key of the Alertmanager Config Secret. The `service_key` should be updated to use your PagerDuty integration key and can be found as per the "Integrating with Global Event Routing" section of the PagerDuty documentation. For the full list of configuration options, refer to the [Prometheus documentation](https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config).
+ +```yaml +route: + group_by: [cluster] + receiver: 'pagerduty-notifications' + group_interval: 5m + routes: + - match: + service: database + receiver: 'database-notifcations' + +receivers: +- name: 'pagerduty-notifications' + pagerduty_configs: + - service_key: 'primary-integration-key' + +- name: 'database-notifcations' + pagerduty_configs: + - service_key: 'database-integration-key' +``` + +# Example Route Config for CIS Scan Alerts + +While configuring the routes for `rancher-cis-benchmark` alerts, you can specify the matching using the key-value pair `job: rancher-cis-scan`. + +For example, the following example route configuration could be used with a Slack receiver named `test-cis`: + +```yaml +spec: + receiver: test-cis + group_by: +# - string + group_wait: 30s + group_interval: 30s + repeat_interval: 30s + match: + job: rancher-cis-scan +# key: string + match_re: + {} +# key: string +``` + +For more information on enabling alerting for `rancher-cis-benchmark`, see [this section.]({{}}/rancher/v2.6/en/cis-scans/#enabling-alerting-for-rancher-cis-benchmark) + + +# Trusted CA for Notifiers + +If you need to add a trusted CA to your notifier, follow the steps in [this section.](../helm-chart-options/#trusted-ca-for-notifiers) \ No newline at end of file diff --git a/versioned_docs/version-2.6/monitoring-alerting/configuration/route/route.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/route/route.md new file mode 100644 index 0000000000..f6c704fec7 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/configuration/route/route.md @@ -0,0 +1,87 @@ +--- +title: Route Configuration +shortTitle: Routes +weight: 5 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The route configuration is the section of the Alertmanager custom resource that controls how the alerts fired by Prometheus are grouped and filtered before they reach the receiver. + +When a Route is changed, the Prometheus Operator regenerates the Alertmanager custom resource to reflect the changes. + +For more information about configuring routes, refer to the [official Alertmanager documentation.](https://www.prometheus.io/docs/alerting/latest/configuration/#route) + +> This section assumes familiarity with how monitoring components work together. For more information, see [this section.]({{}}/rancher/v2.6/en/monitoring-alerting/how-monitoring-works) + +- [Route Restrictions](#route-restrictions) +- [Route Configuration](#route-configuration) + - [Receiver](#receiver) + - [Grouping](#grouping) + - [Matching](#matching) + +# Route Restrictions + +Alertmanager proxies alerts for Prometheus based on its receivers and a routing tree that filters alerts to certain receivers based on labels. + +Alerting drivers proxy alerts for Alertmanager to non-native receivers, such as Microsoft Teams and SMS. + +In the Rancher UI for configuring routes and receivers, you can configure routing trees with one root and then a depth of one more level, for a tree with a depth of two. But if you use a `continue` route when configuring Alertmanager directly, you can make the tree deeper. + +Each receiver is for one or more notification providers. So if you know that every alert for Slack should also go to PagerDuty, you can configure both in the same receiver. + +# Route Configuration + +### Note on Labels and Annotations + +Labels should be used for identifying information that can affect the routing of notifications. 
Identifying information about the alert could consist of a container name, or the name of the team that should be notified. + +Annotations should be used for information that does not affect who receives the alert, such as a runbook url or error message. + + +### Receiver +The route needs to refer to a [receiver](#receiver-configuration) that has already been configured. + +### Grouping + + + + +> **Note** As of Rancher v2.6.5 `Group By` now accepts a list of strings instead of key-value pairs. See the [upstream documentation](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#route) for details. + +| Field | Default | Description | +|-------|--------------|---------| +| Group By | N/a | List of labels to group by. Labels must not be repeated (unique list). Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list. | +| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | +| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | +| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | + + + + +| Field | Default | Description | +|-------|--------------|---------| +| Group By | N/a | The labels by which incoming alerts are grouped together. For example, `[ group_by: '[' , ... ']' ]` Multiple alerts coming in for labels such as `cluster=A` and `alertname=LatencyHigh` can be batched into a single group. To aggregate by all possible labels, use the special value `'...'` as the sole label name, for example: `group_by: ['...']` Grouping by `...` effectively disables aggregation entirely, passing through all alerts as-is. This is unlikely to be what you want, unless you have a very low alert volume or your upstream notification system performs its own grouping. | +| Group Wait | 30s | How long to wait to buffer alerts of the same group before sending initially. | +| Group Interval | 5m | How long to wait before sending an alert that has been added to a group of alerts for which an initial notification has already been sent. | +| Repeat Interval | 4h | How long to wait before re-sending a given alert that has already been sent. | + + + + +### Matching + +The **Match** field refers to a set of equality matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs to the Rancher UI, they correspond to the YAML in this format: + +```yaml +match: + [ : , ... ] +``` + +The **Match Regex** field refers to a set of regex-matchers used to identify which alerts to send to a given Route based on labels defined on that alert. When you add key-value pairs in the Rancher UI, they correspond to the YAML in this format: + +```yaml +match_re: + [ : , ... 
] +``` diff --git a/content/rancher/v2.6/en/monitoring-alerting/configuration/servicemonitor-podmonitor/_index.md b/versioned_docs/version-2.6/monitoring-alerting/configuration/servicemonitor-podmonitor/servicemonitor-podmonitor.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/configuration/servicemonitor-podmonitor/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/configuration/servicemonitor-podmonitor/servicemonitor-podmonitor.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/dashboards/_index.md b/versioned_docs/version-2.6/monitoring-alerting/dashboards/dashboards.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/dashboards/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/dashboards/dashboards.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/expression/_index.md b/versioned_docs/version-2.6/monitoring-alerting/expression/expression.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/expression/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/expression/expression.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/customize-grafana/_index.md b/versioned_docs/version-2.6/monitoring-alerting/guides/customize-grafana/customize-grafana.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/customize-grafana/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/guides/customize-grafana/customize-grafana.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/enable-monitoring/_index.md b/versioned_docs/version-2.6/monitoring-alerting/guides/enable-monitoring/enable-monitoring.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/enable-monitoring/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/guides/enable-monitoring/enable-monitoring.md diff --git a/content/rancher/v2.5/en/monitoring-alerting/guides/_index.md b/versioned_docs/version-2.6/monitoring-alerting/guides/guides.md similarity index 100% rename from content/rancher/v2.5/en/monitoring-alerting/guides/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/guides/guides.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/guides/memory-usage/memory-usage.md b/versioned_docs/version-2.6/monitoring-alerting/guides/memory-usage/memory-usage.md new file mode 100644 index 0000000000..9583570c44 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/guides/memory-usage/memory-usage.md @@ -0,0 +1,20 @@ +--- +title: Debugging High Memory Usage +weight: 8 +--- + +Every time series in Prometheus is uniquely identified by its [metric name](https://prometheus.io/docs/practices/naming/#metric-names) and optional key-value pairs called [labels.](https://prometheus.io/docs/practices/naming/#labels) + +The labels allow the ability to filter and aggregate the time series data, but they also multiply the amount of data that Prometheus collects. + +Each time series has a defined set of labels, and Prometheus generates a new time series for all unique combinations of labels. If a metric has two labels attached, two time series are generated for that metric. Changing any label value, including adding or removing a label, will create a new time series. + +Prometheus is optimized to store data that is index-based on series. 
It is designed for a relatively consistent number of time series and a relatively large number of samples that need to be collected from the exporters over time. + +Inversely, Prometheus is not optimized to accommodate a rapidly changing number of time series. For that reason, large bursts of memory usage can occur when monitoring is installed on clusters where many resources are being created and destroyed, especially on multi-tenant clusters. + +### Reducing Memory Bursts + +To reduce memory consumption, Prometheus can be configured to store fewer time series, by scraping fewer metrics or by attaching fewer labels to the time series. To see which series use the most memory, you can check the TSDB (time series database) status page in the Prometheus UI. + +Distributed Prometheus solutions such as [Thanos](https://thanos.io/) and [Cortex](https://cortexmetrics.io/) use an alternate architecture in which multiple small Prometheus instances are deployed. In the case of Thanos, the metrics from each Prometheus are aggregated into the common Thanos deployment, and then those metrics are exported to a persistent store, such as S3. This more robust architecture avoids burdening any single Prometheus instance with too many time series, while also preserving the ability to query metrics on a global level. \ No newline at end of file diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/migrating/_index.md b/versioned_docs/version-2.6/monitoring-alerting/guides/migrating/migrating.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/migrating/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/guides/migrating/migrating.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/guides/monitoring-workloads/monitoring-workloads.md b/versioned_docs/version-2.6/monitoring-alerting/guides/monitoring-workloads/monitoring-workloads.md new file mode 100644 index 0000000000..10de8484a9 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/guides/monitoring-workloads/monitoring-workloads.md @@ -0,0 +1,31 @@ +--- +title: Setting up Monitoring for a Workload +weight: 4 +--- + +- [Display CPU and Memory Metrics for a Workload](#display-cpu-and-memory-metrics-for-a-workload) +- [Setting up Metrics Beyond CPU and Memory](#setting-up-metrics-beyond-cpu-and-memory) + +If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. + +The steps for setting up monitoring for workloads depend on whether you want basic metrics such as CPU and memory for the workload, or whether you want to scrape custom metrics from the workload. + +If you only need CPU and memory time series for the workload, you don't need to deploy a ServiceMonitor or PodMonitor because the monitoring application already collects metrics data on resource usage by default. The resource usage time series data is in Prometheus's local time series database. + +Grafana shows the data in aggregate, but you can see the data for the individual workload by using a PromQL query that extracts the data for that workload. Once you have the PromQL query, you can execute the query individually in the Prometheus UI and see the time series visualized there, or you can use the query to customize a Grafana dashboard to display the workload metrics. 
For examples of PromQL queries for workload metrics, see [this section.](https://rancher.com/docs/rancher/v2.6/en/monitoring-alerting/expression/#workload-metrics) + +To set up custom metrics for your workload, you will need to set up an exporter and create a new ServiceMonitor custom resource to configure Prometheus to scrape metrics from your exporter. + +### Display CPU and Memory Metrics for a Workload + +By default, the monitoring application already scrapes CPU and memory. + +To get some fine-grained detail for a particular workload, you can customize a Grafana dashboard to display the metrics for a particular workload. + +### Setting up Metrics Beyond CPU and Memory + +For custom metrics, you will need to expose the metrics on your application in a format supported by Prometheus. + +Then we recommend that you should create a new ServiceMonitor custom resource. When this resource is created, the Prometheus custom resource will be automatically updated so that its scrape configuration includes the new custom metrics endpoint. Then Prometheus will begin scraping metrics from the endpoint. + +You can also create a PodMonitor to expose the custom metrics endpoint, but ServiceMonitors are more appropriate for the majority of use cases. diff --git a/versioned_docs/version-2.6/monitoring-alerting/guides/persist-grafana/persist-grafana.md b/versioned_docs/version-2.6/monitoring-alerting/guides/persist-grafana/persist-grafana.md new file mode 100644 index 0000000000..40cd832e9b --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/guides/persist-grafana/persist-grafana.md @@ -0,0 +1,138 @@ +--- +title: Persistent Grafana Dashboards +weight: 6 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +To allow the Grafana dashboard to persist after the Grafana instance restarts, add the dashboard configuration JSON into a ConfigMap. ConfigMaps also allow the dashboards to be deployed with a GitOps or CD based approach. This allows the dashboard to be put under version control. + +- [Creating a Persistent Grafana Dashboard](#creating-a-persistent-grafana-dashboard) +- [Known Issues](#known-issues) + +# Creating a Persistent Grafana Dashboard + + + + +> **Prerequisites:** +> +> - The monitoring application needs to be installed. +> - To create the persistent dashboard, you must have at least the **Manage Config Maps** Rancher RBAC permissions assigned to you in the project or namespace that contains the Grafana Dashboards. This correlates to the `monitoring-dashboard-edit` or `monitoring-dashboard-admin` Kubernetes native RBAC Roles exposed by the Monitoring chart. +> - To see the links to the external monitoring UIs, including Grafana dashboards, you will need at least a [project-member role.]({{}}/rancher/v2.6/en/monitoring-alerting/rbac/#users-with-rancher-cluster-manager-based-permissions) + +### 1. Get the JSON model of the dashboard that you want to persist + +To create a persistent dashboard, you will need to get the JSON model of the dashboard you want to persist. You can use a premade dashboard or build your own. + +To use a premade dashboard, go to [https://grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards), open up its detail page, and click on the **Download JSON** button to get the JSON model for the next step. + +To use your own dashboard: + +1. Click on the link to open Grafana. 
On the cluster detail page, click **Monitoring**. +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. Alternative credentials can also be supplied on deploying or upgrading the chart. + + > **Note:** Regardless of who has the password, in order to access the Grafana instance, you still need at least the Manage Services or View Monitoring permissions in the project that Rancher Monitoring is deployed into. Alternative credentials can also be supplied on deploying or upgrading the chart. +1. Create a dashboard using Grafana's UI. Once complete, go to the dashboard's settings by clicking on the gear icon in the top navigation menu. In the left navigation menu, click **JSON Model**. +1. Copy the JSON data structure that appears. + +### 2. Create a ConfigMap using the Grafana JSON model + +Create a ConfigMap in the namespace that contains your Grafana Dashboards (e.g. cattle-dashboards by default). + +The ConfigMap should look like this: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards # Change if using a non-default namespace +data: + .json: |- + +``` + +By default, Grafana is configured to watch all ConfigMaps with the `grafana_dashboard` label within the `cattle-dashboards` namespace. + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, refer to [this section.](#configuring-namespaces-for-the-grafana-dashboard-configmap) + +To create the ConfigMap in the Rancher UI, + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to see the visualizations and click **Explore**. +1. Click **More Resources > Core > ConfigMaps**. +1. Click **Create**. +1. Set up the key-value pairs similar to the example above. When entering the value for `.json`, click **Read from File** to upload the JSON data model as the value. +1. Click **Create**. + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. + +Dashboards that are persisted using ConfigMaps cannot be deleted or edited from the Grafana UI. + +If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +### Configuring Namespaces for the Grafana Dashboard ConfigMap + +To specify that you would like Grafana to watch for ConfigMaps across all namespaces, set this value in the `rancher-monitoring` Helm chart: + +``` +grafana.sidecar.dashboards.searchNamespace=ALL +``` + +Note that the RBAC roles exposed by the Monitoring chart to add Grafana Dashboards are still restricted to giving permissions for users to add dashboards in the namespace defined in `grafana.dashboards.namespace`, which defaults to `cattle-dashboards`. + + + + +> **Prerequisites:** +> +> - The monitoring application needs to be installed. +> - You must have the cluster-admin ClusterRole permission. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. On the **Clusters** page, go to the cluster where you want to configure the Grafana namespace and click **Explore**. +1. In the left navigation bar, click **Monitoring**. +1. Click **Grafana**. +1. Log in to Grafana. Note: The default Admin username and password for the Grafana instance is `admin/prom-operator`. 
Alternative credentials can also be supplied on deploying or upgrading the chart. + + > **Note:** Regardless of who has the password, cluster administrator permission in Rancher is still required to access the Grafana instance. +1. Go to the dashboard that you want to persist. In the top navigation menu, go to the dashboard settings by clicking the gear icon. +1. In the left navigation menu, click **JSON Model**. +1. Copy the JSON data structure that appears. +1. Create a ConfigMap in the `cattle-dashboards` namespace. The ConfigMap needs to have the label `grafana_dashboard: "1"`. Paste the JSON into the ConfigMap in the format shown in the example below: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + labels: + grafana_dashboard: "1" + name: + namespace: cattle-dashboards + data: + .json: |- + + ``` + +**Result:** After the ConfigMap is created, it should show up on the Grafana UI and be persisted even if the Grafana pod is restarted. + +Dashboards that are persisted using ConfigMaps cannot be deleted from the Grafana UI. If you attempt to delete the dashboard in the Grafana UI, you will see the error message "Dashboard cannot be deleted because it was provisioned." To delete the dashboard, you will need to delete the ConfigMap. + +To prevent the persistent dashboard from being deleted when Monitoring v2 is uninstalled, add the following annotation to the `cattle-dashboards` namespace: + +``` +helm.sh/resource-policy: "keep" +``` + + + + +# Known Issues + +For users who are using Monitoring V2 v9.4.203 or below, uninstalling the Monitoring chart will delete the `cattle-dashboards` namespace, which will delete all persisted dashboards, unless the namespace is marked with the annotation `helm.sh/resource-policy: "keep"`. + +This annotation will be added by default in the new monitoring chart released by Rancher v2.5.8, but it still needs to be manually applied for users of earlier Rancher versions. diff --git a/content/rancher/v2.6/en/monitoring-alerting/guides/uninstall/_index.md b/versioned_docs/version-2.6/monitoring-alerting/guides/uninstall/uninstall.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/guides/uninstall/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/guides/uninstall/uninstall.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/how-monitoring-works/how-monitoring-works.md b/versioned_docs/version-2.6/monitoring-alerting/how-monitoring-works/how-monitoring-works.md new file mode 100644 index 0000000000..fcd70f138e --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/how-monitoring-works/how-monitoring-works.md @@ -0,0 +1,256 @@ +--- +title: How Monitoring Works +weight: 1 +--- + +1. [Architecture Overview](#1-architecture-overview) +2. [How Prometheus Works](#2-how-prometheus-works) +3. [How Alertmanager Works](#3-how-alertmanager-works) +4. [Monitoring V2 Specific Components](#4-monitoring-v2-specific-components) +5. [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics) + +# 1. Architecture Overview + +_**The following sections describe how data flows through the Monitoring V2 application:**_ + +### Prometheus Operator + +Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created. When the Prometheus configuration resources are created, Prometheus Operator calls the Prometheus API to sync the new configuration. 
As the diagram at the end of this section shows, the Prometheus Operator acts as the intermediary between Prometheus and Kubernetes, calling the Prometheus API to synchronize Prometheus with the monitoring-related resources in Kubernetes. + +### ServiceMonitors and PodMonitors + +ServiceMonitors and PodMonitors declaratively specify targets, such as Services and Pods, that need to be monitored. + +- Targets are scraped on a recurring schedule based on the configured Prometheus scrape interval, and the metrics that are scraped are stored into the Prometheus Time Series Database (TSDB). + +- In order to perform the scrape, ServiceMonitors and PodMonitors are defined with label selectors that determine which Services or Pods should be scraped and endpoints that determine how the scrape should happen on the given target, e.g., scrape/metrics in TCP 10252, proxying through IP addr x.x.x.x. + +- Out of the box, Monitoring V2 comes with certain pre-configured exporters that are deployed based on the type of Kubernetes cluster that it is deployed on. For more information, see [Scraping and Exposing Metrics](#5-scraping-and-exposing-metrics). + +### How PushProx Works + +- Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called **PushProx**. The Kubernetes components that expose metrics to Prometheus through PushProx are the following: +`kube-controller-manager`, `kube-scheduler`, `etcd`, and `kube-proxy`. + +- For each PushProx exporter, we deploy one PushProx client onto all target nodes. For example, a PushProx client is deployed onto all controlplane nodes for kube-controller-manager, all etcd nodes for kube-etcd, and all nodes for kubelet. + +- We deploy exactly one PushProx proxy per exporter. The process for exporting metrics is as follows: + +1. The PushProx Client establishes an outbound connection with the PushProx Proxy. +1. The client then polls the proxy for scrape requests that have come into the proxy. +1. When the proxy receives a scrape request from Prometheus, the client sees it as a result of the poll. +1. The client scrapes the internal component. +1. The internal component responds by pushing metrics back to the proxy. + + +
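To make the ServiceMonitor concept described earlier in this section more concrete, here is a minimal sketch of what such a resource might look like. The name, labels, namespace, port, and scrape interval are illustrative assumptions, not values shipped with Monitoring V2:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app                       # hypothetical name
  namespace: my-app-namespace        # hypothetical namespace
spec:
  selector:
    matchLabels:
      app: my-app                    # scrape Services carrying this label
  namespaceSelector:
    matchNames:
      - my-app-namespace             # limit the search to this namespace
  endpoints:
    - port: metrics                  # named port on the target Service
      path: /metrics                 # HTTP path that exposes Prometheus metrics
      interval: 30s                  # optional per-endpoint scrape interval
```

When a ServiceMonitor like this is created, the Prometheus Operator adds a matching scrape job to the Prometheus configuration, as described above.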

Process for Exporting Metrics with PushProx:
    + +![Process for Exporting Metrics with PushProx]({{}}/img/rancher/pushprox-process.svg) + +### PrometheusRules + +PrometheusRules allow users to define rules for what metrics or time series database queries should result in alerts being fired. Rules are evaluated on an interval. + +- **Recording rules** create a new time series based on existing series that have been collected. They are frequently used to precompute complex queries. +- **Alerting rules** run a particular query and fire an alert from Prometheus if the query evaluates to a non-zero value. + +### Alert Routing + +Once Prometheus determines that an alert needs to be fired, alerts are forwarded to **Alertmanager**. + +- Alerts contain labels that come from the PromQL query itself and additional labels and annotations that can be provided as part of specifying the initial PrometheusRule. + +- Before receiving any alerts, Alertmanager will use the **routes** and **receivers** specified in its configuration to form a routing tree on which all incoming alerts are evaluated. Each node of the routing tree can specify additional grouping, labeling, and filtering that needs to happen based on the labels attached to the Prometheus alert. A node on the routing tree (usually a leaf node) can also specify that an alert that reaches it needs to be sent out to a configured Receiver, e.g., Slack, PagerDuty, SMS, etc. Note that Alertmanager will send an alert first to **alertingDriver**, then alertingDriver will send or forward alert to the proper destination. + +- Routes and receivers are also stored in the Kubernetes API via the Alertmanager Secret. When the Secret is updated, Alertmanager is also updated automatically. Note that routing occurs via labels only (not via annotations, etc.). + +
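As a rough illustration of the PrometheusRule and alert-routing behavior described above, the following sketch defines one recording rule and one alerting rule. The group name, expressions, threshold, and label values are illustrative assumptions:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules                # hypothetical name
  namespace: cattle-monitoring-system
spec:
  groups:
    - name: example.rules
      rules:
        # Recording rule: precompute an expensive query and store it as a new series.
        - record: instance:cpu_utilization:ratio
          expr: 1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))
        # Alerting rule: fire when the expression has held true for 10 minutes.
        - alert: HighCPUUtilization
          expr: instance:cpu_utilization:ratio > 0.9
          for: 10m
          labels:
            severity: warning        # Alertmanager routes can match on this label
          annotations:
            summary: "CPU utilization on {{ $labels.instance }} has stayed above 90% for 10 minutes."
```

The `severity: warning` label is the kind of label an Alertmanager route could match on to choose a Receiver, as outlined in the alert-routing description above.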
How data flows through the monitoring application:
    + + +# 2. How Prometheus Works + +### Storing Time Series Data + +After collecting metrics from exporters, Prometheus stores the time series in a local on-disk time series database. Prometheus optionally integrates with remote systems, but `rancher-monitoring` uses local storage for the time series database. + +Once stored, users can query this TSDB using PromQL, the query language for Prometheus. + +PromQL queries can be visualized in one of two ways: + +1. By supplying the query in Prometheus's Graph UI, which will show a simple graphical view of the data. +1. By creating a Grafana Dashboard that contains the PromQL query and additional formatting directives that label axes, add units, change colors, use alternative visualizations, etc. + +### Defining Rules for Prometheus + +Rules define queries that Prometheus needs to execute on a regular `evaluationInterval` to perform certain actions, such as firing an alert (alerting rules) or precomputing a query based on others existing in its TSDB (recording rules). These rules are encoded in PrometheusRules custom resources. When PrometheusRule custom resources are created or updated, the Prometheus Operator observes the change and calls the Prometheus API to synchronize the set of rules that Prometheus is currently evaluating on a regular interval. + +A PrometheusRule allows you to define one or more RuleGroups. Each RuleGroup consists of a set of Rule objects that can each represent either an alerting or a recording rule with the following fields: + +- The name of the new alert or record +- A PromQL expression for the new alert or record +- Labels that should be attached to the alert or record that identify it (e.g. cluster name or severity) +- Annotations that encode any additional important pieces of information that need to be displayed on the notification for an alert (e.g. summary, description, message, runbook URL, etc.). This field is not required for recording rules. + +On evaluating a [rule](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#rule), Prometheus will execute the provided PromQL query, add additional provided labels (or annotations - only for alerting rules), and execute the appropriate action for the rule. For example, an Alerting Rule that adds `team: front-end` as a label to the provided PromQL query will append that label to the fired alert, which will allow Alertmanager to forward the alert to the correct Receiver. + +### Alerting and Recording Rules + +Prometheus doesn't maintain the state of whether alerts are active. It fires alerts repetitively at every evaluation interval, relying on Alertmanager to group and filter the alerts into meaningful notifications. + +The `evaluation_interval` constant defines how often Prometheus evaluates its alerting rules against the time series database. Similar to the `scrape_interval`, the `evaluation_interval` also defaults to one minute. + +The rules are contained in a set of rule files. Rule files include both alerting rules and recording rules, but only alerting rules result in alerts being fired after their evaluation. + +For recording rules, Prometheus runs a query, then stores it as a time series. This synthetic time series is useful for storing the results of an expensive or time-consuming query so that it can be queried more quickly in the future. + +Alerting rules are more commonly used. Whenever an alerting rule evaluates to a positive number, Prometheus fires an alert. 
+ +The Rule file adds labels and annotations to alerts before firing them, depending on the use case: + +- Labels indicate information that identifies the alert and could affect the routing of the alert. For example, if when sending an alert about a certain container, the container ID could be used as a label. + +- Annotations denote information that doesn't affect where an alert is routed, for example, a runbook or an error message. + +# 3. How Alertmanager Works + +The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of the following tasks: + +- Deduplicating, grouping, and routing alerts to the correct receiver integration such as email, PagerDuty, or OpsGenie + +- Silencing and inhibition of alerts + +- Tracking alerts that fire over time + +- Sending out the status of whether an alert is currently firing, or if it is resolved + +### Alerts Forwarded by alertingDrivers + +When alertingDrivers are installed, this creates a `Service` that can be used as the receiver's URL for Teams or SMS, based on the alertingDriver's configuration. The URL in the Receiver points to the alertingDrivers; so the Alertmanager sends alert first to alertingDriver, then alertingDriver forwards or sends alert to the proper destination. + +### Routing Alerts to Receivers + +Alertmanager coordinates where alerts are sent. It allows you to group alerts based on labels and fire them based on whether certain labels are matched. One top-level route accepts all alerts. From there, Alertmanager continues routing alerts to receivers based on whether they match the conditions of the next route. + +While the Rancher UI forms only allow editing a routing tree that is two levels deep, you can configure more deeply nested routing structures by editing the Alertmanager Secret. + +### Configuring Multiple Receivers + +By editing the forms in the Rancher UI, you can set up a Receiver resource with all the information Alertmanager needs to send alerts to your notification system. + +By editing custom YAML in the Alertmanager or Receiver configuration, you can also send alerts to multiple notification systems. For more information, see the section on configuring [Receivers.](../configuration/receiver/#configuring-multiple-receivers) + +# 4. Monitoring V2 Specific Components + +Prometheus Operator introduces a set of [Custom Resource Definitions](https://github.com/prometheus-operator/prometheus-operator#customresourcedefinitions) that allow users to deploy and manage Prometheus and Alertmanager instances by creating and modifying those custom resources on a cluster. + +Prometheus Operator will automatically update your Prometheus configuration based on the live state of the resources and configuration options that are edited in the Rancher UI. + +### Resources Deployed by Default + +By default, a set of resources curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project are deployed onto your cluster as part of installing the Rancher Monitoring Application to set up a basic Monitoring/Alerting stack. 
+ +The resources that get deployed onto your cluster to support this solution can be found in the [`rancher-monitoring`](https://github.com/rancher/charts/tree/main/charts/rancher-monitoring) Helm chart, which closely tracks the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community with certain changes tracked in the [CHANGELOG.md](https://github.com/rancher/charts/blob/main/charts/rancher-monitoring/CHANGELOG.md). + +### Default Exporters + +Monitoring V2 deploys three default exporters that provide additional metrics for Prometheus to store: + +1. `node-exporter`: exposes hardware and OS metrics for Linux hosts. For more information on `node-exporter`, refer to the [upstream documentation](https://prometheus.io/docs/guides/node-exporter/). + +1. `windows-exporter`: exposes hardware and OS metrics for Windows hosts (only deployed on Windows clusters). For more information on `windows-exporter`, refer to the [upstream documentation](https://github.com/prometheus-community/windows_exporter). + +1. `kube-state-metrics`: expose additional metrics that track the state of resources contained in the Kubernetes API (e.g., pods, workloads, etc.). For more information on `kube-state-metrics`, refer to the [upstream documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs). + +ServiceMonitors and PodMonitors will scrape these exporters, as defined [here](#defining-what-metrics-are-scraped). Prometheus stores these metrics, and you can query the results via either Prometheus's UI or Grafana. + +See the [architecture](#1-architecture-overview) section for more information on recording rules, alerting rules, and Alertmanager. + +### Components Exposed in the Rancher UI + +When the monitoring application is installed, you will be able to edit the following components in the Rancher UI: + +| Component | Type of Component | Purpose and Common Use Cases for Editing | +|--------------|------------------------|---------------------------| +| ServiceMonitor | Custom resource | Sets up Kubernetes Services to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| PodMonitor | Custom resource | Sets up Kubernetes Pods to scrape custom metrics from. Automatically updates the scrape configuration in the Prometheus custom resource. | +| Receiver | Configuration block (part of Alertmanager) | Modifies information on where to send an alert (e.g., Slack, PagerDuty, etc.) and any necessary information to send the alert (e.g., TLS certs, proxy URLs, etc.). Automatically updates the Alertmanager custom resource. | +| Route | Configuration block (part of Alertmanager) | Modifies the routing tree that is used to filter, label, and group alerts based on labels and send them to the appropriate Receiver. Automatically updates the Alertmanager custom resource. | +| PrometheusRule | Custom resource | Defines additional queries that need to trigger alerts or define materialized views of existing series that are within Prometheus's TSDB. Automatically updates the Prometheus custom resource. 
| + +### PushProx + +PushProx allows Prometheus to scrape metrics across a network boundary, which prevents users from having to expose metrics ports for internal Kubernetes components on each node in a Kubernetes cluster. + +Since the metrics for Kubernetes components are generally exposed on the host network of nodes in the cluster, PushProx deploys a DaemonSet of clients that sit on the hostNetwork of each node and make an outbound connection to a single proxy that is sitting on the Kubernetes API. Prometheus can then be configured to proxy scrape requests through the proxy to each client, which allows it to scrape metrics from the internal Kubernetes components without requiring any inbound node ports to be open. + +Refer to [Scraping Metrics with PushProx](#scraping-metrics-with-pushprox) for more. + +# 5. Scraping and Exposing Metrics + +### Defining what Metrics are Scraped + +ServiceMonitors and PodMonitors define targets that are intended for Prometheus to scrape. The [Prometheus custom resource](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/design.md#prometheus) tells Prometheus which ServiceMonitors or PodMonitors it should use to find out where to scrape metrics from. + +The Prometheus Operator observes the ServiceMonitors and PodMonitors. When it observes that they are created or updated, it calls the Prometheus API to update the scrape configuration in the Prometheus custom resource and keep it in sync with the scrape configuration in the ServiceMonitors or PodMonitors. This scrape configuration tells Prometheus which endpoints to scrape metrics from and how it will label the metrics from those endpoints. + +Prometheus scrapes all of the metrics defined in its scrape configuration at every `scrape_interval`, which is one minute by default. + +The scrape configuration can be viewed as part of the Prometheus custom resource that is exposed in the Rancher UI. + +### How the Prometheus Operator Sets up Metrics Scraping + +The Prometheus Deployment or StatefulSet scrapes metrics, and the configuration of Prometheus is controlled by the Prometheus custom resources. The Prometheus Operator watches for Prometheus and Alertmanager resources, and when they are created, the Prometheus Operator creates a Deployment or StatefulSet for Prometheus or Alertmanager with the user-defined configuration. + +When the Prometheus Operator observes ServiceMonitors, PodMonitors, and PrometheusRules being created, it knows that the scrape configuration needs to be updated in Prometheus. It updates Prometheus by first updating the configuration and rules files in the volumes of Prometheus's Deployment or StatefulSet. Then it calls the Prometheus API to sync the new configuration, resulting in the Prometheus Deployment or StatefulSet to be modified in place. + +### How Kubernetes Component Metrics are Exposed + +Prometheus scrapes metrics from deployments known as [exporters,](https://prometheus.io/docs/instrumenting/exporters/) which export the time series data in a format that Prometheus can ingest. In Prometheus, time series consist of streams of timestamped values belonging to the same metric and the same set of labeled dimensions. + +### Scraping Metrics with PushProx + +Certain internal Kubernetes components are scraped via a proxy deployed as part of Monitoring V2 called PushProx. 
For detailed information on PushProx, refer [here](#how-pushprox-works) and to the above [architecture](#1-architecture-overview) section. + +### Scraping Metrics + +The following Kubernetes components are directly scraped by Prometheus: + +- kubelet* +- ingress-nginx** +- coreDns/kubeDns +- kube-api-server + +\* You can optionally use `hardenedKubelet.enabled` to use a PushProx, but that is not the default. + +** For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component. + + +### Scraping Metrics Based on Kubernetes Distribution + +Metrics are scraped differently based on the Kubernetes distribution. For help with terminology, refer [here](#terminology). For details, see the table below: + +
How Metrics are Exposed to Prometheus

| Kubernetes Component | RKE | RKE2 | KubeADM | K3s |
|-----|-----|-----|-----|-----|
| kube-controller-manager | rkeControllerManager.enabled | rke2ControllerManager.enabled | kubeAdmControllerManager.enabled | k3sServer.enabled |
| kube-scheduler | rkeScheduler.enabled | rke2Scheduler.enabled | kubeAdmScheduler.enabled | k3sServer.enabled |
| etcd | rkeEtcd.enabled | rke2Etcd.enabled | kubeAdmEtcd.enabled | Not available |
| kube-proxy | rkeProxy.enabled | rke2Proxy.enabled | kubeAdmProxy.enabled | k3sServer.enabled |
| kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet | Collects metrics directly exposed by kubelet |
| ingress-nginx* | Collects metrics directly exposed by ingress-nginx, exposed by rkeIngressNginx.enabled | Collects metrics directly exposed by ingress-nginx, exposed by rke2IngressNginx.enabled | Not available | Not available |
| coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns | Collects metrics directly exposed by coreDns/kubeDns |
| kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server | Collects metrics directly exposed by kube-api-server |

\* For RKE and RKE2 clusters, ingress-nginx is deployed by default and treated as an internal Kubernetes component.

### Terminology

- **kube-scheduler:** The internal Kubernetes component that uses information in the pod spec to decide on which node to run a pod.
- **kube-controller-manager:** The internal Kubernetes component that is responsible for node management (detecting if a node fails), pod replication, and endpoint creation.
- **etcd:** The internal Kubernetes component that is the distributed key/value store which Kubernetes uses for persistent storage of all cluster information.
- **kube-proxy:** The internal Kubernetes component that watches the API server for changes to pods and services in order to keep the cluster's network rules up to date.
- **kubelet:** The internal Kubernetes component that watches the API server for pods on a node and makes sure they are running.
- **ingress-nginx:** An Ingress controller for Kubernetes that uses NGINX as a reverse proxy and load balancer.
- **coreDns/kubeDns:** The internal Kubernetes component responsible for DNS.
- **kube-api-server:** The main internal Kubernetes component that is responsible for exposing APIs for the other master components.
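To connect the distribution table above to chart configuration, the following is a hedged sketch of how these flags might be set in the `rancher-monitoring` Helm chart values for an RKE2 cluster. The exact set of keys and their defaults can vary by chart version, so treat this as illustrative rather than authoritative:

```yaml
# Illustrative values fragment for an RKE2 cluster.
rke2ControllerManager:
  enabled: true      # scrape kube-controller-manager through PushProx
rke2Scheduler:
  enabled: true      # scrape kube-scheduler through PushProx
rke2Etcd:
  enabled: true      # scrape etcd through PushProx
rke2Proxy:
  enabled: true      # scrape kube-proxy through PushProx
rke2IngressNginx:
  enabled: true      # expose ingress-nginx metrics
hardenedKubelet:
  enabled: false     # optionally scrape the kubelet through PushProx instead of directly
```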
diff --git a/content/rancher/v2.6/en/monitoring-alerting/_index.md b/versioned_docs/version-2.6/monitoring-alerting/monitoring-alerting.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/monitoring-alerting.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/customizing-grafana/_index.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/customizing-grafana/customizing-grafana.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/customizing-grafana/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/customizing-grafana/customizing-grafana.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/enable-prom-fed/_index.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/enable-prom-fed/enable-prom-fed.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/enable-prom-fed/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/enable-prom-fed/enable-prom-fed.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/prom-fed-workloads/_index.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/prom-fed-workloads/prom-fed-workloads.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/guides/prom-fed-workloads/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/prom-fed-workloads/prom-fed-workloads.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/uninstall-prom-fed/uninstall-prom-fed.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/uninstall-prom-fed/uninstall-prom-fed.md new file mode 100644 index 0000000000..2db6cdda41 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/guides/uninstall-prom-fed/uninstall-prom-fed.md @@ -0,0 +1,14 @@ +--- +title: Uninstall Prometheus Federator +weight: 2 +--- + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. In the left navigation bar, click **Apps & Marketplace**. +1. Click **Installed Apps**. +1. Go to the `cattle-monitoring-system` namespace and check the box for `prometheus-federator`. +1. Click **Delete**. +1. Confirm **Delete**. + +**Result:** `prometheus-federator` is uninstalled.
diff --git a/content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/_index.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/prometheus-federator.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/prometheus-federator/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/prometheus-federator.md diff --git a/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/rbac/rbac.md b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/rbac/rbac.md new file mode 100644 index 0000000000..eb27843156 --- /dev/null +++ b/versioned_docs/version-2.6/monitoring-alerting/prometheus-federator/rbac/rbac.md @@ -0,0 +1,29 @@ +--- +title: Role-Based Access Control +shortTitle: RBAC +weight: 2 +--- + +This section describes the expectations for Role-Based Access Control (RBAC) for Prometheus Federator. + +As described in the section on [namespaces](../prometheus-federator#namespaces), Prometheus Federator expects that Project Owners, Project Members, and other users in the cluster with Project-level permissions (e.g. permissions in a certain set of namespaces identified by a single label selector) have minimal permissions in any namespaces except the Project Registration Namespace (which is imported into the project by default) and those that already comprise their projects. Therefore, in order to allow Project Owners to assign specific chart permissions to other users in their Project namespaces, the Helm Project Operator will automatically watch the following bindings: + +- ClusterRoleBindings +- RoleBindings in the Project Release Namespace + +On observing a change to one of those types of bindings, the Helm Project Operator will check whether the `roleRef` that the the binding points to matches a ClusterRole with the name provided under: + +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.admin` +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.edit` +- `helmProjectOperator.releaseRoleBindings.clusterRoleRefs.view` + +By default, these roleRefs will correspond to `admin`, `edit`, and `view` respectively, which are the [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles). + +> **Note** For Rancher RBAC users, these [default Kubernetes user-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) directly correlate to the `Project Owner`, `Project Member`, and `Read-Only` default Project Role Templates. + +If the `roleRef` matches, the Helm Project Operator will filter the `subjects` of the binding for all Users and Groups and use that to automatically construct a RoleBinding for each Role in the Project Release Namespace with the same name as the role and the following labels: + +- `helm.cattle.io/project-helm-chart-role: {{ .Release.Name }}` +- `helm.cattle.io/project-helm-chart-role-aggregate-from: ` + +By default, `rancher-project-monitoring`, the underlying chart deployed by Prometheus Federator, creates three default Roles per Project Release Namespace that provide `admin`, `edit`, and `view` users to permissions to view the Prometheus, Alertmanager, and Grafana UIs of the Project Monitoring Stack to provide least privilege. 
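To illustrate the label-based mechanism described above, here is a hedged sketch of a custom Role that a cluster admin might create in a Project Release Namespace so that Project Owners can grant it to users in their project. The Role name, namespace, rules, and the `aggregate-from` value are illustrative assumptions:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: extra-monitoring-view                       # hypothetical name
  namespace: cattle-project-p-example-monitoring    # hypothetical Project Release Namespace
  labels:
    # Ties the Role to the project's Helm release (value assumed to be the release name).
    helm.cattle.io/project-helm-chart-role: cattle-project-p-example-monitoring
    # Assumed to aggregate this Role into the default "view" set of users.
    helm.cattle.io/project-helm-chart-role-aggregate-from: view
rules:
  - apiGroups: ["monitoring.coreos.com"]
    resources: ["prometheusrules"]
    verbs: ["get", "list", "watch"]
```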
However, if a Cluster Admin would like to assign additional permissions to certain users, they can either directly assign RoleBindings in the Project Release Namespace to certain users or create Roles with the above two labels on them to allow Project Owners to control assigning those RBAC roles to users in their Project Registration namespaces. \ No newline at end of file diff --git a/content/rancher/v2.6/en/monitoring-alerting/rbac/_index.md b/versioned_docs/version-2.6/monitoring-alerting/rbac/rbac.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/rbac/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/rbac/rbac.md diff --git a/content/rancher/v2.6/en/monitoring-alerting/windows-clusters/_index.md b/versioned_docs/version-2.6/monitoring-alerting/windows-clusters/windows-clusters.md similarity index 100% rename from content/rancher/v2.6/en/monitoring-alerting/windows-clusters/_index.md rename to versioned_docs/version-2.6/monitoring-alerting/windows-clusters/windows-clusters.md diff --git a/content/rancher/v2.6/en/neuvector-integration/_index.md b/versioned_docs/version-2.6/neuvector-integration/neuvector-integration.md similarity index 100% rename from content/rancher/v2.6/en/neuvector-integration/_index.md rename to versioned_docs/version-2.6/neuvector-integration/neuvector-integration.md diff --git a/content/rancher/v2.6/en/opa-gatekeper/_index.md b/versioned_docs/version-2.6/opa-gatekeper/opa-gatekeper.md similarity index 100% rename from content/rancher/v2.6/en/opa-gatekeper/_index.md rename to versioned_docs/version-2.6/opa-gatekeper/opa-gatekeper.md diff --git a/content/rancher/v2.6/en/overview/architecture-recommendations/_index.md b/versioned_docs/version-2.6/overview/architecture-recommendations/architecture-recommendations.md similarity index 100% rename from content/rancher/v2.6/en/overview/architecture-recommendations/_index.md rename to versioned_docs/version-2.6/overview/architecture-recommendations/architecture-recommendations.md diff --git a/content/rancher/v2.6/en/overview/architecture/_index.md b/versioned_docs/version-2.6/overview/architecture/architecture.md similarity index 100% rename from content/rancher/v2.6/en/overview/architecture/_index.md rename to versioned_docs/version-2.6/overview/architecture/architecture.md diff --git a/content/rancher/v2.6/en/overview/concepts/_index.md b/versioned_docs/version-2.6/overview/concepts/concepts.md similarity index 100% rename from content/rancher/v2.6/en/overview/concepts/_index.md rename to versioned_docs/version-2.6/overview/concepts/concepts.md diff --git a/versioned_docs/version-2.6/overview/overview.md b/versioned_docs/version-2.6/overview/overview.md new file mode 100644 index 0000000000..8840c78bf8 --- /dev/null +++ b/versioned_docs/version-2.6/overview/overview.md @@ -0,0 +1,66 @@ +--- +title: Overview +weight: 1 +--- + +Rancher is a container management platform built for organizations that deploy containers in production. Rancher makes it easy to run Kubernetes everywhere, meet IT requirements, and empower DevOps teams. + +# Run Kubernetes Everywhere + +Kubernetes has become the container orchestration standard. Most cloud and virtualization vendors now offer it as standard infrastructure. Rancher users have the choice of creating Kubernetes clusters with Rancher Kubernetes Engine (RKE) or cloud Kubernetes services, such as GKE, AKS, and EKS. 
Rancher users can also import and manage their existing Kubernetes clusters created using any Kubernetes distribution or installer. + +# Meet IT Requirements + +Rancher supports centralized authentication, access control, and monitoring for all Kubernetes clusters under its control. For example, you can: + +- Use your Active Directory credentials to access Kubernetes clusters hosted by cloud vendors, such as GKE. +- Setup and enforce access control and security policies across all users, groups, projects, clusters, and clouds. +- View the health and capacity of your Kubernetes clusters from a single-pane-of-glass. + +# Empower DevOps Teams + +Rancher provides an intuitive user interface for DevOps engineers to manage their application workload. The user does not need to have in-depth knowledge of Kubernetes concepts to start using Rancher. Rancher catalog contains a set of useful DevOps tools. Rancher is certified with a wide selection of cloud native ecosystem products, including, for example, security tools, monitoring systems, container registries, and storage and networking drivers. + +The following figure illustrates the role Rancher plays in IT and DevOps organizations. Each team deploys their applications on the public or private clouds they choose. IT administrators gain visibility and enforce policies across all users, clusters, and clouds. + +![Platform]({{}}/img/rancher/platform.png) + +# Features of the Rancher API Server + +The Rancher API server is built on top of an embedded Kubernetes API server and an etcd database. It implements the following functionalities: + +### Authorization and Role-Based Access Control + +- **User management:** The Rancher API server [manages user identities]({{}}/rancher/v2.6/en/admin-settings/authentication/) that correspond to external authentication providers like Active Directory or GitHub, in addition to local users. +- **Authorization:** The Rancher API server manages [access control]({{}}/rancher/v2.6/en/admin-settings/rbac/) and [security]({{}}/rancher/v2.6/en/admin-settings/pod-security-policies/) policies. + +### Working with Kubernetes + +- **Provisioning Kubernetes clusters:** The Rancher API server can [provision Kubernetes]({{}}/rancher/v2.6/en/cluster-provisioning/) on existing nodes, or perform [Kubernetes upgrades.]({{}}/rancher/v2.6/en/cluster-admin/upgrading-kubernetes) +- **Catalog management:** Rancher provides the ability to use a [catalog of Helm charts]({{}}/rancher/v2.6/en/helm-charts/) that make it easy to repeatedly deploy applications. +- **Managing projects:** A project is a group of multiple namespaces and access control policies within a cluster. A project is a Rancher concept, not a Kubernetes concept, which allows you to manage multiple namespaces as a group and perform Kubernetes operations in them. The Rancher UI provides features for [project administration]({{}}/rancher/v2.6/en/project-admin/) and for [managing applications within projects.]({{}}/rancher/v2.6/en/k8s-in-rancher/) +- **Pipelines:** Setting up a [pipeline]({{}}/rancher/v2.6/en/project-admin/pipelines/) can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. +- **Istio:** Our [integration with Istio]({{}}/rancher/v2.6/en/istio/) is designed so that a Rancher operator, such as an administrator or cluster owner, can deliver Istio to developers. 
Then developers can use Istio to enforce security policies, troubleshoot problems, or manage traffic for green/blue deployments, canary deployments, or A/B testing. + +### Working with Cloud Infrastructure + +- **Tracking nodes:** The Rancher API server tracks identities of all the [nodes]({{}}/rancher/v2.6/en/cluster-admin/nodes/) in all clusters. +- **Setting up infrastructure:** When configured to use a cloud provider, Rancher can dynamically provision [new nodes]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/node-pools/) and [persistent storage]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) in the cloud. + +### Cluster Visibility + +- **Logging:** Rancher can integrate with a variety of popular logging services and tools that exist outside of your Kubernetes clusters. +- **Monitoring:** Using Rancher, you can monitor the state and processes of your cluster nodes, Kubernetes components, and software deployments through integration with Prometheus, a leading open-source monitoring solution. +- **Alerting:** To keep your clusters and applications healthy and driving your organizational productivity forward, you need to stay informed of events occurring in your clusters and projects, both planned and unplanned. + +# Editing Downstream Clusters with Rancher + +The options and settings available for an existing cluster change based on the method that you used to provision it. For example, only clusters [provisioned by RKE]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/) have **Cluster Options** available for editing. + +After a cluster is created with Rancher, a cluster administrator can manage cluster membership, enable pod security policies, and manage node pools, among [other options.]({{}}/rancher/v2.6/en/cluster-admin/editing-clusters/) + +The following table summarizes the options and settings available for each cluster type: + +import ClusterCapabilitiesTable from '/rancher/v2.6/en/shared-files/_cluster-capabilities-table.md'; + + diff --git a/versioned_docs/version-2.6/pipelines/concepts/concepts.md b/versioned_docs/version-2.6/pipelines/concepts/concepts.md new file mode 100644 index 0000000000..1f603f0281 --- /dev/null +++ b/versioned_docs/version-2.6/pipelines/concepts/concepts.md @@ -0,0 +1,36 @@ +--- +title: Concepts +weight: 1 +--- + +The purpose of this page is to explain common concepts and terminology related to pipelines. + +- **Pipeline:** + + A _pipeline_ is a software delivery process that is broken into different stages and steps. Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Within Rancher, you can configure pipelines for each of your Rancher projects. A pipeline is based on a specific repository. It defines the process to build, test, and deploy your code. Rancher uses the [pipeline as code](https://jenkins.io/doc/book/pipeline-as-code/) model. Pipeline configuration is represented as a pipeline file in the source code repository, using the file name `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. + +- **Stages:** + + A pipeline stage consists of multiple steps. Stages are executed in the order defined in the pipeline file. The steps in a stage are executed concurrently. A stage starts when all steps in the former stage finish without failure. + +- **Steps:** + + A pipeline step is executed inside a specified stage. A step fails if it exits with a code other than `0`. 
If a step exits with this failure code, the entire pipeline fails and terminates. + +- **Workspace:** + + The workspace is the working directory shared by all pipeline steps. In the beginning of a pipeline, source code is checked out to the workspace. The command for every step bootstraps in the workspace. During a pipeline execution, the artifacts from a previous step will be available in future steps. The working directory is an ephemeral volume and will be cleaned out with the executor pod when a pipeline execution is finished. + +Typically, pipeline stages include: + +- **Build:** + + Each time code is checked into your repository, the pipeline automatically clones the repo and builds a new iteration of your software. Throughout this process, the software is typically reviewed by automated tests. + +- **Publish:** + + After the build is completed, either a Docker image is built and published to a Docker registry or a catalog template is published. + +- **Deploy:** + + After the artifacts are published, you would release your application so users could start using the updated product. diff --git a/versioned_docs/version-2.6/pipelines/config/config.md b/versioned_docs/version-2.6/pipelines/config/config.md new file mode 100644 index 0000000000..333d4aff51 --- /dev/null +++ b/versioned_docs/version-2.6/pipelines/config/config.md @@ -0,0 +1,643 @@ +--- +title: Pipeline Configuration Reference +weight: 1 +--- + +In this section, you'll learn how to configure pipelines. + +- [Step Types](#step-types) +- [Step Type: Run Script](#step-type-run-script) +- [Step Type: Build and Publish Images](#step-type-build-and-publish-images) +- [Step Type: Publish Catalog Template](#step-type-publish-catalog-template) +- [Step Type: Deploy YAML](#step-type-deploy-yaml) +- [Step Type: Deploy Catalog App](#step-type-deploy-catalog-app) +- [Notifications](#notifications) +- [Timeouts](#timeouts) +- [Triggers and Trigger Rules](#triggers-and-trigger-rules) +- [Environment Variables](#environment-variables) +- [Secrets](#secrets) +- [Pipeline Variable Substitution Reference](#pipeline-variable-substitution-reference) +- [Global Pipeline Execution Settings](#global-pipeline-execution-settings) + - [Executor Quota](#executor-quota) + - [Resource Quota for Executors](#resource-quota-for-executors) + - [Custom CA](#custom-ca) +- [Persistent Data for Pipeline Components](#persistent-data-for-pipeline-components) +- [Example rancher-pipeline.yml](#example-rancher-pipeline-yml) + +# Step Types + +Within each stage, you can add as many steps as you'd like. When there are multiple steps in one stage, they run concurrently. + +Step types include: + +- [Run Script](#step-type-run-script) +- [Build and Publish Images](#step-type-build-and-publish-images) +- [Publish Catalog Template](#step-type-publish-catalog-template) +- [Deploy YAML](#step-type-deploy-yaml) +- [Deploy Catalog App](#step-type-deploy-catalog-app) + + + +### Configuring Steps By UI + +If you haven't added any stages, click **Configure pipeline for this branch** to configure the pipeline through the UI. + +1. Add stages to your pipeline execution by clicking **Add Stage**. + + 1. Enter a **Name** for each stage of your pipeline. + 1. For each stage, you can configure [trigger rules](#triggers-and-trigger-rules) by clicking on **Show Advanced Options**. Note: this can always be updated at a later time. + +1. After you've created a stage, start [adding steps](#step-types) by clicking **Add a Step**. You can add multiple steps to each stage. 
+ +### Configuring Steps by YAML + +For each stage, you can add multiple steps. Read more about each [step type](#step-types) and the advanced options to get all the details on how to configure the YAML. This is only a small example of how to have multiple stages with a singular step in each stage. + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + - name: Publish my image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: rancher/rancher:v2.0.0 + # Optionally push to remote registry + pushRemote: true + registry: reg.example.com +``` +# Step Type: Run Script + +The **Run Script** step executes arbitrary commands in the workspace inside a specified container. You can use it to build, test and do more, given whatever utilities the base image provides. For your convenience, you can use variables to refer to metadata of a pipeline execution. Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configuring Script by UI + +1. From the **Step Type** drop-down, choose **Run Script** and fill in the form. + +1. Click **Add**. + +### Configuring Script by YAML +```yaml +# example +stages: +- name: Build something + steps: + - runScriptConfig: + image: golang + shellScript: go build +``` +# Step Type: Build and Publish Images + +The **Build and Publish Image** step builds and publishes a Docker image. This process requires a Dockerfile in your source code's repository to complete successfully. + +The option to publish an image to an insecure registry is not exposed in the UI, but you can specify an environment variable in the YAML that allows you to publish an image insecurely. + +### Configuring Building and Publishing Images by UI +1. From the **Step Type** drop-down, choose **Build and Publish**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Dockerfile Path | The relative path to the Dockerfile in the source code repo. By default, this path is `./Dockerfile`, which assumes the Dockerfile is in the root directory. You can set it to other paths in different use cases (`./path/to/myDockerfile` for example). | + Image Name | The image name in `name:tag` format. The registry address is not required. For example, to build `example.com/repo/my-image:dev`, enter `repo/my-image:dev`. | + Push image to remote repository | An option to set the registry that publishes the image that's built. To use this option, enable it and choose a registry from the drop-down. If this option is disabled, the image is pushed to the internal registry. | + Build Context
(**Show advanced options**) | By default, the root directory of the source code (`.`). For more details, see the Docker [build command documentation](https://docs.docker.com/engine/reference/commandline/build/). |

### Configuring Building and Publishing Images by YAML

You can use specific arguments for the Docker daemon and the build. They are not exposed in the UI, but they are available in pipeline YAML format, as indicated in the example below. Available environment variables include:

Variable Name | Description
------------------------|------------------------------------------------------------
PLUGIN_DRY_RUN | Disable docker push
PLUGIN_DEBUG | Docker daemon executes in debug mode
PLUGIN_MIRROR | Docker daemon registry mirror
PLUGIN_INSECURE | Docker daemon allows insecure registries
PLUGIN_BUILD_ARGS | Docker build args, a comma separated list
    + +```yaml +# This example shows an environment variable being used +# in the Publish Image step. This variable allows you to +# publish an image to an insecure registry: + +stages: +- name: Publish Image + steps: + - publishImageConfig: + dockerfilePath: ./Dockerfile + buildContext: . + tag: repo/app:v1 + pushRemote: true + registry: example.com + env: + PLUGIN_INSECURE: "true" +``` + +# Step Type: Publish Catalog Template + +The **Publish Catalog Template** step publishes a version of a catalog app template (i.e. Helm chart) to a git hosted chart repository. It generates a git commit and pushes it to your chart repository. This process requires a chart folder in your source code's repository and a pre-configured secret in the dedicated pipeline namespace to complete successfully. Any variables in the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) is supported for any file in the chart folder. + +### Configuring Publishing a Catalog Template by UI + +1. From the **Step Type** drop-down, choose **Publish Catalog Template**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Chart Folder | The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. | + Catalog Template Name | The name of the template. For example, wordpress. | + Catalog Template Version | The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. | + Protocol | You can choose to publish via HTTP(S) or SSH protocol. | + Secret | The secret that stores your Git credentials. You need to create a secret in dedicated pipeline namespace in the project before adding this step. If you use HTTP(S) protocol, store Git username and password in `USERNAME` and `PASSWORD` key of the secret. If you use SSH protocol, store Git deploy key in `DEPLOY_KEY` key of the secret. After the secret is created, select it in this option. | + Git URL | The Git URL of the chart repository that the template will be published to. | + Git Branch | The Git branch of the chart repository that the template will be published to. | + Author Name | The author name used in the commit message. | + Author Email | The author email used in the commit message. | + + +### Configuring Publishing a Catalog Template by YAML + +You can add **Publish Catalog Template** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `publishCatalogConfig`. You will provide the following information: + +* Path: The relative path to the chart folder in the source code repo, where the `Chart.yaml` file is located. +* CatalogTemplate: The name of the template. +* Version: The version of the template you want to publish, it should be consistent with the version defined in the `Chart.yaml` file. +* GitUrl: The git URL of the chart repository that the template will be published to. +* GitBranch: The git branch of the chart repository that the template will be published to. +* GitAuthor: The author name used in the commit message. +* GitEmail: The author email used in the commit message. +* Credentials: You should provide Git credentials by referencing secrets in dedicated pipeline namespace. If you publish via SSH protocol, inject your deploy key to the `DEPLOY_KEY` environment variable. 
If you publish via HTTP(S) protocol, inject your username and password to `USERNAME` and `PASSWORD` environment variables. + +```yaml +# example +stages: +- name: Publish Wordpress Template + steps: + - publishCatalogConfig: + path: ./charts/wordpress/latest + catalogTemplate: wordpress + version: ${CICD_GIT_TAG} + gitUrl: git@github.com:myrepo/charts.git + gitBranch: master + gitAuthor: example-user + gitEmail: user@example.com + envFrom: + - sourceName: publish-keys + sourceKey: DEPLOY_KEY +``` + +# Step Type: Deploy YAML + +This step deploys arbitrary Kubernetes resources to the project. This deployment requires a Kubernetes manifest file to be present in the source code repository. Pipeline variable substitution is supported in the manifest file. You can view an example file at [GitHub](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml). Please refer to the [pipeline variable substitution reference](#pipeline-variable-substitution-reference) for the list of available variables. + +### Configure Deploying YAML by UI + +1. From the **Step Type** drop-down, choose **Deploy YAML** and fill in the form. + +1. Enter the **YAML Path**, which is the path to the manifest file in the source code. + +1. Click **Add**. + +### Configure Deploying YAML by YAML + +```yaml +# example +stages: +- name: Deploy + steps: + - applyYamlConfig: + path: ./deployment.yaml +``` + +# Step Type :Deploy Catalog App + +The **Deploy Catalog App** step deploys a catalog app in the project. It will install a new app if it is not present, or upgrade an existing one. + +### Configure Deploying Catalog App by UI + +1. From the **Step Type** drop-down, choose **Deploy Catalog App**. + +1. Fill in the rest of the form. Descriptions for each field are listed below. When you're done, click **Add**. + + Field | Description | + ---------|----------| + Catalog | The catalog from which the app template will be used. | + Template Name | The name of the app template. For example, wordpress. | + Template Version | The version of the app template you want to deploy. | + Namespace | The target namespace where you want to deploy the app. | + App Name | The name of the app you want to deploy. | + Answers | Key-value pairs of answers used to deploy the app. | + + +### Configure Deploying Catalog App by YAML + +You can add **Deploy Catalog App** steps directly in the `.rancher-pipeline.yml` file. + +Under the `steps` section, add a step with `applyAppConfig`. You will provide the following information: + +* CatalogTemplate: The ID of the template. This can be found by clicking `Launch app` and selecting `View details` for the app. It is the last part of the URL. +* Version: The version of the template you want to deploy. +* Answers: Key-value pairs of answers used to deploy the app. +* Name: The name of the app you want to deploy. +* TargetNamespace: The target namespace where you want to deploy the app. + +```yaml +# example +stages: +- name: Deploy App + steps: + - applyAppConfig: + catalogTemplate: cattle-global-data:library-mysql + version: 0.3.8 + answers: + persistence.enabled: "false" + name: testmysql + targetNamespace: test +``` + +# Timeouts + +By default, each pipeline execution has a timeout of 60 minutes. If the pipeline execution cannot complete within its timeout period, the pipeline is aborted. + +### Configuring Timeouts by UI + +Enter a new value in the **Timeout** field. 
+ +### Configuring Timeouts by YAML + +In the `timeout` section, enter the timeout value in minutes. + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +# timeout in minutes +timeout: 30 +``` + +# Notifications + +You can enable notifications to any notifiers based on the build status of a pipeline. Before enabling notifications, Rancher recommends setting up notifiers so it will be easy to add recipients immediately. + +### Configuring Notifications by UI + +1. Within the **Notification** section, turn on notifications by clicking **Enable**. + +1. Select the conditions for the notification. You can select to get a notification for the following statuses: `Failed`, `Success`, `Changed`. For example, if you want to receive notifications when an execution fails, select **Failed**. + +1. If you don't have any existing notifiers, Rancher will provide a warning that no notifiers are set up and provide a link to be able to go to the notifiers page. Follow the [instructions]({{}}/rancher/v2.0-v2.4/en/cluster-admin/tools/notifiers) to add a notifier. If you already have notifiers, you can add them to the notification by clicking the **Add Recipient** button. + + > **Note:** Notifiers are configured at a cluster level and require a different level of permissions. + +1. For each recipient, select which notifier type from the dropdown. Based on the type of notifier, you can use the default recipient or override the recipient with a different one. For example, if you have a notifier for _Slack_, you can update which channel to send the notification to. You can add additional notifiers by clicking **Add Recipient**. + +### Configuring Notifications by YAML + +In the `notification` section, you will provide the following information: + +* **Recipients:** This will be the list of notifiers/recipients that will receive the notification. + * **Notifier:** The ID of the notifier. This can be found by finding the notifier and selecting **View in API** to get the ID. + * **Recipient:** Depending on the type of the notifier, the "default recipient" can be used or you can override this with a different recipient. For example, when configuring a slack notifier, you select a channel as your default recipient, but if you wanted to send notifications to a different channel, you can select a different recipient. +* **Condition:** Select which conditions of when you want the notification to be sent. +* **Message (Optional):** If you want to change the default notification message, you can edit this in the yaml. Note: This option is not available in the UI. + +```yaml +# Example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: ls +notification: + recipients: + - # Recipient + recipient: "#mychannel" + # ID of Notifier + notifier: "c-wdcsr:n-c9pg7" + - recipient: "test@example.com" + notifier: "c-wdcsr:n-lkrhd" + # Select which statuses you want the notification to be sent + condition: ["Failed", "Success", "Changed"] + # Ability to override the default message (Optional) + message: "my-message" +``` + +# Triggers and Trigger Rules + +After you configure a pipeline, you can trigger it using different methods: + +- **Manually:** + + After you configure a pipeline, you can trigger a build using the latest CI definition from Rancher UI. When a pipeline execution is triggered, Rancher dynamically provisions a Kubernetes pod to run your CI tasks and then remove it upon completion. 
+ +- **Automatically:** + + When you enable a repository for a pipeline, webhooks are automatically added to the version control system. When project users interact with the repo by pushing code, opening pull requests, or creating a tag, the version control system sends a webhook to Rancher Server, triggering a pipeline execution. + + To use this automation, webhook management permission is required for the repository. Therefore, when users authenticate and fetch their repositories, only those on which they have webhook management permission will be shown. + +Trigger rules can be created to have fine-grained control of pipeline executions in your pipeline configuration. Trigger rules come in two types: + +- **Run this when:** This type of rule starts the pipeline, stage, or step when a trigger explicitly occurs. + +- **Do Not Run this when:** This type of rule skips the pipeline, stage, or step when a trigger explicitly occurs. + +If all conditions evaluate to `true`, then the pipeline/stage/step is executed. Otherwise it is skipped. When a pipeline is skipped, none of the pipeline is executed. When a stage/step is skipped, it is considered successful and follow-up stages/steps continue to run. + +Wildcard character (`*`) expansion is supported in `branch` conditions. + +This section covers the following topics: + +- [Configuring pipeline triggers](#configuring-pipeline-triggers) +- [Configuring stage triggers](#configuring-stage-triggers) +- [Configuring step triggers](#configuring-step-triggers) +- [Configuring triggers by YAML](#configuring-triggers-by-yaml) + +### Configuring Pipeline Triggers + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Click on **Show Advanced Options**. +1. In the **Trigger Rules** section, configure rules to run or skip the pipeline. + + 1. Click **Add Rule**. In the **Value** field, enter the name of the branch that triggers the pipeline. + + 1. **Optional:** Add more branches that trigger a build. + +1. Click **Done**. + +### Configuring Stage Triggers + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Find the **stage** that you want to manage trigger rules, click the **Edit** icon for that stage. +1. Click **Show advanced options**. +1. In the **Trigger Rules** section, configure rules to run or skip the stage. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the stage and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the stage. | + | Event | The type of event that triggers the stage. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + +### Configuring Step Triggers + +1. 
In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the repository for which you want to manage trigger rules, select the vertical **⋮ > Edit Config**. +1. Find the **step** that you want to manage trigger rules, click the **Edit** icon for that step. +1. Click **Show advanced options**. +1. In the **Trigger Rules** section, configure rules to run or skip the step. + + 1. Click **Add Rule**. + + 1. Choose the **Type** that triggers the step and enter a value. + + | Type | Value | + | ------ | -------------------------------------------------------------------- | + | Branch | The name of the branch that triggers the step. | + | Event | The type of event that triggers the step. Values are: `Push`, `Pull Request`, `Tag` | + +1. Click **Save**. + + +### Configuring Triggers by YAML + +```yaml +# example +stages: + - name: Build something + # Conditions for stages + when: + branch: master + event: [ push, pull_request ] + # Multiple steps run concurrently + steps: + - runScriptConfig: + image: busybox + shellScript: date -R + # Conditions for steps + when: + branch: [ master, dev ] + event: push +# branch conditions for the pipeline +branch: + include: [ master, feature/*] + exclude: [ dev ] +``` + +# Environment Variables + +When configuring a pipeline, certain [step types](#step-types) allow you to use environment variables to configure the step's script. + +### Configuring Environment Variables by UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. +1. Within one of the stages, find the **step** that you want to add an environment variable for, click the **Edit** icon. +1. Click **Show advanced options**. +1. Click **Add Variable**, and then enter a key and value in the fields that appear. Add more variables if needed. +1. Add your environment variable(s) into either the script or file. +1. Click **Save**. + +### Configuring Environment Variables by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${FIRST_KEY} && echo ${SECOND_KEY} + env: + FIRST_KEY: VALUE + SECOND_KEY: VALUE2 +``` + +# Secrets + +If you need to use security-sensitive information in your pipeline scripts (like a password), you can pass them in using Kubernetes [secrets]({{}}/rancher/v2.6/en/k8s-in-rancher/secrets/). + +### Prerequisite +Create a secret in the same project as your pipeline, or explicitly in the namespace where pipeline build pods run. +
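+
+For reference, a secret matching the names used in the YAML example further below might look like the following. This is a minimal sketch only; the secret name `my-secret`, key `secret-key`, value, and namespace are illustrative, and the namespace must be the one where your pipeline build pods run:
+
+```yaml
+# Illustrative Kubernetes Secret for use in a pipeline step
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-secret            # referenced as sourceName in the pipeline YAML
+  namespace: my-project-ns   # assumed namespace; replace with your project namespace
+type: Opaque
+stringData:
+  secret-key: my-password    # referenced as sourceKey in the pipeline YAML
+```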
    + +>**Note:** Secret injection is disabled on [pull request events](#triggers-and-trigger-rules). + +### Configuring Secrets by UI + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. From the pipeline for which you want to edit build triggers, select **⋮ > Edit Config**. +1. Within one of the stages, find the **step** that you want to use a secret for, click the **Edit** icon. +1. Click **Show advanced options**. +1. Click **Add From Secret**. Select the secret file that you want to use. Then choose a key. Optionally, you can enter an alias for the key. +1. Click **Save**. + +### Configuring Secrets by YAML + +```yaml +# example +stages: + - name: Build something + steps: + - runScriptConfig: + image: busybox + shellScript: echo ${ALIAS_ENV} + # environment variables from project secrets + envFrom: + - sourceName: my-secret + sourceKey: secret-key + targetKey: ALIAS_ENV +``` + +# Pipeline Variable Substitution Reference + +For your convenience, the following variables are available for your pipeline configuration scripts. During pipeline executions, these variables are replaced by metadata. You can reference them in the form of `${VAR_NAME}`. + +Variable Name | Description +------------------------|------------------------------------------------------------ +`CICD_GIT_REPO_NAME` | Repository name (Github organization omitted). +`CICD_GIT_URL` | URL of the Git repository. +`CICD_GIT_COMMIT` | Git commit ID being executed. +`CICD_GIT_BRANCH` | Git branch of this event. +`CICD_GIT_REF` | Git reference specification of this event. +`CICD_GIT_TAG` | Git tag name, set on tag event. +`CICD_EVENT` | Event that triggered the build (`push`, `pull_request` or `tag`). +`CICD_PIPELINE_ID` | Rancher ID for the pipeline. +`CICD_EXECUTION_SEQUENCE` | Build number of the pipeline. +`CICD_EXECUTION_ID` | Combination of `{CICD_PIPELINE_ID}-{CICD_EXECUTION_SEQUENCE}`. +`CICD_REGISTRY` | Address for the Docker registry for the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. +`CICD_IMAGE` | Name of the image built from the previous publish image step, available in the Kubernetes manifest file of a `Deploy YAML` step. It does not contain the image tag.

    [Example](https://github.com/rancher/pipeline-example-go/blob/master/deployment.yaml)
+
+# Global Pipeline Execution Settings
+
+After configuring a version control provider, there are several options that can be configured globally to control how pipelines are executed in Rancher.
+
+### Changing Pipeline Settings
+
+> **Prerequisite:** Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported.
+>
+> 1. In the upper left corner, click **☰ > Global Settings**.
+> 1. Click **Feature Flags**.
+> 1. Go to the `legacy` feature flag and click **⋮ > Activate**.
+
+To edit these settings:
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. Go to the cluster where you want to configure pipelines and click **Explore**.
+1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines.
+1. In the left navigation bar, click **Legacy > Project > Pipelines**.
+
+The following settings can be changed:
+
+- [Executor Quota](#executor-quota)
+- [Resource Quota for Executors](#resource-quota-for-executors)
+- [Custom CA](#custom-ca)
+
+### Executor Quota
+
+Select the maximum number of pipeline executors. The _executor quota_ decides how many builds can run simultaneously in the project. If the number of triggered builds exceeds the quota, subsequent builds will queue until a vacancy opens. By default, the quota is `2`. A value of `0` or less removes the quota limit.
+
+### Resource Quota for Executors
+
+Configure compute resources for Jenkins agent containers. When a pipeline execution is triggered, a build pod is dynamically provisioned to run your CI tasks. Under the hood, a build pod consists of one Jenkins agent container and one container for each pipeline step. You can [manage compute resources](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) for every container in the pod.
+
+Edit the **Memory Reservation**, **Memory Limit**, **CPU Reservation** or **CPU Limit**, then click **Update Limit and Reservation**.
+
+To configure compute resources for pipeline-step containers, edit the `.rancher-pipeline.yml` file. In a step, you can provide the following information:
+
+* **CPU Reservation (`CpuRequest`)**: CPU request for the container of a pipeline step.
+* **CPU Limit (`CpuLimit`)**: CPU limit for the container of a pipeline step.
+* **Memory Reservation (`MemoryRequest`)**: Memory request for the container of a pipeline step.
+* **Memory Limit (`MemoryLimit`)**: Memory limit for the container of a pipeline step.
+
+```yaml
+# example
+stages:
+  - name: Build something
+    steps:
+      - runScriptConfig:
+          image: busybox
+          shellScript: ls
+        cpuRequest: 100m
+        cpuLimit: 1
+        memoryRequest: 100Mi
+        memoryLimit: 1Gi
+      - publishImageConfig:
+          dockerfilePath: ./Dockerfile
+          buildContext: .
+          tag: repo/app:v1
+        cpuRequest: 100m
+        cpuLimit: 1
+        memoryRequest: 100Mi
+        memoryLimit: 1Gi
+```
+
+>**Note:** Rancher sets default compute resources for pipeline steps except for `Build and Publish Images` and `Run Script` steps. You can override the default values by specifying compute resources in the same way.
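+
+For example, a sketch of overriding the defaults for a `Deploy YAML` step, assuming the step accepts the same step-level resource fields shown above; the `applyYamlConfig` path and the resource values are illustrative:
+
+```yaml
+# example: overriding default compute resources for a Deploy YAML step
+stages:
+  - name: Deploy
+    steps:
+      - applyYamlConfig:
+          path: ./deployment.yaml
+        cpuRequest: 50m
+        cpuLimit: 500m
+        memoryRequest: 64Mi
+        memoryLimit: 256Mi
+```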
+ +### Custom CA + +If you want to use a version control provider with a certificate from a custom/internal CA root, the CA root certificates need to be added as part of the version control provider configuration in order for the pipeline build pods to succeed. + +1. Click **Edit cacerts**. + +1. Paste in the CA root certificates and click **Save cacerts**. + +**Result:** Pipelines can be used and new pods will be able to work with the self-signed-certificate. + +# Persistent Data for Pipeline Components + +The internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +For details on setting up persistent storage for pipelines, refer to [this page.]({{}}/rancher/v2.6/en/pipelines/storage) + +# Example rancher-pipeline.yml + +An example pipeline configuration file is on [this page.]({{}}/rancher/v2.6/en/pipelines/example) diff --git a/content/rancher/v2.6/en/pipelines/example-repos/_index.md b/versioned_docs/version-2.6/pipelines/example-repos/example-repos.md similarity index 100% rename from content/rancher/v2.6/en/pipelines/example-repos/_index.md rename to versioned_docs/version-2.6/pipelines/example-repos/example-repos.md diff --git a/content/rancher/v2.6/en/pipelines/example/_index.md b/versioned_docs/version-2.6/pipelines/example/example.md similarity index 100% rename from content/rancher/v2.6/en/pipelines/example/_index.md rename to versioned_docs/version-2.6/pipelines/example/example.md diff --git a/versioned_docs/version-2.6/pipelines/pipelines.md b/versioned_docs/version-2.6/pipelines/pipelines.md new file mode 100644 index 0000000000..8b0fcca0c7 --- /dev/null +++ b/versioned_docs/version-2.6/pipelines/pipelines.md @@ -0,0 +1,271 @@ +--- +title: Pipelines +weight: 10 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +> As of Rancher v2.5, Git-based deployment pipelines are now deprecated. We recommend handling pipelines with Rancher Continuous Delivery powered by [Fleet]({{}}/rancher/v2.6/en/deploy-across-clusters/fleet). To get to Fleet in Rancher, click ☰ > Continuous Delivery. +> +>**Notice:** +> +> - Pipelines in Kubernetes 1.21+ are no longer supported. +> - Fleet does not replace Rancher pipelines; the distinction is that Rancher pipelines are now powered by Fleet. + +Rancher's pipeline provides a simple CI/CD experience. Use it to automatically checkout code, run builds or scripts, publish Docker images or catalog applications, and deploy the updated software to users. + +Setting up a pipeline can help developers deliver new software as quickly and efficiently as possible. Using Rancher, you can integrate with a GitHub repository to setup a continuous integration (CI) pipeline. + +After configuring Rancher and GitHub, you can deploy containers running Jenkins to automate a pipeline execution: + +- Build your application from code to image. +- Validate your builds. +- Deploy your build images to your cluster. +- Run unit tests. +- Run regression tests. + +>**Note:** Rancher's pipeline provides a simple CI/CD experience, but it does not offer the full power and flexibility of and is not a replacement of enterprise-grade Jenkins or other CI tools your team uses. 
+ +This section covers the following topics: + +- [Concepts](#concepts) +- [How Pipelines Work](#how-pipelines-work) +- [Roles-based Access Control for Pipelines](#roles-based-access-control-for-pipelines) +- [Setting up Pipelines](#setting-up-pipelines) + - [Configure version control providers](#1-configure-version-control-providers) + - [Configure repositories](#2-configure-repositories) + - [Configure the pipeline](#3-configure-the-pipeline) +- [Pipeline Configuration Reference](#pipeline-configuration-reference) +- [Running your Pipelines](#running-your-pipelines) +- [Triggering a Pipeline](#triggering-a-pipeline) + - [Modifying the Event Triggers for the Repository](#modifying-the-event-triggers-for-the-repository) + +# Concepts + +For an explanation of concepts and terminology used in this section, refer to [this page.]({{}}/rancher/v2.6/en/pipelines/concepts) + +# How Pipelines Work + +After enabling the ability to use pipelines in a project, you can configure multiple pipelines in each project. Each pipeline is unique and can be configured independently. + +A pipeline is configured off of a group of files that are checked into source code repositories. Users can configure their pipelines either through the Rancher UI or by adding a `.rancher-pipeline.yml` into the repository. + +Before pipelines can be configured, you will need to configure authentication to your version control provider, e.g. GitHub, GitLab, Bitbucket. If you haven't configured a version control provider, you can always use [Rancher's example repositories]({{}}/rancher/v2.6/en/pipelines/example-repos/) to view some common pipeline deployments. + +When you configure a pipeline in one of your projects, a namespace specifically for the pipeline is automatically created. The following components are deployed to it: + + - **Jenkins:** + + The pipeline's build engine. Because project users do not directly interact with Jenkins, it's managed and locked. + + >**Note:** There is no option to use existing Jenkins deployments as the pipeline engine. + + - **Docker Registry:** + + Out-of-the-box, the default target for your build-publish step is an internal Docker Registry. However, you can make configurations to push to a remote registry instead. The internal Docker Registry is only accessible from cluster nodes and cannot be directly accessed by users. Images are not persisted beyond the lifetime of the pipeline and should only be used in pipeline runs. If you need to access your images outside of pipeline runs, please push to an external registry. + + - **Minio:** + + Minio storage is used to store the logs for pipeline executions. + + >**Note:** The managed Jenkins instance works statelessly, so don't worry about its data persistency. The Docker Registry and Minio instances use ephemeral volumes by default, which is fine for most use cases. If you want to make sure pipeline logs can survive node failures, you can configure persistent volumes for them, as described in [data persistency for pipeline components]({{}}/rancher/v2.6/en/pipelines/storage). + +# Roles-based Access Control for Pipelines + +If you can access a project, you can enable repositories to start building pipelines. 
+ +Only [administrators]({{}}/rancher/v2.6/en/admin-settings/rbac/global-permissions/), [cluster owners or members]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#cluster-roles), or [project owners]({{}}/rancher/v2.6/en/admin-settings/rbac/cluster-project-roles/#project-roles) can configure version control providers and manage global pipeline execution settings. + +Project members can only configure repositories and pipelines. + +# Setting up Pipelines + +### Prerequisite + +> **Prerequisite:** Because the pipelines app was deprecated in favor of Fleet, you will need to turn on the feature flag for legacy features before using pipelines. Note that pipelines in Kubernetes 1.21+ are no longer supported. +> +> 1. In the upper left corner, click **☰ > Global Settings**. +> 1. Click **Feature Flags**. +> 1. Go to the `legacy` feature flag and click **⋮ > Activate**. + +1. [Configure version control providers](#1-configure-version-control-providers) +2. [Configure repositories](#2-configure-repositories) +3. [Configure the pipeline](#3-configure-the-pipeline) + +### 1. Configure Version Control Providers + +Before you can start configuring a pipeline for your repository, you must configure and authorize a version control provider: + +- GitHub +- GitLab +- Bitbucket + +Select your provider's tab below and follow the directions. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Follow the directions displayed to **Setup a Github application**. Rancher redirects you to Github to set up an OAuth App in Github. +1. From GitHub, copy the **Client ID** and **Client Secret**. Paste them into Rancher. +1. If you're using GitHub for enterprise, select **Use a private github enterprise installation**. Enter the host address of your GitHub installation. +1. Click **Authenticate**. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **GitLab**. +1. Follow the directions displayed to **Setup a GitLab application**. Rancher redirects you to GitLab. +1. From GitLab, copy the **Application ID** and **Secret**. Paste them into Rancher. +1. If you're using GitLab for enterprise setup, select **Use a private gitlab enterprise installation**. Enter the host address of your GitLab installation. +1. Click **Authenticate**. + +>**Note:** +> 1. Pipeline uses Gitlab [v4 API](https://docs.gitlab.com/ee/api/v3_to_v4.html) and the supported Gitlab version is 9.0+. +> 2. If you use GitLab 10.7+ and your Rancher setup is in a local network, enable the **Allow requests to the local network from hooks and services** option in GitLab admin settings. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. 
In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **Bitbucket** and leave **Use Bitbucket Cloud** selected by default. +1. Follow the directions displayed to **Setup a Bitbucket Cloud application**. Rancher redirects you to Bitbucket to setup an OAuth consumer in Bitbucket. +1. From Bitbucket, copy the consumer **Key** and **Secret**. Paste them into Rancher. +1. Click **Authenticate**. + + + + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click the **Configuration** tab. +1. Click **Bitbucket** and choose the **Use private Bitbucket Server setup** option. +1. Follow the directions displayed to **Setup a Bitbucket Server application**. +1. Enter the host address of your Bitbucket server installation. +1. Click **Authenticate**. + +>**Note:** +> Bitbucket server needs to do SSL verification when sending webhooks to Rancher. Please ensure that Rancher server's certificate is trusted by the Bitbucket server. There are two options: +> +> 1. Setup Rancher server with a certificate from a trusted CA. +> 1. If you're using self-signed certificates, import Rancher server's certificate to the Bitbucket server. For instructions, see the Bitbucket server documentation for [configuring self-signed certificates](https://confluence.atlassian.com/bitbucketserver/if-you-use-self-signed-certificates-938028692.html). +> + + + + +**Result:** After the version control provider is authenticated, you will be automatically re-directed to start configuring which repositories you want start using with a pipeline. + +### 2. Configure Repositories + +After the version control provider is authorized, you are automatically re-directed to start configuring which repositories that you want start using pipelines with. Even if someone else has set up the version control provider, you will see their repositories and can build a pipeline. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Click on **Configure Repositories**. + +1. A list of repositories are displayed. If you are configuring repositories the first time, click on **Authorize & Fetch Your Own Repositories** to fetch your repository list. + +1. For each repository that you want to set up a pipeline, click on **Enable**. + +1. When you're done enabling all your repositories, click on **Done**. + +**Results:** You have a list of repositories that you can start configuring pipelines for. + +### 3. Configure the Pipeline + +Now that repositories are added to your project, you can start configuring the pipeline by adding automated stages and steps. For your convenience, there are multiple built-in step types for dedicated tasks. + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. 
In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Find the repository that you want to set up a pipeline for. +1. Configure the pipeline through the UI or using a yaml file in the repository, i.e. `.rancher-pipeline.yml` or `.rancher-pipeline.yaml`. Pipeline configuration is split into stages and steps. Stages must fully complete before moving onto the next stage, but steps in a stage run concurrently. For each stage, you can add different step types. Note: As you build out each step, there are different advanced options based on the step type. Advanced options include trigger rules, environment variables, and secrets. For more information on configuring the pipeline through the UI or the YAML file, refer to the [pipeline configuration reference.]({{}}/rancher/v2.6/en/pipelines/config) + + * If you are going to use the UI, select the vertical **⋮ > Edit Config** to configure the pipeline using the UI. After the pipeline is configured, you must view the YAML file and push it to the repository. + * If you are going to use the YAML file, select the vertical **⋮ > View/Edit YAML** to configure the pipeline. If you choose to use a YAML file, you need to push it to the repository after any changes in order for it to be updated in the repository. When editing the pipeline configuration, it takes a few moments for Rancher to check for an existing pipeline configuration. + +1. Select which `branch` to use from the list of branches. + +1. Optional: Set up notifications. + +1. Set up the trigger rules for the pipeline. + +1. Enter a **Timeout** for the pipeline. + +1. When all the stages and steps are configured, click **Done**. + +**Results:** Your pipeline is now configured and ready to be run. + + +# Pipeline Configuration Reference + +Refer to [this page]({{}}/rancher/v2.6/en/pipelines/config) for details on how to configure a pipeline to: + +- Run a script +- Build and publish images +- Publish catalog templates +- Deploy YAML +- Deploy a catalog app + +The configuration reference also covers how to configure: + +- Notifications +- Timeouts +- The rules that trigger a pipeline +- Environment variables +- Secrets + + +# Running your Pipelines + +Run your pipeline for the first time. Find your pipeline and select the vertical **⋮ > Run**. + +During this initial run, your pipeline is tested, and the following pipeline components are deployed to your project as workloads in a new namespace dedicated to the pipeline: + +- `docker-registry` +- `jenkins` +- `minio` + +This process takes several minutes. When it completes, you can view each pipeline component from the project **Workloads** tab. + +# Triggering a Pipeline + +When a repository is enabled, a webhook is automatically set in the version control provider. By default, the pipeline is triggered by a **push** event to a repository, but you can modify the event(s) that trigger running the pipeline. + +Available Events: + +* **Push**: Whenever a commit is pushed to the branch in the repository, the pipeline is triggered. +* **Pull Request**: Whenever a pull request is made to the repository, the pipeline is triggered. +* **Tag**: When a tag is created in the repository, the pipeline is triggered. + +> **Note:** This option doesn't exist for Rancher's [example repositories]({{}}/rancher/v2.6/en/pipelines/example-repos/). + +### Modifying the Event Triggers for the Repository + +1. In the upper left corner, click **☰ > Cluster Management**. +1. Go to the cluster where you want to configure pipelines and click **Explore**. +1. 
In the dropdown menu in the top navigation bar, select the project where you want to configure pipelines. +1. In the left navigation bar, click **Legacy > Project > Pipelines**. +1. Find the repository where you want to modify the event triggers. Select the vertical **⋮ > Setting**. +1. Select which event triggers (**Push**, **Pull Request** or **Tag**) you want for the repository. +1. Click **Save**. diff --git a/versioned_docs/version-2.6/pipelines/storage/storage.md b/versioned_docs/version-2.6/pipelines/storage/storage.md new file mode 100644 index 0000000000..e22bdc5b51 --- /dev/null +++ b/versioned_docs/version-2.6/pipelines/storage/storage.md @@ -0,0 +1,94 @@ +--- +title: Configuring Persistent Data for Pipeline Components +weight: 600 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The pipelines' internal Docker registry and the Minio workloads use ephemeral volumes by default. This default storage works out-of-the-box and makes testing easy, but you lose the build images and build logs if the node running the Docker Registry or Minio fails. In most cases this is fine. If you want build images and logs to survive node failures, you can configure the Docker Registry and Minio to use persistent volumes. + +This section assumes that you understand how persistent storage works in Kubernetes. For more information, refer to the section on [how storage works.]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/how-storage-works/) + +>**Prerequisites (for both parts A and B):** +> +>[Persistent volumes]({{}}/rancher/v2.6/en/cluster-admin/volumes-and-storage/) must be available for the cluster. + +### A. Configuring Persistent Data for Docker Registry + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. + +1. Find the `docker-registry` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the dropdown. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/var/lib/registry`, which is the data storage path inside the Docker registry container. + +1. Click **Upgrade**. + +### B. Configuring Persistent Data for Minio + +1. Click **☰ > Cluster Management**. +1. Go to the cluster that you created and click **Explore**. +1. Click **Workload**. +1. Go to the `minio` workload and select **⋮ > Edit**. + +1. Scroll to the **Volumes** section and expand it. 
Make one of the following selections from the **Add Volume** menu, which is near the bottom of the section: + + - **Add Volume > Add a new persistent volume (claim)** + - **Add Volume > Use an existing persistent volume (claim)** + +1. Complete the form that displays to choose a persistent volume for the internal Docker registry. + + + + 1. Enter a **Name** for the volume claim. + 1. Select a volume claim **Source**: + - If you select **Use a Storage Class to provision a new persistent volume**, select a storage class and enter a **Capacity**. + - If you select **Use an existing persistent volume**, choose a **Persistent Volume** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + + 1. Enter a **Name** for the volume claim. + 1. Choose a **Persistent Volume Claim** from the drop-down. + 1. From the **Customize** section, choose the read/write access for the volume. + 1. Click **Define**. + + + + +1. From the **Mount Point** field, enter `/data`, which is the data storage path inside the Minio container. + +1. Click **Upgrade**. + +**Result:** Persistent storage is configured for your pipeline components. diff --git a/content/rancher/v2.6/en/project-admin/namespaces/_index.md b/versioned_docs/version-2.6/project-admin/namespaces/namespaces.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/namespaces/_index.md rename to versioned_docs/version-2.6/project-admin/namespaces/namespaces.md diff --git a/content/rancher/v2.6/en/project-admin/pipelines/_index.md b/versioned_docs/version-2.6/project-admin/pipelines/pipelines.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/pipelines/_index.md rename to versioned_docs/version-2.6/project-admin/pipelines/pipelines.md diff --git a/content/rancher/v2.6/en/project-admin/pod-security-policies/_index.md b/versioned_docs/version-2.6/project-admin/pod-security-policies/pod-security-policies.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/pod-security-policies/_index.md rename to versioned_docs/version-2.6/project-admin/pod-security-policies/pod-security-policies.md diff --git a/content/rancher/v2.6/en/project-admin/_index.md b/versioned_docs/version-2.6/project-admin/project-admin.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/_index.md rename to versioned_docs/version-2.6/project-admin/project-admin.md diff --git a/content/rancher/v2.6/en/project-admin/project-members/_index.md b/versioned_docs/version-2.6/project-admin/project-members/project-members.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/project-members/_index.md rename to versioned_docs/version-2.6/project-admin/project-members/project-members.md diff --git a/versioned_docs/version-2.6/project-admin/resource-quotas/override-container-default/override-container-default.md b/versioned_docs/version-2.6/project-admin/resource-quotas/override-container-default/override-container-default.md new file mode 100644 index 0000000000..f059b7b51f --- /dev/null +++ b/versioned_docs/version-2.6/project-admin/resource-quotas/override-container-default/override-container-default.md @@ -0,0 +1,40 @@ +--- +title: Setting Container Default Resource Limits +weight: 3 +--- + +When setting resource quotas, if you set anything related to CPU or Memory (i.e. limits or reservations) on a project / namespace, all containers will require a respective CPU or Memory field set during creation. 
See the [Kubernetes documentation](https://kubernetes.io/docs/concepts/policy/resource-quotas/#requests-vs-limits) for more details on why this is required.
+
+To avoid setting these limits on every container during workload creation, a default container resource limit can be specified on the namespace.
+
+### Editing the Container Default Resource Limit
+
+Edit the container default resource limit when:
+
+- You have a CPU or Memory resource quota set on a project, and want to supply the corresponding default values for a container.
+- You want to edit the existing default container resource limit.
+
+1. In the upper left corner, click **☰ > Cluster Management**.
+1. On the **Clusters** page, go to the cluster where you want to edit the default resource limit and click **Explore**.
+1. Click **Cluster > Projects/Namespaces**.
+1. Find the project whose container default resource limit you want to edit. From that project, select **⋮ > Edit Config**.
+1. Expand **Container Default Resource Limit** and edit the values.
+
+### Resource Limit Propagation
+
+When the default container resource limit is set at the project level, it is propagated to any namespace created in the project after the limit has been set. It is not automatically propagated to namespaces that already exist in the project; for those, you will need to set the default container resource limit manually in order for it to be used when creating containers.
+
+You can set a default container resource limit on a project and launch any catalog applications.
+
+Once a container default resource limit is configured on a namespace, the default is pre-populated for any containers created in that namespace. These limits/reservations can always be overridden during workload creation.
+
+### Container Resource Quota Types
+
+The following resource limits can be configured:
+
+| Resource Type | Description |
+| ------------------ | --------------------------------------------------------------------------------- |
+| CPU Limit | The maximum amount of CPU (in [millicores](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu)) allocated to the container. |
+| CPU Reservation | The minimum amount of CPU (in millicores) guaranteed to the container. |
+| Memory Limit | The maximum amount of memory (in bytes) allocated to the container. |
+| Memory Reservation | The minimum amount of memory (in bytes) guaranteed to the container. |
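+
+These defaults behave like a Kubernetes [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/) applied to the namespace. The following is a minimal illustrative sketch only; the object name, namespace, and values are placeholders, and the exact object Rancher manages may differ:
+
+```yaml
+# Illustrative LimitRange expressing the same kind of defaults
+apiVersion: v1
+kind: LimitRange
+metadata:
+  name: container-default-resource-limit   # assumed name
+  namespace: my-namespace                   # assumed namespace
+spec:
+  limits:
+    - type: Container
+      default:            # corresponds to CPU Limit / Memory Limit above
+        cpu: 500m
+        memory: 256Mi
+      defaultRequest:     # corresponds to CPU Reservation / Memory Reservation above
+        cpu: 100m
+        memory: 128Mi
+```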
\ No newline at end of file diff --git a/content/rancher/v2.6/en/project-admin/resource-quotas/override-namespace-default/_index.md b/versioned_docs/version-2.6/project-admin/resource-quotas/override-namespace-default/override-namespace-default.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/resource-quotas/override-namespace-default/_index.md rename to versioned_docs/version-2.6/project-admin/resource-quotas/override-namespace-default/override-namespace-default.md diff --git a/content/rancher/v2.6/en/project-admin/resource-quotas/quota-type-reference/_index.md b/versioned_docs/version-2.6/project-admin/resource-quotas/quota-type-reference/quota-type-reference.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/resource-quotas/quota-type-reference/_index.md rename to versioned_docs/version-2.6/project-admin/resource-quotas/quota-type-reference/quota-type-reference.md diff --git a/content/rancher/v2.6/en/project-admin/resource-quotas/quotas-for-projects/_index.md b/versioned_docs/version-2.6/project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/resource-quotas/quotas-for-projects/_index.md rename to versioned_docs/version-2.6/project-admin/resource-quotas/quotas-for-projects/quotas-for-projects.md diff --git a/content/rancher/v2.6/en/project-admin/resource-quotas/_index.md b/versioned_docs/version-2.6/project-admin/resource-quotas/resource-quotas.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/resource-quotas/_index.md rename to versioned_docs/version-2.6/project-admin/resource-quotas/resource-quotas.md diff --git a/content/rancher/v2.6/en/project-admin/tools/_index.md b/versioned_docs/version-2.6/project-admin/tools/tools.md similarity index 100% rename from content/rancher/v2.6/en/project-admin/tools/_index.md rename to versioned_docs/version-2.6/project-admin/tools/tools.md diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-marketplace-qs/amazon-aws-marketplace-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-marketplace-qs/amazon-aws-marketplace-qs.md new file mode 100644 index 0000000000..bac47239b4 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-marketplace-qs/amazon-aws-marketplace-qs.md @@ -0,0 +1,7 @@ +--- +title: Rancher AWS Marketplace Quick Start +description: Use Amazon EKS to deploy Rancher server. +weight: 110 +--- + +There is now an additional way for you to deploy the Rancher server in AWS by using Amazon EKS. To learn more, see our [Amazon Marketplace listing](https://aws.amazon.com/marketplace/pp/prodview-2yzbnvagmi4as). \ No newline at end of file diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md new file mode 100644 index 0000000000..f5e80d1009 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/amazon-aws-qs/amazon-aws-qs.md @@ -0,0 +1,87 @@ +--- +title: Rancher AWS Quick Start Guide +description: Read this step by step Rancher AWS guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 100 +--- +The following steps will quickly deploy a Rancher server on AWS in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. 
+ +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +>**Note** +>Deploying to Amazon AWS will incur charges. + +- [Amazon AWS Account](https://aws.amazon.com/account/): An Amazon AWS Account is required to create resources for deploying Rancher and Kubernetes. +- [Amazon AWS Access Key](https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html): Use this link to follow a tutorial to create an Amazon AWS Access Key if you don't have one yet. +- [IAM Policy created](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html#access_policies_create-start): Defines the permissions an account attached with this policy has. +- Install [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Amazon AWS. + +### Example IAM Policy + +The AWS module just creates an EC2 KeyPair, an EC2 SecurityGroup and an EC2 instance. A simple policy would be: + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*" + } + ] +} +``` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the AWS folder containing the terraform files by executing `cd quickstart/aws`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `aws_access_key` - Amazon AWS Access Key + - `aws_secret_key` - Amazon AWS Secret Key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [AWS Quickstart Readme](https://github.com/rancher/quickstart/tree/master/aws) for more information. +Suggestions include: + - `aws_region` - Amazon AWS region, choose the closest instead of the default (`us-east-1`) + - `prefix` - Prefix for all created resources + - `instance_type` - EC2 instance size used, minimum is `t3a.medium` but `t3a.large` or `t3a.xlarge` could be used if within budget + - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/aws`. 
+ +##### Result + +Two Kubernetes clusters are deployed into your AWS account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +## What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/aws` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/_index.md b/versioned_docs/version-2.6/quick-start-guide/deployment/deployment.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/deployment/_index.md rename to versioned_docs/version-2.6/quick-start-guide/deployment/deployment.md diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md new file mode 100644 index 0000000000..8afb42b9d3 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/digital-ocean-qs/digital-ocean-qs.md @@ -0,0 +1,68 @@ +--- +title: Rancher DigitalOcean Quick Start Guide +description: Read this step by step Rancher DigitalOcean guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 120 +--- +The following steps will quickly deploy a Rancher server on DigitalOcean in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +>**Note** +>Deploying to DigitalOcean will incur charges. + +- [DigitalOcean Account](https://www.digitalocean.com): You will require an account on DigitalOcean as this is where the server and cluster will run. +- [DigitalOcean Access Key](https://www.digitalocean.com/community/tutorials/how-to-create-a-digitalocean-space-and-api-key): Use this link to create a DigitalOcean Access Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to DigitalOcean. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the DigitalOcean folder containing the terraform files by executing `cd quickstart/do`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `do_token` - DigitalOcean access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. 
+See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [DO Quickstart Readme](https://github.com/rancher/quickstart/tree/master/do) for more information. +Suggestions include: + - `do_region` - DigitalOcean region, choose the closest instead of the default (`nyc1`) + - `prefix` - Prefix for all created resources + - `droplet_size` - Droplet size used, minimum is `s-2vcpu-4gb` but `s-4vcpu-8gb` could be used if within budget + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/do`. + +#### Result + +Two Kubernetes clusters are deployed into your DigitalOcean account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/do` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/deployment/equinix-metal-qs/_index.md b/versioned_docs/version-2.6/quick-start-guide/deployment/equinix-metal-qs/equinix-metal-qs.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/deployment/equinix-metal-qs/_index.md rename to versioned_docs/version-2.6/quick-start-guide/deployment/equinix-metal-qs/equinix-metal-qs.md diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md new file mode 100644 index 0000000000..b589f810c4 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/google-gcp-qs/google-gcp-qs.md @@ -0,0 +1,70 @@ +--- +title: Rancher GCP Quick Start Guide +description: Read this step by step Rancher GCP guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 130 +--- +The following steps will quickly deploy a Rancher server on GCP in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +>**Note** +>Deploying to Google GCP will incur charges. 
+ +- [Google GCP Account](https://console.cloud.google.com/): A Google GCP Account is required to create resources for deploying Rancher and Kubernetes. +- [Google GCP Project](https://cloud.google.com/appengine/docs/standard/nodejs/building-app/creating-project): Use this link to follow a tutorial to create a GCP Project if you don't have one yet. +- [Google GCP Service Account](https://cloud.google.com/iam/docs/creating-managing-service-account-keys): Use this link and follow instructions to create a GCP service account and token file. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Google GCP. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the GCP folder containing the terraform files by executing `cd quickstart/gcp`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `gcp_account_json` - GCP service account file path and file name + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [GCP Quickstart Readme](https://github.com/rancher/quickstart/tree/master/gcp) for more information. +Suggestions include: + - `gcp_region` - Google GCP region, choose the closest instead of the default (`us-east4`) + - `gcp_zone` - Google GCP zone, choose the closest instead of the default (`us-east4-a`) + - `prefix` - Prefix for all created resources + - `machine_type` - Compute instance size used, minimum is `n1-standard-1` but `n1-standard-2` or `n1-standard-4` could be used if within budget + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/gcp`. + +#### Result + +Two Kubernetes clusters are deployed into your GCP account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/gcp` folder, execute `terraform destroy --auto-approve`. + +2. 
Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/hetzner-cloud-qs/hetzner-cloud-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/hetzner-cloud-qs/hetzner-cloud-qs.md new file mode 100644 index 0000000000..ea7a2bbf1b --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/hetzner-cloud-qs/hetzner-cloud-qs.md @@ -0,0 +1,68 @@ +--- +title: Rancher Hetzner Cloud Quick Start Guide +description: Read this step by step Rancher Hetzner Cloud guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 140 +--- +The following steps will quickly deploy a Rancher server on Hetzner Cloud in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +>**Note** +>Deploying to Hetzner Cloud will incur charges. + +- [Hetzner Cloud Account](https://www.hetzner.com): You will require an account on Hetzner as this is where the server and cluster will run. +- [Hetzner API Access Key](https://docs.hetzner.cloud/#getting-started): Use these instructions to create a Hetzner Cloud API Key if you don't have one. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster to Hetzner. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the Hetzner folder containing the terraform files by executing `cd quickstart/hcloud`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. Edit `terraform.tfvars` and customize the following variables: + - `hcloud_token` - Hetzner API access key + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Hetzner Quickstart Readme](https://github.com/rancher/quickstart/tree/master/hcloud) for more information. +Suggestions include: + - `prefix` - Prefix for all created resources + - `instance_type` - Instance type, minimum required is `cx21` + - `hcloud_location` - Hetzner Cloud location, choose the closest instead of the default (`fsn1`) + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 15 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. 
ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/hcloud`. + +#### Result + +Two Kubernetes clusters are deployed into your Hetzner account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/hcloud` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md b/versioned_docs/version-2.6/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md new file mode 100644 index 0000000000..ec8d16a475 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/microsoft-azure-qs/microsoft-azure-qs.md @@ -0,0 +1,76 @@ +--- +title: Rancher Azure Quick Start Guide +description: Read this step by step Rancher Azure guide to quickly deploy a Rancher server with a single-node downstream Kubernetes cluster attached. +weight: 115 +--- + +The following steps will quickly deploy a Rancher server on Azure in a single-node K3s Kubernetes cluster, with a single-node downstream Kubernetes cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +>**Note** +>Deploying to Microsoft Azure will incur charges. + +- [Microsoft Azure Account](https://azure.microsoft.com/en-us/free/): A Microsoft Azure Account is required to create resources for deploying Rancher and Kubernetes. +- [Microsoft Azure Subscription](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/create-subscription#create-a-subscription-in-the-azure-portal): Use this link to follow a tutorial to create a Microsoft Azure subscription if you don't have one yet. +- [Microsoft Azure Tenant](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-create-new-tenant): Use this link and follow instructions to create a Microsoft Azure tenant. +- [Microsoft Azure Client ID/Secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal): Use this link and follow instructions to create a Microsoft Azure client and secret. +- [Terraform](https://www.terraform.io/downloads.html): Used to provision the server and cluster in Microsoft Azure. + + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the Azure folder containing the terraform files by executing `cd quickstart/azure`. + +3. Rename the `terraform.tfvars.example` file to `terraform.tfvars`. + +4. 
Edit `terraform.tfvars` and customize the following variables: + - `azure_subscription_id` - Microsoft Azure Subscription ID + - `azure_client_id` - Microsoft Azure Client ID + - `azure_client_secret` - Microsoft Azure Client Secret + - `azure_tenant_id` - Microsoft Azure Tenant ID + - `rancher_server_admin_password` - Admin password for created Rancher server + +5. **Optional:** Modify optional variables within `terraform.tfvars`. +See the [Quickstart Readme](https://github.com/rancher/quickstart) and the [Azure Quickstart Readme](https://github.com/rancher/quickstart/tree/master/azure) for more information. +Suggestions include: + - `azure_location` - Microsoft Azure region, choose the closest instead of the default (`East US`) + - `prefix` - Prefix for all created resources + - `instance_type` - Compute instance size used, minimum is `Standard_DS2_v2` but `Standard_DS2_v3` or `Standard_DS3_v2` could be used if within budget + - `add_windows_node` - If true, an additional Windows worker node is added to the workload cluster + - `windows_admin_password` - The admin password of the windows worker node + +6. Run `terraform init`. + +7. To initiate the creation of the environment, run `terraform apply --auto-approve`. Then wait for output similar to the following: + + ``` + Apply complete! Resources: 16 added, 0 changed, 0 destroyed. + + Outputs: + + rancher_node_ip = xx.xx.xx.xx + rancher_server_url = https://rancher.xx.xx.xx.xx.sslip.io + workload_node_ip = yy.yy.yy.yy + ``` + +8. Paste the `rancher_server_url` from the output above into the browser. Log in when prompted (default username is `admin`, use the password set in `rancher_server_admin_password`). +9. ssh to the Rancher Server using the `id_rsa` key generated in `quickstart/azure`. + +#### Result + +Two Kubernetes clusters are deployed into your Azure account, one running Rancher Server and the other ready for experimentation deployments. Please note that while this setup is a great way to explore Rancher functionality, a production setup should follow our high availability setup guidelines. SSH keys for the VMs are auto-generated and stored in the module directory. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{< baseurl >}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/azure` folder, execute `terraform destroy --auto-approve`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md b/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md new file mode 100644 index 0000000000..9bf05e7da9 --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-manual-setup/quickstart-manual-setup.md @@ -0,0 +1,139 @@ +--- +title: Helm CLI Quick Start +weight: 300 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +These instructions capture a quick way to set up a proof-of-concept Rancher installation. + +These instructions assume you have a Linux virtual machine that you will communicate with from your local workstation. Rancher will be installed on the Linux machine. 
You will need to retrieve the IP address of that machine so that you can access Rancher from your local workstation. Rancher is designed to manage Kubernetes clusters remotely, so any Kubernetes cluster that Rancher manages in the future will also need to be able to reach this IP address. + +We don't recommend installing Rancher locally because it creates a networking problem. Installing Rancher on localhost does not allow Rancher to communicate with downstream Kubernetes clusters, so on localhost you wouldn't be able to test Rancher's cluster provisioning or cluster management functionality. + +Your Linux machine can be anywhere. It could be an Amazon EC2 instance, a Digital Ocean droplet, or an Azure virtual machine, to name a few examples. Other Rancher docs often use 'node' as a generic term for all of these. One possible way to deploy a Linux machine is by setting up an Amazon EC2 instance as shown in [this tutorial]({{}}/rancher/v2.6/en/installation/resources/k8s-tutorials/infrastructure-tutorials/ec2-node/). + +The full installation requirements are [here]({{}}/rancher/v2.6/en/installation/requirements/). + + +## Install K3s on Linux + +Install a K3s cluster by running this command on the Linux machine: + +``` +curl -sfL https://get.k3s.io | sh -s - server +``` + +Save the IP of the Linux machine. + +## Save the kubeconfig to your workstation + +The kubeconfig file is important for accessing the Kubernetes cluster. Copy the file at `/etc/rancher/k3s/k3s.yaml` from the Linux machine and save it to your local workstation as `~/.kube/config`. One way to do this is by using the `scp` tool and running this command on your local machine: + + + + +``` +scp root@:/etc/rancher/k3s/k3s.yaml ~/.kube/config +``` + + + + +By default, "scp" is not a recognized command, so we need to install a module first. + +In Windows Powershell: + +``` +Find-Module Posh-SSH +Install-Module Posh-SSH + +## Get the remote kubeconfig file +scp root@:/etc/rancher/k3s/k3s.yaml $env:USERPROFILE\.kube\config +``` + + + + +## Edit the Rancher server URL in the kubeconfig + +In the kubeconfig file, you will need to change the value of the `server` field to `:6443`. The Kubernetes API server will be reached at port 6443, while the Rancher server will be reached at ports 80 and 443. This edit is needed so that when you run Helm or kubectl commands from your local workstation, you will be able to communicate with the Kubernetes cluster that Rancher will be installed on. + + + + +One way to open the kubeconfig file for editing is to use Vim: + +``` +vi ~/.kube/config +``` + +Press `i` to put Vim in insert mode. To save your work, press `Esc`. Then type `:wq` and press `Enter`. + + + + +In Windows Powershell, you can use `notepad.exe` for editing the kubeconfig file: + +``` +notepad.exe $env:USERPROFILE\.kube\config +``` + +Once edited, either press `ctrl+s` or go to `File > Save` to save your work. + + + + +## Install Rancher with Helm + +Then from your local workstation, run the following commands. You will need to have [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) and [Helm](https://helm.sh/docs/intro/install/) installed. 
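+
+Before running them, you can optionally confirm that `kubectl` on your workstation can reach the K3s cluster through the kubeconfig you just edited. This quick sanity check is not part of the official steps; it assumes the kubeconfig was saved to the default `~/.kube/config` location described above:
+
+```
+# Print the API server address kubectl is configured to use
+kubectl cluster-info
+
+# The Linux machine should be listed with a Ready status
+kubectl get nodes
+```
+
+If these commands fail, re-check the `server` field edit before continuing.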
+ +``` +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest + +kubectl create namespace cattle-system + +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.crds.yaml + +helm repo add jetstack https://charts.jetstack.io + +helm repo update + +helm install cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.7.1 + +# Windows Powershell +helm install cert-manager jetstack/cert-manager ` + --namespace cert-manager ` + --create-namespace ` + --version v1.7.1 +``` + +The final command to install Rancher is below. The command requires a domain name that forwards traffic to the Linux machine. For the sake of simplicity in this tutorial, you can use a fake domain name to create your proof-of-concept. An example of a fake domain name would be `.sslip.io`. + +``` +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=.sslip.io \ + --set replicas=1 \ + --set bootstrapPassword= + +# Windows Powershell +helm install rancher rancher-latest/rancher ` + --namespace cattle-system ` + --set hostname=.sslip.io ` + --set replicas=1 ` + --set bootstrapPassword= +``` + +Now if you navigate to `.sslip.io` in a web browser, you should see the Rancher UI. + +To make these instructions simple, we used a fake domain name and self-signed certificates to do this installation. Therefore, you will probably need to add a security exception to your web browser to see the Rancher UI. Note that for production installs, you would need a high-availability setup with a load balancer, a real domain name and real certificates. + +These instructions also left out the full installation requirements and other installation options. If you have any issues with these steps, refer to the full [Helm CLI installation docs]({{}}/rancher/v2.6/en/installation/install-rancher-on-k8s/). + +To launch new Kubernetes clusters with your new Rancher server, you may need to set up cloud credentials in Rancher. For more information, see [Launching Kubernetes clusters with Rancher]({{}}/rancher/v2.6/en/cluster-provisioning/rke-clusters/). diff --git a/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md b/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md new file mode 100644 index 0000000000..86b3bb420c --- /dev/null +++ b/versioned_docs/version-2.6/quick-start-guide/deployment/quickstart-vagrant/quickstart-vagrant.md @@ -0,0 +1,47 @@ +--- +title: Vagrant Quick Start +weight: 200 +--- +The following steps quickly deploy a Rancher Server with a single node cluster attached. + +>**Note:** The intent of these guides is to quickly launch a sandbox that you can use to evaluate Rancher. These guides are not intended for production environments. For comprehensive setup instructions, see [Installation]({{}}/rancher/v2.6/en/installation/). + +## Prerequisites + +- [Vagrant](https://www.vagrantup.com): Vagrant is required, as it is used to provision the machines based on the Vagrantfile. +- [VirtualBox](https://www.virtualbox.org): VirtualBox is required as the provider that Vagrant uses to run the virtual machines. +- At least 4GB of free RAM. + +### Note +- Vagrant will require plugins to create VirtualBox VMs. 
Install them with the following commands: + + `vagrant plugin install vagrant-vboxmanage` + + `vagrant plugin install vagrant-vbguest` + +## Getting Started + +1. Clone [Rancher Quickstart](https://github.com/rancher/quickstart) to a folder using `git clone https://github.com/rancher/quickstart`. + +2. Go into the folder containing the Vagrantfile by executing `cd quickstart/vagrant`. + +3. **Optional:** Edit `config.yaml` to: + + - Change the number of nodes and the memory allocations, if required. (`node.count`, `node.cpus`, `node.memory`) + - Change the password of the `admin` user for logging into Rancher. (`default_password`) + +4. To initiate the creation of the environment, run `vagrant up --provider=virtualbox`. + +5. Once provisioning finishes, go to `https://192.168.56.101` in the browser. The default user/password is `admin/adminPassword`. + +**Result:** Rancher Server and your Kubernetes cluster are installed on VirtualBox. + +### What's Next? + +Use Rancher to create a deployment. For more information, see [Creating Deployments]({{}}/rancher/v2.6/en/quick-start-guide/workload). + +## Destroying the Environment + +1. From the `quickstart/vagrant` folder, execute `vagrant destroy -f`. + +2. Wait for confirmation that all resources have been destroyed. diff --git a/content/rancher/v2.6/en/quick-start-guide/_index.md b/versioned_docs/version-2.6/quick-start-guide/quick-start-guide.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/_index.md rename to versioned_docs/version-2.6/quick-start-guide/quick-start-guide.md diff --git a/content/rancher/v2.6/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md b/versioned_docs/version-2.6/quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/workload/quickstart-deploy-workload-ingress/_index.md rename to versioned_docs/version-2.6/quick-start-guide/workload/quickstart-deploy-workload-ingress/quickstart-deploy-workload-ingress.md diff --git a/content/rancher/v2.6/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md b/versioned_docs/version-2.6/quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md similarity index 100% rename from content/rancher/v2.6/en/quick-start-guide/workload/quickstart-deploy-workload-nodeport/_index.md rename to versioned_docs/version-2.6/quick-start-guide/workload/quickstart-deploy-workload-nodeport/quickstart-deploy-workload-nodeport.md diff --git a/content/rancher/v2.0-v2.4/en/quick-start-guide/workload/_index.md b/versioned_docs/version-2.6/quick-start-guide/workload/workload.md similarity index 100% rename from content/rancher/v2.0-v2.4/en/quick-start-guide/workload/_index.md rename to versioned_docs/version-2.6/quick-start-guide/workload/workload.md diff --git a/versioned_docs/version-2.6/rancher-manager.md b/versioned_docs/version-2.6/rancher-manager.md new file mode 100644 index 0000000000..346e997c01 --- /dev/null +++ b/versioned_docs/version-2.6/rancher-manager.md @@ -0,0 +1,24 @@ +--- +slug: / +weight: 1 +title: "Rancher 2.6" +shortTitle: "Rancher 2.6 (Latest)" +description: "Rancher adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." 
+metaTitle: "Rancher 2.6 Docs: What is New?" +metaDescription: "Rancher 2 adds significant value on top of Kubernetes: managing hundreds of clusters from one interface, centralizing RBAC, enabling monitoring and alerting. Read more." +insertOneSix: false +ctaBanner: 0 +aliases: + - /rancher/v2.x/en/ +--- +Rancher was originally built to work with multiple orchestrators, and it included its own orchestrator called Cattle. With the rise of Kubernetes in the marketplace, Rancher 2 exclusively deploys and manages Kubernetes clusters running anywhere, on any provider. + +Rancher can provision Kubernetes from a hosted provider, provision compute nodes and then install Kubernetes onto them, or import existing Kubernetes clusters running anywhere. + +One Rancher server installation can manage thousands of Kubernetes clusters and thousands of nodes from the same user interface. + +Rancher adds significant value on top of Kubernetes, first by centralizing authentication and role-based access control (RBAC) for all of the clusters, giving global admins the ability to control cluster access from one location. + +It then enables detailed monitoring and alerting for clusters and their resources, ships logs to external providers, and integrates directly with Helm via the Application Catalog. If you have an external CI/CD system, you can plug it into Rancher, but if you don't, Rancher even includes Fleet to help you automatically deploy and upgrade workloads. + +Rancher is a _complete_ container management platform for Kubernetes, giving you the tools to successfully run Kubernetes anywhere. diff --git a/versioned_docs/version-2.6/security/best-practices/best-practices.md b/versioned_docs/version-2.6/security/best-practices/best-practices.md new file mode 100644 index 0000000000..4dc70b3d51 --- /dev/null +++ b/versioned_docs/version-2.6/security/best-practices/best-practices.md @@ -0,0 +1,12 @@ +--- +title: Kubernetes Security Best Practices +weight: 5 +--- + +### Restricting cloud metadata API access + +Cloud providers such as AWS, Azure, DigitalOcean or GCP often expose metadata services locally to instances. By default, this endpoint is accessible by pods running on a cloud instance, including pods in hosted Kubernetes providers such as EKS, AKS, DigitalOcean Kubernetes or GKE, and can contain cloud credentials for that node, provisioning data such as kubelet credentials, or other sensitive data. To mitigate this risk when running on a cloud platform, follow the [Kubernetes security recommendations](https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster/#restricting-cloud-metadata-api-access): limit permissions given to instance credentials, use network policies to restrict pod access to the metadata API, and avoid using provisioning data to deliver secrets. + +It is advised to consult your cloud provider's security best practices for further recommendations and specific details on how to restrict access to cloud instance metadata API. + +Further references: MITRE ATT&CK knowledge base on - [Unsecured Credentials: Cloud Instance Metadata API](https://attack.mitre.org/techniques/T1552/005/). 
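+
+As a concrete illustration of the network-policy recommendation above, the following is a minimal sketch of a Kubernetes `NetworkPolicy` that permits general egress from all pods in a namespace while blocking the common link-local metadata address `169.254.169.254`. It is an example only, not an official Rancher manifest: adjust the namespace, pod selector and CIDRs to your environment, and note that it is only enforced if your CNI plugin supports network policies.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: deny-cloud-metadata   # example name
+  namespace: default          # apply per namespace as needed
+spec:
+  podSelector: {}             # selects every pod in the namespace
+  policyTypes:
+    - Egress
+  egress:
+    - to:
+        - ipBlock:
+            cidr: 0.0.0.0/0
+            except:
+              - 169.254.169.254/32   # cloud instance metadata endpoint
+```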
diff --git a/content/rancher/v2.6/en/security/cve/_index.md b/versioned_docs/version-2.6/security/cve/cve.md similarity index 100% rename from content/rancher/v2.6/en/security/cve/_index.md rename to versioned_docs/version-2.6/security/cve/cve.md diff --git a/content/rancher/v2.6/en/security/hardening-guides/_index.md b/versioned_docs/version-2.6/security/hardening-guides/hardening-guides.md similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/_index.md rename to versioned_docs/version-2.6/security/hardening-guides/hardening-guides.md diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf new file mode 100644 index 0000000000..25f33709b3 Binary files /dev/null and b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf differ diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/rke-1.6-benchmark-2.6.md b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/rke-1.6-benchmark-2.6.md new file mode 100644 index 0000000000..3f561e5ff6 --- /dev/null +++ b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-benchmark-2.6/rke-1.6-benchmark-2.6.md @@ -0,0 +1,3100 @@ +--- +title: RKE CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 +weight: 101 +aliases: + - /rancher/v2.6/en/security/hardening-guides/1.6-benchmark-2.6/ +--- + +### RKE CIS v1.6 Kubernetes Benchmark - Rancher v2.6 with Kubernetes v1.18 to v1.23 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). + +#### Overview + +This document is a companion to the [Rancher v2.6 RKE security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. + +This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: + +| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | +| ----------------------- | --------------- | --------------------- | ------------------- | +| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6 | CIS v1.6 | Kubernetes v1.18 up to v1.23 | + +Because Rancher and RKE install Kubernetes services as Docker containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). 
+ +#### Testing controls methodology + +Rancher and RKE install Kubernetes services via Docker containers. Configuration is defined by arguments passed to the container at the time of initialization, not via configuration files. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. + +> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. + +### Controls +## 1.1 Master Node Configuration Files +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time. + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. +All configuration is passed in as arguments at container run time. + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. +All configuration is passed in as arguments at container run time. 
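+
+Because these components run as Docker containers rather than from pod specification files, the arguments that were actually supplied at container run time can be checked directly on the node. The following is an informal example rather than part of the official audit; it assumes the default container names that RKE assigns (for example `kube-apiserver`):
+
+```bash
+# Print the entrypoint and arguments of the RKE-managed kube-apiserver container
+docker inspect --format '{{ .Path }} {{ join .Args " " }}' kube-apiserver
+
+# Alternatively, check the process arguments on the host, as the audits below do
+ps -ef | grep kube-apiserver | grep -v grep
+```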
+ +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 + +**Audit:** + +```bash +stat -c permissions=%a +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root + +**Audit:** + +```bash +stat -c %U:%G +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c %a /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'700' is equal to '700' +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +A system service account is required for etcd data directory ownership. +Refer to Rancher's hardening guide for more details on how to configure this ownership. + +**Audit:** + +```bash +stat -c %U:%G /node/var/lib/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd +``` + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. +All configuration is passed in as arguments at container run time. + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. 
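+
+As a supplement to controls 1.1.11 and 1.1.12 above, the remediation assumes that a dedicated `etcd` system account exists on each etcd node. A minimal sketch of creating that account and applying the expected permissions is shown below; the UID/GID value `52034` follows Rancher's RKE hardening guide and should be confirmed against the guide for your Rancher version, and the nologin shell path may differ by distribution:
+
+```bash
+# Create the etcd system account (UID/GID per the hardening guide; verify for your version)
+groupadd --gid 52034 etcd
+useradd --comment "etcd service account" --uid 52034 --gid 52034 --shell /usr/sbin/nologin etcd
+
+# Apply the ownership and permissions checked by controls 1.1.11 and 1.1.12
+chmod 700 /var/lib/etcd
+chown etcd:etcd /var/lib/etcd
+```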
+ +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. +All configuration is passed in as arguments at container run time. + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit Script:** `check_files_owner_in_dir.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the owner is set to root:root for +# the given directory and all the files in it +# +# inputs: +# $1 = /full/path/to/directory +# +# outputs: +# true/false + +INPUT_DIR=$1 + +if [[ "${INPUT_DIR}" == "" ]]; then + echo "false" + exit +fi + +if [[ $(stat -c %U:%G ${INPUT_DIR}) != "root:root" ]]; then + echo "false" + exit +fi + +statInfoLines=$(stat -c "%n %U:%G" ${INPUT_DIR}/*) +while read -r statInfoLine; do + f=$(echo ${statInfoLine} | cut -d' ' -f1) + p=$(echo ${statInfoLine} | cut -d' ' -f2) + + if [[ $(basename "$f" .pem) == "kube-etcd-"* ]]; then + if [[ "$p" != "root:root" && "$p" != "etcd:etcd" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "root:root" ]]; then + echo "false" + exit + fi + fi +done <<< "${statInfoLines}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_owner_in_dir.sh /node/etc/kubernetes/ssl +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 644 /etc/kubernetes/pki/*.crt + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' 
+set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /node/etc/kubernetes/ssl/!(*key).pem +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 600 /etc/kubernetes/ssl/*key.pem + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /node/etc/kubernetes/ssl/*key.pem +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--basic-auth-file' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the `--token-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is not present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/etc/kubernetes/manifests/kube-apiserver.yaml on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'Node' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes RBAC, +for example: +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'RBAC' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'EventRateLimit' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... 
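+
+Because the audited command line above already includes `PodSecurityPolicy` in `--enable-admission-plugins`, adding `SecurityContextDeny` is optional for this cluster. In an RKE-provisioned cluster such as this one, the API server flags are normally managed through `cluster.yml` rather than by editing a static pod manifest directly. The snippet below is a minimal, illustrative sketch; the plugin list and file path are assumptions and should be aligned with your own policy:
+
+```yaml
+# cluster.yml (RKE) - illustrative sketch only, not the exact configuration used for this report
+services:
+  kube-api:
+    extra_args:
+      # Keep PodSecurityPolicy, or add SecurityContextDeny instead if PSP is not used
+      enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,PodSecurityPolicy,EventRateLimit"
+      admission-control-config-file: "/etc/kubernetes/admission.yaml"
+```
+
+After updating `cluster.yml`, apply the change with `rke up` so the kube-apiserver container is restarted with the new arguments.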
+ +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API 
server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. 
+Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes PodSecurityPolicy: +--enable-admission-plugins=...,PodSecurityPolicy,... +Then restart the API Server. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure 
NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file 
/etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and remove the --insecure-bind-address parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--insecure-bind-address' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--insecure-port=0 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'0' is equal to '0' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +6443 is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.21 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example: +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +30 is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +10 is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB: +--audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +100 is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, +--request-timeout=300s + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is not present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --service-account-key-file parameter +to the public key file for service accounts: +`--service-account-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate and key file parameters. +`--etcd-certfile=` +`--etcd-keyfile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the TLS certificate and private key file parameters. +`--tls-cert-file=` +`--tls-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the client certificate authority file. +`--client-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the etcd certificate authority file parameter. +`--etcd-cafile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 
00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json
```

### 1.2.34 Ensure that encryption providers are appropriately configured (Automated)


**Result:** pass

**Remediation:**
Follow the Kubernetes documentation and configure an EncryptionConfig file.
In this file, choose aescbc, kms or secretbox as the encryption provider.

**Audit Script:** `check_encryption_provider_config.sh`

```bash
#!/usr/bin/env bash

# This script is used to check the encryption provider config is set to aescbc
#
# outputs:
# true/false

# TODO: Figure out the file location from the kube-apiserver commandline args
ENCRYPTION_CONFIG_FILE="/node/etc/kubernetes/ssl/encryption.yaml"

if [[ ! 
-f "${ENCRYPTION_CONFIG_FILE}" ]]; then + echo "false" + exit +fi + +for provider in "$@" +do + if grep "$provider" "${ENCRYPTION_CONFIG_FILE}"; then + echo "true" + exit + fi +done + +echo "false" +exit + +``` + +**Audit Execution:** + +```bash +./check_encryption_provider_config.sh aescbc +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +- aescbc: true +``` + +### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM +_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM +_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM +_SHA384 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example: +--terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'true' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --service-account-private-key-file parameter +to the private key file for service accounts. 
`--service-account-private-key-file=`

**Audit:**

```bash
/bin/ps -ef | grep kube-controller-manager | grep -v grep
```

**Expected Result**:

```console
'--service-account-private-key-file' is present
```

**Returned Value**:

```console
root 121366 121346 1 12:27 ? 00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true
```

### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated)


**Result:** pass

**Remediation:**
Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml
on the master node and set the --root-ca-file parameter to the certificate bundle file.
`--root-ca-file=`

**Audit:**

```bash
/bin/ps -ef | grep kube-controller-manager | grep -v grep
```

**Expected Result**:

```console
'--root-ca-file' is present
```

**Returned Value**:

```console
root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +Cluster provisioned by RKE handles certificate rotation directly through RKE. + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /etc/kubernetes/manifests/kube-controller-manager.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 121366 121346 1 12:27 ? 
00:01:13 kube-controller-manager --cluster-cidr=10.42.0.0/16 --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --allocate-node-cidrs=true --configure-cloud-routes=false --leader-elect=true --pod-eviction-timeout=5m0s --authentication-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --feature-gates=RotateKubeletServerCertificate=true --bind-address=127.0.0.1 --enable-hostpath-provisioner=false --address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --cloud-provider= --service-account-private-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --service-cluster-ip-range=10.43.0.0/16 --authorization-kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-controller-manager.yaml --root-ca-file=/etc/kubernetes/ssl/kube-ca.pem --node-monitor-grace-period=40s --profiling=false --terminated-pod-gc-threshold=1000 --v=2 --allow-untagged-cloud=true --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /etc/kubernetes/manifests/kube-scheduler.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 121587 121567 0 12:27 ? 00:00:12 kube-scheduler --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-scheduler.yaml --address=127.0.0.1 --leader-elect=true --profiling=false --v=2 --bind-address=127.0.0.1 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +``` + +## 2 Etcd Node Configuration Files +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. 
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
on the master node and set the below parameters.
`--cert-file=`
`--key-file=`

**Audit:**

```bash
/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep
```

**Expected Result**:

```console
'--cert-file' is present AND '--key-file' is present
```

**Returned Value**:

```console
etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 2 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 1 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json
```

### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)


**Result:** pass

**Remediation:**
Follow the etcd service documentation and configure peer TLS encryption as appropriate
for your etcd cluster.
Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the
master node and set the below parameters.
`--peer-cert-file=`
`--peer-key-file=`

**Audit:**

```bash
/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep
```

**Expected Result**:

```console
'--peer-cert-file' is present AND '--peer-key-file' is present
```

**Returned Value**:

```console
etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 5 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and set the below parameter. +--peer-client-cert-auth=true + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-client-cert-auth' is present OR 'true' is equal to 'true' +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the master +node and either remove the --peer-auto-tls parameter or set it to false. +--peer-auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--peer-auto-tls' is not present OR '--peer-auto-tls' is present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 
00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 4 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Automated) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml on the +master node and set the below parameter. +`--trusted-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is present +``` + +**Returned Value**: + +```console +etcd 120679 120657 1 12:27 ? 
00:01:17 /usr/local/bin/etcd --trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --heartbeat-interval=500 --election-timeout=5000 --initial-cluster-token=etcd-cluster-1 --initial-cluster=etcd-=https://:2380 --peer-trusted-ca-file=/etc/kubernetes/ssl/kube-ca.pem --peer-cert-file=/etc/kubernetes/ssl/kube-etcd-.pem --peer-key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --peer-client-cert-auth=true --data-dir=/var/lib/rancher/etcd/ --initial-advertise-peer-urls=https://:2380 --initial-cluster-state=new --advertise-client-urls=https://:2379 --client-cert-auth=true --enable-v2=true --name=etcd- --listen-client-urls=https://:2379 --listen-peer-urls=https://:2380 --key-file=/etc/kubernetes/ssl/kube-etcd--key.pem --cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 root 120728 120707 0 12:27 ? 00:00:00 /opt/rke-tools/rke-etcd-backup etcd-backup save --cacert /etc/kubernetes/ssl/kube-ca.pem --cert /etc/kubernetes/ssl/kube-node.pem --key /etc/kubernetes/ssl/kube-node-key.pem --name etcd-rolling-snapshots --endpoints=:2379 --retention=72h --creation=12h root 121142 121120 7 12:27 ? 00:06:27 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json root 214939 214868 3 13:56 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +## 3.1 Authentication and Authorization +### 3.1.1 Client certificate authentication should not be used for users (Manual) + + +**Result:** warn + +**Remediation:** +Alternative mechanisms provided by Kubernetes such as the use of OIDC should be +implemented in place of client certificates. + +## 3.2 Logging +### 3.2.1 Ensure that a minimal audit policy is created (Automated) + + +**Result:** pass + +**Remediation:** +Create an audit policy file for your cluster. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-policy-file' is present +``` + +**Returned Value**: + +```console +root 121142 121120 7 12:27 ? 00:06:28 kube-apiserver --audit-log-maxsize=100 --etcd-keyfile=/etc/kubernetes/ssl/kube-node-key.pem --service-cluster-ip-range=10.43.0.0/16 --encryption-provider-config=/etc/kubernetes/ssl/encryption.yaml --requestheader-username-headers=X-Remote-User --bind-address=0.0.0.0 --advertise-address= --requestheader-allowed-names=kube-apiserver-proxy-client --etcd-certfile=/etc/kubernetes/ssl/kube-node.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/kube-apiserver-requestheader-ca.pem --allow-privileged=true --requestheader-extra-headers-prefix=X-Remote-Extra- --admission-control-config-file=/etc/kubernetes/admission.yaml --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --service-account-lookup=true --runtime-config=policy/v1beta1/podsecuritypolicy=true --authorization-mode=Node,RBAC --audit-log-maxage=30 --profiling=false --storage-backend=etcd3 --etcd-cafile=/etc/kubernetes/ssl/kube-ca.pem --etcd-servers=https://:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/kube-ca.pem --secure-port=6443 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 --insecure-port=0 --api-audiences=unknown --audit-policy-file=/etc/kubernetes/audit-policy.yaml --etcd-prefix=/registry --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem --proxy-client-key-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client-key.pem --service-account-issuer=rke --service-account-signing-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --requestheader-group-headers=X-Remote-Group --cloud-provider= --proxy-client-cert-file=/etc/kubernetes/ssl/kube-apiserver-proxy-client.pem --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem --anonymous-auth=false --audit-log-path=/var/log/kube-audit/audit-log.json --audit-log-maxbackup=10 --service-account-key-file=/etc/kubernetes/ssl/kube-service-account-token-key.pem 
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction,Priority,TaintNodesByCondition,PersistentVolumeClaimResize,PodSecurityPolicy,EventRateLimit --audit-log-format=json +``` + +### 3.2.2 Ensure that the audit policy covers key security concerns (Manual) + + +**Result:** warn + +**Remediation:** +Consider modification of the audit policy in use on the cluster to include these items, at a +minimum. + +## 4.1 Worker Node Configuration Files +### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + +### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. +All configuration is passed in as arguments at container run time. + +### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chmod 644 $proykubeconfig + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %a /node/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is present OR '640' is present OR '600' is equal to '600' OR '444' is present OR '440' is present OR '400' is present OR '000' is present +``` + +**Returned Value**: + +```console +600 +``` + +### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, chown root:root /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; then stat -c %U:%G /etc/kubernetes/ssl/kubecfg-kube-proxy.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is not present OR '/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml' is not present +``` + +### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. +For example, +chmod 644 /etc/kubernetes/ssl/kubecfg-kube-node.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c permissions=%a /etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the each worker node. 
+For example, +chown root:root /etc/kubernetes/ssl/kubecfg-kube-node.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; then stat -c %U:%G /node/etc/kubernetes/ssl/kubecfg-kube-node.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the file permissions of the +`--client-ca-file chmod 644 ` + +**Audit Script:** `check_cafile_permissions.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_permissions.sh +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the following command to modify the ownership of the --client-ca-file. +`chown root:root ` + +**Audit Script:** `check_cafile_ownership.sh` + +```bash +#!/usr/bin/env bash + +CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') +if test -z $CAFILE; then CAFILE=$kubeletcafile; fi +if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + +``` + +**Audit Execution:** + +```bash +./check_cafile_ownership.sh +``` + +**Expected Result**: + +```console +'root:root' is not present +``` + +### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chmod 644 /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Run the following command (using the config file location identified in the Audit step) +chown root:root /var/lib/kubelet/config.yaml + +Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. +All configuration is passed in as arguments at container run time. + +## 4.2 Kubelet +### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to +false. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--anonymous-auth=false +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'30m' is not equal to '0' OR '--streaming-connection-idle-timeout' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present OR '' is not present +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set tlsCertFile to the location +of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +`--tls-cert-file=` +`--tls-private-key-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present AND '' is not present +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line rotateCertificates: true or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'--rotate-certificates' is not present OR '--rotate-certificates' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 121813 121792 4 12:27 ? 00:03:37 kubelet --fail-swap-on=false --resolv-conf=/etc/resolv.conf --authorization-mode=Webhook --kubeconfig=/etc/kubernetes/ssl/kubecfg-kube-node.yaml --v=2 --volume-plugin-dir=/var/lib/kubelet/volumeplugins --address=0.0.0.0 --make-iptables-util-chains=true --client-ca-file=/etc/kubernetes/ssl/kube-ca.pem --hostname-override= --tls-cert-file=/etc/kubernetes/ssl/kube-kubelet-.pem --network-plugin=cni --streaming-connection-idle-timeout=30m --root-dir=/var/lib/kubelet --event-qps=0 --feature-gates=RotateKubeletServerCertificate=true --protect-kernel-defaults=true --cloud-provider= --tls-private-key-file=/etc/kubernetes/ssl/kube-kubelet--key.pem --cgroups-per-qos=True --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=rancher/mirrored-pause:3.5 --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 --anonymous-auth=false --authentication-token-webhook=true --node-ip= --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --read-only-port=0 --cgroup-driver=cgroupfs --resolv-conf=/run/systemd/resolve/resolv.conf +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +Clusters provisioned by RKE handles certificate rotation directly through RKE. + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set TLSCipherSuites: to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/kubelet/config.yaml +``` + +**Expected Result**: + +```console +'' is not present +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +## 5.2 Pod Security Policies +### 5.2.1 Minimize the admission of privileged containers (Manual) + + +**Result:** warn + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that +the .spec.privileged field is omitted or set to false. 
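+
+This control is assessed manually, but it can be spot-checked in the same style as the automated 5.2.x audits that follow. The command below is a sketch rather than part of the generated benchmark output; it assumes `kubectl` and `jq` are available and counts the Pod Security Policies that do not allow privileged containers, so a count greater than 0 indicates at least one restrictive policy exists:
+
+```bash
+# Count PSPs whose .spec.privileged field is omitted or set to false,
+# i.e. policies that do not admit privileged containers.
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.privileged == null) or (.spec.privileged == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+Any policy that does set `.spec.privileged: true` should be reviewed and limited to the workloads that genuinely require it.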
+
+### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.hostPID field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.hostIPC field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.hostNetwork field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.allowPrivilegeEscalation field is omitted or set to false.
+
+**Audit:**
+
+```bash
+kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+```
+
+**Expected Result**:
+
+```console
+1 is greater than 0
+```
+
+**Returned Value**:
+
+```console
+--count=1
+```
+
+### 5.2.6 Minimize the admission of root containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+UIDs not including 0.
+
+### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Create a PSP as described in the Kubernetes documentation, ensuring that the
+.spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+
+### 5.2.8 Minimize the admission of containers with added capabilities (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+it is set to an empty array.
+
+### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Review the use of capabilities in applications running on your cluster. Where a namespace
+contains applications which do not require any Linux capabilities to operate, consider adding
+a PSP which forbids the admission of containers which do not drop all capabilities.
+
+## 5.3 Network Policies and CNI
+### 5.3.1 Ensure that the CNI in use supports Network Policies (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If the CNI plugin in use does not support network policies, consideration should be given to
+making use of a different plugin, or finding an alternate mechanism for restricting traffic
+in the Kubernetes cluster.
+
+### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the documentation and create NetworkPolicy objects as you need them.
+
+**Audit Script:** `check_for_network_policies.sh`
+
+```bash
+#!/bin/bash
+
+set -eE
+
+handle_error() {
+  echo "false"
+}
+
+trap 'handle_error' ERR
+
+for namespace in $(kubectl get namespaces --all-namespaces -o json | jq -r '.items[].metadata.name'); do
+  policy_count=$(kubectl get networkpolicy -n ${namespace} -o json | jq '.items | length')
+  if [[ ${policy_count} -eq 0 ]]; then
+    echo "false"
+    exit
+  fi
+done
+
+echo "true"
+
+```
+
+**Audit Execution:**
+
+```bash
+./check_for_network_policies.sh
+```
+
+**Expected Result**:
+
+```console
+'true' is equal to 'true'
+```
+
+**Returned Value**:
+
+```console
+true
+```
+
+## 5.4 Secrets Management
+### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+If possible, rewrite application code to read secrets from mounted secret files, rather than
+from environment variables.
+
+### 5.4.2 Consider external secret storage (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Refer to the secrets management options offered by your cloud provider or a third-party
+secrets management solution.
+
+## 5.5 Extensible Admission Control
+### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and set up image provenance.
+
+## 5.7 General Policies
+### 5.7.1 Create administrative boundaries between resources using namespaces (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the documentation and create namespaces for objects in your deployment as you need
+them.
+
+### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+would need to enable alpha features in the apiserver by passing the "--feature-gates=AllAlpha=true" argument.
+Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+parameter to "--feature-gates=AllAlpha=true"
+KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+Based on your system, restart the kube-apiserver service. For example:
+systemctl restart kube-apiserver.service
+Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+example is shown below:
+apiVersion: v1
+kind: Pod
+metadata:
+  name: trustworthy-pod
+  annotations:
+    seccomp.security.alpha.kubernetes.io/pod: docker/default
+spec:
+  containers:
+  - name: trustworthy-container
+    image: sotrustworthy:latest
+
+### 5.7.3 Apply Security Context to Your Pods and Containers (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Follow the Kubernetes documentation and apply security contexts to your pods.
For a +suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Automated) + + +**Result:** pass + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + +**Audit Script:** `check_for_default_ns.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) +if [[ ${count} -gt 0 ]]; then + echo "false" + exit +fi + +echo "true" + + +``` + +**Audit Execution:** + +```bash +./check_for_default_ns.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf new file mode 100644 index 0000000000..8984cc57c5 Binary files /dev/null and b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-hardening-2.6/Rancher_v2-6_CIS_v1-6_Hardening_Guide.pdf differ diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/_index.md b/versioned_docs/version-2.6/security/hardening-guides/rke-1.6-hardening-2.6/rke-1.6-hardening-2.6.md similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke-1.6-hardening-2.6/_index.md rename to versioned_docs/version-2.6/security/hardening-guides/rke-1.6-hardening-2.6/rke-1.6-hardening-2.6.md diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf new file mode 100644 index 0000000000..a99a551e3d Binary files /dev/null and b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf differ diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/rke2-1.6-benchmark-2.6.md b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/rke2-1.6-benchmark-2.6.md new file mode 100644 index 0000000000..f50043d7cb --- /dev/null +++ b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-benchmark-2.6/rke2-1.6-benchmark-2.6.md @@ -0,0 +1,3326 @@ +--- +title: RKE2 CIS v1.6 Benchmark - Self-Assessment Guide - Rancher v2.6 +weight: 101 +--- + +### CIS v1.6 Kubernetes Benchmark - Rancher v2.6 RKE2 with Kubernetes v1.21 up to v1.23 + +[Click here to download a PDF version of this document](https://releases.rancher.com/documents/security/2.6/Rancher_RKE2_v2-6_CIS_v1-6_Benchmark_Assessment.pdf). + +#### Overview + +This document is a companion to the [Rancher v2.6 RKE2 security hardening guide]({{}}/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/). The hardening guide provides prescriptive guidance for hardening a production installation of Rancher with RKE2 provisioned clusters, and this benchmark guide is meant to help you evaluate the level of security of the hardened cluster against each control in the benchmark. 
+ +This guide corresponds to specific versions of the hardening guide, Rancher, CIS Benchmark and Kubernetes: + +| Hardening Guide Version | Rancher Version | CIS Benchmark Version | Kubernetes Version | +| ----------------------- | --------------- | --------------------- | ------------------- | +| Hardening Guide CIS v1.6 Benchmark | Rancher v2.6.5+ | CIS v1.6 | Kubernetes v1.21 up to v1.23 | + +Because Rancher and RKE2 install Kubernetes services as containers, many of the control verification checks in the CIS Kubernetes Benchmark do not apply and will have a result of `Not Applicable`. This guide will walk through the various controls and provide updated example commands to audit compliance in Rancher created clusters. + +This document is to be used by Rancher operators, security teams, auditors and decision makers. + +For more detail about each audit, including rationales and remediations for failing tests, you can refer to the corresponding section of the CIS Kubernetes Benchmark v1.6. You can download the benchmark, after creating a free account, in [Center for Internet Security (CIS)](https://www.cisecurity.org/benchmark/kubernetes/). + +#### Testing controls methodology + +RKE2 launches control plane components as static pods, managed by the kubelet, and uses containerd as the container runtime. Configuration is defined by arguments passed to the container at the time of initialization or via configuration file. + +Where control audits differ from the original CIS benchmark, the audit commands specific to Rancher are provided for testing. When performing the tests, you will need access to the command line on the hosts of all RKE2 nodes. The commands also make use of the [kubectl](https://kubernetes.io/docs/tasks/tools/) (with a valid configuration file) and [jq](https://stedolan.github.io/jq/) tools, which are required in the testing and evaluation of test results. + +> NOTE: Only `automated` tests (previously called `scored`) are covered in this guide. + +### Controls + +--- +## 1.1 Master Node Configuration Files +### 1.1.1 Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the +master node. +For example, chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.2 Ensure that the API server pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.3 Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.5 Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is present +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.7 Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod 644 /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %a /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'644' is equal to '644' +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.8 Ensure that the etcd pod specification file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + +**Audit:** + +```bash +/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; then stat -c %U:%G /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml; fi' +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.9 Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 + +**Audit:** + +```bash +stat -c %a +``` + +### 1.1.10 Ensure that the Container Network Interface file ownership is set to root:root (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root + +**Audit:** + +```bash +stat -c %U:%G +``` + +### 1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). For example, +chmod 700 /var/lib/etcd + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +700 +``` + +### 1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd (Automated) + + +**Result:** pass + +**Remediation:** +On the etcd server node, get the etcd data directory, passed as an argument --data-dir, +from the below command: +ps -ef | grep etcd +Run the below command (based on the etcd data directory found above). +For example, chown etcd:etcd /var/lib/etcd + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd +``` + +**Expected Result**: + +```console +'etcd:etcd' is present +``` + +**Returned Value**: + +```console +etcd:etcd +``` + +### 1.1.13 Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.14 Ensure that the admin.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chown root:root /etc/kubernetes/admin.conf + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.15 Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 scheduler + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.16 Ensure that the scheduler.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root scheduler + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.17 Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod 644 controllermanager + +**Audit:** + +```bash +stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +644 +``` + +### 1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown root:root controllermanager + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chown -R root:root /etc/kubernetes/pki/ + +**Audit:** + +```bash +stat -c %U:%G /var/lib/rancher/rke2/server/tls +``` + +**Expected Result**: + +```console +'root:root' is equal to 'root:root' +``` + +**Returned Value**: + +```console +root:root +``` + +### 1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual) + + +**Result:** warn + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. 
+For example, +chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.crt +``` + +### 1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual) + + +**Result:** pass + +**Remediation:** +Run the below command (based on the file location on your system) on the master node. +For example, +chmod -R 600 /etc/kubernetes/pki/*.key + +**Audit Script:** `check_files_permissions.sh` + +```bash +#!/usr/bin/env bash + +# This script is used to ensure the file permissions are set to 644 or +# more restrictive for all files in a given directory or a wildcard +# selection of files +# +# inputs: +# $1 = /full/path/to/directory or /path/to/fileswithpattern +# ex: !(*key).pem +# +# $2 (optional) = permission (ex: 600) +# +# outputs: +# true/false + +# Turn on "extended glob" for use of '!' in wildcard +shopt -s extglob + +# Turn off history to avoid surprises when using '!' +set -H + +USER_INPUT=$1 + +if [[ "${USER_INPUT}" == "" ]]; then + echo "false" + exit +fi + + +if [[ -d ${USER_INPUT} ]]; then + PATTERN="${USER_INPUT}/*" +else + PATTERN="${USER_INPUT}" +fi + +PERMISSION="" +if [[ "$2" != "" ]]; then + PERMISSION=$2 +fi + +FILES_PERMISSIONS=$(stat -c %n\ %a ${PATTERN}) + +while read -r fileInfo; do + p=$(echo ${fileInfo} | cut -d' ' -f2) + + if [[ "${PERMISSION}" != "" ]]; then + if [[ "$p" != "${PERMISSION}" ]]; then + echo "false" + exit + fi + else + if [[ "$p" != "644" && "$p" != "640" && "$p" != "600" ]]; then + echo "false" + exit + fi + fi +done <<< "${FILES_PERMISSIONS}" + + +echo "true" +exit + +``` + +**Audit Execution:** + +```bash +./check_files_permissions.sh /var/lib/rancher/rke2/server/tls/*.key +``` + +**Expected Result**: + +```console +'permissions' is not present +``` + +**Returned Value**: + +```console +true +``` + +## 1.2 API Server +### 1.2.1 Ensure that the --anonymous-auth argument is set to false (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. 
+--anonymous-auth=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.2 Ensure that the --basic-auth-file argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the `--basic-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--basic-auth-file' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.3 Ensure that the --token-auth-file parameter is not set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and configure alternate mechanisms for authentication. Then, +edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the `--token-auth-file=` parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--token-auth-file' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.4 Ensure that the --kubelet-https argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the --kubelet-https parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-https' is not present OR '--kubelet-https' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.5 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the +apiserver and kubelets. Then, edit API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the +kubelet client certificate and key parameters as below. +--kubelet-client-certificate= +--kubelet-client-key= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-client-certificate' is present AND '--kubelet-client-key' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.6 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and setup the TLS connection between +the apiserver and kubelets. Then, edit the API server pod specification file +/var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml on the master node and set the +`--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority. +`--kubelet-certificate-authority=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--kubelet-certificate-authority' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.7 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. +One such example could be as below. +--authorization-mode=RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' not have 'AlwaysAllow' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.8 Ensure that the --authorization-mode argument includes Node (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes Node. +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'Node' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.9 Ensure that the --authorization-mode argument includes RBAC (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --authorization-mode parameter to a value that includes RBAC, +for example: +--authorization-mode=Node,RBAC + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'Node,RBAC' has 'RBAC' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.10 Ensure that the admission control plugin EventRateLimit is set (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and set the desired limits in a configuration file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +and set the below parameters. +--enable-admission-plugins=...,EventRateLimit,... +--admission-control-config-file= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.11 Ensure that the admission control plugin AlwaysAdmit is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and either remove the --enable-admission-plugins parameter, or set it to a +value that does not include AlwaysAdmit. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' not have 'AlwaysAdmit' OR '--enable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.12 Ensure that the admission control plugin AlwaysPullImages is set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +AlwaysPullImages. +--enable-admission-plugins=...,AlwaysPullImages,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.13 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) + + +**Result:** warn + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to include +SecurityContextDeny, unless PodSecurityPolicy is already in place. +--enable-admission-plugins=...,SecurityContextDeny,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +### 1.2.14 Ensure that the admission control plugin ServiceAccount is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create ServiceAccount objects as per your environment. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and ensure that the --disable-admission-plugins parameter is set to a +value that does not include ServiceAccount. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.15 Ensure that the admission control plugin NamespaceLifecycle is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --disable-admission-plugins parameter to +ensure it does not include NamespaceLifecycle. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--disable-admission-plugins' is not present OR '--disable-admission-plugins' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.16 Ensure that the admission control plugin PodSecurityPolicy is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create Pod Security Policy objects as per your environment. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes PodSecurityPolicy: +--enable-admission-plugins=...,PodSecurityPolicy,... +Then restart the API Server. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' has 'PodSecurityPolicy' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.17 Ensure that the admission control plugin NodeRestriction is set (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --enable-admission-plugins parameter to a +value that includes NodeRestriction. +--enable-admission-plugins=...,NodeRestriction,... + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'NodeRestriction,PodSecurityPolicy' has 'NodeRestriction' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.18 Ensure that the --insecure-bind-address argument is not set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and remove the --insecure-bind-address parameter. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--insecure-bind-address' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.19 Ensure that the --insecure-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--insecure-port=0 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'0' is equal to '0' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.20 Ensure that the --secure-port argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and either remove the --secure-port parameter or +set it to a different (non-zero) desired port. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +6443 is greater than 0 OR '--secure-port' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.21 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.22 Ensure that the --audit-log-path argument is set (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-path parameter to a suitable path and +file where you would like audit logs to be written, for example: +--audit-log-path=/var/log/apiserver/audit.log + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--audit-log-path' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.23 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: +--audit-log-maxage=30 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +30 is greater or equal to 30 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.24 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate +value. +--audit-log-maxbackup=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +10 is greater or equal to 10 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.25 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. +For example, to set it as 100 MB: +--audit-log-maxsize=100 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +100 is greater or equal to 100 +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.26 Ensure that the --request-timeout argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +and set the below parameter as appropriate and if needed. +For example, +--request-timeout=300s + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--request-timeout' is not present OR '--request-timeout' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.27 Ensure that the --service-account-lookup argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the below parameter. +--service-account-lookup=true +Alternatively, you can delete the --service-account-lookup parameter from this file so +that the default takes effect. + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-lookup' is not present OR '--service-account-lookup' is not present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.28 Ensure that the --service-account-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --service-account-key-file parameter +to the public key file for service accounts: +`--service-account-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-key-file' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.29 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the etcd certificate and key file parameters. +`--etcd-certfile=` +`--etcd-keyfile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-certfile' is present AND '--etcd-keyfile' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.30 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the TLS certificate and private key file parameters. +`--tls-cert-file=` +`--tls-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.31 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection on the apiserver. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the client certificate authority file. +`--client-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--client-ca-file' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.32 Ensure that the --etcd-cafile argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the etcd certificate authority file parameter. +`--etcd-cafile=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--etcd-cafile' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.2.33 Ensure that the --encryption-provider-config argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Follow the Kubernetes documentation and configure a EncryptionConfig file. +Then, edit the API server pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml +on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + +**Audit:** + +```bash +/bin/ps -ef | grep kube-apiserver | grep -v grep +``` + +**Expected Result**: + +```console +'--encryption-provider-config' is present +``` + +**Returned Value**: + +```console +root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true
+```
+
+### 1.2.34 Ensure that encryption providers are appropriately configured (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Follow the Kubernetes documentation and configure a EncryptionConfig file.
+In this file, choose aescbc, kms or secretbox as the encryption provider.
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if grep aescbc /var/lib/rancher/rke2/server/cred/encryption-config.json; then echo 0; fi'
+```
+
+**Expected Result**:
+
+```console
+'0' is present
+```
+
+**Returned Value**:
+
+```console
+{"kind":"EncryptionConfiguration","apiVersion":"apiserver.config.k8s.io/v1","resources":[{"resources":["secrets"],"providers":[{"aescbc":{"keys":[{"name":"aescbckey","secret":"ZP3yNnlCjzcKMBXfmNBmpGbiY+oXne+WP6EM42lZIbE="}]}},{"identity":{}}]}]} 0
+```
+
+### 1.2.35 Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+on the master node and set the below parameter.
+--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+
+**Audit:**
+
+```bash
+/bin/ps -ef | grep kube-apiserver | grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--tls-cipher-suites' is not present
+```
+
+**Returned Value**:
+
+```console
+root 5275 5222 15 14:58 ? 
00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +## 1.3 Controller Manager +### 1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, +for example: +--terminated-pod-gc-threshold=10 + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--terminated-pod-gc-threshold' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.2 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.3 Ensure that the --use-service-account-credentials argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node to set the below parameter. +--use-service-account-credentials=true + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'true' is not equal to 'false' +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --service-account-private-key-file parameter +to the private key file for service accounts. +`--service-account-private-key-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--service-account-private-key-file' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.5 Ensure that the --root-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --root-ca-file parameter to the certificate bundle file`. +`--root-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'--root-ca-file' is present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +### 1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. +--feature-gates=RotateKubeletServerCertificate=true + +### 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Controller Manager pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-controller-manager | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5522 5416 3 14:58 ? 
00:00:16 kube-controller-manager --flex-volume-plugin-dir=/var/lib/kubelet/volumeplugins --terminated-pod-gc-threshold=1000 --permit-port-sharing=true --address=127.0.0.1 --allocate-node-cidrs=true --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-controller-manager --cluster-cidr=10.42.0.0/16 --cluster-signing-kube-apiserver-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kube-apiserver-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-client-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-kubelet-client-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --cluster-signing-kubelet-serving-cert-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --cluster-signing-kubelet-serving-key-file=/var/lib/rancher/rke2/server/tls/server-ca.key --cluster-signing-legacy-unknown-cert-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --cluster-signing-legacy-unknown-key-file=/var/lib/rancher/rke2/server/tls/client-ca.key --configure-cloud-routes=false --controllers=*,-service,-route,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/rke2/server/cred/controller.kubeconfig --port=10252 --profiling=false --root-ca-file=/var/lib/rancher/rke2/server/tls/server-ca.crt --secure-port=10257 --service-account-private-key-file=/var/lib/rancher/rke2/server/tls/service.key --use-service-account-credentials=true +``` + +## 1.4 Scheduler +### 1.4.1 Ensure that the --profiling argument is set to false (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml file +on the master node and set the below parameter. +--profiling=false + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'false' is equal to 'false' +``` + +**Returned Value**: + +```console +root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 +``` + +### 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated) + + +**Result:** pass + +**Remediation:** +Edit the Scheduler pod specification file /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml +on the master node and ensure the correct value for the --bind-address parameter + +**Audit:** + +```bash +/bin/ps -ef | grep kube-scheduler | grep -v grep +``` + +**Expected Result**: + +```console +'127.0.0.1' is equal to '127.0.0.1' OR '--bind-address' is not present +``` + +**Returned Value**: + +```console +root 5533 5414 0 14:58 ? 00:00:02 kube-scheduler --permit-port-sharing=true --address=127.0.0.1 --bind-address=127.0.0.1 --cert-dir=/var/lib/rancher/rke2/server/tls/kube-scheduler --kubeconfig=/var/lib/rancher/rke2/server/cred/scheduler.kubeconfig --port=10251 --profiling=false --secure-port=10259 +``` + +## 2 Etcd Node Configuration Files +### 2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Follow the etcd service documentation and configure TLS encryption. +Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml +on the master node and set the below parameters. 
+`--cert-file=` +`--key-file=` + +### 2.2 Ensure that the --client-cert-auth argument is set to true (Automated) + + +**Result:** Not Applicable + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and set the below parameter. +--client-cert-auth="true" + +### 2.3 Ensure that the --auto-tls argument is not set to true (Automated) + + +**Result:** pass + +**Remediation:** +Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master +node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--auto-tls' is not present OR '--auto-tls' is not present +``` + +**Returned Value**: + +```console +etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 0 15:07 ? 
00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json
+```
+
+### 2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Follow the etcd service documentation and configure peer TLS encryption as appropriate
+for your etcd cluster.
+Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the
+master node and set the below parameters.
+`--peer-cert-file=`
+`--peer-key-file=`
+
+### 2.5 Ensure that the --peer-client-cert-auth argument is set to true (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master
+node and set the below parameter.
+--peer-client-cert-auth=true
+
+### 2.6 Ensure that the --peer-auto-tls argument is not set to true (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the master
+node and either remove the --peer-auto-tls parameter or set it to false.
+--peer-auto-tls=false
+
+**Audit:**
+
+```bash
+/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep
+```
+
+**Expected Result**:
+
+```console
+'--peer-auto-tls' is not present OR '--peer-auto-tls' is not present
+```
+
+**Returned Value**:
+
+```console
+etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 
--service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 6 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json +``` + +### 2.7 Ensure that a unique Certificate Authority is used for etcd (Manual) + + +**Result:** pass + +**Remediation:** +[Manual test] +Follow the etcd documentation and create a dedicated certificate authority setup for the +etcd service. +Then, edit the etcd pod specification file /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml on the +master node and set the below parameter. +`--trusted-ca-file=` + +**Audit:** + +```bash +/bin/ps -ef | /bin/grep etcd | /bin/grep -v grep +``` + +**Expected Result**: + +```console +'--trusted-ca-file' is not present +``` + +**Returned Value**: + +```console +etcd 5059 5033 0 14:58 ? 00:00:00 /pause etcd 5121 5033 3 14:58 ? 00:00:18 etcd --config-file=/var/lib/rancher/rke2/server/db/etcd/config root 5275 5222 15 14:58 ? 00:01:26 kube-apiserver --audit-policy-file=/etc/rancher/rke2/audit-policy.yaml --audit-log-path=/var/lib/rancher/rke2/server/logs/audit.log --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --allow-privileged=true --anonymous-auth=false --api-audiences=https://kubernetes.default.svc.cluster.local,rke2 --authorization-mode=Node,RBAC --bind-address=0.0.0.0 --cert-dir=/var/lib/rancher/rke2/server/tls/temporary-certs --client-ca-file=/var/lib/rancher/rke2/server/tls/client-ca.crt --enable-admission-plugins=NodeRestriction,PodSecurityPolicy --encryption-provider-config=/var/lib/rancher/rke2/server/cred/encryption-config.json --etcd-cafile=/var/lib/rancher/rke2/server/tls/etcd/server-ca.crt --etcd-certfile=/var/lib/rancher/rke2/server/tls/etcd/client.crt --etcd-keyfile=/var/lib/rancher/rke2/server/tls/etcd/client.key --etcd-servers=https://127.0.0.1:2379 --insecure-port=0 --kubelet-certificate-authority=/var/lib/rancher/rke2/server/tls/server-ca.crt --kubelet-client-certificate=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt --kubelet-client-key=/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key --profiling=false --proxy-client-cert-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.crt --proxy-client-key-file=/var/lib/rancher/rke2/server/tls/client-auth-proxy.key --requestheader-allowed-names=system:auth-proxy --requestheader-client-ca-file=/var/lib/rancher/rke2/server/tls/request-header-ca.crt --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc.cluster.local --service-account-key-file=/var/lib/rancher/rke2/server/tls/service.key 
--service-account-signing-key-file=/var/lib/rancher/rke2/server/tls/service.key --service-cluster-ip-range=10.43.0.0/16 --service-node-port-range=30000-32767 --storage-backend=etcd3 --tls-cert-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.crt --tls-private-key-file=/var/lib/rancher/rke2/server/tls/serving-kube-apiserver.key root 16473 16413 3 15:07 ? 00:00:00 kube-bench run --targets etcd --scored --nosummary --noremediations --v=5 --config-dir=/etc/kube-bench/cfg --benchmark rke2-cis-1.6-hardened --json --log_dir /tmp/results/logs --outputfile /tmp/results/etcd.json
+```
+
+## 3.1 Authentication and Authorization
+### 3.1.1 Client certificate authentication should not be used for users (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+implemented in place of client certificates.
+
+## 3.2 Logging
+### 3.2.1 Ensure that a minimal audit policy is created (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Create an audit policy file for your cluster.
+
+**Audit:**
+
+```bash
+/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file
+```
+
+**Expected Result**:
+
+```console
+'audit-policy-file' is equal to 'audit-policy-file'
+```
+
+**Returned Value**:
+
+```console
+audit-policy-file
+```
+
+### 3.2.2 Ensure that the audit policy covers key security concerns (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Consider modification of the audit policy in use on the cluster to include these items, at a
+minimum.
+
+## 4.1 Worker Node Configuration Files
+### 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then stat -c permissions=%a /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; fi'
+```
+
+**Expected Result**:
+
+```console
+'permissions' is not present
+```
+
+### 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)
+
+
+**Result:** Not Applicable
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chown root:root /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+
+### 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %a /node/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'permissions' is present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present
+```
+
+**Returned Value**:
+
+```console
+644
+```
+
+### 4.1.4 Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example, chown root:root /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubeproxy.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is not present OR '/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig' is not present
+```
+
+**Returned Value**:
+
+```console
+root:root
+```
+
+### 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'644' is equal to '644'
+```
+
+**Returned Value**:
+
+```console
+permissions=644
+```
+
+### 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the below command (based on the file location on your system) on each worker node.
+For example,
+chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is equal to 'root:root'
+```
+
+**Returned Value**:
+
+```console
+root:root
+```
+
+### 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)
+
+
+**Result:** warn
+
+**Remediation:**
+Run the following command to modify the file permissions of the --client-ca-file.
+`chmod 644 `
+
+**Audit:**
+
+```bash
+stat -c %a /var/lib/rancher/rke2/server/tls/server-ca.crt
+```
+
+### 4.1.8 Ensure that the client certificate authorities file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the following command to modify the ownership of the --client-ca-file.
+`chown root:root `
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/client-ca.crt; then stat -c %U:%G /var/lib/rancher/rke2/agent/client-ca.crt; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is equal to 'root:root'
+```
+
+**Returned Value**:
+
+```console
+root:root
+```
+
+### 4.1.9 Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the following command (using the config file location identified in the Audit step)
+chmod 644 /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c permissions=%a /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'644' is equal to '644'
+```
+
+**Returned Value**:
+
+```console
+permissions=644
+```
+
+### 4.1.10 Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+Run the following command (using the config file location identified in the Audit step)
+chown root:root /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+
+**Audit:**
+
+```bash
+/bin/sh -c 'if test -e /var/lib/rancher/rke2/agent/kubelet.kubeconfig; then stat -c %U:%G /var/lib/rancher/rke2/agent/kubelet.kubeconfig; fi'
+```
+
+**Expected Result**:
+
+```console
+'root:root' is present
+```
+
+**Returned Value**:
+
+```console
+root:root
+```
+
+## 4.2 Kubelet
+### 4.2.1 Ensure that the anonymous-auth argument is set to false (Automated)
+
+
+**Result:** pass
+
+**Remediation:**
+If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+false.
+If using executable arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+--anonymous-auth=false
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+**Audit:**
+
+```bash
+/bin/ps -fC kubelet
+```
+
+**Audit Config:**
+
+```bash
+/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig
+```
+
+**Expected Result**:
+
+```console
+'false' is equal to 'false'
+```
+
+**Returned Value**:
+
+```console
+UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If +using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +--authorization-mode=Webhook +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.3 Ensure that the --client-ca-file argument is set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to +the location of the client CA file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_AUTHZ_ARGS variable. +`--client-ca-file=` +Based on your system, restart the kubelet service. 
For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.4 Ensure that the --read-only-port argument is set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'0' is equal to '0' AND '--read-only-port' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a +value other than 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. 
+--streaming-connection-idle-timeout=5m +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--streaming-connection-idle-timeout' is not present OR '--streaming-connection-idle-timeout' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.6 Ensure that the --protect-kernel-defaults argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'' is not present +``` + +**Returned Value**: + +```console +apiVersion: v1 clusters: - cluster: server: https://127.0.0.1:6443 certificate-authority: /var/lib/rancher/rke2/agent/server-ca.crt name: local contexts: - context: cluster: local namespace: default user: user name: Default current-context: Default kind: Config preferences: {} users: - name: user user: client-certificate: /var/lib/rancher/rke2/agent/client-kubelet.crt client-key: /var/lib/rancher/rke2/agent/client-kubelet.key +``` + +### 4.2.7 Ensure that the --make-iptables-util-chains argument is set to true (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. 
+If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove the --make-iptables-util-chains argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--make-iptables-util-chains' is not present OR '--make-iptables-util-chains' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.8 Ensure that the --hostname-override argument is not set (Manual) + + +**Result:** warn + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and remove the --hostname-override argument from the +KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +### 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--event-qps' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set tlsCertFile to the location +of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile +to the location of the corresponding private key file. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameters in KUBELET_CERTIFICATE_ARGS variable. +`--tls-cert-file=` +`--tls-private-key-file=` +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--tls-cert-file' is present AND '--tls-private-key-file' is present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to add the line rotateCertificates: true or +remove it altogether to use the default value. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS +variable. +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--rotate-certificates' is not present OR '--rotate-certificates' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) + + +**Result:** pass + +**Remediation:** +Edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. +--feature-gates=RotateKubeletServerCertificate=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'RotateKubeletServerCertificate' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +### 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) + + +**Result:** pass + +**Remediation:** +If using a Kubelet config file, edit the file to set TLSCipherSuites: to +TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +or to a subset of these values. +If using executable arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the --tls-cipher-suites parameter as follows, or to a subset of these values. +--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +**Audit:** + +```bash +/bin/ps -fC kubelet +``` + +**Audit Config:** + +```bash +/bin/cat /var/lib/rancher/rke2/agent/kubelet.kubeconfig +``` + +**Expected Result**: + +```console +'--tls-cipher-suites' is not present +``` + +**Returned Value**: + +```console +UID PID PPID C STIME TTY TIME CMD root 4785 4751 3 14:58 ? 
00:00:21 kubelet --volume-plugin-dir=/var/lib/kubelet/volumeplugins --file-check-frequency=5s --sync-frequency=30s --address=0.0.0.0 --alsologtostderr=false --anonymous-auth=false --authentication-token-webhook=true --authorization-mode=Webhook --cgroup-driver=cgroupfs --client-ca-file=/var/lib/rancher/rke2/agent/client-ca.crt --cloud-provider=external --cluster-dns=10.43.0.10 --cluster-domain=cluster.local --container-runtime-endpoint=unix:///run/k3s/containerd/containerd.sock --container-runtime=remote --containerd=/run/k3s/containerd/containerd.sock --eviction-hard=imagefs.available<5%,nodefs.available<5% --eviction-minimum-reclaim=imagefs.available=10%,nodefs.available=10% --fail-swap-on=false --healthz-bind-address=127.0.0.1 --hostname-override= --kubeconfig=/var/lib/rancher/rke2/agent/kubelet.kubeconfig --log-file-max-size=50 --log-file=/var/lib/rancher/rke2/agent/logs/kubelet.log --logtostderr=false --node-labels=cattle.io/os=linux,rke.cattle.io/machine=7c32844c-359f-45f7-88c5-a7173d27690a --pod-manifest-path=/var/lib/rancher/rke2/agent/pod-manifests --protect-kernel-defaults=true --read-only-port=0 --resolv-conf=/run/systemd/resolve/resolv.conf --serialize-image-pulls=false --stderrthreshold=FATAL --tls-cert-file=/var/lib/rancher/rke2/agent/serving-kubelet.crt --tls-private-key-file=/var/lib/rancher/rke2/agent/serving-kubelet.key +``` + +## 5.1 RBAC and Service Accounts +### 5.1.1 Ensure that the cluster-admin role is only used where required (Manual) + + +**Result:** warn + +**Remediation:** +Identify all clusterrolebindings to the cluster-admin role. Check if they are used and +if they need this role or if they could use a role with fewer privileges. +Where possible, first bind users to a lower privileged role and then remove the +clusterrolebinding to the cluster-admin role : +kubectl delete clusterrolebinding [name] + +### 5.1.2 Minimize access to secrets (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove get, list and watch access to secret objects in the cluster. + +### 5.1.3 Minimize wildcard use in Roles and ClusterRoles (Manual) + + +**Result:** warn + +**Remediation:** +Where possible replace any use of wildcards in clusterroles and roles with specific +objects or actions. + +### 5.1.4 Minimize access to create pods (Manual) + + +**Result:** warn + +**Remediation:** +Where possible, remove create access to pod objects in the cluster. + +### 5.1.5 Ensure that default service accounts are not actively used. (Automated) + + +**Result:** pass + +**Remediation:** +Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. 
+Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +**Audit Script:** `check_for_default_sa.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +count_sa=$(kubectl get serviceaccounts --all-namespaces -o json | jq -r '.items[] | select(.metadata.name=="default") | select((.automountServiceAccountToken == null) or (.automountServiceAccountToken == true))' | jq .metadata.namespace | wc -l) +if [[ ${count_sa} -gt 0 ]]; then + echo "false" + exit +fi + +for ns in $(kubectl get ns --no-headers -o custom-columns=":metadata.name") +do + for result in $(kubectl get clusterrolebinding,rolebinding -n $ns -o json | jq -r '.items[] | select((.subjects[].kind=="ServiceAccount" and .subjects[].name=="default") or (.subjects[].kind=="Group" and .subjects[].name=="system:serviceaccounts"))' | jq -r '"\(.roleRef.kind),\(.roleRef.name)"') + do + read kind name <<<$(IFS=","; echo $result) + resource_count=$(kubectl get $kind $name -n $ns -o json | jq -r '.rules[] | select(.resources[] != "podsecuritypolicies")' | wc -l) + if [[ ${resource_count} -gt 0 ]]; then + echo "false" + exit + fi + done +done + + +echo "true" +``` + +**Audit Execution:** + +```bash +./check_for_default_sa.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +### 5.1.6 Ensure that Service Account Tokens are only mounted where necessary (Manual) + + +**Result:** warn + +**Remediation:** +Modify the definition of pods and service accounts which do not need to mount service +account tokens to disable it. + +## 5.2 Pod Security Policies +### 5.2.1 Minimize the admission of privileged containers (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that +the .spec.privileged field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp global-restricted-psp -o json | jq -r '.spec.runAsUser.rule' +``` + +**Expected Result**: + +```console +'MustRunAsNonRoot' is equal to 'MustRunAsNonRoot' +``` + +**Returned Value**: + +```console +MustRunAsNonRoot +``` + +### 5.2.2 Minimize the admission of containers wishing to share the host process ID namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostPID field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.3 Minimize the admission of containers wishing to share the host IPC namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostIPC field is omitted or set to false. 
+ +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.4 Minimize the admission of containers wishing to share the host network namespace (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.hostNetwork field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.5 Minimize the admission of containers with allowPrivilegeEscalation (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.allowPrivilegeEscalation field is omitted or set to false. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.6 Minimize the admission of root containers (Automated) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of +UIDs not including 0. + +**Audit:** + +```bash +kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.2.7 Minimize the admission of containers with the NET_RAW capability (Manual) + + +**Result:** pass + +**Remediation:** +Create a PSP as described in the Kubernetes documentation, ensuring that the +.spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + +**Audit:** + +```bash +kubectl get psp global-restricted-psp -o json | jq -r .spec.requiredDropCapabilities[] +``` + +**Expected Result**: + +```console +'ALL' is equal to 'ALL' +``` + +**Returned Value**: + +```console +ALL +``` + +### 5.2.8 Minimize the admission of containers with added capabilities (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that allowedCapabilities is not present in PSPs for the cluster unless +it is set to an empty array. + +### 5.2.9 Minimize the admission of containers with capabilities assigned (Manual) + + +**Result:** warn + +**Remediation:** +Review the use of capabilities in applications running on your cluster. Where a namespace +contains applications which do not require any Linux capabilities to operate, consider adding +a PSP which forbids the admission of containers which do not drop all capabilities.
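+
+For the checks in this section that ask you to "create a PSP as described in the Kubernetes documentation", the following is a minimal sketch of a restricted PodSecurityPolicy that would satisfy checks 5.2.1 through 5.2.7. The policy name and the group/volume ranges are illustrative assumptions, not the `global-restricted-psp` policy shipped by RKE2, and a PSP only takes effect once the PodSecurityPolicy admission controller is enabled and a role grants `use` on it.
+
+```bash
+# Hypothetical restricted PSP; field values are examples only, review before applying.
+kubectl apply -f - <<'EOF'
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: example-restricted-psp
+spec:
+  privileged: false                  # 5.2.1
+  hostPID: false                     # 5.2.2
+  hostIPC: false                     # 5.2.3
+  hostNetwork: false                 # 5.2.4
+  allowPrivilegeEscalation: false    # 5.2.5
+  runAsUser:
+    rule: MustRunAsNonRoot           # 5.2.6
+  requiredDropCapabilities:
+    - ALL                            # 5.2.7
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: MustRunAs
+    ranges:
+      - min: 1
+        max: 65535
+  fsGroup:
+    rule: MustRunAs
+    ranges:
+      - min: 1
+        max: 65535
+  volumes:
+    - configMap
+    - emptyDir
+    - projected
+    - secret
+    - downwardAPI
+    - persistentVolumeClaim
+EOF
+```
+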
+ +## 5.3 Network Policies and CNI +### 5.3.1 Ensure that the CNI in use supports Network Policies (Automated) + + +**Result:** pass + +**Remediation:** +If the CNI plugin in use does not support network policies, consideration should be given to +making use of a different plugin, or finding an alternate mechanism for restricting traffic +in the Kubernetes cluster. + +**Audit:** + +```bash +kubectl get pods -n kube-system -l k8s-app=canal -o json | jq .items[] | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' +``` + +**Expected Result**: + +```console +1 is greater than 0 +``` + +**Returned Value**: + +```console +--count=1 +``` + +### 5.3.2 Ensure that all Namespaces have Network Policies defined (Automated) + + +**Result:** pass + +**Remediation:** +Follow the documentation and create NetworkPolicy objects as you need them. + +**Audit Script:** `check_for_rke2_network_policies.sh` + +```bash +#!/bin/bash + +set -eE + +handle_error() { + echo "false" +} + +trap 'handle_error' ERR + +for namespace in kube-system kube-public default; do + policy_count=$(/var/lib/rancher/rke2/bin/kubectl get networkpolicy -n ${namespace} -o json | jq -r '.items | length') + if [ ${policy_count} -eq 0 ]; then + echo "false" + exit + fi +done + +echo "true" + +``` + +**Audit Execution:** + +```bash +./check_for_rke2_network_policies.sh +``` + +**Expected Result**: + +```console +'true' is equal to 'true' +``` + +**Returned Value**: + +```console +true +``` + +## 5.4 Secrets Management +### 5.4.1 Prefer using secrets as files over secrets as environment variables (Manual) + + +**Result:** warn + +**Remediation:** +if possible, rewrite application code to read secrets from mounted secret files, rather than +from environment variables. + +### 5.4.2 Consider external secret storage (Manual) + + +**Result:** warn + +**Remediation:** +Refer to the secrets management options offered by your cloud provider or a third-party +secrets management solution. + +## 5.5 Extensible Admission Control +### 5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller (Manual) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and setup image provenance. + +## 5.6 The v1.5.1 guide skips 5.6 and goes from 5.5 to 5.7. We are including it here merely for explanation. +## 5.7 General Policies +### 5.7.1 Create administrative boundaries between resources using namespaces (Manual) + + +**Result:** warn + +**Remediation:** +Follow the documentation and create namespaces for objects in your deployment as you need +them. + +### 5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual) + + +**Result:** warn + +**Remediation:** +Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you +would need to enable alpha features in the apiserver by passing "--feature- +gates=AllAlpha=true" argument. +Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS +parameter to "--feature-gates=AllAlpha=true" +KUBE_API_ARGS="--feature-gates=AllAlpha=true" +Based on your system, restart the kube-apiserver service. For example: +systemctl restart kube-apiserver.service +Use annotations to enable the docker/default seccomp profile in your pod definitions. 
An +example is as below: +apiVersion: v1 +kind: Pod +metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default +spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + +### 5.7.3 Apply Security Context to Your Pods and Containers (Automated) + + +**Result:** warn + +**Remediation:** +Follow the Kubernetes documentation and apply security contexts to your pods. For a +suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker +Containers. + +### 5.7.4 The default namespace should not be used (Manual) + + +**Result:** warn + +**Remediation:** +Ensure that namespaces are created to allow for appropriate segregation of Kubernetes +resources and that all new resources are created in a specific namespace. + diff --git a/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf new file mode 100644 index 0000000000..35251edd3d Binary files /dev/null and b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-hardening-2.6/Rancher_RKE2_v2-6_CIS_v1-6_Hardening_Guide.pdf differ diff --git a/content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/_index.md b/versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-hardening-2.6/rke2-1.6-hardening-2.6.md similarity index 100% rename from content/rancher/v2.6/en/security/hardening-guides/rke2-1.6-hardening-2.6/_index.md rename to versioned_docs/version-2.6/security/hardening-guides/rke2-1.6-hardening-2.6/rke2-1.6-hardening-2.6.md diff --git a/content/rancher/v2.6/en/security/security-scan/_index.md b/versioned_docs/version-2.6/security/security-scan/security-scan.md similarity index 100% rename from content/rancher/v2.6/en/security/security-scan/_index.md rename to versioned_docs/version-2.6/security/security-scan/security-scan.md diff --git a/content/rancher/v2.6/en/security/_index.md b/versioned_docs/version-2.6/security/security.md similarity index 100% rename from content/rancher/v2.6/en/security/_index.md rename to versioned_docs/version-2.6/security/security.md diff --git a/content/rancher/v2.6/en/security/selinux/_index.md b/versioned_docs/version-2.6/security/selinux/selinux.md similarity index 100% rename from content/rancher/v2.6/en/security/selinux/_index.md rename to versioned_docs/version-2.6/security/selinux/selinux.md diff --git a/content/rancher/v2.6/en/system-tools/_index.md b/versioned_docs/version-2.6/system-tools/system-tools.md similarity index 100% rename from content/rancher/v2.6/en/system-tools/_index.md rename to versioned_docs/version-2.6/system-tools/system-tools.md diff --git a/content/rancher/v2.6/en/troubleshooting/dns/_index.md b/versioned_docs/version-2.6/troubleshooting/dns/dns.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/dns/_index.md rename to versioned_docs/version-2.6/troubleshooting/dns/dns.md diff --git a/content/rancher/v2.6/en/troubleshooting/expired-webhook-certificates/_index.md b/versioned_docs/version-2.6/troubleshooting/expired-webhook-certificates/expired-webhook-certificates.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/expired-webhook-certificates/_index.md rename to versioned_docs/version-2.6/troubleshooting/expired-webhook-certificates/expired-webhook-certificates.md diff --git 
a/content/rancher/v2.6/en/troubleshooting/imported-clusters/_index.md b/versioned_docs/version-2.6/troubleshooting/imported-clusters/imported-clusters.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/imported-clusters/_index.md rename to versioned_docs/version-2.6/troubleshooting/imported-clusters/imported-clusters.md diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-components/controlplane/_index.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/controlplane/controlplane.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-components/controlplane/_index.md rename to versioned_docs/version-2.6/troubleshooting/kubernetes-components/controlplane/controlplane.md diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/etcd/etcd.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/etcd/etcd.md new file mode 100644 index 0000000000..f83d241a08 --- /dev/null +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/etcd/etcd.md @@ -0,0 +1,365 @@ +--- +title: Troubleshooting etcd Nodes +weight: 1 +--- + +This section contains commands and tips for troubleshooting nodes with the `etcd` role. + +This page covers the following topics: + +- [Checking if the etcd Container is Running](#checking-if-the-etcd-container-is-running) +- [etcd Container Logging](#etcd-container-logging) +- [etcd Cluster and Connectivity Checks](#etcd-cluster-and-connectivity-checks) + - [Check etcd Members on all Nodes](#check-etcd-members-on-all-nodes) + - [Check Endpoint Status](#check-endpoint-status) + - [Check Endpoint Health](#check-endpoint-health) + - [Check Connectivity on Port TCP/2379](#check-connectivity-on-port-tcp-2379) + - [Check Connectivity on Port TCP/2380](#check-connectivity-on-port-tcp-2380) +- [etcd Alarms](#etcd-alarms) +- [etcd Space Errors](#etcd-space-errors) +- [Log Level](#log-level) +- [etcd Content](#etcd-content) + - [Watch Streaming Events](#watch-streaming-events) + - [Query etcd Directly](#query-etcd-directly) +- [Replacing Unhealthy etcd Nodes](#replacing-unhealthy-etcd-nodes) + +# Checking if the etcd Container is Running + +The container for etcd should have status **Up**. The duration shown after **Up** is the time the container has been running. + +``` +docker ps -a -f=name=etcd$ +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +605a124503b9 rancher/coreos-etcd:v3.2.18 "/usr/local/bin/et..." 2 hours ago Up 2 hours etcd +``` + +# etcd Container Logging + +The logging of the container can contain information on what the problem could be. + +``` +docker logs etcd +``` +| Log | Explanation | +|-----|------------------| +| `health check for peer xxx could not connect: dial tcp IP:2380: getsockopt: connection refused` | A connection to the address shown on port 2380 cannot be established. Check if the etcd container is running on the host with the address shown. | +| `xxx is starting a new election at term x` | The etcd cluster has lost its quorum and is trying to establish a new leader. This can happen when the majority of the nodes running etcd go down/unreachable. | +| `connection error: desc = "transport: Error while dialing dial tcp 0.0.0.0:2379: i/o timeout"; Reconnecting to {0.0.0.0:2379 0 }` | The host firewall is preventing network communication. 
| +| `rafthttp: request cluster ID mismatch` | The node with the etcd instance logging `rafthttp: request cluster ID mismatch` is trying to join a cluster that has already been formed with another peer. The node should be removed from the cluster, and re-added. | +| `rafthttp: failed to find member` | The cluster state (`/var/lib/etcd`) contains wrong information to join the cluster. The node should be removed from the cluster, the state directory should be cleaned and the node should be re-added. | + +# etcd Cluster and Connectivity Checks + +The address where etcd is listening depends on the address configuration of the host etcd is running on. If an internal address is configured for the host etcd is running on, the endpoint for `etcdctl` needs to be specified explicitly. If any of the commands respond with `Error: context deadline exceeded`, the etcd instance is unhealthy (either quorum is lost or the instance is not correctly joined in the cluster). + +### Check etcd Members on all Nodes + +Output should contain all the nodes with the `etcd` role and the output should be identical on all nodes. + +Command: +``` +docker exec etcd etcdctl member list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list" +``` + +Example output: +``` +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +xxx, started, etcd-xxx, https://IP:2380, https://IP:2379,https://IP:4001 +``` + +### Check Endpoint Status + +The values for `RAFT TERM` should be equal and `RAFT INDEX` should not be too far apart from each other.
+ +Command: +``` +docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table +``` + +Example output: +``` ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| https://IP:2379 | 333ef673fc4add56 | 3.2.18 | 24 MB | false | 72 | 66887 | +| https://IP:2379 | 5feed52d940ce4cf | 3.2.18 | 24 MB | true | 72 | 66887 | +| https://IP:2379 | db6b3bdb559a848d | 3.2.18 | 25 MB | false | 72 | 66887 | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +``` + +### Check Endpoint Health + +Command: +``` +docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint health +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl endpoint health --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") +``` + +Example output: +``` +https://IP:2379 is healthy: successfully committed proposal: took = 2.113189ms +https://IP:2379 is healthy: successfully committed proposal: took = 2.649963ms +https://IP:2379 is healthy: successfully committed proposal: took = 2.451201ms +``` + +### Check Connectivity on Port TCP/2379 + +Command: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5"); do + echo "Validating connection to ${endpoint}/health" + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5"); do + echo "Validating connection to ${endpoint}/health"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/health" +done +``` + 
+Example output: +``` +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +Validating connection to https://IP:2379/health +{"health": "true"} +``` + +### Check Connectivity on Port TCP/2380 + +Command: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +for endpoint in $(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f4"); do + echo "Validating connection to ${endpoint}/version"; + docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl --http1.1 -s -w "\n" --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) "${endpoint}/version" +done +``` + +Example output: +``` +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +Validating connection to https://IP:2380/version +{"etcdserver":"3.2.18","etcdcluster":"3.2.0"} +``` + +# etcd Alarms + +etcd will trigger alarms, for instance when it runs out of space. + +Command: +``` +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output when NOSPACE alarm is triggered: +``` +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +``` + +# etcd Space Errors + +Related error messages are `etcdserver: mvcc: database space exceeded` or `applying raft message exceeded backend quota`. Alarm `NOSPACE` will be triggered. 
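+
+Before working through the resolutions below, it can help to confirm the current database size of the local member. This is a minimal sketch reusing the `endpoint status` command that is shown in more detail later in this section:
+
+```bash
+# DB SIZE shows the current backend size; without ETCDCTL_ENDPOINTS only the local member is reported
+docker exec etcd etcdctl endpoint status --write-out table
+```
+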
+ +Resolutions: + +- [Compact the Keyspace](#compact-the-keyspace) +- [Defrag All etcd Members](#defrag-all-etcd-members) +- [Check Endpoint Status](#check-endpoint-status) +- [Disarm Alarm](#disarm-alarm) + +### Compact the Keyspace + +Command: +``` +rev=$(docker exec etcd etcdctl endpoint status --write-out json | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*') +docker exec etcd etcdctl compact "$rev" +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +rev=$(docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT endpoint status --write-out json | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*'") +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT compact \"$rev\"" +``` + +Example output: +``` +compacted revision xxx +``` + +### Defrag All etcd Members + +Command: +``` +docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl defrag +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl defrag --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','")" +``` + +Example output: +``` +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +Finished defragmenting etcd member[https://IP:2379] +``` + +### Check Endpoint Status + +Command: +``` +docker exec -e ETCDCTL_ENDPOINTS=$(docker exec etcd /bin/sh -c "etcdctl member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") etcd etcdctl endpoint status --write-out table +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl endpoint status --endpoints=$(docker exec etcd /bin/sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT member list | cut -d, -f5 | sed -e 's/ //g' | paste -sd ','") --write-out table" +``` + +Example output: +``` ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +| https://IP:2379 | e973e4419737125 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | 4a509c997b26c206 | 3.2.18 | 553 kB | false | 32 | 2449410 | +| https://IP:2379 | b217e736575e9dd3 | 3.2.18 | 553 kB | true | 32 | 2449410 | ++-----------------+------------------+---------+---------+-----------+-----------+------------+ +``` + +### Disarm Alarm + +After verifying that the DB size went down after compaction and defragmenting, the alarm needs to be disarmed for etcd to allow writes again. 
+ +Command: +``` +docker exec etcd etcdctl alarm list +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm disarm" +docker exec etcd sh -c "etcdctl --endpoints=\$ETCDCTL_ENDPOINT alarm list" +``` + +Example output: +``` +docker exec etcd etcdctl alarm list +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +memberID:x alarm:NOSPACE +docker exec etcd etcdctl alarm disarm +docker exec etcd etcdctl alarm list +``` + +# Log Level + +The log level of etcd can be changed dynamically via the API. You can configure debug logging using the commands below. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"DEBUG"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +To reset the log level back to the default (`INFO`), you can use the following command. + +Command: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINTS)/config/local/log +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker run --net=host -v $(docker inspect kubelet --format '{{ range .Mounts }}{{ if eq .Destination "/etc/kubernetes" }}{{ .Source }}{{ end }}{{ end }}')/ssl:/etc/kubernetes/ssl:ro appropriate/curl -s -XPUT -d '{"Level":"INFO"}' --cacert $(docker exec etcd printenv ETCDCTL_CACERT) --cert $(docker exec etcd printenv ETCDCTL_CERT) --key $(docker exec etcd printenv ETCDCTL_KEY) $(docker exec etcd printenv ETCDCTL_ENDPOINT)/config/local/log +``` + +# etcd Content + +If you want to investigate the contents of your etcd, you can either watch streaming events or you can query etcd directly, see below for examples. 
+ +### Watch Streaming Events + +Command: +``` +docker exec etcd etcdctl watch --prefix /registry +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT watch --prefix /registry +``` + +If you only want to see the affected keys (and not the binary data), you can append `| grep -a ^/registry` to the command to filter for keys only. + +### Query etcd Directly + +Command: +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only +``` + +Command when using etcd version lower than 3.3.x (Kubernetes 1.13.x and lower) and `--internal-address` was specified when adding the node: +``` +docker exec etcd etcdctl --endpoints=\$ETCDCTL_ENDPOINT get /registry --prefix=true --keys-only +``` + +You can process the data to get a summary of count per key, using the command below: + +``` +docker exec etcd etcdctl get /registry --prefix=true --keys-only | grep -v ^$ | awk -F'/' '{ if ($3 ~ /cattle.io/) {h[$3"/"$4]++} else { h[$3]++ }} END { for(k in h) print h[k], k }' | sort -nr +``` + +# Replacing Unhealthy etcd Nodes + +When a node in your etcd cluster becomes unhealthy, the recommended approach is to fix or remove the failed or unhealthy node before adding a new etcd node to the cluster. diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-components/_index.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/kubernetes-components.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-components/_index.md rename to versioned_docs/version-2.6/troubleshooting/kubernetes-components/kubernetes-components.md diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/nginx-proxy/nginx-proxy.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/nginx-proxy/nginx-proxy.md new file mode 100644 index 0000000000..70505e9628 --- /dev/null +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/nginx-proxy/nginx-proxy.md @@ -0,0 +1,69 @@ +--- +title: Troubleshooting nginx-proxy +weight: 3 +--- + +The `nginx-proxy` container is deployed on every node that does not have the `controlplane` role. It provides access to all the nodes with the `controlplane` role by dynamically generating the NGINX configuration based on available nodes with the `controlplane` role. + +# Check if the Container is Running + +The container is called `nginx-proxy` and should have status `Up`. The duration shown after `Up` is the time the container has been running. + +``` +docker ps -a -f=name=nginx-proxy +``` + +Example output: + +``` +docker ps -a -f=name=nginx-proxy +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +c3e933687c0e rancher/rke-tools:v0.1.15 "nginx-proxy CP_HO..." 3 hours ago Up 3 hours nginx-proxy +``` + +# Check Generated NGINX Configuration + +The generated configuration should include the IP addresses of the nodes with the `controlplane` role. 
The configuration can be checked using the following command: + +``` +docker exec nginx-proxy cat /etc/nginx/nginx.conf +``` + +Example output: +``` +error_log stderr notice; + +worker_processes auto; +events { + multi_accept on; + use epoll; + worker_connections 1024; +} + +stream { + upstream kube_apiserver { + + server ip_of_controlplane_node1:6443; + + server ip_of_controlplane_node2:6443; + + } + + server { + listen 6443; + proxy_pass kube_apiserver; + proxy_timeout 30; + proxy_connect_timeout 2s; + + } + +} +``` + +# nginx-proxy Container Logging + +The logging of the containers can contain information on what the problem could be. + +``` +docker logs nginx-proxy +``` \ No newline at end of file diff --git a/versioned_docs/version-2.6/troubleshooting/kubernetes-components/worker-and-generic/worker-and-generic.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/worker-and-generic/worker-and-generic.md new file mode 100644 index 0000000000..28ee4499bb --- /dev/null +++ b/versioned_docs/version-2.6/troubleshooting/kubernetes-components/worker-and-generic/worker-and-generic.md @@ -0,0 +1,35 @@ +--- +title: Troubleshooting Worker Nodes and Generic Components +weight: 4 +--- + +This section applies to every node as it includes components that run on nodes with any role. + +# Check if the Containers are Running + +There are two specific containers launched on nodes with the `worker` role: + +* kubelet +* kube-proxy + +The containers should have status `Up`. The duration shown after `Up` is the time the container has been running. + +``` +docker ps -a -f=name='kubelet|kube-proxy' +``` + +Example output: +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +158d0dcc33a5 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kube-proxy +a30717ecfb55 rancher/hyperkube:v1.11.5-rancher1 "/opt/rke-tools/en..." 3 hours ago Up 3 hours kubelet +``` + +# Container Logging + +The logging of the containers can contain information on what the problem could be. + +``` +docker logs kubelet +docker logs kube-proxy +``` diff --git a/content/rancher/v2.6/en/troubleshooting/kubernetes-resources/_index.md b/versioned_docs/version-2.6/troubleshooting/kubernetes-resources/kubernetes-resources.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/kubernetes-resources/_index.md rename to versioned_docs/version-2.6/troubleshooting/kubernetes-resources/kubernetes-resources.md diff --git a/versioned_docs/version-2.6/troubleshooting/logging/logging.md b/versioned_docs/version-2.6/troubleshooting/logging/logging.md new file mode 100644 index 0000000000..13a7a752ba --- /dev/null +++ b/versioned_docs/version-2.6/troubleshooting/logging/logging.md @@ -0,0 +1,48 @@ +--- +title: Logging +weight: 110 +--- + +The following log levels are used in Rancher: + +| Name | Description | +|---------|-------------| +| `info` | Logs informational messages. This is the default log level. | +| `debug` | Logs more detailed messages that can be used to debug. | +| `trace` | Logs very detailed messages on internal functions. This is very verbose and can contain sensitive information. 
+
+### How to configure a log level
+
+* Kubernetes Install
+  * Configure debug log level
+```
+$ export KUBECONFIG=./kube_config_cluster.yml
+$ kubectl -n cattle-system get pods -l app=rancher --no-headers -o custom-columns=name:.metadata.name | while read rancherpod; do kubectl -n cattle-system exec $rancherpod -c rancher -- loglevel --set debug; done
+OK
+OK
+OK
+$ kubectl -n cattle-system logs -l app=rancher -c rancher
+```
+
+  * Configure info log level
+```
+$ export KUBECONFIG=./kube_config_cluster.yml
+$ kubectl -n cattle-system get pods -l app=rancher --no-headers -o custom-columns=name:.metadata.name | while read rancherpod; do kubectl -n cattle-system exec $rancherpod -c rancher -- loglevel --set info; done
+OK
+OK
+OK
+```
+
+* Docker Install
+  * Configure debug log level
+```
+$ docker exec -ti <container_id> loglevel --set debug
+OK
+$ docker logs -f <container_id>
+```
+
+  * Configure info log level
+```
+$ docker exec -ti <container_id> loglevel --set info
+OK
+```
diff --git a/content/rancher/v2.6/en/troubleshooting/networking/_index.md b/versioned_docs/version-2.6/troubleshooting/networking/networking.md
similarity index 100%
rename from content/rancher/v2.6/en/troubleshooting/networking/_index.md
rename to versioned_docs/version-2.6/troubleshooting/networking/networking.md
diff --git a/versioned_docs/version-2.6/troubleshooting/rancherha/rancherha.md b/versioned_docs/version-2.6/troubleshooting/rancherha/rancherha.md
new file mode 100644
index 0000000000..d724c778cd
--- /dev/null
+++ b/versioned_docs/version-2.6/troubleshooting/rancherha/rancherha.md
@@ -0,0 +1,80 @@
+---
+title: Rancher HA
+weight: 104
+---
+
+The commands and steps listed on this page can be used to check your Rancher Kubernetes installation.
+
+Make sure you configured the correct kubeconfig (for example, `export KUBECONFIG=$PWD/kube_config_cluster.yml`).
+
+### Check Rancher pods
+
+Rancher pods are deployed as a Deployment in the `cattle-system` namespace.
+
+Check if the pods are running on all nodes:
+
+```
+kubectl -n cattle-system get pods -l app=rancher -o wide
+```
+
+Example output:
+
+```
+NAME                       READY   STATUS    RESTARTS   AGE   IP        NODE
+rancher-7dbd7875f7-n6t5t   1/1     Running   0          8m    x.x.x.x   x.x.x.x
+rancher-7dbd7875f7-qbj5k   1/1     Running   0          8m    x.x.x.x   x.x.x.x
+rancher-7dbd7875f7-qw7wb   1/1     Running   0          8m    x.x.x.x   x.x.x.x
+```
+
+If a pod is unable to run (the status is not **Running**, the ready status is not `1/1`, or you see a high restart count), check the pod details, logs, and namespace events.
+
+#### Pod details
+
+```
+kubectl -n cattle-system describe pods -l app=rancher
+```
+
+#### Pod container logs
+
+```
+kubectl -n cattle-system logs -l app=rancher
+```
+
+#### Namespace events
+
+```
+kubectl -n cattle-system get events
+```
+
+### Check ingress
+
+The ingress should have the correct `HOSTS` (showing the configured FQDN) and `ADDRESS` (the host address(es) it will be routed to).
+
+```
+kubectl -n cattle-system get ingress
+```
+
+Example output:
+
+```
+NAME      HOSTS                    ADDRESS                   PORTS     AGE
+rancher   rancher.yourdomain.com   x.x.x.x,x.x.x.x,x.x.x.x   80, 443   2m
+```
+
+### Check ingress controller logs
+
+If accessing your configured Rancher FQDN does not show you the UI, check the ingress controller logs to see what happens when you try to access Rancher:
+
+```
+kubectl -n ingress-nginx logs -l app=ingress-nginx
+```
+
+### Leader election
+
+The leader is determined by a leader election process. After the leader has been determined, the leader (`holderIdentity`) is saved in the `cattle-controllers` ConfigMap (in this example, `rancher-7dbd7875f7-qbj5k`).
+ +``` +kubectl -n kube-system get configmap cattle-controllers -o jsonpath='{.metadata.annotations.control-plane\.alpha\.kubernetes\.io/leader}' +{"holderIdentity":"rancher-7dbd7875f7-qbj5k","leaseDurationSeconds":45,"acquireTime":"2019-04-04T11:53:12Z","renewTime":"2019-04-04T12:24:08Z","leaderTransitions":0} +``` + diff --git a/content/rancher/v2.6/en/troubleshooting/_index.md b/versioned_docs/version-2.6/troubleshooting/troubleshooting.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/_index.md rename to versioned_docs/version-2.6/troubleshooting/troubleshooting.md diff --git a/content/rancher/v2.6/en/troubleshooting/userid-tracking-in-audit-logs/_index.md b/versioned_docs/version-2.6/troubleshooting/userid-tracking-in-audit-logs/userid-tracking-in-audit-logs.md similarity index 100% rename from content/rancher/v2.6/en/troubleshooting/userid-tracking-in-audit-logs/_index.md rename to versioned_docs/version-2.6/troubleshooting/userid-tracking-in-audit-logs/userid-tracking-in-audit-logs.md diff --git a/content/rancher/v2.6/en/user-settings/api-keys/_index.md b/versioned_docs/version-2.6/user-settings/api-keys/api-keys.md similarity index 100% rename from content/rancher/v2.6/en/user-settings/api-keys/_index.md rename to versioned_docs/version-2.6/user-settings/api-keys/api-keys.md diff --git a/content/rancher/v2.6/en/user-settings/cloud-credentials/_index.md b/versioned_docs/version-2.6/user-settings/cloud-credentials/cloud-credentials.md similarity index 100% rename from content/rancher/v2.6/en/user-settings/cloud-credentials/_index.md rename to versioned_docs/version-2.6/user-settings/cloud-credentials/cloud-credentials.md diff --git a/content/rancher/v2.6/en/user-settings/node-templates/_index.md b/versioned_docs/version-2.6/user-settings/node-templates/node-templates.md similarity index 100% rename from content/rancher/v2.6/en/user-settings/node-templates/_index.md rename to versioned_docs/version-2.6/user-settings/node-templates/node-templates.md diff --git a/versioned_docs/version-2.6/user-settings/preferences/preferences.md b/versioned_docs/version-2.6/user-settings/preferences/preferences.md new file mode 100644 index 0000000000..fc2fe8c1f2 --- /dev/null +++ b/versioned_docs/version-2.6/user-settings/preferences/preferences.md @@ -0,0 +1,18 @@ +--- +title: User Preferences +weight: 7012 +--- + +Each user can choose preferences to personalize their Rancher experience. To change preference settings, open the **User Settings** menu and then select **Preferences**. + +## Theme + +Choose your background color for the Rancher UI. If you choose **Auto**, the background color changes from light to dark at 6 PM, and then changes back at 6 AM. + +## My Account + +This section displays the **Name** (your display name) and **Username** (your login) used for your session. To change your login's current password, click the **Change Password** button. + +## Table Row per Page + +On pages that display system objects like clusters or deployments in a table, you can set the number of objects that display on the page before you must paginate. The default setting is `50`. 
diff --git a/content/rancher/v2.6/en/user-settings/_index.md b/versioned_docs/version-2.6/user-settings/user-settings.md similarity index 100% rename from content/rancher/v2.6/en/user-settings/_index.md rename to versioned_docs/version-2.6/user-settings/user-settings.md diff --git a/content/rancher/v2.6/en/virtualization-admin/_index.md b/versioned_docs/version-2.6/virtualization-admin/virtualization-admin.md similarity index 100% rename from content/rancher/v2.6/en/virtualization-admin/_index.md rename to versioned_docs/version-2.6/virtualization-admin/virtualization-admin.md diff --git a/versioned_sidebars/version-2.0-2.4-sidebars.json b/versioned_sidebars/version-2.0-2.4-sidebars.json new file mode 100644 index 0000000000..421e23162c --- /dev/null +++ b/versioned_sidebars/version-2.0-2.4-sidebars.json @@ -0,0 +1,1323 @@ +{ + "tutorialSidebar": [ + "rancher-manager", + { + "type": "category", + "label": "Getting Started", + "link": { + "type": "doc", + "id": "getting-started" + }, + "items": [ + { + "type": "category", + "label": "Introduction", + "link": { + "type": "doc", + "id": "pages-for-subheaders/introduction" + }, + "items": [ + "getting-started/introduction/overview", + "getting-started/introduction/what-are-divio-docs" + ] + }, + { + "type": "category", + "label": "Quick Start Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/quick-start-guides" + }, + "items": [ + "getting-started/quick-start-guides/cli", + { + "type": "category", + "label": "Deploy Rancher Manager", + "link": { + "type": "doc", + "id": "pages-for-subheaders/deploy-rancher-manager" + }, + "items": [ + "getting-started/quick-start-guides/deploy-rancher-manager/aws", + "getting-started/quick-start-guides/deploy-rancher-manager/azure", + "getting-started/quick-start-guides/deploy-rancher-manager/digitalocean", + "getting-started/quick-start-guides/deploy-rancher-manager/gcp", + "getting-started/quick-start-guides/deploy-rancher-manager/vagrant", + "getting-started/quick-start-guides/deploy-rancher-manager/helm-cli" + ] + }, + { + "type": "category", + "label": "Deploy Rancher Workloads", + "link": { + "type": "doc", + "id": "pages-for-subheaders/deploy-rancher-workloads" + }, + "items": [ + "getting-started/quick-start-guides/deploy-workloads/workload-ingress", + "getting-started/quick-start-guides/deploy-workloads/nodeports" + ] + } + ] + }, + { + "type": "category", + "label": "Installation and Upgrade", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-and-upgrade" + }, + "items": [ + { + "type": "category", + "label": "Installation Requirements", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-requirements" + }, + "items": [ + "getting-started/installation-and-upgrade/installation-requirements/install-docker", + "getting-started/installation-and-upgrade/installation-requirements/port-requirements" + ] + }, + { + "type": "category", + "label": "Install/Upgrade on a Kubernetes Cluster", + "link": { + "type": "doc", + "id": "pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster" + }, + "items": [ + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting", + { + "type": "category", + "label": "Upgrades", + "link": { + "type": "doc", + "id": "pages-for-subheaders/upgrades" + }, + "items": [ + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/migrating-from-rke-add-on", 
+ "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/namespace-migration", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades/helm2" + ] + } + ] + }, + { + "type": "category", + "label": "Other Installation Methods", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-installation-methods" + }, + "items": [ + { + "type": "category", + "label": "Air-Gapped Helm CLI Install", + "link": { + "type": "doc", + "id": "pages-for-subheaders/air-gapped-helm-cli-install" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha" + ] + }, + { + "type": "category", + "label": "Rancher on a Single Node with Docker", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-on-a-single-node-with-docker" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting" + ] + }, + { + "type": "category", + "label": "Rancher Behind an HTTP Proxy", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-behind-an-http-proxy" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher" + ] + } + ] + }, + { + "type": "category", + "label": "Resources", + "link": { + "type": "doc", + "id": "pages-for-subheaders/resources" + }, + "items": [ + "getting-started/installation-and-upgrade/resources/choose-a-rancher-version", + "getting-started/installation-and-upgrade/resources/helm-version-requirements", + "getting-started/installation-and-upgrade/resources/add-tls-secrets", + "getting-started/installation-and-upgrade/resources/custom-ca-root-certificates", + "getting-started/installation-and-upgrade/resources/upgrade-cert-manager", + "getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2", + "getting-started/installation-and-upgrade/resources/update-rancher-certificate", + "getting-started/installation-and-upgrade/resources/local-system-charts" + ] + }, + "getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes", + "getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher", + { + "type": "category", + "label": "Advanced Options", + "link": { + "type": "doc", + "id": "pages-for-subheaders/advanced-options" + }, + "items": [ + { + "type": "category", + "label": "Enable Experimental Features", + "link": { + "type": "doc", + "id": "pages-for-subheaders/enable-experimental-features" + }, + "items": [ + 
"getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features" + ] + }, + { + "type": "category", + "label": "Advanced Use Cases", + "items": [ + { + "type": "category", + "label": "Kubernetes Installation Using Helm 2", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2" + }, + "items": [ + { + "type": "category", + "label": "Create Nodes and Load Balancer", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-create-nodes-lb" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nginx", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/create-nodes-lb/nlb" + ] + }, + { + "type": "category", + "label": "Install Kubernetes with RKE", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-kubernetes-rke" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/kubernetes-rke/troubleshooting" + ] + }, + { + "type": "category", + "label": "Initialize Helm: Install the Tiller Service", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-helm-init" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-init/troubleshooting" + ] + }, + { + "type": "category", + "label": "Install Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm-rancher" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/tls-secrets", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/chart-options", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/helm-rancher/troubleshooting" + ] + }, + { + "type": "category", + "label": "RKE Add-On Install", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-rke-add-on" + }, + "items": [ + { + "type": "category", + "label": "Kubernetes Install with External Load Balancer (TCP/Layer 4)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-rke-add-on-layer-4-lb" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-4-lb/nlb" + ] + }, + { + "type": "category", + "label": "Kubernetes Install with External Load Balancer (HTTPS/Layer 7)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-rke-add-on-layer-7-lb" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/alb", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/layer-7-lb/nginx" + ] + }, + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/proxy", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/api-auditing", + { + "type": "category", + "label": "Troubleshooting HA RKE Add-On Install", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm2-rke-add-on-troubleshooting" + }, + "items": [ + 
"getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/generic-troubleshooting", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/job-complete-status", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/helm2/rke-add-on/troubleshooting/default-backend" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Installing Rancher in an Air Gapped Environment with Helm 2", + "link": { + "type": "doc", + "id": "pages-for-subheaders/air-gap-helm2" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/prepare-nodes", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/populate-private-registry", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/launch-kubernetes", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/air-gap-helm2/install-rancher" + ] + }, + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log", + { + "type": "category", + "label": "cluster.yml Templates for RKE add-on installs", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-yml" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-certificate-recognizedca", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-certificate", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/cluster-yml-templates/node-externalssl-recognizedca" + ] + }, + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer", + { + "type": "category", + "label": "RKE Add-On", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rke-add-on" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-4-lb", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/rke-add-on/layer-7-lb" + ] + } + ] + } + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "How-to Guides", + "link": { + "type": "doc", + "id": "how-to-guides" + }, + "items": [ + { + "type": "category", + "label": "New User Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/new-user-guides" + }, + "items": [ + { + "type": "category", + "label": "Kubernetes Cluster Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-cluster-setup" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher" + ] + }, + { + "type": "category", + "label": "Infrastructure Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/infrastructure-setup" + }, + "items": [ + "how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster", 
+ "how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2", + "how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds", + "how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer", + "how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer" + ] + }, + { + "type": "category", + "label": "Kubernetes Clusters in Rancher Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-clusters-in-rancher-setup" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters", + { + "type": "category", + "label": "Checklist for Production-Ready Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/checklist-for-production-ready-clusters" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes" + ] + }, + { + "type": "category", + "label": "Set Up Clusters from Hosted Kubernetes Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei" + ] + }, + { + "type": "category", + "label": "Launch Kubernetes with Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/launch-kubernetes-with-rancher" + }, + "items": [ + { + "type": "category", + "label": "Use New Nodes in an Infra Provider", + "link": { + "type": "doc", + "id": "pages-for-subheaders/use-new-nodes-in-an-infra-provider" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster", + { + "type": "category", + "label": "vSphere", + "link": { + "type": "doc", + "id": "pages-for-subheaders/vsphere" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials" + ] + } + ] + }, + { + "type": "category", + "label": "Use Windows Clusters", + "link": { + "type": "doc", + "id": 
"pages-for-subheaders/use-windows-clusters" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/v2.1-v2.2" + ] + }, + { + "type": "category", + "label": "Set Up Cloud Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/set-up-cloud-providers" + }, + "items": [ + { + "type": "category", + "label": "Other Cloud Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-cloud-providers" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/vsphere" + ] + } + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents" + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/import-existing-clusters" + ] + }, + { + "type": "category", + "label": "Kubernetes Resources Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-resources-setup" + }, + "items": [ + { + "type": "category", + "label": "Workloads and Pods", + "link": { + "type": "doc", + "id": "pages-for-subheaders/workloads-and-pods" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar" + ] + }, + { + "type": "category", + "label": "Horizontal Pod Autoscaler", + "link": { + "type": "doc", + "id": "pages-for-subheaders/horizontal-pod-autoscaler" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/hpa-for-rancher-before-2.0.7", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl" + ] + }, + { + "type": "category", + "label": "Load Balancer and Ingress Controller", + "link": { + "type": "doc", + "id": "pages-for-subheaders/load-balancer-and-ingress-controller" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing", + 
"how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses" + ] + }, + "how-to-guides/new-user-guides/kubernetes-resources-setup/create-services", + "how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication", + "how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps", + "how-to-guides/new-user-guides/kubernetes-resources-setup/secrets", + "how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries" + ] + }, + { + "type": "category", + "label": "Helm Charts in Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm-charts-in-rancher" + }, + "items": [ + "how-to-guides/new-user-guides/helm-charts-in-rancher/built-in", + "how-to-guides/new-user-guides/helm-charts-in-rancher/adding-catalogs", + "how-to-guides/new-user-guides/helm-charts-in-rancher/catalog-config", + "how-to-guides/new-user-guides/helm-charts-in-rancher/creating-apps", + "how-to-guides/new-user-guides/helm-charts-in-rancher/managing-apps", + "how-to-guides/new-user-guides/helm-charts-in-rancher/multi-cluster-apps", + "how-to-guides/new-user-guides/helm-charts-in-rancher/launching-apps", + "how-to-guides/new-user-guides/helm-charts-in-rancher/tutorial", + "how-to-guides/new-user-guides/helm-charts-in-rancher/globaldns" + ] + }, + "how-to-guides/new-user-guides/deploy-apps-across-clusters", + { + "type": "category", + "label": "Backup, Restore, and Disaster Recovery", + "link": { + "type": "doc", + "id": "pages-for-subheaders/backup-restore-and-disaster-recovery" + }, + "items": [ + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-k3s-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-k3s-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup/roll-back-to-v2.0-v2.1" + ] + }, + { + "type": "category", + "label": "Migrating from v1.6 to v2.x", + "link": { + "type": "doc", + "id": "pages-for-subheaders/migrate-from-v1.6-v2.x" + }, + "items": [ + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/kubernetes-introduction", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/install-and-configure-rancher", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/migrate-services", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/expose-services", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/monitor-apps", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/schedule-services", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/discover-services", + "how-to-guides/new-user-guides/migrate-from-v1.6-v2.x/load-balancing" + ] + } + ] + }, + { + "type": "category", + "label": "Advanced User Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/advanced-user-guides" + }, + "items": [ + { + "type": "category", + "label": "Authentication, Permissions, and Global Configuration", + "link": { + "type": "doc", + "id": 
"pages-for-subheaders/authentication-permissions-and-global-configuration" + }, + "items": [ + { + "type": "category", + "label": "About Authentication", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-authentication" + }, + "items": [ + { + "type": "category", + "label": "Authentication Config", + "link": { + "type": "doc", + "id": "pages-for-subheaders/authentication-config" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml" + ] + }, + { + "type": "category", + "label": "Configure Microsoft AD Federation Service (SAML)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configure-microsoft-ad-federation-service-saml" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs" + ] + }, + { + "type": "category", + "label": "Configure Shibboleth (SAML)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configure-shibboleth-saml" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions" + ] + } + ] + }, + { + "type": "category", + "label": "Manage Role-Based Access Control (RBAC)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-role-based-access-control-rbac" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles", + 
"how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles" + ] + }, + { + "type": "category", + "label": "About Provisioning Drivers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-provisioning-drivers" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers" + ] + }, + { + "type": "category", + "label": "About RKE1 Templates", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-rke1-templates" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases" + ] + }, + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry" + ] + }, + { + "type": "category", + "label": "Manage Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-clusters" + }, + "items": [ + { + "type": "category", + "label": "Access Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/access-clusters" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig", + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint", + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters" + ] + }, + "how-to-guides/advanced-user-guides/manage-clusters/backing-up-etcd", + "how-to-guides/advanced-user-guides/manage-clusters/restoring-etcd", + { + "type": "category", + "label": "Install Cluster Autoscaler", + "link": { + "type": "doc", + "id": "pages-for-subheaders/install-cluster-autoscaler" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups" + ] + }, + { + "type": "category", + "label": "Create Kubernetes Persistent Storage", + "link": { + "type": "doc", + "id": "pages-for-subheaders/create-kubernetes-persistent-storage" + }, + "items": [ + { + "type": "category", + "label": "Manage Persistent Storage", + "link": { + 
"type": "doc", + "id": "pages-for-subheaders/manage-persistent-storage" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes" + ] + }, + { + "type": "category", + "label": "Provisioning Storage Examples", + "link": { + "type": "doc", + "id": "pages-for-subheaders/provisioning-storage-examples" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage" + ] + } + ] + }, + "how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces", + "how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration", + "how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates", + "how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools", + "how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes", + "how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy", + "how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies" + ] + }, + { + "type": "category", + "label": "Manage Projects", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-projects" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects", + "how-to-guides/advanced-user-guides/manage-projects/manage-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines", + "how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies", + { + "type": "category", + "label": "Manage Project Resource Quotas", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-project-resource-quotas" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types" + ] + } + ] + }, + { + "type": "category", + "label": "Istio Setup Guide", + "link": { + "type": "doc", + "id": "pages-for-subheaders/istio-setup-guide" + }, + "items": [ + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster", + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster-with-psp", + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace", 
+ "how-to-guides/advanced-user-guides/istio-setup-guide/node-selectors", + "how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management", + "how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic" + ] + }, + { + "type": "category", + "label": "CIS Scan Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cis-scan-guides" + }, + "items": [ + "how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule", + "how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests", + "how-to-guides/advanced-user-guides/cis-scan-guides/view-reports", + "how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule", + "how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Reference Guides", + "link": { + "type": "doc", + "id": "reference-guides" + }, + "items": [ + { + "type": "category", + "label": "Best Practices", + "link": { + "type": "doc", + "id": "pages-for-subheaders/best-practices" + }, + "items": [ + "reference-guides/best-practices/containers", + "reference-guides/best-practices/deployment-strategies", + "reference-guides/best-practices/deployment-types", + "reference-guides/best-practices/management" + ] + }, + { + "type": "category", + "label": "Rancher Manager Architecture", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-manager-architecture" + }, + "items": [ + "reference-guides/rancher-manager-architecture/rancher-server-and-components", + "reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters", + "reference-guides/rancher-manager-architecture/architecture-recommendations" + ] + }, + { + "type": "category", + "label": "Cluster Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-configuration" + }, + "items": [ + { + "type": "category", + "label": "Rancher Server Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-server-configuration" + }, + "items": [ + "reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration", + { + "type": "category", + "label": "Use Existing Nodes", + "link": { + "type": "doc", + "id": "pages-for-subheaders/use-existing-nodes" + }, + "items": [ + "reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options" + ] + } + ] + }, + { + "type": "category", + "label": "Downstream Cluster Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/downstream-cluster-configuration" + }, + "items": [ + { + "type": "category", + "label": "Node Template Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/node-template-configuration" + }, + "items": [ + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2", + 
"reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure", + { + "type": "category", + "label": "Creating a vSphere Cluster", + "link": { + "type": "doc", + "id": "pages-for-subheaders/creating-a-vsphere-cluster" + }, + "items": [ + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.3", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.3.0", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.2.0", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/v2.0.4", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere/prior-to-v2.0.4" + ] + } + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Single-Node Rancher in Docker", + "link": { + "type": "doc", + "id": "pages-for-subheaders/single-node-rancher-in-docker" + }, + "items": [ + "reference-guides/single-node-rancher-in-docker/http-proxy-configuration", + "reference-guides/single-node-rancher-in-docker/advanced-options" + ] + }, + { + "type": "category", + "label": "Installation References", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-references" + }, + "items": [ + "reference-guides/installation-references/helm-chart-options", + "reference-guides/installation-references/tls-settings", + "reference-guides/installation-references/feature-flags" + ] + }, + "reference-guides/installation-references/amazon-eks-permissions", + { + "type": "category", + "label": "Configure OpenLDAP", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configure-openldap" + }, + "items": [ + "reference-guides/configure-openldap/openldap-config-reference" + ] + }, + "reference-guides/kubernetes-concepts", + { + "type": "category", + "label": "User Settings", + "link": { + "type": "doc", + "id": "pages-for-subheaders/user-settings" + }, + "items": [ + "reference-guides/user-settings/api-keys", + "reference-guides/user-settings/manage-node-templates", + "reference-guides/user-settings/manage-cloud-credentials", + "reference-guides/user-settings/user-preferences" + ] + }, + { + "type": "category", + "label": "CLI with Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cli-with-rancher" + }, + "items": [ + "reference-guides/cli-with-rancher/rancher-cli", + "reference-guides/cli-with-rancher/kubectl-utility" + ] + }, + { + "type": "category", + "label": "About the API", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-the-api" + }, + "items": [ + "reference-guides/about-the-api/api-tokens" + ] + }, + "reference-guides/rancher-cluster-tools", + { + "type": "category", + "label": "Project Tools", + "link": { + "type": "doc", + "id": "pages-for-subheaders/project-tools" + }, + "items": [ + "reference-guides/rancher-project-tools/project-alerts", + "reference-guides/rancher-project-tools/project-logging" + ] + }, + "reference-guides/system-tools", + "reference-guides/rke1-template-example-yaml", + { + "type": "category", + "label": "Pipelines", + "link": { + "type": "doc", + "id": "pages-for-subheaders/pipelines" + }, + "items": [ + "reference-guides/pipelines/concepts", + "reference-guides/pipelines/pipeline-configuration", + 
"reference-guides/pipelines/configure-persistent-data", + "reference-guides/pipelines/example-repositories", + "reference-guides/pipelines/example-yaml", + "reference-guides/pipelines/v2.0.x" + ] + }, + { + "type": "category", + "label": "Rancher Security", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-security" + }, + "items": [ + { + "type": "category", + "label": "Rancher v2.1 Hardening Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-v2.1-hardening-guides" + }, + "items": [ + "reference-guides/rancher-security/rancher-v2.1-hardening-guides/self-assessment-guide-with-cis-v1.3-benchmark", + "reference-guides/rancher-security/rancher-v2.1-hardening-guides/hardening-guide-with-cis-v1.3-benchmark" + ] + }, + { + "type": "category", + "label": "Rancher v2.2 Hardening Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-v2.2-hardening-guides" + }, + "items": [ + "reference-guides/rancher-security/rancher-v2.2-hardening-guides/self-assessment-guide-with-cis-v1.4-benchmark", + "reference-guides/rancher-security/rancher-v2.2-hardening-guides/hardening-guide-with-cis-v1.4-benchmark" + ] + }, + { + "type": "category", + "label": "Rancher v2.3 Hardening Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-v2.3-hardening-guides" + }, + "items": [ + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-self-assessment-guide-with-cis-v1.4.1-benchmark", + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.0-hardening-guide-with-cis-v1.4.1-benchmark", + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-self-assessment-guide-with-cis-v1.4.1-benchmark", + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.3-hardening-guide-with-cis-v1.4.1-benchmark", + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-self-assessment-guide-with-cis-v1.5-benchmark", + "reference-guides/rancher-security/rancher-v2.3-hardening-guides/rancher-v2.3.5-hardening-guide-with-cis-v1.5-benchmark" + ] + }, + { + "type": "category", + "label": "Rancher v2.4 Hardening Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-v2.4-hardening-guides" + }, + "items": [ + "reference-guides/rancher-security/rancher-v2.4-hardening-guides/self-assessment-guide-with-cis-v1.5-benchmark", + "reference-guides/rancher-security/rancher-v2.4-hardening-guides/hardening-guide-with-cis-v1.5-benchmark" + ] + }, + "reference-guides/rancher-security/security-advisories-and-cves" + ] + }, + "reference-guides/v1.6-migration/migration-tools-cli-reference" + ] + }, + { + "type": "category", + "label": "Explanations", + "link": { + "type": "doc", + "id": "explanations" + }, + "items": [ + { + "type": "category", + "label": "Integrations in Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/integrations-in-rancher" + }, + "items": [ + { + "type": "category", + "label": "CIS Scans", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cis-scans" + }, + "items": [ + "explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests" + ] + }, + { + "type": "category", + "label": "Istio", + "link": { + "type": "doc", + "id": "pages-for-subheaders/istio" + }, + "items": [ + "explanations/integrations-in-rancher/istio/cpu-and-memory-allocations", + "explanations/integrations-in-rancher/istio/rbac-for-istio", + "explanations/integrations-in-rancher/istio/disable-istio", + 
"explanations/integrations-in-rancher/istio/release-notes" + ] + }, + { + "type": "category", + "label": "Logging", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-logging" + }, + "items": [ + "explanations/integrations-in-rancher/cluster-logging/elasticsearch", + "explanations/integrations-in-rancher/cluster-logging/fluentd", + "explanations/integrations-in-rancher/cluster-logging/kafka", + "explanations/integrations-in-rancher/cluster-logging/splunk", + "explanations/integrations-in-rancher/cluster-logging/syslog" + ] + }, + { + "type": "category", + "label": "Alerting", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-alerts" + }, + "items": [ + "explanations/integrations-in-rancher/cluster-alerts/default-alerts" + ] + }, + { + "type": "category", + "label": "Monitoring", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-monitoring" + }, + "items": [ + "explanations/integrations-in-rancher/cluster-monitoring/cluster-metrics", + "explanations/integrations-in-rancher/cluster-monitoring/custom-metrics", + "explanations/integrations-in-rancher/cluster-monitoring/expression", + "explanations/integrations-in-rancher/cluster-monitoring/project-monitoring", + "explanations/integrations-in-rancher/cluster-monitoring/prometheus", + "explanations/integrations-in-rancher/cluster-monitoring/viewing-metrics" + ] + }, + "explanations/integrations-in-rancher/opa-gatekeeper", + "explanations/integrations-in-rancher/notifiers" + ] + } + ] + }, + { + "type": "category", + "label": "FAQ", + "link": { + "type": "doc", + "id": "faq" + }, + "items": [ + "faq/install-and-configure-kubectl", + "faq/technical-items", + "faq/security", + "faq/telemetry", + "faq/upgrades-to-2x", + "faq/networking", + "faq/container-network-interface-providers", + "faq/rancher-is-no-longer-needed" + ] + }, + { + "type": "category", + "label": "Troubleshooting", + "link": { + "type": "doc", + "id": "troubleshooting" + }, + "items": [ + { + "type": "category", + "label": "Kubernetes Components", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-components" + }, + "items": [ + "troubleshooting/kubernetes-components/troubleshooting-etcd-nodes", + "troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes", + "troubleshooting/kubernetes-components/troubleshooting-nginx-proxy", + "troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components" + ] + }, + { + "type": "category", + "label": "Other Troubleshooting Tips", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-troubleshooting-tips" + }, + "items": [ + "troubleshooting/other-troubleshooting-tips/kubernetes-resources", + "troubleshooting/other-troubleshooting-tips/networking", + "troubleshooting/other-troubleshooting-tips/dns", + "troubleshooting/other-troubleshooting-tips/rancher-ha", + "troubleshooting/other-troubleshooting-tips/registered-clusters", + "troubleshooting/other-troubleshooting-tips/logging" + ] + } + ] + }, + "contribute-to-rancher" + ] +} diff --git a/versioned_sidebars/version-2.5-sidebars.json b/versioned_sidebars/version-2.5-sidebars.json new file mode 100644 index 0000000000..7b0d0aaa7c --- /dev/null +++ b/versioned_sidebars/version-2.5-sidebars.json @@ -0,0 +1,1269 @@ +{ + "tutorialSidebar": [ + "rancher-manager", + { + "type": "category", + "label": "Getting Started", + "link": { + "type": "doc", + "id": "getting-started" + }, + "items": [ + { + "type": "category", + "label": "Introduction", + "link": { + "type": "doc", + "id": 
"pages-for-subheaders/introduction" + }, + "items": [ + "getting-started/introduction/overview", + "getting-started/introduction/what-are-divio-docs" + ] + }, + { + "type": "category", + "label": "Quick Start Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/quick-start-guides" + }, + "items": [ + { + "type": "category", + "label": "Deploy Rancher Manager", + "link": { + "type": "doc", + "id": "pages-for-subheaders/deploy-rancher-manager" + }, + "items": [ + "getting-started/quick-start-guides/deploy-rancher-manager/aws", + "getting-started/quick-start-guides/deploy-rancher-manager/azure", + "getting-started/quick-start-guides/deploy-rancher-manager/digitalocean", + "getting-started/quick-start-guides/deploy-rancher-manager/gcp", + "getting-started/quick-start-guides/deploy-rancher-manager/vagrant", + "getting-started/quick-start-guides/deploy-rancher-manager/helm-cli" + ] + }, + { + "type": "category", + "label": "Deploy Rancher Workloads", + "link": { + "type": "doc", + "id": "pages-for-subheaders/deploy-rancher-workloads" + }, + "items": [ + "getting-started/quick-start-guides/deploy-workloads/workload-ingress", + "getting-started/quick-start-guides/deploy-workloads/nodeports" + ] + } + ] + }, + { + "type": "category", + "label": "Installation and Upgrade", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-and-upgrade" + }, + "items": [ + { + "type": "category", + "label": "Installation Requirements", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-requirements" + }, + "items": [ + "getting-started/installation-and-upgrade/installation-requirements/install-docker", + "getting-started/installation-and-upgrade/installation-requirements/port-requirements" + ] + }, + { + "type": "category", + "label": "Install/Upgrade on a Kubernetes Cluster", + "link": { + "type": "doc", + "id": "pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster" + }, + "items": [ + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rollbacks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/upgrades", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/air-gapped-upgrades", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke", + "getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/troubleshooting" + ] + }, + { + "type": "category", + "label": "Other Installation Methods", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-installation-methods" + }, + "items": [ + { + "type": "category", + "label": "Air-Gapped Helm CLI Install", + "link": { + "type": "doc", + "id": "pages-for-subheaders/air-gapped-helm-cli-install" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/infrastructure-private-registry", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/install-rancher-ha", + 
"getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/docker-install-commands" + ] + }, + { + "type": "category", + "label": "Rancher on a Single Node with Docker", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-on-a-single-node-with-docker" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/upgrade-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/roll-back-docker-installed-rancher", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-on-a-single-node-with-docker/certificate-troubleshooting" + ] + }, + { + "type": "category", + "label": "Rancher Behind an HTTP Proxy", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-behind-an-http-proxy" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/set-up-infrastructure", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-kubernetes", + "getting-started/installation-and-upgrade/other-installation-methods/rancher-behind-an-http-proxy/install-rancher" + ] + }, + { + "type": "category", + "label": "Install/Upgrade Rancher with RancherD", + "link": { + "type": "doc", + "id": "pages-for-subheaders/install-rancher-on-linux" + }, + "items": [ + "getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/roll-back-rancherd", + "getting-started/installation-and-upgrade/other-installation-methods/install-rancher-on-linux/upgrade-rancherd" + ] + } + ] + }, + { + "type": "category", + "label": "Resources", + "link": { + "type": "doc", + "id": "pages-for-subheaders/resources" + }, + "items": [ + "getting-started/installation-and-upgrade/resources/choose-a-rancher-version", + "getting-started/installation-and-upgrade/resources/helm-version-requirements", + "getting-started/installation-and-upgrade/resources/add-tls-secrets", + "getting-started/installation-and-upgrade/resources/custom-ca-root-certificates", + "getting-started/installation-and-upgrade/resources/upgrade-cert-manager", + "getting-started/installation-and-upgrade/resources/upgrade-cert-manager-helm-2", + "getting-started/installation-and-upgrade/resources/update-rancher-certificate", + "getting-started/installation-and-upgrade/resources/local-system-charts" + ] + }, + "getting-started/installation-and-upgrade/upgrade-and-roll-back-kubernetes", + "getting-started/installation-and-upgrade/upgrade-kubernetes-without-upgrading-rancher", + { + "type": "category", + "label": "Advanced Options", + "link": { + "type": "doc", + "id": "pages-for-subheaders/advanced-options" + }, + "items": [ + { + "type": "category", + "label": "Enable Experimental Features", + "link": { + "type": "doc", + "id": "pages-for-subheaders/enable-experimental-features" + }, + "items": [ + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/rancher-on-arm64", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/unsupported-storage-drivers", + "getting-started/installation-and-upgrade/advanced-options/enable-experimental-features/istio-traffic-management-features" + ] + }, + { + "type": "category", + "label": "Advanced Use Cases", + "items": [ + 
"getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/open-ports-with-firewalld", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/tune-etcd-for-large-installs", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/enable-api-audit-log", + "getting-started/installation-and-upgrade/advanced-options/advanced-use-cases/configure-layer-7-nginx-load-balancer" + ] + } + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "How-to Guides", + "link": { + "type": "doc", + "id": "how-to-guides" + }, + "items": [ + { + "type": "category", + "label": "New User Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/new-user-guides" + }, + "items": [ + { + "type": "category", + "label": "Kubernetes Cluster Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-cluster-setup" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-cluster-setup/high-availability-installs", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/rke1-for-rancher", + "how-to-guides/new-user-guides/kubernetes-cluster-setup/rke2-for-rancher" + ] + }, + { + "type": "category", + "label": "Infrastructure Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/infrastructure-setup" + }, + "items": [ + "how-to-guides/new-user-guides/infrastructure-setup/ha-k3s-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/ha-rke1-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/ha-rke2-kubernetes-cluster", + "how-to-guides/new-user-guides/infrastructure-setup/nodes-in-amazon-ec2", + "how-to-guides/new-user-guides/infrastructure-setup/mysql-database-in-amazon-rds", + "how-to-guides/new-user-guides/infrastructure-setup/nginx-load-balancer", + "how-to-guides/new-user-guides/infrastructure-setup/amazon-elb-load-balancer" + ] + }, + { + "type": "category", + "label": "Kubernetes Clusters in Rancher Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-clusters-in-rancher-setup" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/node-requirements-for-rancher-managed-clusters", + { + "type": "category", + "label": "Checklist for Production-Ready Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/checklist-for-production-ready-clusters" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/recommended-cluster-architecture", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/checklist-for-production-ready-clusters/roles-for-nodes-in-kubernetes" + ] + }, + { + "type": "category", + "label": "Set Up Clusters from Hosted Kubernetes Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/set-up-clusters-from-hosted-kubernetes-providers" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/gke", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/aks", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/alibaba", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/tencent", + 
"how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/set-up-clusters-from-hosted-kubernetes-providers/huawei" + ] + }, + { + "type": "category", + "label": "Launch Kubernetes with Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/launch-kubernetes-with-rancher" + }, + "items": [ + { + "type": "category", + "label": "Use New Nodes in an Infra Provider", + "link": { + "type": "doc", + "id": "pages-for-subheaders/use-new-nodes-in-an-infra-provider" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-amazon-ec2-cluster", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-a-digitalocean-cluster", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/create-an-azure-cluster", + { + "type": "category", + "label": "vSphere", + "link": { + "type": "doc", + "id": "pages-for-subheaders/vsphere" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/provision-kubernetes-clusters-in-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-new-nodes-in-an-infra-provider/vsphere/create-credentials" + ] + } + ] + }, + { + "type": "category", + "label": "Use Windows Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/use-windows-clusters" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/azure-storageclass-configuration", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/windows-linux-cluster-feature-parity", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/use-windows-clusters/network-requirements-for-host-gateway" + ] + }, + { + "type": "category", + "label": "Set Up Cloud Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/set-up-cloud-providers" + }, + "items": [ + { + "type": "category", + "label": "Other Cloud Providers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-cloud-providers" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/amazon", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/azure", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/other-cloud-providers/google-compute-engine" + ] + }, + { + "type": "category", + "label": "vSphere", + "link": { + "type": "doc", + "id": "pages-for-subheaders/vsphere-cloud-provider" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-in-tree-vsphere", + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/configure-out-of-tree-vsphere", + 
"how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/set-up-cloud-providers/vsphere/migrate-from-in-tree-to-out-of-tree" + ] + } + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/launch-kubernetes-with-rancher/about-rancher-agents" + ] + }, + "how-to-guides/new-user-guides/kubernetes-clusters-in-rancher-setup/register-existing-clusters" + ] + }, + { + "type": "category", + "label": "Kubernetes Resources Setup", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-resources-setup" + }, + "items": [ + { + "type": "category", + "label": "Workloads and Pods", + "link": { + "type": "doc", + "id": "pages-for-subheaders/workloads-and-pods" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/deploy-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/roll-back-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/upgrade-workloads", + "how-to-guides/new-user-guides/kubernetes-resources-setup/workloads-and-pods/add-a-sidecar" + ] + }, + { + "type": "category", + "label": "Horizontal Pod Autoscaler", + "link": { + "type": "doc", + "id": "pages-for-subheaders/horizontal-pod-autoscaler" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/about-hpas", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-ui", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/manage-hpas-with-kubectl", + "how-to-guides/new-user-guides/kubernetes-resources-setup/horizontal-pod-autoscaler/test-hpas-with-kubectl" + ] + }, + { + "type": "category", + "label": "Load Balancer and Ingress Controller", + "link": { + "type": "doc", + "id": "pages-for-subheaders/load-balancer-and-ingress-controller" + }, + "items": [ + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/layer-4-and-layer-7-load-balancing", + "how-to-guides/new-user-guides/kubernetes-resources-setup/load-balancer-and-ingress-controller/add-ingresses" + ] + }, + "how-to-guides/new-user-guides/kubernetes-resources-setup/create-services", + "how-to-guides/new-user-guides/kubernetes-resources-setup/encrypt-http-communication", + "how-to-guides/new-user-guides/kubernetes-resources-setup/configmaps", + "how-to-guides/new-user-guides/kubernetes-resources-setup/secrets", + "how-to-guides/new-user-guides/kubernetes-resources-setup/kubernetes-and-docker-registries" + ] + }, + { + "type": "category", + "label": "Helm Charts in Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/helm-charts-in-rancher" + }, + "items": [ + + ] + }, + { + "type": "category", + "label": "Deploy Apps Across Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/deploy-apps-across-clusters" + }, + "items": [ + "how-to-guides/new-user-guides/deploy-apps-across-clusters/fleet", + "how-to-guides/new-user-guides/deploy-apps-across-clusters/multi-cluster-apps" + ] + }, + { + "type": "category", + "label": "Backup, Restore, and Disaster Recovery", + "link": { + "type": "doc", + "id": "pages-for-subheaders/backup-restore-and-disaster-recovery" + }, + "items": [ + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher", + 
"how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/migrate-rancher-to-new-cluster", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-docker-installed-rancher", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/back-up-rancher-launched-kubernetes-clusters", + "how-to-guides/new-user-guides/backup-restore-and-disaster-recovery/restore-rancher-launched-kubernetes-clusters-from-backup" + ] + } + ] + }, + { + "type": "category", + "label": "Advanced User Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/advanced-user-guides" + }, + "items": [ + { + "type": "category", + "label": "Authentication, Permissions, and Global Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/authentication-permissions-and-global-configuration" + }, + "items": [ + { + "type": "category", + "label": "About Authentication", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-authentication" + }, + "items": [ + { + "type": "category", + "label": "Authentication Config", + "link": { + "type": "doc", + "id": "pages-for-subheaders/authentication-config" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/manage-users-and-groups", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/create-local-users", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-google-oauth", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-active-directory", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-freeipa", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-azure-ad", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-github", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-keycloak", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-pingidentity", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/authentication-config/configure-okta-saml" + ] + }, + { + "type": "category", + "label": "Configure Microsoft AD Federation Service (SAML)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configure-microsoft-ad-federation-service-saml" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-ms-adfs-for-rancher", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-microsoft-ad-federation-service-saml/configure-rancher-for-ms-adfs" + ] + }, + { + "type": "category", + "label": "Configure Shibboleth (SAML)", + "link": { + "type": "doc", + "id": 
"pages-for-subheaders/configure-shibboleth-saml" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-authentication/configure-shibboleth-saml/about-group-permissions" + ] + } + ] + }, + { + "type": "category", + "label": "Manage Role-Based Access Control (RBAC)", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-role-based-access-control-rbac" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/global-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/cluster-and-project-roles", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/custom-roles", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/manage-role-based-access-control-rbac/locked-roles" + ] + }, + { + "type": "category", + "label": "About Provisioning Drivers", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-provisioning-drivers" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-cluster-drivers", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-provisioning-drivers/manage-node-drivers" + ] + }, + { + "type": "category", + "label": "About RKE1 Templates", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-rke1-templates" + }, + "items": [ + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/creator-permissions", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/access-or-share-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/manage-rke1-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/enforce-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/override-template-settings", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/apply-templates", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/infrastructure", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/about-rke1-templates/example-use-cases" + ] + }, + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/create-pod-security-policies", + "how-to-guides/advanced-user-guides/authentication-permissions-and-global-configuration/global-default-private-registry" + ] + }, + { + "type": "category", + "label": "Manage Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-clusters" + }, + "items": [ + { + "type": "category", + "label": "Access Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/access-clusters" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/use-kubectl-and-kubeconfig", + "how-to-guides/advanced-user-guides/manage-clusters/access-clusters/authorized-cluster-endpoint", + 
"how-to-guides/advanced-user-guides/manage-clusters/access-clusters/add-users-to-clusters" + ] + }, + { + "type": "category", + "label": "Install Cluster Autoscaler", + "link": { + "type": "doc", + "id": "pages-for-subheaders/install-cluster-autoscaler" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/install-cluster-autoscaler/use-aws-ec2-auto-scaling-groups" + ] + }, + { + "type": "category", + "label": "Create Kubernetes Persistent Storage", + "link": { + "type": "doc", + "id": "pages-for-subheaders/create-kubernetes-persistent-storage" + }, + "items": [ + { + "type": "category", + "label": "Manage Persistent Storage", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-persistent-storage" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-persistent-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/set-up-existing-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/dynamically-provision-new-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/use-external-ceph-driver", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/about-glusterfs-volumes", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/manage-persistent-storage/install-iscsi-volumes" + ] + }, + { + "type": "category", + "label": "Provisioning Storage Examples", + "link": { + "type": "doc", + "id": "pages-for-subheaders/provisioning-storage-examples" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/persistent-storage-in-amazon-ebs", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/nfs-storage", + "how-to-guides/advanced-user-guides/manage-clusters/create-kubernetes-persistent-storage/provisioning-storage-examples/vsphere-storage" + ] + } + ] + }, + "how-to-guides/advanced-user-guides/manage-clusters/projects-and-namespaces", + "how-to-guides/advanced-user-guides/manage-clusters/clone-cluster-configuration", + "how-to-guides/advanced-user-guides/manage-clusters/rotate-certificates", + "how-to-guides/advanced-user-guides/manage-clusters/nodes-and-node-pools", + "how-to-guides/advanced-user-guides/manage-clusters/clean-cluster-nodes", + "how-to-guides/advanced-user-guides/manage-clusters/add-a-pod-security-policy", + "how-to-guides/advanced-user-guides/manage-clusters/assign-pod-security-policies" + ] + }, + { + "type": "category", + "label": "Manage Projects", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-projects" + }, + "items": [ + "how-to-guides/advanced-user-guides/manage-projects/add-users-to-projects", + "how-to-guides/advanced-user-guides/manage-projects/manage-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/ci-cd-pipelines", + "how-to-guides/advanced-user-guides/manage-projects/manage-pod-security-policies", + { + "type": "category", + "label": "Manage Project Resource Quotas", + "link": { + "type": "doc", + "id": "pages-for-subheaders/manage-project-resource-quotas" + }, + "items": [ + 
"how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/about-project-resource-quotas", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/override-default-limit-in-namespaces", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/set-container-default-resource-limits", + "how-to-guides/advanced-user-guides/manage-projects/manage-project-resource-quotas/resource-quota-types" + ] + } + ] + }, + { + "type": "category", + "label": "Monitoring/Alerting Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/monitoring-alerting-guides" + }, + "items": [ + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/enable-monitoring", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/uninstall-monitoring", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/set-up-monitoring-for-workloads", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/customize-grafana-dashboard", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/create-persistent-grafana-dashboard", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/debug-high-memory-usage", + "how-to-guides/advanced-user-guides/monitoring-alerting-guides/migrate-to-rancher-v2.5+-monitoring" + ] + }, + { + "type": "category", + "label": "Monitoring V2 Configuration Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/monitoring-v2-configuration-guides" + }, + "items": [ + { + "type": "category", + "label": "Advanced Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/advanced-configuration" + }, + "items": [ + "how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/alertmanager", + "how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheus", + "how-to-guides/advanced-user-guides/monitoring-v2-configuration-guides/advanced-configuration/prometheusrules" + ] + } + ] + }, + { + "type": "category", + "label": "Istio Setup Guide", + "link": { + "type": "doc", + "id": "pages-for-subheaders/istio-setup-guide" + }, + "items": [ + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-cluster", + "how-to-guides/advanced-user-guides/istio-setup-guide/enable-istio-in-namespace", + "how-to-guides/advanced-user-guides/istio-setup-guide/use-istio-sidecar", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-istio-gateway", + "how-to-guides/advanced-user-guides/istio-setup-guide/set-up-traffic-management", + "how-to-guides/advanced-user-guides/istio-setup-guide/generate-and-view-traffic" + ] + }, + { + "type": "category", + "label": "CIS Scan Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cis-scan-guides" + }, + "items": [ + "how-to-guides/advanced-user-guides/cis-scan-guides/install-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/uninstall-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan", + "how-to-guides/advanced-user-guides/cis-scan-guides/run-a-scan-periodically-on-a-schedule", + "how-to-guides/advanced-user-guides/cis-scan-guides/skip-tests", + "how-to-guides/advanced-user-guides/cis-scan-guides/view-reports", + "how-to-guides/advanced-user-guides/cis-scan-guides/enable-alerting-for-rancher-cis-benchmark", + "how-to-guides/advanced-user-guides/cis-scan-guides/configure-alerts-for-periodic-scan-on-a-schedule", + 
"how-to-guides/advanced-user-guides/cis-scan-guides/create-a-custom-benchmark-version-to-run" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Reference Guides", + "link": { + "type": "doc", + "id": "reference-guides" + }, + "items": [ + { + "type": "category", + "label": "Best Practices", + "link": { + "type": "doc", + "id": "pages-for-subheaders/best-practices" + }, + "items": [ + { + "type": "category", + "label": "Rancher Server", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-server" + }, + "items": [ + "reference-guides/best-practices/rancher-server/on-premises-rancher-in-vsphere", + "reference-guides/best-practices/rancher-server/rancher-deployment-strategy", + "reference-guides/best-practices/rancher-server/tips-for-running-rancher" + ] + }, + { + "type": "category", + "label": "Rancher-Managed Clusters", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-managed-clusters" + }, + "items": [ + "reference-guides/best-practices/rancher-managed-clusters/logging-best-practices", + "reference-guides/best-practices/rancher-managed-clusters/monitoring-best-practices", + "reference-guides/best-practices/rancher-managed-clusters/tips-to-set-up-containers", + "reference-guides/best-practices/rancher-managed-clusters/rancher-managed-clusters-in-vsphere" + ] + } + ] + }, + { + "type": "category", + "label": "Rancher Manager Architecture", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-manager-architecture" + }, + "items": [ + "reference-guides/rancher-manager-architecture/rancher-server-and-components", + "reference-guides/rancher-manager-architecture/communicating-with-downstream-user-clusters", + "reference-guides/rancher-manager-architecture/architecture-recommendations" + ] + }, + { + "type": "category", + "label": "Cluster Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cluster-configuration" + }, + "items": [ + { + "type": "category", + "label": "Rancher Server Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-server-configuration" + }, + "items": [ + "reference-guides/cluster-configuration/rancher-server-configuration/rke1-cluster-configuration", + "reference-guides/cluster-configuration/rancher-server-configuration/eks-cluster-configuration", + { + "type": "category", + "label": "GKE Cluster Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/gke-cluster-configuration" + }, + "items": [ + "reference-guides/cluster-configuration/rancher-server-configuration/gke-cluster-configuration/gke-private-clusters" + ] + }, + { + "type": "category", + "label": "Use Existing Nodes", + "link": { + "type": "doc", + "id": "pages-for-subheaders/use-existing-nodes" + }, + "items": [ + "reference-guides/cluster-configuration/rancher-server-configuration/use-existing-nodes/rancher-agent-options" + ] + }, + "reference-guides/cluster-configuration/rancher-server-configuration/sync-clusters", + "reference-guides/cluster-configuration/rancher-server-configuration/rancherd-configuration-reference" + ] + }, + { + "type": "category", + "label": "Downstream Cluster Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/downstream-cluster-configuration" + }, + "items": [ + { + "type": "category", + "label": "Node Template Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/node-template-configuration" + }, + "items": [ + 
"reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/amazon-ec2", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/digitalocean", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/azure", + "reference-guides/cluster-configuration/downstream-cluster-configuration/node-template-configuration/vsphere" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "Single-Node Rancher in Docker", + "link": { + "type": "doc", + "id": "pages-for-subheaders/single-node-rancher-in-docker" + }, + "items": [ + "reference-guides/single-node-rancher-in-docker/http-proxy-configuration", + "reference-guides/single-node-rancher-in-docker/advanced-options" + ] + }, + { + "type": "category", + "label": "Installation References", + "link": { + "type": "doc", + "id": "pages-for-subheaders/installation-references" + }, + "items": [ + "reference-guides/installation-references/helm-chart-options", + "reference-guides/installation-references/tls-settings", + "reference-guides/installation-references/feature-flags" + ] + }, + { + "type": "category", + "label": "Amazon EKS Permissions", + "link": { + "type": "doc", + "id": "pages-for-subheaders/amazon-eks-permissions" + }, + "items": [ + "reference-guides/amazon-eks-permissions/minimum-eks-permissions" + ] + }, + { + "type": "category", + "label": "Backup & Restore Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/backup-restore-configuration" + }, + "items": [ + "reference-guides/backup-restore-configuration/backup-configuration", + "reference-guides/backup-restore-configuration/restore-configuration", + "reference-guides/backup-restore-configuration/storage-configuration", + "reference-guides/backup-restore-configuration/examples" + ] + }, + { + "type": "category", + "label": "Configure OpenLDAP", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configure-openldap" + }, + "items": [ + "reference-guides/configure-openldap/openldap-config-reference" + ] + }, + "reference-guides/kubernetes-concepts", + { + "type": "category", + "label": "Monitoring V2 Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/monitoring-v2-configuration" + }, + "items": [ + "reference-guides/monitoring-v2-configuration/receivers", + "reference-guides/monitoring-v2-configuration/routes", + "reference-guides/monitoring-v2-configuration/servicemonitors-and-podmonitors", + "reference-guides/monitoring-v2-configuration/helm-chart-options", + "reference-guides/monitoring-v2-configuration/examples" + ] + }, + { + "type": "category", + "label": "User Settings", + "link": { + "type": "doc", + "id": "pages-for-subheaders/user-settings" + }, + "items": [ + "reference-guides/user-settings/api-keys", + "reference-guides/user-settings/manage-node-templates", + "reference-guides/user-settings/manage-cloud-credentials", + "reference-guides/user-settings/user-preferences" + ] + }, + { + "type": "category", + "label": "CLI with Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cli-with-rancher" + }, + "items": [ + "reference-guides/cli-with-rancher/rancher-cli", + "reference-guides/cli-with-rancher/kubectl-utility" + ] + }, + { + "type": "category", + "label": "About the API", + "link": { + "type": "doc", + "id": "pages-for-subheaders/about-the-api" + }, + "items": [ + "reference-guides/about-the-api/api-tokens" + ] + }, + "reference-guides/rancher-cluster-tools", + 
"reference-guides/rancher-project-tools", + "reference-guides/system-tools", + "reference-guides/rke1-template-example-yaml", + { + "type": "category", + "label": "Pipelines", + "link": { + "type": "doc", + "id": "pages-for-subheaders/pipelines" + }, + "items": [ + "reference-guides/pipelines/concepts", + "reference-guides/pipelines/pipeline-configuration", + "reference-guides/pipelines/configure-persistent-data", + "reference-guides/pipelines/example-repositories", + "reference-guides/pipelines/example-yaml" + ] + }, + { + "type": "category", + "label": "Rancher Security", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-security" + }, + "items": [ + { + "type": "category", + "label": "Rancher v2.6 Hardening Guides", + "link": { + "type": "doc", + "id": "pages-for-subheaders/rancher-v2.5-hardening-guides" + }, + "items": [ + "reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark", + "reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark", + "reference-guides/rancher-security/rancher-v2.5-hardening-guides/hardening-guide-with-cis-v1.6-benchmark", + "reference-guides/rancher-security/rancher-v2.5-hardening-guides/self-assessment-guide-with-cis-v1.6-benchmark" + ] + }, + { + "type": "category", + "label": "SELinux RPM", + "link": { + "type": "doc", + "id": "pages-for-subheaders/selinux-rpm" + }, + "items": [ + "reference-guides/rancher-security/selinux-rpm/about-rancher-selinux", + "reference-guides/rancher-security/selinux-rpm/about-rke2-selinux" + ] + }, + "reference-guides/rancher-security/kubernetes-security-best-practices", + "reference-guides/rancher-security/security-advisories-and-cves" + ] + } + ] + }, + { + "type": "category", + "label": "Explanations", + "link": { + "type": "doc", + "id": "explanations" + }, + "items": [ + { + "type": "category", + "label": "Integrations in Rancher", + "link": { + "type": "doc", + "id": "pages-for-subheaders/integrations-in-rancher" + }, + "items": [ + { + "type": "category", + "label": "CIS Scans", + "link": { + "type": "doc", + "id": "pages-for-subheaders/cis-scans" + }, + "items": [ + "explanations/integrations-in-rancher/cis-scans/configuration-reference", + "explanations/integrations-in-rancher/cis-scans/rbac-for-cis-scans", + "explanations/integrations-in-rancher/cis-scans/skipped-and-not-applicable-tests", + "explanations/integrations-in-rancher/cis-scans/custom-benchmark" + ] + }, + { + "type": "category", + "label": "Fleet - GitOps at Scale", + "link": { + "type": "doc", + "id": "pages-for-subheaders/fleet-gitops-at-scale" + }, + "items": [ + "explanations/integrations-in-rancher/fleet-gitops-at-scale/architecture", + "explanations/integrations-in-rancher/fleet-gitops-at-scale/windows-support", + "explanations/integrations-in-rancher/fleet-gitops-at-scale/use-fleet-behind-a-proxy" + ] + }, + { + "type": "category", + "label": "Istio", + "link": { + "type": "doc", + "id": "pages-for-subheaders/istio" + }, + "items": [ + "explanations/integrations-in-rancher/istio/cpu-and-memory-allocations", + "explanations/integrations-in-rancher/istio/rbac-for-istio", + "explanations/integrations-in-rancher/istio/disable-istio", + { + "type": "category", + "label": "Configuration Options", + "link": { + "type": "doc", + "id": "pages-for-subheaders/configuration-options" + }, + "items": [ + "explanations/integrations-in-rancher/istio/configuration-options/pod-security-policies", + 
"explanations/integrations-in-rancher/istio/configuration-options/selectors-and-scrape-configurations", + "explanations/integrations-in-rancher/istio/configuration-options/install-istio-on-rke2-cluster", + "explanations/integrations-in-rancher/istio/configuration-options/project-network-isolation" + ] + } + ] + }, + "explanations/integrations-in-rancher/longhorn", + { + "type": "category", + "label": "Logging", + "link": { + "type": "doc", + "id": "pages-for-subheaders/logging" + }, + "items": [ + "explanations/integrations-in-rancher/logging/logging-architecture", + "explanations/integrations-in-rancher/logging/migrate-to-rancher-v2.5+-logging", + "explanations/integrations-in-rancher/logging/rbac-for-logging", + "explanations/integrations-in-rancher/logging/logging-helm-chart-options", + "explanations/integrations-in-rancher/logging/taints-and-tolerations", + { + "type": "category", + "label": "Custom Resource Configuration", + "link": { + "type": "doc", + "id": "pages-for-subheaders/custom-resource-configuration" + }, + "items": [ + "explanations/integrations-in-rancher/logging/custom-resource-configuration/flows-and-clusterflows", + "explanations/integrations-in-rancher/logging/custom-resource-configuration/outputs-and-clusteroutputs" + ] + } + ] + }, + { + "type": "category", + "label": "Monitoring and Alerting", + "link": { + "type": "doc", + "id": "pages-for-subheaders/monitoring-and-alerting" + }, + "items": [ + "explanations/integrations-in-rancher/monitoring-and-alerting/how-monitoring-works", + "explanations/integrations-in-rancher/monitoring-and-alerting/rbac-for-monitoring", + "explanations/integrations-in-rancher/monitoring-and-alerting/built-in-dashboards", + "explanations/integrations-in-rancher/monitoring-and-alerting/windows-support", + "explanations/integrations-in-rancher/monitoring-and-alerting/promql-expressions" + ] + }, + "explanations/integrations-in-rancher/opa-gatekeeper" + ] + } + ] + }, + { + "type": "category", + "label": "FAQ", + "link": { + "type": "doc", + "id": "faq" + }, + "items": [ + "faq/deprecated-features-in-v2.5", + "faq/install-and-configure-kubectl", + "faq/technical-items", + "faq/security", + "faq/telemetry", + "faq/container-network-interface-providers", + "faq/rancher-is-no-longer-needed" + ] + }, + { + "type": "category", + "label": "Troubleshooting", + "link": { + "type": "doc", + "id": "troubleshooting" + }, + "items": [ + { + "type": "category", + "label": "Kubernetes Components", + "link": { + "type": "doc", + "id": "pages-for-subheaders/kubernetes-components" + }, + "items": [ + "troubleshooting/kubernetes-components/troubleshooting-etcd-nodes", + "troubleshooting/kubernetes-components/troubleshooting-controlplane-nodes", + "troubleshooting/kubernetes-components/troubleshooting-nginx-proxy", + "troubleshooting/kubernetes-components/troubleshooting-worker-nodes-and-generic-components" + ] + }, + { + "type": "category", + "label": "Other Troubleshooting Tips", + "link": { + "type": "doc", + "id": "pages-for-subheaders/other-troubleshooting-tips" + }, + "items": [ + "troubleshooting/other-troubleshooting-tips/kubernetes-resources", + "troubleshooting/other-troubleshooting-tips/networking", + "troubleshooting/other-troubleshooting-tips/dns", + "troubleshooting/other-troubleshooting-tips/rancher-ha", + "troubleshooting/other-troubleshooting-tips/registered-clusters", + "troubleshooting/other-troubleshooting-tips/logging" + ] + } + ] + }, + "contribute-to-rancher" + ] +} diff --git a/versioned_sidebars/version-2.6-sidebars.json 
b/versioned_sidebars/version-2.6-sidebars.json new file mode 100644 index 0000000000..caea0c03ba --- /dev/null +++ b/versioned_sidebars/version-2.6-sidebars.json @@ -0,0 +1,8 @@ +{ + "tutorialSidebar": [ + { + "type": "autogenerated", + "dirName": "." + } + ] +} diff --git a/versions.json b/versions.json new file mode 100644 index 0000000000..d388d83067 --- /dev/null +++ b/versions.json @@ -0,0 +1,4 @@ +[ + "2.5", + "2.0-2.4" +] diff --git a/yarn.lock b/yarn.lock index 7ac9eb5d15..4460cdc18c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2,2602 +2,5784 @@ # yarn lockfile v1 -"@babel/cli@^7.2.0": - version "7.2.3" - resolved "https://registry.yarnpkg.com/@babel/cli/-/cli-7.2.3.tgz#1b262e42a3e959d28ab3d205ba2718e1923cfee6" - integrity sha512-bfna97nmJV6nDJhXNPeEfxyMjWnt6+IjUAaDPiYRTBlm8L41n8nvw6UAqUCbvpFfU246gHPxW7sfWwqtF4FcYA== +"@algolia/autocomplete-core@1.6.3": + version "1.6.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-core/-/autocomplete-core-1.6.3.tgz#76832fffb6405ac2c87bac5a040b8a31a1cdef80" + integrity sha512-dqQqRt01fX3YuVFrkceHsoCnzX0bLhrrg8itJI1NM68KjrPYQPYsE+kY8EZTCM4y8VDnhqJErR73xe/ZsV+qAA== + dependencies: + "@algolia/autocomplete-shared" "1.6.3" + +"@algolia/autocomplete-shared@1.6.3": + version "1.6.3" + resolved "https://registry.yarnpkg.com/@algolia/autocomplete-shared/-/autocomplete-shared-1.6.3.tgz#52085ce89a755977841ed0a463aa31ce8f1dea97" + integrity sha512-UV46bnkTztyADFaETfzFC5ryIdGVb2zpAoYgu0tfcuYWjhg1KbLXveFffZIrGVoboqmAk1b+jMrl6iCja1i3lg== + +"@algolia/cache-browser-local-storage@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.13.1.tgz#ffacb9230119f77de1a6f163b83680be999110e4" + integrity sha512-UAUVG2PEfwd/FfudsZtYnidJ9eSCpS+LW9cQiesePQLz41NAcddKxBak6eP2GErqyFagSlnVXe/w2E9h2m2ttg== + dependencies: + "@algolia/cache-common" "4.13.1" + +"@algolia/cache-common@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/cache-common/-/cache-common-4.13.1.tgz#c933fdec9f73b4f7c69d5751edc92eee4a63d76b" + integrity sha512-7Vaf6IM4L0Jkl3sYXbwK+2beQOgVJ0mKFbz/4qSxKd1iy2Sp77uTAazcX+Dlexekg1fqGUOSO7HS4Sx47ZJmjA== + +"@algolia/cache-in-memory@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/cache-in-memory/-/cache-in-memory-4.13.1.tgz#c19baa67b4597e1a93e987350613ab3b88768832" + integrity sha512-pZzybCDGApfA/nutsFK1P0Sbsq6fYJU3DwIvyKg4pURerlJM4qZbB9bfLRef0FkzfQu7W11E4cVLCIOWmyZeuQ== + dependencies: + "@algolia/cache-common" "4.13.1" + +"@algolia/client-account@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/client-account/-/client-account-4.13.1.tgz#fea591943665477a23922ab31863ad0732e26c66" + integrity sha512-TFLiZ1KqMiir3FNHU+h3b0MArmyaHG+eT8Iojio6TdpeFcAQ1Aiy+2gb3SZk3+pgRJa/BxGmDkRUwE5E/lv3QQ== + dependencies: + "@algolia/client-common" "4.13.1" + "@algolia/client-search" "4.13.1" + "@algolia/transporter" "4.13.1" + +"@algolia/client-analytics@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/client-analytics/-/client-analytics-4.13.1.tgz#5275956b2d0d16997148f2085f1701b6c39ecc32" + integrity 
sha512-iOS1JBqh7xaL5x00M5zyluZ9+9Uy9GqtYHv/2SMuzNW1qP7/0doz1lbcsP3S7KBbZANJTFHUOfuqyRLPk91iFA== + dependencies: + "@algolia/client-common" "4.13.1" + "@algolia/client-search" "4.13.1" + "@algolia/requester-common" "4.13.1" + "@algolia/transporter" "4.13.1" + +"@algolia/client-common@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/client-common/-/client-common-4.13.1.tgz#3bf9e3586f20ef85bbb56ccca390f7dbe57c8f4f" + integrity sha512-LcDoUE0Zz3YwfXJL6lJ2OMY2soClbjrrAKB6auYVMNJcoKZZ2cbhQoFR24AYoxnGUYBER/8B+9sTBj5bj/Gqbg== + dependencies: + "@algolia/requester-common" "4.13.1" + "@algolia/transporter" "4.13.1" + +"@algolia/client-personalization@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/client-personalization/-/client-personalization-4.13.1.tgz#438a1f58576ef19c4ad4addb8417bdacfe2fce2e" + integrity sha512-1CqrOW1ypVrB4Lssh02hP//YxluoIYXAQCpg03L+/RiXJlCs+uIqlzC0ctpQPmxSlTK6h07kr50JQoYH/TIM9w== + dependencies: + "@algolia/client-common" "4.13.1" + "@algolia/requester-common" "4.13.1" + "@algolia/transporter" "4.13.1" + +"@algolia/client-search@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/client-search/-/client-search-4.13.1.tgz#5501deed01e23c33d4aaa9f9eb96a849f0fce313" + integrity sha512-YQKYA83MNRz3FgTNM+4eRYbSmHi0WWpo019s5SeYcL3HUan/i5R09VO9dk3evELDFJYciiydSjbsmhBzbpPP2A== + dependencies: + "@algolia/client-common" "4.13.1" + "@algolia/requester-common" "4.13.1" + "@algolia/transporter" "4.13.1" + +"@algolia/events@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@algolia/events/-/events-4.0.1.tgz#fd39e7477e7bc703d7f893b556f676c032af3950" + integrity sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ== + +"@algolia/logger-common@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/logger-common/-/logger-common-4.13.1.tgz#4221378e701e3f1eacaa051bcd4ba1f25ddfaf4d" + integrity sha512-L6slbL/OyZaAXNtS/1A8SAbOJeEXD5JcZeDCPYDqSTYScfHu+2ePRTDMgUTY4gQ7HsYZ39N1LujOd8WBTmM2Aw== + +"@algolia/logger-console@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/logger-console/-/logger-console-4.13.1.tgz#423d358e4992dd4bceab0d9a4e99d1fd68107043" + integrity sha512-7jQOTftfeeLlnb3YqF8bNgA2GZht7rdKkJ31OCeSH2/61haO0tWPoNRjZq9XLlgMQZH276pPo0NdiArcYPHjCA== dependencies: - commander "^2.8.1" - convert-source-map "^1.1.0" - fs-readdir-recursive "^1.1.0" - glob "^7.0.0" - lodash "^4.17.10" - mkdirp "^0.5.1" - output-file-sync "^2.0.0" - slash "^2.0.0" - source-map "^0.5.0" - optionalDependencies: - chokidar "^2.0.3" + "@algolia/logger-common" "4.13.1" -"@babel/code-frame@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0.tgz#06e2ab19bdb535385559aabb5ba59729482800f8" - integrity sha512-OfC2uemaknXr87bdLUkWog7nYuliM9Ij5HUcajsVcMCpQrcLmtxRbVFTIqmcSkSeYRBFBRxs2FiUqFJDLdiebA== +"@algolia/requester-browser-xhr@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.13.1.tgz#f8ea79233cf6f0392feaf31e35a6b40d68c5bc9e" + integrity 
sha512-oa0CKr1iH6Nc7CmU6RE7TnXMjHnlyp7S80pP/LvZVABeJHX3p/BcSCKovNYWWltgTxUg0U1o+2uuy8BpMKljwA== dependencies: - "@babel/highlight" "^7.0.0" + "@algolia/requester-common" "4.13.1" -"@babel/core@^7.2.0": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.2.2.tgz#07adba6dde27bb5ad8d8672f15fde3e08184a687" - integrity sha512-59vB0RWt09cAct5EIe58+NzGP4TFSD3Bz//2/ELy3ZeTeKF6VTD1AXlH8BGGbCX0PuobZBsIzO7IAI9PH67eKw== +"@algolia/requester-common@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/requester-common/-/requester-common-4.13.1.tgz#daea143d15ab6ed3909c4c45877f1b6c36a16179" + integrity sha512-eGVf0ID84apfFEuXsaoSgIxbU3oFsIbz4XiotU3VS8qGCJAaLVUC5BUJEkiFENZIhon7hIB4d0RI13HY4RSA+w== + +"@algolia/requester-node-http@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/requester-node-http/-/requester-node-http-4.13.1.tgz#32c63d4c009f22d97e396406de7af9b66fb8e89d" + integrity sha512-7C0skwtLdCz5heKTVe/vjvrqgL/eJxmiEjHqXdtypcE5GCQCYI15cb+wC4ytYioZDMiuDGeVYmCYImPoEgUGPw== dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/generator" "^7.2.2" - "@babel/helpers" "^7.2.0" - "@babel/parser" "^7.2.2" - "@babel/template" "^7.2.2" - "@babel/traverse" "^7.2.2" - "@babel/types" "^7.2.2" - convert-source-map "^1.1.0" + "@algolia/requester-common" "4.13.1" + +"@algolia/transporter@4.13.1": + version "4.13.1" + resolved "https://registry.yarnpkg.com/@algolia/transporter/-/transporter-4.13.1.tgz#509e03e9145102843d5be4a031c521f692d4e8d6" + integrity sha512-pICnNQN7TtrcYJqqPEXByV8rJ8ZRU2hCiIKLTLRyNpghtQG3VAFk6fVtdzlNfdUGZcehSKGarPIZEHlQXnKjgw== + dependencies: + "@algolia/cache-common" "4.13.1" + "@algolia/logger-common" "4.13.1" + "@algolia/requester-common" "4.13.1" + +"@ampproject/remapping@^2.1.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d" + integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w== + dependencies: + "@jridgewell/gen-mapping" "^0.1.0" + "@jridgewell/trace-mapping" "^0.3.9" + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.0", "@babel/code-frame@^7.16.7", "@babel/code-frame@^7.8.3": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.16.7.tgz#44416b6bd7624b998f5b1af5d470856c40138789" + integrity sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg== + dependencies: + "@babel/highlight" "^7.16.7" + +"@babel/compat-data@^7.13.11", "@babel/compat-data@^7.17.10": + version "7.17.10" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.17.10.tgz#711dc726a492dfc8be8220028b1b92482362baab" + integrity sha512-GZt/TCsG70Ms19gfZO1tM4CVnXsPgEPBCpJu+Qz3L0LUDsY5nZqFZglIoPC1kIYOtNBZlrnFT+klg12vFGZXrw== + +"@babel/core@7.12.9": + version "7.12.9" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.9.tgz#fd450c4ec10cdbb980e2928b7aa7a28484593fc8" + integrity sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ== + dependencies: + "@babel/code-frame" "^7.10.4" + "@babel/generator" "^7.12.5" + 
"@babel/helper-module-transforms" "^7.12.1" + "@babel/helpers" "^7.12.5" + "@babel/parser" "^7.12.7" + "@babel/template" "^7.12.7" + "@babel/traverse" "^7.12.9" + "@babel/types" "^7.12.7" + convert-source-map "^1.7.0" debug "^4.1.0" - json5 "^2.1.0" - lodash "^4.17.10" + gensync "^1.0.0-beta.1" + json5 "^2.1.2" + lodash "^4.17.19" resolve "^1.3.2" semver "^5.4.1" source-map "^0.5.0" -"@babel/generator@^7.2.2": - version "7.3.2" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.3.2.tgz#fff31a7b2f2f3dad23ef8e01be45b0d5c2fc0132" - integrity sha512-f3QCuPppXxtZOEm5GWPra/uYUjmNQlu9pbAD8D/9jze4pTY83rTtB1igTBSwvkeNlC5gR24zFFkz+2WHLFQhqQ== - dependencies: - "@babel/types" "^7.3.2" - jsesc "^2.5.1" - lodash "^4.17.10" - source-map "^0.5.0" - trim-right "^1.0.1" +"@babel/core@^7.15.5", "@babel/core@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.18.2.tgz#87b2fcd7cce9becaa7f5acebdc4f09f3dd19d876" + integrity sha512-A8pri1YJiC5UnkdrWcmfZTJTV85b4UXTAfImGmCfYmax4TR9Cw8sDS0MOk++Gp2mE/BefVJ5nwy5yzqNJbP/DQ== + dependencies: + "@ampproject/remapping" "^2.1.0" + "@babel/code-frame" "^7.16.7" + "@babel/generator" "^7.18.2" + "@babel/helper-compilation-targets" "^7.18.2" + "@babel/helper-module-transforms" "^7.18.0" + "@babel/helpers" "^7.18.2" + "@babel/parser" "^7.18.0" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.18.2" + "@babel/types" "^7.18.2" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.1" + semver "^6.3.0" -"@babel/helper-annotate-as-pure@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0.tgz#323d39dd0b50e10c7c06ca7d7638e6864d8c5c32" - integrity sha512-3UYcJUj9kvSLbLbUIfQTqzcy5VX7GRZ/CCDrnOaZorFFM01aXp1+GJwuFGV4NDDoAS+mOUyHcO6UD/RfqOks3Q== +"@babel/generator@^7.12.5", "@babel/generator@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.18.2.tgz#33873d6f89b21efe2da63fe554460f3df1c5880d" + integrity sha512-W1lG5vUwFvfMd8HVXqdfbuG7RuaSrTCCD8cl8fP8wOivdbtbIg2Db3IWUcgvfxKbbn6ZBGYRW/Zk1MIwK49mgw== dependencies: - "@babel/types" "^7.0.0" + "@babel/types" "^7.18.2" + "@jridgewell/gen-mapping" "^0.3.0" + jsesc "^2.5.1" -"@babel/helper-builder-binary-assignment-operator-visitor@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.1.0.tgz#6b69628dfe4087798e0c4ed98e3d4a6b2fbd2f5f" - integrity sha512-qNSR4jrmJ8M1VMM9tibvyRAHXQs2PmaksQF7c1CGJNipfe3D8p+wgNwgso/P2A2r2mdgBWAXljNWR0QRZAMW8w== - dependencies: - "@babel/helper-explode-assignable-expression" "^7.1.0" - "@babel/types" "^7.0.0" +"@babel/helper-annotate-as-pure@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.7.tgz#bb2339a7534a9c128e3102024c60760a3a7f3862" + integrity sha512-s6t2w/IPQVTAET1HitoowRGXooX8mCgtuP5195wD/QJPV6wYjpujCGF7JuMODVX2ZAJOf1GT6DT9MHEZvLOFSw== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-builder-binary-assignment-operator-visitor@^7.16.7": + version "7.16.7" + resolved 
"https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz#38d138561ea207f0f69eb1626a418e4f7e6a580b" + integrity sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA== + dependencies: + "@babel/helper-explode-assignable-expression" "^7.16.7" + "@babel/types" "^7.16.7" + +"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.16.7", "@babel/helper-compilation-targets@^7.17.10", "@babel/helper-compilation-targets@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.18.2.tgz#67a85a10cbd5fc7f1457fec2e7f45441dc6c754b" + integrity sha512-s1jnPotJS9uQnzFtiZVBUxe67CuBa679oWFHpxYYnTpRL/1ffhyX44R9uYiXoa/pLXcY9H2moJta0iaanlk/rQ== + dependencies: + "@babel/compat-data" "^7.17.10" + "@babel/helper-validator-option" "^7.16.7" + browserslist "^4.20.2" + semver "^6.3.0" + +"@babel/helper-create-class-features-plugin@^7.17.12", "@babel/helper-create-class-features-plugin@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.0.tgz#fac430912606331cb075ea8d82f9a4c145a4da19" + integrity sha512-Kh8zTGR9de3J63e5nS0rQUdRs/kbtwoeQQ0sriS0lItjC96u8XXZN6lKpuyWd2coKSU13py/y+LTmThLuVX0Pg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-function-name" "^7.17.9" + "@babel/helper-member-expression-to-functions" "^7.17.7" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" + +"@babel/helper-create-regexp-features-plugin@^7.16.7", "@babel/helper-create-regexp-features-plugin@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.12.tgz#bb37ca467f9694bbe55b884ae7a5cc1e0084e4fd" + integrity sha512-b2aZrV4zvutr9AIa6/gA3wsZKRwTKYoDxYiFKcESS3Ug2GTXzwBEvMuuFLhCQpEnRXs1zng4ISAXSUxxKBIcxw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + regexpu-core "^5.0.1" + +"@babel/helper-define-polyfill-provider@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz#52411b445bdb2e676869e5a74960d2d3826d2665" + integrity sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA== + dependencies: + "@babel/helper-compilation-targets" "^7.13.0" + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/traverse" "^7.13.0" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + semver "^6.1.2" + +"@babel/helper-environment-visitor@^7.16.7", "@babel/helper-environment-visitor@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.2.tgz#8a6d2dedb53f6bf248e31b4baf38739ee4a637bd" + integrity sha512-14GQKWkX9oJzPiQQ7/J36FTXcD4kSp8egKjO9nINlSKiHITRA9q/R74qu8S9xlc/b/yjsJItQUeeh3xnGN0voQ== + +"@babel/helper-explode-assignable-expression@^7.16.7": + 
version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz#12a6d8522fdd834f194e868af6354e8650242b7a" + integrity sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-function-name@^7.16.7", "@babel/helper-function-name@^7.17.9": + version "7.17.9" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.17.9.tgz#136fcd54bc1da82fcb47565cf16fd8e444b1ff12" + integrity sha512-7cRisGlVtiVqZ0MW0/yFB4atgpGLWEHUVYnb448hZK4x+vih0YO5UoS11XIYtZYqHd0dIPMdUSv8q5K4LdMnIg== + dependencies: + "@babel/template" "^7.16.7" + "@babel/types" "^7.17.0" + +"@babel/helper-hoist-variables@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz#86bcb19a77a509c7b77d0e22323ef588fa58c246" + integrity sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-member-expression-to-functions@^7.17.7": + version "7.17.7" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.17.7.tgz#a34013b57d8542a8c4ff8ba3f747c02452a4d8c4" + integrity sha512-thxXgnQ8qQ11W2wVUObIqDL4p148VMxkt5T/qpN5k2fboRyzFGFmKsTGViquyM5QHKUy48OZoca8kw4ajaDPyw== + dependencies: + "@babel/types" "^7.17.0" + +"@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.16.7.tgz#25612a8091a999704461c8a222d0efec5d091437" + integrity sha512-LVtS6TqjJHFc+nYeITRo6VLXve70xmq7wPhWTqDJusJEgGmkAACWwMiTNrvfoQo6hEhFwAIixNkvB0jPXDL8Wg== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.18.0.tgz#baf05dec7a5875fb9235bd34ca18bad4e21221cd" + integrity sha512-kclUYSUBIjlvnzN2++K9f2qzYKFgjmnmjwL4zlmU5f8ZtzgWe8s0rUPSTGy2HmK4P8T52MQsS+HTQAgZd3dMEA== + dependencies: + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-simple-access" "^7.17.7" + "@babel/helper-split-export-declaration" "^7.16.7" + "@babel/helper-validator-identifier" "^7.16.7" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.18.0" + "@babel/types" "^7.18.0" + +"@babel/helper-optimise-call-expression@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz#a34e3560605abbd31a18546bd2aad3e6d9a174f2" + integrity sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-plugin-utils@7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz#2f75a831269d4f677de49986dff59927533cf375" + 
integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg== + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.17.12", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.17.12.tgz#86c2347da5acbf5583ba0a10aed4c9bf9da9cf96" + integrity sha512-JDkf04mqtN3y4iAbO1hv9U2ARpPyPL1zqyWs/2WG1pgSq9llHFjStX5jdxb84himgJm+8Ng+x0oiWF/nw/XQKA== + +"@babel/helper-remap-async-to-generator@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz#29ffaade68a367e2ed09c90901986918d25e57e3" + integrity sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-wrap-function" "^7.16.8" + "@babel/types" "^7.16.8" + +"@babel/helper-replace-supers@^7.16.7", "@babel/helper-replace-supers@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.18.2.tgz#41fdfcc9abaf900e18ba6e5931816d9062a7b2e0" + integrity sha512-XzAIyxx+vFnrOxiQrToSUOzUOn0e1J2Li40ntddek1Y69AXUTXoDJ40/D5RdjFu7s7qHiaeoTiempZcbuVXh2Q== + dependencies: + "@babel/helper-environment-visitor" "^7.18.2" + "@babel/helper-member-expression-to-functions" "^7.17.7" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/traverse" "^7.18.2" + "@babel/types" "^7.18.2" + +"@babel/helper-simple-access@^7.17.7", "@babel/helper-simple-access@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.18.2.tgz#4dc473c2169ac3a1c9f4a51cfcd091d1c36fcff9" + integrity sha512-7LIrjYzndorDY88MycupkpQLKS1AFfsVRm2k/9PtKScSy5tZq0McZTj+DiMRynboZfIqOKvo03pmhTaUgiD6fQ== + dependencies: + "@babel/types" "^7.18.2" + +"@babel/helper-skip-transparent-expression-wrappers@^7.16.0": + version "7.16.0" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz#0ee3388070147c3ae051e487eca3ebb0e2e8bb09" + integrity sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw== + dependencies: + "@babel/types" "^7.16.0" + +"@babel/helper-split-export-declaration@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz#0b648c0c42da9d3920d85ad585f2778620b8726b" + integrity sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw== + dependencies: + "@babel/types" "^7.16.7" + +"@babel/helper-validator-identifier@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz#e8c602438c4a8195751243da9031d1607d247cad" + integrity 
sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw== + +"@babel/helper-validator-option@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.16.7.tgz#b203ce62ce5fe153899b617c08957de860de4d23" + integrity sha512-TRtenOuRUVo9oIQGPC5G9DgK4743cdxvtOw0weQNpZXaS16SCBi5MNjZF8vba3ETURjZpTbVn7Vvcf2eAwFozQ== + +"@babel/helper-wrap-function@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz#58afda087c4cd235de92f7ceedebca2c41274200" + integrity sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw== + dependencies: + "@babel/helper-function-name" "^7.16.7" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.16.8" + "@babel/types" "^7.16.8" + +"@babel/helpers@^7.12.5", "@babel/helpers@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.18.2.tgz#970d74f0deadc3f5a938bfa250738eb4ac889384" + integrity sha512-j+d+u5xT5utcQSzrh9p+PaJX94h++KN+ng9b9WEJq7pkUPAd61FGqhjuUEdfknb3E/uDBb7ruwEeKkIxNJPIrg== + dependencies: + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.18.2" + "@babel/types" "^7.18.2" + +"@babel/highlight@^7.16.7": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.17.12.tgz#257de56ee5afbd20451ac0a75686b6b404257351" + integrity sha512-7yykMVF3hfZY2jsHZEEgLc+3x4o1O+fYyULu11GynEUQNwB6lua+IIQn1FiJxNucd5UlyJryrwsOh8PL9Sn8Qg== + dependencies: + "@babel/helper-validator-identifier" "^7.16.7" + chalk "^2.0.0" + js-tokens "^4.0.0" -"@babel/helper-call-delegate@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-call-delegate/-/helper-call-delegate-7.1.0.tgz#6a957f105f37755e8645343d3038a22e1449cc4a" - integrity sha512-YEtYZrw3GUK6emQHKthltKNZwszBcHK58Ygcis+gVUrF4/FmTVr5CCqQNSfmvg2y+YDEANyYoaLz/SHsnusCwQ== - dependencies: - "@babel/helper-hoist-variables" "^7.0.0" - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.0.0" +"@babel/parser@^7.12.7", "@babel/parser@^7.16.7", "@babel/parser@^7.18.0", "@babel/parser@^7.18.3": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.18.4.tgz#6774231779dd700e0af29f6ad8d479582d7ce5ef" + integrity sha512-FDge0dFazETFcxGw/EXzOkN8uJp0PC7Qbm+Pe9T+av2zlBpOgunFHkQPPn+eRuClU73JF+98D531UgayY89tow== -"@babel/helper-define-map@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.1.0.tgz#3b74caec329b3c80c116290887c0dd9ae468c20c" - integrity sha512-yPPcW8dc3gZLN+U1mhYV91QU3n5uTbx7DUdf8NnPbjS0RMwBuHi9Xt2MUgppmNz7CJxTBWsGczTiEp1CSOTPRg== +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.17.12.tgz#1dca338caaefca368639c9ffb095afbd4d420b1e" + integrity sha512-xCJQXl4EeQ3J9C4yOmpTrtVGmzpm2iSzyxbkZHw7UCnZBftHpF/hpII80uWVyVrc40ytIClHjgWGTG1g/yB+aw== dependencies: - "@babel/helper-function-name" 
"^7.1.0" - "@babel/types" "^7.0.0" - lodash "^4.17.10" - -"@babel/helper-explode-assignable-expression@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.1.0.tgz#537fa13f6f1674df745b0c00ec8fe4e99681c8f6" - integrity sha512-NRQpfHrJ1msCHtKjbzs9YcMmJZOg6mQMmGRB+hbamEdG5PNpaSm95275VD92DvJKuyl0s2sFiDmMZ+EnnvufqA== + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.17.12.tgz#0d498ec8f0374b1e2eb54b9cb2c4c78714c77753" + integrity sha512-/vt0hpIw0x4b6BLKUkwlvEoiGZYYLNZ96CzyHYPbtG2jZGz6LBe7/V+drYrc/d+ovrF9NBi0pmtvmNb/FsWtRQ== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/plugin-proposal-optional-chaining" "^7.17.12" + +"@babel/plugin-proposal-async-generator-functions@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.17.12.tgz#094a417e31ce7e692d84bab06c8e2a607cbeef03" + integrity sha512-RWVvqD1ooLKP6IqWTA5GyFVX2isGEgC5iFxKzfYOIy/QEFdxYyCybBDtIGjipHpb9bDWHzcqGqFakf+mVmBTdQ== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-remap-async-to-generator" "^7.16.8" + "@babel/plugin-syntax-async-generators" "^7.8.4" + +"@babel/plugin-proposal-class-properties@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.17.12.tgz#84f65c0cc247d46f40a6da99aadd6438315d80a4" + integrity sha512-U0mI9q8pW5Q9EaTHFPwSVusPMV/DV9Mm8p7csqROFLtIE9rBF5piLqyrBGigftALrBcsBGu4m38JneAe7ZDLXw== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.17.12" + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-proposal-class-static-block@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.0.tgz#7d02253156e3c3793bdb9f2faac3a1c05f0ba710" + integrity sha512-t+8LsRMMDE74c6sV7KShIw13sqbqd58tlqNrsWoWBTIMw7SVQ0cZ905wLNS/FBCy/3PyooRHLFFlfrUNyyz5lA== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-class-static-block" "^7.14.5" + +"@babel/plugin-proposal-dynamic-import@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz#c19c897eaa46b27634a00fee9fb7d829158704b2" + integrity sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + +"@babel/plugin-proposal-export-namespace-from@^7.17.12": + version "7.17.12" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.17.12.tgz#b22864ccd662db9606edb2287ea5fd1709f05378" + integrity sha512-j7Ye5EWdwoXOpRmo5QmRyHPsDIe6+u70ZYZrd7uz+ebPYFKfRcLcNu3Ro0vOlJ5zuv8rU7xa+GttNiRzX56snQ== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" + +"@babel/plugin-proposal-json-strings@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.17.12.tgz#f4642951792437233216d8c1af370bb0fbff4664" + integrity sha512-rKJ+rKBoXwLnIn7n6o6fulViHMrOThz99ybH+hKHcOZbnN14VuMnH9fo2eHE69C8pO4uX1Q7t2HYYIDmv8VYkg== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-json-strings" "^7.8.3" + +"@babel/plugin-proposal-logical-assignment-operators@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.17.12.tgz#c64a1bcb2b0a6d0ed2ff674fd120f90ee4b88a23" + integrity sha512-EqFo2s1Z5yy+JeJu7SFfbIUtToJTVlC61/C7WLKDntSw4Sz6JNAIfL7zQ74VvirxpjB5kz/kIx0gCcb+5OEo2Q== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + +"@babel/plugin-proposal-nullish-coalescing-operator@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.17.12.tgz#1e93079bbc2cbc756f6db6a1925157c4a92b94be" + integrity sha512-ws/g3FSGVzv+VH86+QvgtuJL/kR67xaEIF2x0iPqdDfYW6ra6JF3lKVBkWynRLcNtIC1oCTfDRVxmm2mKzy+ag== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + +"@babel/plugin-proposal-numeric-separator@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz#d6b69f4af63fb38b6ca2558442a7fb191236eba9" + integrity sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + +"@babel/plugin-proposal-object-rest-spread@7.12.1": + version "7.12.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz#def9bd03cea0f9b72283dac0ec22d289c7691069" + integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA== dependencies: - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-function-name@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53" - integrity sha512-A95XEoCpb3TO+KZzJ4S/5uW5fNe26DjBGqf1o9ucyLyCmi1dXq/B3c8iaWTfBk3VvetUxl16e8tIrd5teOCfGw== + "@babel/helper-plugin-utils" "^7.10.4" + "@babel/plugin-syntax-object-rest-spread" "^7.8.0" + "@babel/plugin-transform-parameters" "^7.12.1" + +"@babel/plugin-proposal-object-rest-spread@^7.18.0": + version "7.18.0" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.0.tgz#79f2390c892ba2a68ec112eb0d895cfbd11155e8" + integrity sha512-nbTv371eTrFabDfHLElkn9oyf9VG+VKK6WMzhY2o4eHKaG19BToD9947zzGMO6I/Irstx9d8CwX6njPNIAR/yw== dependencies: - "@babel/helper-get-function-arity" "^7.0.0" - "@babel/template" "^7.1.0" - "@babel/types" "^7.0.0" + "@babel/compat-data" "^7.17.10" + "@babel/helper-compilation-targets" "^7.17.10" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.17.12" -"@babel/helper-get-function-arity@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3" - integrity sha512-r2DbJeg4svYvt3HOS74U4eWKsUAMRH01Z1ds1zx8KNTPtpTL5JAsdFv8BNyOpVqdFhHkkRDIg5B4AsxmkjAlmQ== +"@babel/plugin-proposal-optional-catch-binding@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz#c623a430674ffc4ab732fd0a0ae7722b67cb74cf" + integrity sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" -"@babel/helper-hoist-variables@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.0.0.tgz#46adc4c5e758645ae7a45deb92bab0918c23bb88" - integrity sha512-Ggv5sldXUeSKsuzLkddtyhyHe2YantsxWKNi7A+7LeD12ExRDWTRk29JCXpaHPAbMaIPZSil7n+lq78WY2VY7w== +"@babel/plugin-proposal-optional-chaining@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.17.12.tgz#f96949e9bacace3a9066323a5cf90cfb9de67174" + integrity sha512-7wigcOs/Z4YWlK7xxjkvaIw84vGhDv/P1dFGQap0nHkc8gFKY/r+hXc8Qzf5k1gY7CvGIcHqAnOagVKJJ1wVOQ== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" -"@babel/helper-member-expression-to-functions@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.0.0.tgz#8cd14b0a0df7ff00f009e7d7a436945f47c7a16f" - integrity sha512-avo+lm/QmZlv27Zsi0xEor2fKcqWG56D5ae9dzklpIaY7cQMK5N8VSpaNVPPagiqmy7LrEjK1IWdGMOqPu5csg== +"@babel/plugin-proposal-private-methods@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.17.12.tgz#c2ca3a80beb7539289938da005ad525a038a819c" + integrity sha512-SllXoxo19HmxhDWm3luPz+cPhtoTSKLJE9PXshsfrOzBqs60QP0r8OaJItrPhAj0d7mZMnNF0Y1UUggCDgMz1A== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-create-class-features-plugin" "^7.17.12" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/helper-module-imports@^7.0.0": - version "7.0.0" - resolved 
"https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.0.0.tgz#96081b7111e486da4d2cd971ad1a4fe216cc2e3d" - integrity sha512-aP/hlLq01DWNEiDg4Jn23i+CXxW/owM4WpDLFUbpjxe4NS3BhLVZQ5i7E0ZrxuQ/vwekIeciyamgB1UIYxxM6A== +"@babel/plugin-proposal-private-property-in-object@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.17.12.tgz#b02efb7f106d544667d91ae97405a9fd8c93952d" + integrity sha512-/6BtVi57CJfrtDNKfK5b66ydK2J5pXUKBKSPD2G1whamMuEnZWgoOIfO8Vf9F/DoD4izBLD/Au4NMQfruzzykg== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-create-class-features-plugin" "^7.17.12" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" -"@babel/helper-module-transforms@^7.1.0": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.2.2.tgz#ab2f8e8d231409f8370c883d20c335190284b963" - integrity sha512-YRD7I6Wsv+IHuTPkAmAS4HhY0dkPobgLftHp0cRGZSdrRvmZY8rFvae/GVu3bD00qscuvK3WPHB3YdNpBXUqrA== +"@babel/plugin-proposal-unicode-property-regex@^7.17.12", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.17.12.tgz#3dbd7a67bd7f94c8238b394da112d86aaf32ad4d" + integrity sha512-Wb9qLjXf3ZazqXA7IvI7ozqRIXIGPtSo+L5coFmEkhTQK18ao4UDDD0zdTGAarmbLj2urpRwrc6893cu5Bfh0A== dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-simple-access" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.0.0" - "@babel/template" "^7.2.2" - "@babel/types" "^7.2.2" - lodash "^4.17.10" + "@babel/helper-create-regexp-features-plugin" "^7.17.12" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/helper-optimise-call-expression@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.0.0.tgz#a2920c5702b073c15de51106200aa8cad20497d5" - integrity sha512-u8nd9NQePYNQV8iPWu/pLLYBqZBa4ZaY1YWRFMuxrid94wKI1QNt67NEZ7GAe5Kc/0LLScbim05xZFWkAdrj9g== +"@babel/plugin-syntax-async-generators@^7.8.4": + version "7.8.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" + integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/helper-plugin-utils@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250" - integrity sha512-CYAOUCARwExnEixLdB6sDm2dIJ/YgEAKDM1MOeMeZu9Ld/bDgVo8aiWrXwcY7OBh+1Ea2uUcVRcxKk0GJvW7QA== - -"@babel/helper-regex@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.0.0.tgz#2c1718923b57f9bbe64705ffe5640ac64d9bdb27" - integrity 
sha512-TR0/N0NDCcUIUEbqV6dCO+LptmmSQFQ7q70lfcEB4URsjD0E1HzicrwUH+ap6BAQ2jhCX9Q4UqZy4wilujWlkg== +"@babel/plugin-syntax-class-properties@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" + integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== dependencies: - lodash "^4.17.10" + "@babel/helper-plugin-utils" "^7.12.13" -"@babel/helper-remap-async-to-generator@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.1.0.tgz#361d80821b6f38da75bd3f0785ece20a88c5fe7f" - integrity sha512-3fOK0L+Fdlg8S5al8u/hWE6vhufGSn0bN09xm2LXMy//REAF8kDCrYoOBKYmA8m5Nom+sV9LyLCwrFynA8/slg== +"@babel/plugin-syntax-class-static-block@^7.14.5": + version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz#195df89b146b4b78b3bf897fd7a257c84659d406" + integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw== dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-wrap-function" "^7.1.0" - "@babel/template" "^7.1.0" - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.14.5" -"@babel/helper-replace-supers@^7.1.0": - version "7.2.3" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.2.3.tgz#19970020cf22677d62b3a689561dbd9644d8c5e5" - integrity sha512-GyieIznGUfPXPWu0yLS6U55Mz67AZD9cUk0BfirOWlPrXlBcan9Gz+vHGz+cPfuoweZSnPzPIm67VtQM0OWZbA== +"@babel/plugin-syntax-dynamic-import@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" + integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== dependencies: - "@babel/helper-member-expression-to-functions" "^7.0.0" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/traverse" "^7.2.3" - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/helper-simple-access@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.1.0.tgz#65eeb954c8c245beaa4e859da6188f39d71e585c" - integrity sha512-Vk+78hNjRbsiu49zAPALxTb+JUQCz1aolpd8osOF16BGnLtseD21nbHgLPGUwrXEurZgiCOUmvs3ExTu4F5x6w== +"@babel/plugin-syntax-export-namespace-from@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz#028964a9ba80dbc094c915c487ad7c4e7a66465a" + integrity sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q== dependencies: - "@babel/template" "^7.1.0" - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.8.3" -"@babel/helper-split-export-declaration@^7.0.0": - version "7.0.0" - resolved 
"https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.0.0.tgz#3aae285c0311c2ab095d997b8c9a94cad547d813" - integrity sha512-MXkOJqva62dfC0w85mEf/LucPPS/1+04nmmRMPEBUB++hiiThQ2zPtX/mEWQ3mtzCEjIJvPY8nuwxXtQeQwUag== +"@babel/plugin-syntax-import-assertions@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.17.12.tgz#58096a92b11b2e4e54b24c6a0cc0e5e607abcedd" + integrity sha512-n/loy2zkq9ZEM8tEOwON9wTQSTNDTDEz6NujPtJGLU7qObzT1N4c4YZZf8E6ATB2AjNQg/Ib2AIpO03EZaCehw== dependencies: - "@babel/types" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/helper-wrap-function@^7.1.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.2.0.tgz#c4e0012445769e2815b55296ead43a958549f6fa" - integrity sha512-o9fP1BZLLSrYlxYEYyl2aS+Flun5gtjTIG8iln+XuEzQTs0PLagAGSXUcqruJwD5fM48jzIEggCKpIfWTcR7pQ== +"@babel/plugin-syntax-json-strings@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" + integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/template" "^7.1.0" - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.2.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/helpers@^7.2.0": - version "7.3.1" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.3.1.tgz#949eec9ea4b45d3210feb7dc1c22db664c9e44b9" - integrity sha512-Q82R3jKsVpUV99mgX50gOPCWwco9Ec5Iln/8Vyu4osNIOQgSrd9RFrQeUvmvddFNoLwMyOUWU+5ckioEKpDoGA== +"@babel/plugin-syntax-jsx@7.12.1": + version "7.12.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz#9d9d357cc818aa7ae7935917c1257f67677a0926" + integrity sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg== dependencies: - "@babel/template" "^7.1.2" - "@babel/traverse" "^7.1.5" - "@babel/types" "^7.3.0" + "@babel/helper-plugin-utils" "^7.10.4" -"@babel/highlight@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0.tgz#f710c38c8d458e6dd9a201afb637fcb781ce99e4" - integrity sha512-UFMC4ZeFC48Tpvj7C8UgLvtkaUuovQX+5xNWrsIoMG8o2z+XFKjKaN9iVmS84dPwVN00W4wPmqvYoZF3EGAsfw== +"@babel/plugin-syntax-jsx@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.17.12.tgz#834035b45061983a491f60096f61a2e7c5674a47" + integrity sha512-spyY3E3AURfxh/RHtjx5j6hs8am5NbUBGfcZ2vB3uShSpZdQyXSf5rR5Mk76vbtlAZOelyVQ71Fg0x9SG4fsog== dependencies: - chalk "^2.0.0" - esutils "^2.0.2" - js-tokens "^4.0.0" - -"@babel/parser@^7.2.2", "@babel/parser@^7.2.3": - version "7.3.2" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.3.2.tgz#95cdeddfc3992a6ca2a1315191c1679ca32c55cd" - integrity sha512-QzNUC2RO1gadg+fs21fi0Uu0OuGNzRKEmgCxoLNzbCdoprLwjfmZwzUrpUNfJPaVRwBpDY47A17yYEGWyRelnQ== + "@babel/helper-plugin-utils" "^7.17.12" 
-"@babel/plugin-proposal-async-generator-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.2.0.tgz#b289b306669dce4ad20b0252889a15768c9d417e" - integrity sha512-+Dfo/SCQqrwx48ptLVGLdE39YtWRuKc/Y9I5Fy0P1DDBB9lsAHpjcEJQt+4IifuSOSTLBKJObJqMvaO1pIE8LQ== +"@babel/plugin-syntax-logical-assignment-operators@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" + integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-remap-async-to-generator" "^7.1.0" - "@babel/plugin-syntax-async-generators" "^7.2.0" + "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-proposal-json-strings@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.2.0.tgz#568ecc446c6148ae6b267f02551130891e29f317" - integrity sha512-MAFV1CA/YVmYwZG0fBQyXhmj0BHCB5egZHCKWIFVv/XCxAeVGIHfos3SwDck4LvCllENIAg7xMKOG5kH0dzyUg== +"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" + integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-proposal-object-rest-spread@^7.3.1": - version "7.3.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.3.2.tgz#6d1859882d4d778578e41f82cc5d7bf3d5daf6c1" - integrity sha512-DjeMS+J2+lpANkYLLO+m6GjoTMygYglKmRe6cDTbFv3L9i6mmiE8fe6B8MtCSLZpVXscD5kn7s6SgtHrDoBWoA== +"@babel/plugin-syntax-numeric-separator@^7.10.4": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97" + integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" + "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-proposal-optional-catch-binding@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.2.0.tgz#135d81edb68a081e55e56ec48541ece8065c38f5" - integrity sha512-mgYj3jCcxug6KUcX4OBoOJz3CMrwRfQELPQ5560F70YQUBZB7uac9fqaWamKR1iWUzGiK2t0ygzjTScZnVz75g== +"@babel/plugin-syntax-object-rest-spread@7.8.3", "@babel/plugin-syntax-object-rest-spread@^7.8.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3": + version "7.8.3" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" + integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-proposal-unicode-property-regex@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.2.0.tgz#abe7281fe46c95ddc143a65e5358647792039520" - integrity sha512-LvRVYb7kikuOtIoUeWTkOxQEV1kYvL5B6U3iWEGCzPNRus1MzJweFqORTj+0jkxozkTSYNJozPOddxmqdqsRpw== +"@babel/plugin-syntax-optional-catch-binding@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" + integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.0.0" - regexpu-core "^4.2.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-async-generators@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.2.0.tgz#69e1f0db34c6f5a0cf7e2b3323bf159a76c8cb7f" - integrity sha512-1ZrIRBv2t0GSlcwVoQ6VgSLpLgiN/FVQUzt9znxo7v2Ov4jJrs8RY8tv0wvDmFN3qIdMKWrmMMW6yZ0G19MfGg== +"@babel/plugin-syntax-optional-chaining@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" + integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-json-strings@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.2.0.tgz#72bd13f6ffe1d25938129d2a186b11fd62951470" - integrity sha512-5UGYnMSLRE1dqqZwug+1LISpA403HzlSfsg6P9VXU6TBjcSHeNlw4DxDx7LgpF+iKZoOG/+uzqoRHTdcUpiZNg== +"@babel/plugin-syntax-private-property-in-object@^7.14.5": + version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz#0dc6671ec0ea22b6e94a1114f857970cd39de1ad" + integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-object-rest-spread@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.2.0.tgz#3b7a3e733510c57e820b9142a6579ac8b0dfad2e" - integrity sha512-t0JKGgqk2We+9may3t0xDdmneaXmyxq0xieYcKHxIsrJO64n1OiMWNUtc5gQK1PA0NpdCRrtZp4z+IUaKugrSA== +"@babel/plugin-syntax-top-level-await@^7.14.5": 
+ version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" + integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-optional-catch-binding@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.2.0.tgz#a94013d6eda8908dfe6a477e7f9eda85656ecf5c" - integrity sha512-bDe4xKNhb0LI7IvZHiA13kff0KEfaGX/Hv4lMA9+7TEc63hMNvfKo6ZFpXhKuEp+II/q35Gc4NoMeDZyaUbj9w== +"@babel/plugin-syntax-typescript@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.17.12.tgz#b54fc3be6de734a56b87508f99d6428b5b605a7b" + integrity sha512-TYY0SXFiO31YXtNg3HtFwNJHjLsAyIIhAhNWkQ5whPPS7HWUFlg9z0Ta4qAQNjQbP1wsSt/oKkmZ/4/WWdMUpw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-arrow-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.2.0.tgz#9aeafbe4d6ffc6563bf8f8372091628f00779550" - integrity sha512-ER77Cax1+8/8jCB9fo4Ud161OZzWN5qawi4GusDuRLcDbDG+bIGYY20zb2dfAFdTRGzrfq2xZPvF0R64EHnimg== +"@babel/plugin-transform-arrow-functions@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.17.12.tgz#dddd783b473b1b1537ef46423e3944ff24898c45" + integrity sha512-PHln3CNi/49V+mza4xMwrg+WGYevSF1oaiXaC2EQfdp4HWlSjRsrDXWJiQBKpP7749u6vQ9mcry2uuFOv5CXvA== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-async-to-generator@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.2.0.tgz#68b8a438663e88519e65b776f8938f3445b1a2ff" - integrity sha512-CEHzg4g5UraReozI9D4fblBYABs7IM6UerAVG7EJVrTLC5keh00aEuLUT+O40+mJCEzaXkYfTCUKIyeDfMOFFQ== +"@babel/plugin-transform-async-to-generator@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.17.12.tgz#dbe5511e6b01eee1496c944e35cdfe3f58050832" + integrity sha512-J8dbrWIOO3orDzir57NRsjg4uxucvhby0L/KZuGsWDj0g7twWK3g7JhJhOrXtuXiw8MeiSdJ3E0OW9H8LYEzLQ== dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-remap-async-to-generator" "^7.1.0" + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-remap-async-to-generator" "^7.16.8" -"@babel/plugin-transform-block-scoped-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.2.0.tgz#5d3cc11e8d5ddd752aa64c9148d0db6cb79fd190" - integrity 
sha512-ntQPR6q1/NKuphly49+QiQiTN0O63uOwjdD6dhIjSWBI5xlrbUFh720TIpzBhpnrLfv2tNH/BXvLIab1+BAI0w== +"@babel/plugin-transform-block-scoped-functions@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz#4d0d57d9632ef6062cdf354bb717102ee042a620" + integrity sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-block-scoping@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.2.0.tgz#f17c49d91eedbcdf5dd50597d16f5f2f770132d4" - integrity sha512-vDTgf19ZEV6mx35yiPJe4fS02mPQUUcBNwWQSZFXSzTSbsJFQvHt7DqyS3LK8oOWALFOsJ+8bbqBgkirZteD5Q== +"@babel/plugin-transform-block-scoping@^7.17.12": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.18.4.tgz#7988627b3e9186a13e4d7735dc9c34a056613fb9" + integrity sha512-+Hq10ye+jlvLEogSOtq4mKvtk7qwcUQ1f0Mrueai866C82f844Yom2cttfJdMdqRLTxWpsbfbkIkOIfovyUQXw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - lodash "^4.17.10" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-classes@^7.2.0": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.2.2.tgz#6c90542f210ee975aa2aa8c8b5af7fa73a126953" - integrity sha512-gEZvgTy1VtcDOaQty1l10T3jQmJKlNVxLDCs+3rCVPr6nMkODLELxViq5X9l+rfxbie3XrfrMCYYY6eX3aOcOQ== +"@babel/plugin-transform-classes@^7.17.12": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.18.4.tgz#51310b812a090b846c784e47087fa6457baef814" + integrity sha512-e42NSG2mlKWgxKUAD9EJJSkZxR67+wZqzNxLSpc51T8tRU5SLFHsPmgYR5yr7sdgX4u+iHA1C5VafJ6AyImV3A== dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-define-map" "^7.1.0" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.0.0" + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-environment-visitor" "^7.18.2" + "@babel/helper-function-name" "^7.17.9" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-replace-supers" "^7.18.2" + "@babel/helper-split-export-declaration" "^7.16.7" globals "^11.1.0" -"@babel/plugin-transform-computed-properties@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.2.0.tgz#83a7df6a658865b1c8f641d510c6f3af220216da" - integrity sha512-kP/drqTxY6Xt3NNpKiMomfgkNn4o7+vKxK2DDKcBG9sHj51vHqMBGy8wbDS/J4lMxnqs153/T3+DmCEAkC5cpA== +"@babel/plugin-transform-computed-properties@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.17.12.tgz#bca616a83679698f3258e892ed422546e531387f" + 
integrity sha512-a7XINeplB5cQUWMg1E/GI1tFz3LfK021IjV1rj1ypE+R7jHm+pIHmHl25VNkZxtx9uuYp7ThGk8fur1HHG7PgQ== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-destructuring@^7.2.0": - version "7.3.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.3.2.tgz#f2f5520be055ba1c38c41c0e094d8a461dd78f2d" - integrity sha512-Lrj/u53Ufqxl/sGxyjsJ2XNtNuEjDyjpqdhMNh5aZ+XFOdThL46KBj27Uem4ggoezSYBxKWAil6Hu8HtwqesYw== +"@babel/plugin-transform-destructuring@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.0.tgz#dc4f92587e291b4daa78aa20cc2d7a63aa11e858" + integrity sha512-Mo69klS79z6KEfrLg/1WkmVnB8javh75HX4pi2btjvlIoasuxilEyjtsQW6XPrubNd7AQy0MMaNIaQE4e7+PQw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-dotall-regex@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.2.0.tgz#f0aabb93d120a8ac61e925ea0ba440812dbe0e49" - integrity sha512-sKxnyHfizweTgKZf7XsXu/CNupKhzijptfTM+bozonIuyVrLWVUvYjE2bhuSBML8VQeMxq4Mm63Q9qvcvUcciQ== +"@babel/plugin-transform-dotall-regex@^7.16.7", "@babel/plugin-transform-dotall-regex@^7.4.4": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz#6b2d67686fab15fb6a7fd4bd895d5982cfc81241" + integrity sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.0.0" - regexpu-core "^4.1.3" + "@babel/helper-create-regexp-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-duplicate-keys@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.2.0.tgz#d952c4930f312a4dbfff18f0b2914e60c35530b3" - integrity sha512-q+yuxW4DsTjNceUiTzK0L+AfQ0zD9rWaTLiUqHA8p0gxx7lu1EylenfzjeIWNkPy6e/0VG/Wjw9uf9LueQwLOw== +"@babel/plugin-transform-duplicate-keys@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.17.12.tgz#a09aa709a3310013f8e48e0e23bc7ace0f21477c" + integrity sha512-EA5eYFUG6xeerdabina/xIoB95jJ17mAkR8ivx6ZSu9frKShBjpOGZPn511MTDTkiCO+zXnzNczvUM69YSf3Zw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-exponentiation-operator@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.2.0.tgz#a63868289e5b4007f7054d46491af51435766008" - integrity sha512-umh4hR6N7mu4Elq9GG8TOu9M0bakvlsREEC+ialrQN6ABS4oDQ69qJv1VtR3uxlKMCQMCvzk7vr17RHKcjx68A== +"@babel/plugin-transform-exponentiation-operator@^7.16.7": + version "7.16.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz#efa9862ef97e9e9e5f653f6ddc7b665e8536fe9b" + integrity sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA== dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-for-of@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.2.0.tgz#ab7468befa80f764bb03d3cb5eef8cc998e1cad9" - integrity sha512-Kz7Mt0SsV2tQk6jG5bBv5phVbkd0gd27SgYD4hH1aLMJRchM0dzHaXvrWhVZ+WxAlDoAKZ7Uy3jVTW2mKXQ1WQ== +"@babel/plugin-transform-for-of@^7.18.1": + version "7.18.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.1.tgz#ed14b657e162b72afbbb2b4cdad277bf2bb32036" + integrity sha512-+TTB5XwvJ5hZbO8xvl2H4XaMDOAK57zF4miuC9qQJgysPNEAZZ9Z69rdF5LJkozGdZrjBIUAIyKUWRMmebI7vg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-function-name@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.2.0.tgz#f7930362829ff99a3174c39f0afcc024ef59731a" - integrity sha512-kWgksow9lHdvBC2Z4mxTsvc7YdY7w/V6B2vy9cTIPtLEE9NhwoWivaxdNM/S37elu5bqlLP/qOY906LukO9lkQ== +"@babel/plugin-transform-function-name@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz#5ab34375c64d61d083d7d2f05c38d90b97ec65cf" + integrity sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA== dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-compilation-targets" "^7.16.7" + "@babel/helper-function-name" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-literals@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.2.0.tgz#690353e81f9267dad4fd8cfd77eafa86aba53ea1" - integrity sha512-2ThDhm4lI4oV7fVQ6pNNK+sx+c/GM5/SaML0w/r4ZB7sAneD/piDJtwdKlNckXeyGK7wlwg2E2w33C/Hh+VFCg== +"@babel/plugin-transform-literals@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.17.12.tgz#97131fbc6bbb261487105b4b3edbf9ebf9c830ae" + integrity sha512-8iRkvaTjJciWycPIZ9k9duu663FT7VrBdNqNgxnVXEFwOIp55JWcZd23VBRySYbnS3PwQ3rGiabJBBBGj5APmQ== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-modules-amd@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.2.0.tgz#82a9bce45b95441f617a24011dc89d12da7f4ee6" - integrity sha512-mK2A8ucqz1qhrdqjS9VMIDfIvvT2thrEsIQzbaTdc5QFzhDjQv2CkJJ5f6BXIkgbmaoax3zBr2RyvV/8zeoUZw== 
+"@babel/plugin-transform-member-expression-literals@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz#6e5dcf906ef8a098e630149d14c867dd28f92384" + integrity sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw== dependencies: - "@babel/helper-module-transforms" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-modules-commonjs@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.2.0.tgz#c4f1933f5991d5145e9cfad1dfd848ea1727f404" - integrity sha512-V6y0uaUQrQPXUrmj+hgnks8va2L0zcZymeU7TtWEgdRLNkceafKXEduv7QzgQAE4lT+suwooG9dC7LFhdRAbVQ== +"@babel/plugin-transform-modules-amd@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.0.tgz#7ef1002e67e36da3155edc8bf1ac9398064c02ed" + integrity sha512-h8FjOlYmdZwl7Xm2Ug4iX2j7Qy63NANI+NQVWQzv6r25fqgg7k2dZl03p95kvqNclglHs4FZ+isv4p1uXMA+QA== dependencies: - "@babel/helper-module-transforms" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-simple-access" "^7.1.0" + "@babel/helper-module-transforms" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" + babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-systemjs@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.2.0.tgz#912bfe9e5ff982924c81d0937c92d24994bb9068" - integrity sha512-aYJwpAhoK9a+1+O625WIjvMY11wkB/ok0WClVwmeo3mCjcNRjt+/8gHWrB5i+00mUju0gWsBkQnPpdvQ7PImmQ== +"@babel/plugin-transform-modules-commonjs@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.2.tgz#1aa8efa2e2a6e818b6a7f2235fceaf09bdb31e9e" + integrity sha512-f5A865gFPAJAEE0K7F/+nm5CmAE3y8AWlMBG9unu5j9+tk50UQVK0QS8RNxSp7MJf0wh97uYyLWt3Zvu71zyOQ== dependencies: - "@babel/helper-hoist-variables" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-module-transforms" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-simple-access" "^7.18.2" + babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-umd@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.2.0.tgz#7678ce75169f0877b8eb2235538c074268dd01ae" - integrity sha512-BV3bw6MyUH1iIsGhXlOK6sXhmSarZjtJ/vMiD9dNmpY8QXFFQTj+6v92pcfy1iqa8DeAfJFwoxcrS/TUZda6sw== +"@babel/plugin-transform-modules-systemjs@^7.18.0": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.4.tgz#3d6fd9868c735cce8f38d6ae3a407fb7e61e6d46" + integrity sha512-lH2UaQaHVOAeYrUUuZ8i38o76J/FnO8vu21OE+tD1MyP9lxdZoSfz+pDbWkq46GogUrdrMz3tiz/FYGB+bVThg== dependencies: - "@babel/helper-module-transforms" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-hoist-variables" "^7.16.7" + 
"@babel/helper-module-transforms" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-validator-identifier" "^7.16.7" + babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-named-capturing-groups-regex@^7.3.0": - version "7.3.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.3.0.tgz#140b52985b2d6ef0cb092ef3b29502b990f9cd50" - integrity sha512-NxIoNVhk9ZxS+9lSoAQ/LM0V2UEvARLttEHUrRDGKFaAxOYQcrkN/nLRE+BbbicCAvZPl7wMP0X60HsHE5DtQw== +"@babel/plugin-transform-modules-umd@^7.18.0": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.0.tgz#56aac64a2c2a1922341129a4597d1fd5c3ff020f" + integrity sha512-d/zZ8I3BWli1tmROLxXLc9A6YXvGK8egMxHp+E/rRwMh1Kip0AP77VwZae3snEJ33iiWwvNv2+UIIhfalqhzZA== dependencies: - regexp-tree "^0.1.0" + "@babel/helper-module-transforms" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-new-target@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.0.0.tgz#ae8fbd89517fa7892d20e6564e641e8770c3aa4a" - integrity sha512-yin069FYjah+LbqfGeTfzIBODex/e++Yfa0rH0fpfam9uTbuEeEOx5GLGr210ggOV77mVRNoeqSYqeuaqSzVSw== +"@babel/plugin-transform-named-capturing-groups-regex@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.17.12.tgz#9c4a5a5966e0434d515f2675c227fd8cc8606931" + integrity sha512-vWoWFM5CKaTeHrdUJ/3SIOTRV+MBVGybOC9mhJkaprGNt5demMymDW24yC74avb915/mIRe3TgNb/d8idvnCRA== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-object-super@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.2.0.tgz#b35d4c10f56bab5d650047dad0f1d8e8814b6598" - integrity sha512-VMyhPYZISFZAqAPVkiYb7dUe2AsVi2/wCT5+wZdsNO31FojQJa9ns40hzZ6U9f50Jlq4w6qwzdBB2uwqZ00ebg== + "@babel/helper-create-regexp-features-plugin" "^7.17.12" + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-new-target@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.17.12.tgz#10842cd605a620944e81ea6060e9e65c265742e3" + integrity sha512-CaOtzk2fDYisbjAD4Sd1MTKGVIpRtx9bWLyj24Y/k6p4s4gQ3CqDGJauFJxt8M/LEx003d0i3klVqnN73qvK3w== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.1.0" - -"@babel/plugin-transform-parameters@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.2.0.tgz#0d5ad15dc805e2ea866df4dd6682bfe76d1408c2" - integrity sha512-kB9+hhUidIgUoBQ0MsxMewhzr8i60nMa2KgeJKQWYrqQpqcBYtnpR+JgkadZVZoaEZ/eKu9mclFaVwhRpLNSzA== + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-object-super@^7.16.7": + version "7.16.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz#ac359cf8d32cf4354d27a46867999490b6c32a94" + integrity sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw== dependencies: - "@babel/helper-call-delegate" "^7.1.0" - "@babel/helper-get-function-arity" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-regenerator@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.0.0.tgz#5b41686b4ed40bef874d7ed6a84bdd849c13e0c1" - integrity sha512-sj2qzsEx8KDVv1QuJc/dEfilkg3RRPvPYx/VnKLtItVQRWt1Wqf5eVCOLZm29CiGFfYYsA3VPjfizTCV0S0Dlw== + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + +"@babel/plugin-transform-parameters@^7.12.1", "@babel/plugin-transform-parameters@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.17.12.tgz#eb467cd9586ff5ff115a9880d6fdbd4a846b7766" + integrity sha512-6qW4rWo1cyCdq1FkYri7AHpauchbGLXpdwnYsfxFb+KtddHENfsY5JZb35xUwkK5opOLcJ3BNd2l7PhRYGlwIA== dependencies: - regenerator-transform "^0.13.3" - -"@babel/plugin-transform-runtime@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.2.0.tgz#566bc43f7d0aedc880eaddbd29168d0f248966ea" - integrity sha512-jIgkljDdq4RYDnJyQsiWbdvGeei/0MOTtSHKO/rfbd/mXBxNpdlulMx49L0HQ4pug1fXannxoqCI+fYSle9eSw== + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-property-literals@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz#2dadac85155436f22c696c4827730e0fe1057a55" + integrity sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw== dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - resolve "^1.8.1" - semver "^5.5.1" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-shorthand-properties@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.2.0.tgz#6333aee2f8d6ee7e28615457298934a3b46198f0" - integrity sha512-QP4eUM83ha9zmYtpbnyjTLAGKQritA5XW/iG9cjtuOI8s1RuL/3V6a3DeSHfKutJQ+ayUfeZJPcnCYEQzaPQqg== +"@babel/plugin-transform-react-constant-elements@^7.14.5": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.17.12.tgz#cc580857696b6dd9e5e3d079e673d060a0657f37" + integrity sha512-maEkX2xs2STuv2Px8QuqxqjhV2LsFobT1elCgyU5704fcyTu9DyD/bJXxD/mrRiVyhpHweOQ00OJ5FKhHq9oEw== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.17.12" -"@babel/plugin-transform-spread@^7.2.0": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.2.2.tgz#3103a9abe22f742b6d406ecd3cd49b774919b406" - integrity 
sha512-KWfky/58vubwtS0hLqEnrWJjsMGaOeSBn90Ezn5Jeg9Z8KKHmELbP1yGylMlm5N6TPKeY9A2+UaSYLdxahg01w== +"@babel/plugin-transform-react-display-name@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.16.7.tgz#7b6d40d232f4c0f550ea348593db3b21e2404340" + integrity sha512-qgIg8BcZgd0G/Cz916D5+9kqX0c7nPZyXaP8R2tLNN5tkyIZdG5fEwBrxwplzSnjC1jvQmyMNVwUCZPcbGY7Pg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-sticky-regex@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.2.0.tgz#a1e454b5995560a9c1e0d537dfc15061fd2687e1" - integrity sha512-KKYCoGaRAf+ckH8gEL3JHUaFVyNHKe3ASNsZ+AlktgHevvxGigoIttrEJb8iKN03Q7Eazlv1s6cx2B2cQ3Jabw== +"@babel/plugin-transform-react-jsx-development@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.16.7.tgz#43a00724a3ed2557ed3f276a01a929e6686ac7b8" + integrity sha512-RMvQWvpla+xy6MlBpPlrKZCMRs2AGiHOGHY3xRwl0pEeim348dDyxeH4xBsMPbIMhujeq7ihE702eM2Ew0Wo+A== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.0.0" + "@babel/plugin-transform-react-jsx" "^7.16.7" -"@babel/plugin-transform-template-literals@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.2.0.tgz#d87ed01b8eaac7a92473f608c97c089de2ba1e5b" - integrity sha512-FkPix00J9A/XWXv4VoKJBMeSkyY9x/TqIh76wzcdfl57RJJcf8CehQ08uwfhCDNtRQYtHQKBTwKZDEyjE13Lwg== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" +"@babel/plugin-transform-react-jsx@^7.16.7", "@babel/plugin-transform-react-jsx@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.17.12.tgz#2aa20022709cd6a3f40b45d60603d5f269586dba" + integrity sha512-Lcaw8bxd1DKht3thfD4A12dqo1X16he1Lm8rIv8sTwjAYNInRS1qHa9aJoqvzpscItXvftKDCfaEQzwoVyXpEQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-jsx" "^7.17.12" + "@babel/types" "^7.17.12" -"@babel/plugin-transform-typeof-symbol@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.2.0.tgz#117d2bcec2fbf64b4b59d1f9819894682d29f2b2" - integrity sha512-2LNhETWYxiYysBtrBTqL8+La0jIoQQnIScUJc74OYvUGRmkskNY4EzLCnjHBzdmb38wqtTaixpo1NctEcvMDZw== +"@babel/plugin-transform-react-pure-annotations@^7.16.7": + version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.0.tgz#ef82c8e310913f3522462c9ac967d395092f1954" + integrity sha512-6+0IK6ouvqDn9bmEG7mEyF/pwlJXVj5lwydybpyyH3D0A7Hftk+NCTdYjnLNZksn261xaOV5ksmp20pQEmc2RQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-regenerator@^7.18.0": 
+ version "7.18.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.0.tgz#44274d655eb3f1af3f3a574ba819d3f48caf99d5" + integrity sha512-C8YdRw9uzx25HSIzwA7EM7YP0FhCe5wNvJbZzjVNHHPGVcDJ3Aie+qGYYdS1oVQgn+B3eAIJbWFLrJ4Jipv7nw== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + regenerator-transform "^0.15.0" + +"@babel/plugin-transform-reserved-words@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.17.12.tgz#7dbd349f3cdffba751e817cf40ca1386732f652f" + integrity sha512-1KYqwbJV3Co03NIi14uEHW8P50Md6KqFgt0FfpHdK6oyAHQVTosgPuPSiWud1HX0oYJ1hGRRlk0fP87jFpqXZA== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-runtime@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.18.2.tgz#04637de1e45ae8847ff14b9beead09c33d34374d" + integrity sha512-mr1ufuRMfS52ttq+1G1PD8OJNqgcTFjq3hwn8SZ5n1x1pBhi0E36rYMdTK0TsKtApJ4lDEdfXJwtGobQMHSMPg== + dependencies: + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-plugin-utils" "^7.17.12" + babel-plugin-polyfill-corejs2 "^0.3.0" + babel-plugin-polyfill-corejs3 "^0.5.0" + babel-plugin-polyfill-regenerator "^0.3.0" + semver "^6.3.0" + +"@babel/plugin-transform-shorthand-properties@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz#e8549ae4afcf8382f711794c0c7b6b934c5fbd2a" + integrity sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-spread@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.17.12.tgz#c112cad3064299f03ea32afed1d659223935d1f5" + integrity sha512-9pgmuQAtFi3lpNUstvG9nGfk9DkrdmWNp9KeKPFmuZCpEnxRzYlS8JgwPjYj+1AWDOSvoGN0H30p1cBOmT/Svg== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + +"@babel/plugin-transform-sticky-regex@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz#c84741d4f4a38072b9a1e2e3fd56d359552e8660" + integrity sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-template-literals@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.2.tgz#31ed6915721864847c48b656281d0098ea1add28" + integrity sha512-/cmuBVw9sZBGZVOMkpAEaVLwm4JmK2GZ1dFKOGGpMzEHWFmyZZ59lUU0PdRr8YNYeQdNzTDwuxP2X2gzydTc9g== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-typeof-symbol@^7.17.12": + version "7.17.12" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.17.12.tgz#0f12f57ac35e98b35b4ed34829948d42bd0e6889" + integrity sha512-Q8y+Jp7ZdtSPXCThB6zjQ74N3lj0f6TDh1Hnf5B+sYlzQ8i5Pjp8gW0My79iekSpT4WnI06blqP6DT0OmaXXmw== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + +"@babel/plugin-transform-typescript@^7.17.12": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.18.4.tgz#587eaf6a39edb8c06215e550dc939faeadd750bf" + integrity sha512-l4vHuSLUajptpHNEOUDEGsnpl9pfRLsN1XUoDQDD/YBuXTM+v37SHGS+c6n4jdcZy96QtuUuSvZYMLSSsjH8Mw== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.18.0" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/plugin-syntax-typescript" "^7.17.12" + +"@babel/plugin-transform-unicode-escapes@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz#da8717de7b3287a2c6d659750c964f302b31ece3" + integrity sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-unicode-regex@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz#0f7aa4a501198976e25e82702574c34cfebe9ef2" + integrity sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/preset-env@^7.15.6", "@babel/preset-env@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.18.2.tgz#f47d3000a098617926e674c945d95a28cb90977a" + integrity sha512-PfpdxotV6afmXMU47S08F9ZKIm2bJIQ0YbAAtDfIENX7G1NUAXigLREh69CWDjtgUy7dYn7bsMzkgdtAlmS68Q== + dependencies: + "@babel/compat-data" "^7.17.10" + "@babel/helper-compilation-targets" "^7.18.2" + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-validator-option" "^7.16.7" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.17.12" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.17.12" + "@babel/plugin-proposal-async-generator-functions" "^7.17.12" + "@babel/plugin-proposal-class-properties" "^7.17.12" + "@babel/plugin-proposal-class-static-block" "^7.18.0" + "@babel/plugin-proposal-dynamic-import" "^7.16.7" + "@babel/plugin-proposal-export-namespace-from" "^7.17.12" + "@babel/plugin-proposal-json-strings" "^7.17.12" + "@babel/plugin-proposal-logical-assignment-operators" "^7.17.12" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.17.12" + "@babel/plugin-proposal-numeric-separator" "^7.16.7" + "@babel/plugin-proposal-object-rest-spread" "^7.18.0" + "@babel/plugin-proposal-optional-catch-binding" "^7.16.7" + "@babel/plugin-proposal-optional-chaining" "^7.17.12" + "@babel/plugin-proposal-private-methods" "^7.17.12" + "@babel/plugin-proposal-private-property-in-object" "^7.17.12" + "@babel/plugin-proposal-unicode-property-regex" "^7.17.12" + "@babel/plugin-syntax-async-generators" "^7.8.4" + "@babel/plugin-syntax-class-properties" "^7.12.13" + 
"@babel/plugin-syntax-class-static-block" "^7.14.5" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" + "@babel/plugin-syntax-import-assertions" "^7.17.12" + "@babel/plugin-syntax-json-strings" "^7.8.3" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" + "@babel/plugin-syntax-top-level-await" "^7.14.5" + "@babel/plugin-transform-arrow-functions" "^7.17.12" + "@babel/plugin-transform-async-to-generator" "^7.17.12" + "@babel/plugin-transform-block-scoped-functions" "^7.16.7" + "@babel/plugin-transform-block-scoping" "^7.17.12" + "@babel/plugin-transform-classes" "^7.17.12" + "@babel/plugin-transform-computed-properties" "^7.17.12" + "@babel/plugin-transform-destructuring" "^7.18.0" + "@babel/plugin-transform-dotall-regex" "^7.16.7" + "@babel/plugin-transform-duplicate-keys" "^7.17.12" + "@babel/plugin-transform-exponentiation-operator" "^7.16.7" + "@babel/plugin-transform-for-of" "^7.18.1" + "@babel/plugin-transform-function-name" "^7.16.7" + "@babel/plugin-transform-literals" "^7.17.12" + "@babel/plugin-transform-member-expression-literals" "^7.16.7" + "@babel/plugin-transform-modules-amd" "^7.18.0" + "@babel/plugin-transform-modules-commonjs" "^7.18.2" + "@babel/plugin-transform-modules-systemjs" "^7.18.0" + "@babel/plugin-transform-modules-umd" "^7.18.0" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.17.12" + "@babel/plugin-transform-new-target" "^7.17.12" + "@babel/plugin-transform-object-super" "^7.16.7" + "@babel/plugin-transform-parameters" "^7.17.12" + "@babel/plugin-transform-property-literals" "^7.16.7" + "@babel/plugin-transform-regenerator" "^7.18.0" + "@babel/plugin-transform-reserved-words" "^7.17.12" + "@babel/plugin-transform-shorthand-properties" "^7.16.7" + "@babel/plugin-transform-spread" "^7.17.12" + "@babel/plugin-transform-sticky-regex" "^7.16.7" + "@babel/plugin-transform-template-literals" "^7.18.2" + "@babel/plugin-transform-typeof-symbol" "^7.17.12" + "@babel/plugin-transform-unicode-escapes" "^7.16.7" + "@babel/plugin-transform-unicode-regex" "^7.16.7" + "@babel/preset-modules" "^0.1.5" + "@babel/types" "^7.18.2" + babel-plugin-polyfill-corejs2 "^0.3.0" + babel-plugin-polyfill-corejs3 "^0.5.0" + babel-plugin-polyfill-regenerator "^0.3.0" + core-js-compat "^3.22.1" + semver "^6.3.0" + +"@babel/preset-modules@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.5.tgz#ef939d6e7f268827e1841638dc6ff95515e115d9" + integrity sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA== dependencies: "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" + "@babel/plugin-transform-dotall-regex" "^7.4.4" + "@babel/types" "^7.4.4" + esutils "^2.0.2" -"@babel/plugin-transform-unicode-regex@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.2.0.tgz#4eb8db16f972f8abb5062c161b8b115546ade08b" - integrity 
sha512-m48Y0lMhrbXEJnVUaYly29jRXbQ3ksxPrS1Tg8t+MHqzXhtBYAvI51euOBaoAlZLPHsieY9XPVMf80a5x0cPcA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.0.0" - regexpu-core "^4.1.3" +"@babel/preset-react@^7.14.5", "@babel/preset-react@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.17.12.tgz#62adbd2d1870c0de3893095757ed5b00b492ab3d" + integrity sha512-h5U+rwreXtZaRBEQhW1hOJLMq8XNJBQ/9oymXiCXTuT/0uOwpbT0gUt+sXeOqoXBgNuUKI7TaObVwoEyWkpFgA== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-validator-option" "^7.16.7" + "@babel/plugin-transform-react-display-name" "^7.16.7" + "@babel/plugin-transform-react-jsx" "^7.17.12" + "@babel/plugin-transform-react-jsx-development" "^7.16.7" + "@babel/plugin-transform-react-pure-annotations" "^7.16.7" + +"@babel/preset-typescript@^7.15.0", "@babel/preset-typescript@^7.17.12": + version "7.17.12" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.17.12.tgz#40269e0a0084d56fc5731b6c40febe1c9a4a3e8c" + integrity sha512-S1ViF8W2QwAKUGJXxP9NAfNaqGDdEBJKpYkxHf5Yy2C4NPPzXGeR3Lhk7G8xJaaLcFTRfNjVbtbVtm8Gb0mqvg== + dependencies: + "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-validator-option" "^7.16.7" + "@babel/plugin-transform-typescript" "^7.17.12" + +"@babel/runtime-corejs3@^7.18.3": + version "7.18.3" + resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.18.3.tgz#52f0241a31e0ec61a6187530af6227c2846bd60c" + integrity sha512-l4ddFwrc9rnR+EJsHsh+TJ4A35YqQz/UqcjtlX2ov53hlJYG5CxtQmNZxyajwDVmCxwy++rtvGU5HazCK4W41Q== + dependencies: + core-js-pure "^3.20.2" + regenerator-runtime "^0.13.4" + +"@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.10.3", "@babel/runtime@^7.12.1", "@babel/runtime@^7.12.13", "@babel/runtime@^7.12.5", "@babel/runtime@^7.18.3", "@babel/runtime@^7.8.4": + version "7.18.3" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.18.3.tgz#c7b654b57f6f63cf7f8b418ac9ca04408c4579f4" + integrity sha512-38Y8f7YUhce/K7RMwTp7m0uCumpv9hZkitCbBClqQIow1qSbCvGkcegKOXpEWCQLfWmevgRiWokZ1GkpfhbZug== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/template@^7.12.7", "@babel/template@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.16.7.tgz#8d126c8701fde4d66b264b3eba3d96f07666d155" + integrity sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w== + dependencies: + "@babel/code-frame" "^7.16.7" + "@babel/parser" "^7.16.7" + "@babel/types" "^7.16.7" + +"@babel/traverse@^7.12.9", "@babel/traverse@^7.13.0", "@babel/traverse@^7.16.8", "@babel/traverse@^7.18.0", "@babel/traverse@^7.18.2": + version "7.18.2" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.18.2.tgz#b77a52604b5cc836a9e1e08dca01cba67a12d2e8" + integrity sha512-9eNwoeovJ6KH9zcCNnENY7DMFwTU9JdGCFtqNLfUAqtUHRCOsTOqWoffosP8vKmNYeSBUv3yVJXjfd8ucwOjUA== + dependencies: + "@babel/code-frame" "^7.16.7" + "@babel/generator" "^7.18.2" + "@babel/helper-environment-visitor" "^7.18.2" + "@babel/helper-function-name" "^7.17.9" + "@babel/helper-hoist-variables" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" + "@babel/parser" "^7.18.0" + 
"@babel/types" "^7.18.2" + debug "^4.1.0" + globals "^11.1.0" -"@babel/polyfill@^7.0.0": - version "7.2.5" - resolved "https://registry.yarnpkg.com/@babel/polyfill/-/polyfill-7.2.5.tgz#6c54b964f71ad27edddc567d065e57e87ed7fa7d" - integrity sha512-8Y/t3MWThtMLYr0YNC/Q76tqN1w30+b0uQMeFUYauG2UGTR19zyUtFrAzT23zNtBxPp+LbE5E/nwV/q/r3y6ug== +"@babel/types@^7.12.7", "@babel/types@^7.15.6", "@babel/types@^7.16.0", "@babel/types@^7.16.7", "@babel/types@^7.16.8", "@babel/types@^7.17.0", "@babel/types@^7.17.12", "@babel/types@^7.18.0", "@babel/types@^7.18.2", "@babel/types@^7.4.4": + version "7.18.4" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.18.4.tgz#27eae9b9fd18e9dccc3f9d6ad051336f307be354" + integrity sha512-ThN1mBcMq5pG/Vm2IcBmPPfyPXbd8S02rS+OBIDENdufvqC7Z/jHPCv9IcP01277aKtDI8g/2XysBN4hA8niiw== dependencies: - core-js "^2.5.7" - regenerator-runtime "^0.12.0" + "@babel/helper-validator-identifier" "^7.16.7" + to-fast-properties "^2.0.0" -"@babel/preset-env@^7.2.0": - version "7.3.1" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.3.1.tgz#389e8ca6b17ae67aaf9a2111665030be923515db" - integrity sha512-FHKrD6Dxf30e8xgHQO0zJZpUPfVZg+Xwgz5/RdSWCbza9QLNk4Qbp40ctRoqDxml3O8RMzB1DU55SXeDG6PqHQ== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-async-generator-functions" "^7.2.0" - "@babel/plugin-proposal-json-strings" "^7.2.0" - "@babel/plugin-proposal-object-rest-spread" "^7.3.1" - "@babel/plugin-proposal-optional-catch-binding" "^7.2.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.2.0" - "@babel/plugin-syntax-async-generators" "^7.2.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" - "@babel/plugin-transform-arrow-functions" "^7.2.0" - "@babel/plugin-transform-async-to-generator" "^7.2.0" - "@babel/plugin-transform-block-scoped-functions" "^7.2.0" - "@babel/plugin-transform-block-scoping" "^7.2.0" - "@babel/plugin-transform-classes" "^7.2.0" - "@babel/plugin-transform-computed-properties" "^7.2.0" - "@babel/plugin-transform-destructuring" "^7.2.0" - "@babel/plugin-transform-dotall-regex" "^7.2.0" - "@babel/plugin-transform-duplicate-keys" "^7.2.0" - "@babel/plugin-transform-exponentiation-operator" "^7.2.0" - "@babel/plugin-transform-for-of" "^7.2.0" - "@babel/plugin-transform-function-name" "^7.2.0" - "@babel/plugin-transform-literals" "^7.2.0" - "@babel/plugin-transform-modules-amd" "^7.2.0" - "@babel/plugin-transform-modules-commonjs" "^7.2.0" - "@babel/plugin-transform-modules-systemjs" "^7.2.0" - "@babel/plugin-transform-modules-umd" "^7.2.0" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.3.0" - "@babel/plugin-transform-new-target" "^7.0.0" - "@babel/plugin-transform-object-super" "^7.2.0" - "@babel/plugin-transform-parameters" "^7.2.0" - "@babel/plugin-transform-regenerator" "^7.0.0" - "@babel/plugin-transform-shorthand-properties" "^7.2.0" - "@babel/plugin-transform-spread" "^7.2.0" - "@babel/plugin-transform-sticky-regex" "^7.2.0" - "@babel/plugin-transform-template-literals" "^7.2.0" - "@babel/plugin-transform-typeof-symbol" "^7.2.0" - "@babel/plugin-transform-unicode-regex" "^7.2.0" - browserslist "^4.3.4" - invariant "^2.2.2" - js-levenshtein "^1.1.3" - semver "^5.3.0" - -"@babel/runtime@^7.2.0": - version "7.3.1" - resolved 
"https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.3.1.tgz#574b03e8e8a9898eaf4a872a92ea20b7846f6f2a" - integrity sha512-7jGW8ppV0ant637pIqAcFfQDDH1orEPGJb8aXfUozuCU3QqX7rX4DA8iwrbPrR1hcH0FTTHz47yQnk+bl5xHQA== - dependencies: - regenerator-runtime "^0.12.0" +"@colors/colors@1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" + integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== + +"@docsearch/css@3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@docsearch/css/-/css-3.1.0.tgz#6781cad43fc2e034d012ee44beddf8f93ba21f19" + integrity sha512-bh5IskwkkodbvC0FzSg1AxMykfDl95hebEKwxNoq4e5QaGzOXSBgW8+jnMFZ7JU4sTBiB04vZWoUSzNrPboLZA== + +"@docsearch/react@^3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@docsearch/react/-/react-3.1.0.tgz#da943a64c01ee82b04e53b691806469272f943f7" + integrity sha512-bjB6ExnZzf++5B7Tfoi6UXgNwoUnNOfZ1NyvnvPhWgCMy5V/biAtLL4o7owmZSYdAKeFSvZ5Lxm0is4su/dBWg== + dependencies: + "@algolia/autocomplete-core" "1.6.3" + "@docsearch/css" "3.1.0" + algoliasearch "^4.0.0" + +"@docusaurus/core@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/core/-/core-2.0.0-beta.21.tgz#50897317b22dbd94b1bf91bb30c2a0fddd15a806" + integrity sha512-qysDMVp1M5UozK3u/qOxsEZsHF7jeBvJDS+5ItMPYmNKvMbNKeYZGA0g6S7F9hRDwjIlEbvo7BaX0UMDcmTAWA== + dependencies: + "@babel/core" "^7.18.2" + "@babel/generator" "^7.18.2" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-transform-runtime" "^7.18.2" + "@babel/preset-env" "^7.18.2" + "@babel/preset-react" "^7.17.12" + "@babel/preset-typescript" "^7.17.12" + "@babel/runtime" "^7.18.3" + "@babel/runtime-corejs3" "^7.18.3" + "@babel/traverse" "^7.18.2" + "@docusaurus/cssnano-preset" "2.0.0-beta.21" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/mdx-loader" "2.0.0-beta.21" + "@docusaurus/react-loadable" "5.5.2" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-common" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + "@slorber/static-site-generator-webpack-plugin" "^4.0.4" + "@svgr/webpack" "^6.2.1" + autoprefixer "^10.4.7" + babel-loader "^8.2.5" + babel-plugin-dynamic-import-node "^2.3.3" + boxen "^6.2.1" + chalk "^4.1.2" + chokidar "^3.5.3" + clean-css "^5.3.0" + cli-table3 "^0.6.2" + combine-promises "^1.1.0" + commander "^5.1.0" + copy-webpack-plugin "^11.0.0" + core-js "^3.22.7" + css-loader "^6.7.1" + css-minimizer-webpack-plugin "^4.0.0" + cssnano "^5.1.9" + del "^6.1.1" + detect-port "^1.3.0" + escape-html "^1.0.3" + eta "^1.12.3" + file-loader "^6.2.0" + fs-extra "^10.1.0" + html-minifier-terser "^6.1.0" + html-tags "^3.2.0" + html-webpack-plugin "^5.5.0" + import-fresh "^3.3.0" + leven "^3.1.0" + lodash "^4.17.21" + mini-css-extract-plugin "^2.6.0" + postcss "^8.4.14" + postcss-loader "^7.0.0" + prompts "^2.4.2" + react-dev-utils "^12.0.1" + react-helmet-async "^1.3.0" + react-loadable "npm:@docusaurus/react-loadable@5.5.2" + react-loadable-ssr-addon-v5-slorber "^1.0.1" + react-router "^5.3.3" + react-router-config "^5.1.1" + react-router-dom "^5.3.3" + remark-admonitions "^1.2.1" + rtl-detect "^1.0.4" + semver "^7.3.7" + serve-handler "^6.1.3" + shelljs "^0.8.5" + terser-webpack-plugin "^5.3.1" + 
tslib "^2.4.0" + update-notifier "^5.1.0" + url-loader "^4.1.1" + wait-on "^6.0.1" + webpack "^5.72.1" + webpack-bundle-analyzer "^4.5.0" + webpack-dev-server "^4.9.0" + webpack-merge "^5.8.0" + webpackbar "^5.0.2" + +"@docusaurus/cssnano-preset@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/cssnano-preset/-/cssnano-preset-2.0.0-beta.21.tgz#38113877a5857c3f9d493522085d20909dcec474" + integrity sha512-fhTZrg1vc6zYYZIIMXpe1TnEVGEjqscBo0s1uomSwKjjtMgu7wkzc1KKJYY7BndsSA+fVVkZ+OmL/kAsmK7xxw== + dependencies: + cssnano-preset-advanced "^5.3.5" + postcss "^8.4.14" + postcss-sort-media-queries "^4.2.1" + tslib "^2.4.0" + +"@docusaurus/logger@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/logger/-/logger-2.0.0-beta.21.tgz#f6ab4133917965349ae03fd9111a940b24d4fd12" + integrity sha512-HTFp8FsSMrAj7Uxl5p72U+P7rjYU/LRRBazEoJbs9RaqoKEdtZuhv8MYPOCh46K9TekaoquRYqag2o23Qt4ggA== + dependencies: + chalk "^4.1.2" + tslib "^2.4.0" + +"@docusaurus/mdx-loader@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/mdx-loader/-/mdx-loader-2.0.0-beta.21.tgz#52af341e21f22be882d2155a7349bea10f5d77a3" + integrity sha512-AI+4obJnpOaBOAYV6df2ux5Y1YJCBS+MhXFf0yhED12sVLJi2vffZgdamYd/d/FwvWDw6QLs/VD2jebd7P50yQ== + dependencies: + "@babel/parser" "^7.18.3" + "@babel/traverse" "^7.18.2" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@mdx-js/mdx" "^1.6.22" + escape-html "^1.0.3" + file-loader "^6.2.0" + fs-extra "^10.1.0" + image-size "^1.0.1" + mdast-util-to-string "^2.0.0" + remark-emoji "^2.2.0" + stringify-object "^3.3.0" + tslib "^2.4.0" + unist-util-visit "^2.0.3" + url-loader "^4.1.1" + webpack "^5.72.1" + +"@docusaurus/module-type-aliases@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/module-type-aliases/-/module-type-aliases-2.0.0-beta.21.tgz#345f1c1a99407775d1d3ffc1a90c2df93d50a9b8" + integrity sha512-gRkWICgQZiqSJgrwRKWjXm5gAB+9IcfYdUbCG0PRPP/G8sNs9zBIOY4uT4Z5ox2CWFEm44U3RTTxj7BiLVMBXw== + dependencies: + "@docusaurus/types" "2.0.0-beta.21" + "@types/react" "*" + "@types/react-router-config" "*" + "@types/react-router-dom" "*" + react-helmet-async "*" + +"@docusaurus/plugin-content-blog@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-blog/-/plugin-content-blog-2.0.0-beta.21.tgz#86211deeea901ddcd77ca387778e121e93ee8d01" + integrity sha512-IP21yJViP3oBmgsWBU5LhrG1MZXV4mYCQSoCAboimESmy1Z11RCNP2tXaqizE3iTmXOwZZL+SNBk06ajKCEzWg== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/mdx-loader" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-common" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + cheerio "^1.0.0-rc.11" + feed "^4.2.2" + fs-extra "^10.1.0" + lodash "^4.17.21" + reading-time "^1.5.0" + remark-admonitions "^1.2.1" + tslib "^2.4.0" + unist-util-visit "^2.0.3" + utility-types "^3.10.0" + webpack "^5.72.1" + +"@docusaurus/plugin-content-docs@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved 
"https://registry.yarnpkg.com/@docusaurus/plugin-content-docs/-/plugin-content-docs-2.0.0-beta.21.tgz#b3171fa9aed99e367b6eb7111187bd0e3dcf2949" + integrity sha512-aa4vrzJy4xRy81wNskyhE3wzRf3AgcESZ1nfKh8xgHUkT7fDTZ1UWlg50Jb3LBCQFFyQG2XQB9N6llskI/KUnw== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/mdx-loader" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + combine-promises "^1.1.0" + fs-extra "^10.1.0" + import-fresh "^3.3.0" + js-yaml "^4.1.0" + lodash "^4.17.21" + remark-admonitions "^1.2.1" + tslib "^2.4.0" + utility-types "^3.10.0" + webpack "^5.72.1" + +"@docusaurus/plugin-content-pages@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-content-pages/-/plugin-content-pages-2.0.0-beta.21.tgz#df6b4c5c4cde8a0ea491a30002e84941ca7bf0cf" + integrity sha512-DmXOXjqNI+7X5hISzCvt54QIK6XBugu2MOxjxzuqI7q92Lk/EVdraEj5mthlH8IaEH/VlpWYJ1O9TzLqX5vH2g== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/mdx-loader" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + fs-extra "^10.1.0" + remark-admonitions "^1.2.1" + tslib "^2.4.0" + webpack "^5.72.1" + +"@docusaurus/plugin-debug@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-debug/-/plugin-debug-2.0.0-beta.21.tgz#dfa212fd90fe2f54439aacdc8c143e8ce96b0d27" + integrity sha512-P54J4q4ecsyWW0Jy4zbimSIHna999AfbxpXGmF1IjyHrjoA3PtuakV1Ai51XrGEAaIq9q6qMQkEhbUd3CffGAw== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + fs-extra "^10.1.0" + react-json-view "^1.21.3" + tslib "^2.4.0" + +"@docusaurus/plugin-google-analytics@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-2.0.0-beta.21.tgz#5475c58fb23603badf41d84298569f6c46b4e6b2" + integrity sha512-+5MS0PeGaJRgPuNZlbd/WMdQSpOACaxEz7A81HAxm6kE+tIASTW3l8jgj1eWFy/PGPzaLnQrEjxI1McAfnYmQw== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + tslib "^2.4.0" + +"@docusaurus/plugin-google-gtag@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-2.0.0-beta.21.tgz#a4a101089994a7103c1cc7cddb15170427b185d6" + integrity sha512-4zxKZOnf0rfh6myXLG7a6YZfQcxYDMBsWqANEjCX77H5gPdK+GHZuDrxK6sjFvRBv4liYCrNjo7HJ4DpPoT0zA== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + tslib "^2.4.0" + +"@docusaurus/plugin-sitemap@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/plugin-sitemap/-/plugin-sitemap-2.0.0-beta.21.tgz#8bfa695eada2ec95c9376a884641237ffca5dd3d" + integrity sha512-/ynWbcXZXcYZ6sT2X6vAJbnfqcPxwdGEybd0rcRZi4gBHq6adMofYI25AqELmnbBDxt0If+vlAeUHFRG5ueP7Q== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-common" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + fs-extra "^10.1.0" + sitemap "^7.1.1" + tslib "^2.4.0" + 
+"@docusaurus/preset-classic@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/preset-classic/-/preset-classic-2.0.0-beta.21.tgz#1362d8650ebed22633db411caaba80075f7c86ce" + integrity sha512-KvBnIUu7y69pNTJ9UhX6SdNlK6prR//J3L4rhN897tb8xx04xHHILlPXko2Il+C3Xzgh3OCgyvkoz9K6YlFTDw== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/plugin-content-blog" "2.0.0-beta.21" + "@docusaurus/plugin-content-docs" "2.0.0-beta.21" + "@docusaurus/plugin-content-pages" "2.0.0-beta.21" + "@docusaurus/plugin-debug" "2.0.0-beta.21" + "@docusaurus/plugin-google-analytics" "2.0.0-beta.21" + "@docusaurus/plugin-google-gtag" "2.0.0-beta.21" + "@docusaurus/plugin-sitemap" "2.0.0-beta.21" + "@docusaurus/theme-classic" "2.0.0-beta.21" + "@docusaurus/theme-common" "2.0.0-beta.21" + "@docusaurus/theme-search-algolia" "2.0.0-beta.21" + +"@docusaurus/react-loadable@5.5.2", "react-loadable@npm:@docusaurus/react-loadable@5.5.2": + version "5.5.2" + resolved "https://registry.yarnpkg.com/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz#81aae0db81ecafbdaee3651f12804580868fa6ce" + integrity sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ== + dependencies: + "@types/react" "*" + prop-types "^15.6.2" -"@babel/template@^7.1.0", "@babel/template@^7.1.2", "@babel/template@^7.2.2": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.2.2.tgz#005b3fdf0ed96e88041330379e0da9a708eb2907" - integrity sha512-zRL0IMM02AUDwghf5LMSSDEz7sBCO2YnNmpg3uWTZj/v1rcG2BmQUvaGU8GhU8BvfMh1k2KIAYZ7Ji9KXPUg7g== +"@docusaurus/theme-classic@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-classic/-/theme-classic-2.0.0-beta.21.tgz#6df5b9ea2d389dafb6f59badeabb3eda060b5017" + integrity sha512-Ge0WNdTefD0VDQfaIMRRWa8tWMG9+8/OlBRd5MK88/TZfqdBq7b/gnCSaalQlvZwwkj6notkKhHx72+MKwWUJA== + dependencies: + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/plugin-content-blog" "2.0.0-beta.21" + "@docusaurus/plugin-content-docs" "2.0.0-beta.21" + "@docusaurus/plugin-content-pages" "2.0.0-beta.21" + "@docusaurus/theme-common" "2.0.0-beta.21" + "@docusaurus/theme-translations" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-common" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + "@mdx-js/react" "^1.6.22" + clsx "^1.1.1" + copy-text-to-clipboard "^3.0.1" + infima "0.2.0-alpha.39" + lodash "^4.17.21" + nprogress "^0.2.0" + postcss "^8.4.14" + prism-react-renderer "^1.3.3" + prismjs "^1.28.0" + react-router-dom "^5.3.3" + rtlcss "^3.5.0" + tslib "^2.4.0" + +"@docusaurus/theme-common@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-common/-/theme-common-2.0.0-beta.21.tgz#508478251982d01655ef505ccb2420db38623db8" + integrity sha512-fTKoTLRfjuFG6c3iwnVjIIOensxWMgdBKLfyE5iih3Lq7tQgkE7NyTGG9BKLrnTJ7cAD2UXdXM9xbB7tBf1qzg== + dependencies: + "@docusaurus/module-type-aliases" "2.0.0-beta.21" + "@docusaurus/plugin-content-blog" "2.0.0-beta.21" + "@docusaurus/plugin-content-docs" "2.0.0-beta.21" + "@docusaurus/plugin-content-pages" "2.0.0-beta.21" + clsx "^1.1.1" + parse-numeric-range "^1.3.0" + prism-react-renderer "^1.3.3" + tslib "^2.4.0" + utility-types "^3.10.0" + 
+"@docusaurus/theme-search-algolia@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.0.0-beta.21.tgz#2891f11372e2542e4e1426c3100b72c2d30d4d68" + integrity sha512-T1jKT8MVSSfnztSqeebUOpWHPoHKtwDXtKYE0xC99JWoZ+mMfv8AFhVSoSddn54jLJjV36mxg841eHQIySMCpQ== + dependencies: + "@docsearch/react" "^3.1.0" + "@docusaurus/core" "2.0.0-beta.21" + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/plugin-content-docs" "2.0.0-beta.21" + "@docusaurus/theme-common" "2.0.0-beta.21" + "@docusaurus/theme-translations" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + "@docusaurus/utils-validation" "2.0.0-beta.21" + algoliasearch "^4.13.1" + algoliasearch-helper "^3.8.2" + clsx "^1.1.1" + eta "^1.12.3" + fs-extra "^10.1.0" + lodash "^4.17.21" + tslib "^2.4.0" + utility-types "^3.10.0" + +"@docusaurus/theme-translations@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-translations/-/theme-translations-2.0.0-beta.21.tgz#5da60ffc58de256b96316c5e0fe2733c1e83f22c" + integrity sha512-dLVT9OIIBs6MpzMb1bAy+C0DPJK3e3DNctG+ES0EP45gzEqQxzs4IsghpT+QDaOsuhNnAlosgJpFWX3rqxF9xA== + dependencies: + fs-extra "^10.1.0" + tslib "^2.4.0" + +"@docusaurus/types@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/types/-/types-2.0.0-beta.21.tgz#36659c6c012663040dcd4cbc97b5d7a555dae229" + integrity sha512-/GH6Npmq81eQfMC/ikS00QSv9jNyO1RXEpNSx5GLA3sFX8Iib26g2YI2zqNplM8nyxzZ2jVBuvUoeODTIbTchQ== + dependencies: + commander "^5.1.0" + history "^4.9.0" + joi "^17.6.0" + react-helmet-async "^1.3.0" + utility-types "^3.10.0" + webpack "^5.72.1" + webpack-merge "^5.8.0" + +"@docusaurus/utils-common@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/utils-common/-/utils-common-2.0.0-beta.21.tgz#81e86ed04ad62b75e9ba6a5e7689dc23d5f36a0a" + integrity sha512-5w+6KQuJb6pUR2M8xyVuTMvO5NFQm/p8TOTDFTx60wt3p0P1rRX00v6FYsD4PK6pgmuoKjt2+Ls8dtSXc4qFpQ== + dependencies: + tslib "^2.4.0" + +"@docusaurus/utils-validation@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/utils-validation/-/utils-validation-2.0.0-beta.21.tgz#10169661be5f8a233f4c12202ee5802ccb77400f" + integrity sha512-6NG1FHTRjv1MFzqW//292z7uCs77vntpWEbZBHk3n67aB1HoMn5SOwjLPtRDjbCgn6HCHFmdiJr6euCbjhYolg== + dependencies: + "@docusaurus/logger" "2.0.0-beta.21" + "@docusaurus/utils" "2.0.0-beta.21" + joi "^17.6.0" + js-yaml "^4.1.0" + tslib "^2.4.0" + +"@docusaurus/utils@2.0.0-beta.21": + version "2.0.0-beta.21" + resolved "https://registry.yarnpkg.com/@docusaurus/utils/-/utils-2.0.0-beta.21.tgz#8fc4499c4cfedd29805025d930f8008cad255044" + integrity sha512-M/BrVCDmmUPZLxtiStBgzpQ4I5hqkggcpnQmEN+LbvbohjbtVnnnZQ0vptIziv1w8jry/woY+ePsyOO7O/yeLQ== + dependencies: + "@docusaurus/logger" "2.0.0-beta.21" + "@svgr/webpack" "^6.2.1" + file-loader "^6.2.0" + fs-extra "^10.1.0" + github-slugger "^1.4.0" + globby "^11.1.0" + gray-matter "^4.0.3" + js-yaml "^4.1.0" + lodash "^4.17.21" + micromatch "^4.0.5" + resolve-pathname "^3.0.0" + shelljs "^0.8.5" + tslib "^2.4.0" + url-loader "^4.1.1" + webpack "^5.72.1" + +"@hapi/hoek@^9.0.0": + version "9.3.0" + resolved 
"https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb" + integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ== + +"@hapi/topo@^5.0.0": + version "5.1.0" + resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.1.0.tgz#dc448e332c6c6e37a4dc02fd84ba8d44b9afb012" + integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg== dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/parser" "^7.2.2" - "@babel/types" "^7.2.2" + "@hapi/hoek" "^9.0.0" -"@babel/traverse@^7.1.0", "@babel/traverse@^7.1.5", "@babel/traverse@^7.2.2", "@babel/traverse@^7.2.3": - version "7.2.3" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.2.3.tgz#7ff50cefa9c7c0bd2d81231fdac122f3957748d8" - integrity sha512-Z31oUD/fJvEWVR0lNZtfgvVt512ForCTNKYcJBGbPb1QZfve4WGH8Wsy7+Mev33/45fhP/hwQtvgusNdcCMgSw== +"@jridgewell/gen-mapping@^0.1.0": + version "0.1.1" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996" + integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w== dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/generator" "^7.2.2" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.0.0" - "@babel/parser" "^7.2.3" - "@babel/types" "^7.2.2" - debug "^4.1.0" - globals "^11.1.0" - lodash "^4.17.10" + "@jridgewell/set-array" "^1.0.0" + "@jridgewell/sourcemap-codec" "^1.4.10" -"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.2.2", "@babel/types@^7.3.0", "@babel/types@^7.3.2": - version "7.3.2" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.3.2.tgz#424f5be4be633fff33fb83ab8d67e4a8290f5a2f" - integrity sha512-3Y6H8xlUlpbGR+XvawiH0UXehqydTmNmEpozWcXymqwcrwYAl5KMvKtQ+TF6f6E08V6Jur7v/ykdDSF+WDEIXQ== +"@jridgewell/gen-mapping@^0.3.0": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.1.tgz#cf92a983c83466b8c0ce9124fadeaf09f7c66ea9" + integrity sha512-GcHwniMlA2z+WFPWuY8lp3fsza0I8xPFMWL5+n8LYyP6PSvPrXf4+n8stDHZY2DM0zy9sVkRDy1jDI4XGzYVqg== dependencies: - esutils "^2.0.2" - lodash "^4.17.10" - to-fast-properties "^2.0.0" + "@jridgewell/set-array" "^1.0.0" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" -abab@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.0.tgz#aba0ab4c5eee2d4c79d3487d85450fb2376ebb0f" - integrity sha512-sY5AXXVZv4Y1VACTtR11UJCPHHudgY5i26Qj5TypE6DKlIApbwb5uqhXcJ5UUGbvZNRh7EeIoW+LrJumBsKp7w== +"@jridgewell/resolve-uri@^3.0.3": + version "3.0.7" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.0.7.tgz#30cd49820a962aff48c8fffc5cd760151fca61fe" + integrity sha512-8cXDaBBHOr2pQ7j77Y6Vp5VDT2sIqWyWQ56TjEq4ih/a4iST3dItRe8Q9fp0rrIl9DoKhWQtUQz/YpOxLkXbNA== -abbrev@1: +"@jridgewell/set-array@^1.0.0": version "1.1.1" - resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" - integrity 
sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.1.tgz#36a6acc93987adcf0ba50c66908bd0b70de8afea" + integrity sha512-Ct5MqZkLGEXTVmQYbGtx9SVqD2fqwvdubdps5D3djjAkgkKwT918VNOz65pEHFaYTeWcukmJmH5SwsA9Tn2ObQ== -acorn-globals@^4.1.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-4.3.0.tgz#e3b6f8da3c1552a95ae627571f7dd6923bb54103" - integrity sha512-hMtHj3s5RnuhvHPowpBYvJVj3rAar82JiDQHvGs1zO0l10ocX/xEdBShNHTJaboucJUsScghp74pH3s7EnHHQw== +"@jridgewell/source-map@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb" + integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw== dependencies: - acorn "^6.0.1" - acorn-walk "^6.0.1" - -acorn-walk@^6.0.1: - version "6.1.1" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-6.1.1.tgz#d363b66f5fac5f018ff9c3a1e7b6f8e310cc3913" - integrity sha512-OtUw6JUTgxA2QoqqmrmQ7F2NYqiBPi/L2jqHyFtllhOUvXYQXf0Z1CYUinIfyT4bTCGmrA7gX9FvHA81uzCoVw== + "@jridgewell/gen-mapping" "^0.3.0" + "@jridgewell/trace-mapping" "^0.3.9" -acorn@^5.5.3: - version "5.7.4" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.4.tgz#3e8d8a9947d0599a1796d10225d7432f4a4acf5e" - integrity sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg== +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.4.13" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.13.tgz#b6461fb0c2964356c469e115f504c95ad97ab88c" + integrity sha512-GryiOJmNcWbovBxTfZSF71V/mXbgcV3MewDe3kIMCLyIh5e7SKAeUZs+rMnJ8jkMolZ/4/VsdBmMrw3l+VdZ3w== -acorn@^6.0.1: - version "6.1.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.1.0.tgz#b0a3be31752c97a0f7013c5f4903b71a05db6818" - integrity sha512-MW/FjM+IvU9CgBzjO3UIPCE2pyEwUsoFl+VGdczOPEdxfGFjuKny/gN54mOuX7Qxmb9Rg9MCn2oKiSUeW+pjrw== +"@jridgewell/trace-mapping@^0.3.7", "@jridgewell/trace-mapping@^0.3.9": + version "0.3.13" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.13.tgz#dcfe3e95f224c8fe97a87a5235defec999aa92ea" + integrity sha512-o1xbKhp9qnIAoHJSWd6KlCZfqslL4valSF81H8ImioOAxluWYWOpWkpyktY2vnt4tbrX9XYaxovq6cgowaJp2w== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" -agentkeepalive@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-2.2.0.tgz#c5d1bd4b129008f1163f236f86e5faea2026e2ef" - integrity sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8= +"@leichtgewicht/ip-codec@^2.0.1": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz#b2ac626d6cb9c8718ab459166d4bb405b8ffa78b" + integrity sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A== + +"@mdx-js/mdx@^1.6.22": + version "1.6.22" + resolved 
"https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.22.tgz#8a723157bf90e78f17dc0f27995398e6c731f1ba" + integrity sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA== + dependencies: + "@babel/core" "7.12.9" + "@babel/plugin-syntax-jsx" "7.12.1" + "@babel/plugin-syntax-object-rest-spread" "7.8.3" + "@mdx-js/util" "1.6.22" + babel-plugin-apply-mdx-type-prop "1.6.22" + babel-plugin-extract-import-names "1.6.22" + camelcase-css "2.0.1" + detab "2.0.4" + hast-util-raw "6.0.1" + lodash.uniq "4.5.0" + mdast-util-to-hast "10.0.1" + remark-footnotes "2.0.0" + remark-mdx "1.6.22" + remark-parse "8.0.3" + remark-squeeze-paragraphs "4.0.0" + style-to-object "0.3.0" + unified "9.2.0" + unist-builder "2.0.3" + unist-util-visit "2.0.3" + +"@mdx-js/react@^1.6.22": + version "1.6.22" + resolved "https://registry.yarnpkg.com/@mdx-js/react/-/react-1.6.22.tgz#ae09b4744fddc74714ee9f9d6f17a66e77c43573" + integrity sha512-TDoPum4SHdfPiGSAaRBw7ECyI8VaHpK8GJugbJIJuqyh6kzw9ZLJZW3HGL3NNrJGxcAixUvqROm+YuQOo5eXtg== + +"@mdx-js/util@1.6.22": + version "1.6.22" + resolved "https://registry.yarnpkg.com/@mdx-js/util/-/util-1.6.22.tgz#219dfd89ae5b97a8801f015323ffa4b62f45718b" + integrity sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA== + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== -ajv@^6.5.5: - version "6.9.1" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.9.1.tgz#a4d3683d74abc5670e75f0b16520f70a20ea8dc1" - integrity sha512-XDN92U311aINL77ieWHmqCcNlwjoP5cHXDxIxbf2MaPYuCXOHS7gHH8jktxeK5omgd52XbSTX6a4Piwd1pQmzA== +"@nodelib/fs.walk@^1.2.3": + version "1.2.8" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== dependencies: - fast-deep-equal "^2.0.1" - fast-json-stable-stringify "^2.0.0" - json-schema-traverse "^0.4.1" - uri-js "^4.2.2" + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" -algoliasearch-helper@^2.26.0: - version "2.26.1" - resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-2.26.1.tgz#75bd34f095e852d1bda483b8ebfb83c3c6e2852c" - integrity sha512-fQBZZXC3rac4wadRj5wA/gxy88Twb+GQF3n8foew8SAsqe9Q59PFq1y3j08pr6eNSRYkZJV7qMpe7ox5D27KOw== - dependencies: - events "^1.1.1" - lodash "^4.17.5" - qs "^6.5.1" - util "^0.10.3" - -algoliasearch@^3.24.9, algoliasearch@^3.27.0: - version "3.32.0" - resolved 
"https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-3.32.0.tgz#5818168c26ff921bd0346a919071bac928b747ce" - integrity sha512-C8oQnPTf0wPuyD2jSZwtBAPvz+lHOE7zRIPpgXGBuNt6ZNcC4omsbytG26318rT77a8h4759vmIp6n9p8iw4NA== - dependencies: - agentkeepalive "^2.2.0" - debug "^2.6.8" - envify "^4.0.0" - es6-promise "^4.1.0" - events "^1.1.0" - foreach "^2.0.5" - global "^4.3.2" - inherits "^2.0.1" - isarray "^2.0.1" - load-script "^1.0.0" - object-keys "^1.0.11" - querystring-es3 "^0.2.1" - reduce "^1.0.1" - semver "^5.1.0" - tunnel-agent "^0.6.0" - -ansi-regex@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" - integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= +"@polka/url@^1.0.0-next.20": + version "1.0.0-next.21" + resolved "https://registry.yarnpkg.com/@polka/url/-/url-1.0.0-next.21.tgz#5de5a2385a35309427f6011992b544514d559aa1" + integrity sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g== -ansi-regex@^3.0.0: +"@sideway/address@^4.1.3": + version "4.1.4" + resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.4.tgz#03dccebc6ea47fdc226f7d3d1ad512955d4783f0" + integrity sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@sideway/formula@^3.0.0": version "3.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" - integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= + resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.0.tgz#fe158aee32e6bd5de85044be615bc08478a0a13c" + integrity sha512-vHe7wZ4NOXVfkoRb8T5otiENVlT7a3IAiw7H5M2+GO+9CDgcVUUsX1zalAztCmwyOr2RUTGJdgB+ZvSVqmdHmg== -ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== +"@sideway/pinpoint@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" + integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== + +"@sindresorhus/is@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" + integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== + +"@slorber/static-site-generator-webpack-plugin@^4.0.4": + version "4.0.7" + resolved "https://registry.yarnpkg.com/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz#fc1678bddefab014e2145cbe25b3ce4e1cfc36f3" + integrity sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA== + dependencies: + eval "^0.1.8" + p-map "^4.0.0" + webpack-sources "^3.2.2" + +"@svgr/babel-plugin-add-jsx-attribute@^6.0.0": + version "6.0.0" + resolved 
"https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.0.0.tgz#bd6d1ff32a31b82b601e73672a789cc41e84fe18" + integrity sha512-MdPdhdWLtQsjd29Wa4pABdhWbaRMACdM1h31BY+c6FghTZqNGT7pEYdBoaGeKtdTOBC/XNFQaKVj+r/Ei2ryWA== + +"@svgr/babel-plugin-remove-jsx-attribute@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-6.0.0.tgz#58654908beebfa069681a83332544b17e5237e89" + integrity sha512-aVdtfx9jlaaxc3unA6l+M9YRnKIZjOhQPthLKqmTXC8UVkBLDRGwPKo+r8n3VZN8B34+yVajzPTZ+ptTSuZZCw== + +"@svgr/babel-plugin-remove-jsx-empty-expression@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-6.0.0.tgz#d06dd6e8a8f603f92f9979bb9990a1f85a4f57ba" + integrity sha512-Ccj42ApsePD451AZJJf1QzTD1B/BOU392URJTeXFxSK709i0KUsGtbwyiqsKu7vsYxpTM0IA5clAKDyf9RCZyA== + +"@svgr/babel-plugin-replace-jsx-attribute-value@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.0.0.tgz#0b85837577b02c31c09c758a12932820f5245cee" + integrity sha512-88V26WGyt1Sfd1emBYmBJRWMmgarrExpKNVmI9vVozha4kqs6FzQJ/Kp5+EYli1apgX44518/0+t9+NU36lThQ== + +"@svgr/babel-plugin-svg-dynamic-title@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.0.0.tgz#28236ec26f7ab9d486a487d36ae52d58ba15676f" + integrity sha512-F7YXNLfGze+xv0KMQxrl2vkNbI9kzT9oDK55/kUuymh1ACyXkMV+VZWX1zEhSTfEKh7VkHVZGmVtHg8eTZ6PRg== + +"@svgr/babel-plugin-svg-em-dimensions@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.0.0.tgz#40267c5dea1b43c4f83a0eb6169e08b43d8bafce" + integrity sha512-+rghFXxdIqJNLQK08kwPBD3Z22/0b2tEZ9lKiL/yTfuyj1wW8HUXu4bo/XkogATIYuXSghVQOOCwURXzHGKyZA== + +"@svgr/babel-plugin-transform-react-native-svg@^6.0.0": + version "6.0.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.0.0.tgz#eb688d0a5f539e34d268d8a516e81f5d7fede7c9" + integrity sha512-VaphyHZ+xIKv5v0K0HCzyfAaLhPGJXSk2HkpYfXIOKb7DjLBv0soHDxNv6X0vr2titsxE7klb++u7iOf7TSrFQ== + +"@svgr/babel-plugin-transform-svg-component@^6.2.0": + version "6.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.2.0.tgz#7ba61d9fc1fb42b0ba1a04e4630019fa7e993c4f" + integrity sha512-bhYIpsORb++wpsp91fymbFkf09Z/YEKR0DnFjxvN+8JHeCUD2unnh18jIMKnDJTWtvpTaGYPXELVe4OOzFI0xg== + +"@svgr/babel-preset@^6.2.0": + version "6.2.0" + resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-6.2.0.tgz#1d3ad8c7664253a4be8e4a0f0e6872f30d8af627" + integrity sha512-4WQNY0J71JIaL03DRn0vLiz87JXx0b9dYm2aA8XHlQJQoixMl4r/soYHm8dsaJZ3jWtkCiOYy48dp9izvXhDkQ== + dependencies: + "@svgr/babel-plugin-add-jsx-attribute" "^6.0.0" + "@svgr/babel-plugin-remove-jsx-attribute" "^6.0.0" + "@svgr/babel-plugin-remove-jsx-empty-expression" "^6.0.0" + 
"@svgr/babel-plugin-replace-jsx-attribute-value" "^6.0.0" + "@svgr/babel-plugin-svg-dynamic-title" "^6.0.0" + "@svgr/babel-plugin-svg-em-dimensions" "^6.0.0" + "@svgr/babel-plugin-transform-react-native-svg" "^6.0.0" + "@svgr/babel-plugin-transform-svg-component" "^6.2.0" + +"@svgr/core@^6.2.1": + version "6.2.1" + resolved "https://registry.yarnpkg.com/@svgr/core/-/core-6.2.1.tgz#195de807a9f27f9e0e0d678e01084b05c54fdf61" + integrity sha512-NWufjGI2WUyrg46mKuySfviEJ6IxHUOm/8a3Ph38VCWSp+83HBraCQrpEM3F3dB6LBs5x8OElS8h3C0oOJaJAA== + dependencies: + "@svgr/plugin-jsx" "^6.2.1" + camelcase "^6.2.0" + cosmiconfig "^7.0.1" + +"@svgr/hast-util-to-babel-ast@^6.2.1": + version "6.2.1" + resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.2.1.tgz#ae065567b74cbe745afae617053adf9a764bea25" + integrity sha512-pt7MMkQFDlWJVy9ULJ1h+hZBDGFfSCwlBNW1HkLnVi7jUhyEXUaGYWi1x6bM2IXuAR9l265khBT4Av4lPmaNLQ== + dependencies: + "@babel/types" "^7.15.6" + entities "^3.0.1" + +"@svgr/plugin-jsx@^6.2.1": + version "6.2.1" + resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-6.2.1.tgz#5668f1d2aa18c2f1bb7a1fc9f682d3f9aed263bd" + integrity sha512-u+MpjTsLaKo6r3pHeeSVsh9hmGRag2L7VzApWIaS8imNguqoUwDq/u6U/NDmYs/KAsrmtBjOEaAAPbwNGXXp1g== + dependencies: + "@babel/core" "^7.15.5" + "@svgr/babel-preset" "^6.2.0" + "@svgr/hast-util-to-babel-ast" "^6.2.1" + svg-parser "^2.0.2" + +"@svgr/plugin-svgo@^6.2.0": + version "6.2.0" + resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-6.2.0.tgz#4cbe6a33ccccdcae4e3b63ded64cc1cbe1faf48c" + integrity sha512-oDdMQONKOJEbuKwuy4Np6VdV6qoaLLvoY86hjvQEgU82Vx1MSWRyYms6Sl0f+NtqxLI/rDVufATbP/ev996k3Q== + dependencies: + cosmiconfig "^7.0.1" + deepmerge "^4.2.2" + svgo "^2.5.0" + +"@svgr/webpack@^6.2.1": + version "6.2.1" + resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-6.2.1.tgz#ef5d51c1b6be4e7537fb9f76b3f2b2e22b63c58d" + integrity sha512-h09ngMNd13hnePwgXa+Y5CgOjzlCvfWLHg+MBnydEedAnuLRzUHUJmGS3o2OsrhxTOOqEsPOFt5v/f6C5Qulcw== + dependencies: + "@babel/core" "^7.15.5" + "@babel/plugin-transform-react-constant-elements" "^7.14.5" + "@babel/preset-env" "^7.15.6" + "@babel/preset-react" "^7.14.5" + "@babel/preset-typescript" "^7.15.0" + "@svgr/core" "^6.2.1" + "@svgr/plugin-jsx" "^6.2.1" + "@svgr/plugin-svgo" "^6.2.0" + +"@szmarczak/http-timer@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" + integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== dependencies: - color-convert "^1.9.0" + defer-to-connect "^1.0.1" -anymatch@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" - integrity sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw== +"@trysound/sax@0.2.0": + version "0.2.0" + resolved "https://registry.yarnpkg.com/@trysound/sax/-/sax-0.2.0.tgz#cccaab758af56761eb7bf37af6f03f326dd798ad" + integrity sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA== + +"@types/body-parser@*": + 
version "1.19.2" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.2.tgz#aea2059e28b7658639081347ac4fab3de166e6f0" + integrity sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g== dependencies: - micromatch "^3.1.4" - normalize-path "^2.1.1" + "@types/connect" "*" + "@types/node" "*" -aproba@^1.0.3: - version "1.2.0" - resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" - integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== +"@types/bonjour@^3.5.9": + version "3.5.10" + resolved "https://registry.yarnpkg.com/@types/bonjour/-/bonjour-3.5.10.tgz#0f6aadfe00ea414edc86f5d106357cda9701e275" + integrity sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw== + dependencies: + "@types/node" "*" -are-we-there-yet@~1.1.2: - version "1.1.5" - resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" - integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w== +"@types/connect-history-api-fallback@^1.3.5": + version "1.3.5" + resolved "https://registry.yarnpkg.com/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.3.5.tgz#d1f7a8a09d0ed5a57aee5ae9c18ab9b803205dae" + integrity sha512-h8QJa8xSb1WD4fpKBDcATDNGXghFj6/3GRWG6dhmRcu0RX1Ubasur2Uvx5aeEwlf0MwblEC2bMzzMQntxnw/Cw== dependencies: - delegates "^1.0.0" - readable-stream "^2.0.6" + "@types/express-serve-static-core" "*" + "@types/node" "*" -arr-diff@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" - integrity sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA= +"@types/connect@*": + version "3.4.35" + resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.35.tgz#5fcf6ae445e4021d1fc2219a4873cc73a3bb2ad1" + integrity sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ== + dependencies: + "@types/node" "*" -arr-flatten@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" - integrity sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg== +"@types/eslint-scope@^3.7.3": + version "3.7.3" + resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.3.tgz#125b88504b61e3c8bc6f870882003253005c3224" + integrity sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g== + dependencies: + "@types/eslint" "*" + "@types/estree" "*" -arr-union@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" - integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ= +"@types/eslint@*": + version "8.4.3" + resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-8.4.3.tgz#5c92815a3838b1985c90034cd85f26f59d9d0ece" + integrity 
sha512-YP1S7YJRMPs+7KZKDb9G63n8YejIwW9BALq7a5j2+H4yl6iOv9CB29edho+cuFRrvmJbbaH2yiVChKLJVysDGw== + dependencies: + "@types/estree" "*" + "@types/json-schema" "*" -array-equal@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93" - integrity sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM= +"@types/estree@*", "@types/estree@^0.0.51": + version "0.0.51" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.51.tgz#cfd70924a25a3fd32b218e5e420e6897e1ac4f40" + integrity sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ== -array-unique@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" - integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg= +"@types/express-serve-static-core@*", "@types/express-serve-static-core@^4.17.18": + version "4.17.28" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.28.tgz#c47def9f34ec81dc6328d0b1b5303d1ec98d86b8" + integrity sha512-P1BJAEAW3E2DJUlkgq4tOL3RyMunoWXqbSCygWo5ZIWTjUgN1YnaXWW4VWl/oc8vs/XoYibEGBKP0uZyF4AHig== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" -asn1@~0.2.3: - version "0.2.4" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" - integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg== +"@types/express@*", "@types/express@^4.17.13": + version "4.17.13" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.13.tgz#a76e2995728999bab51a33fabce1d705a3709034" + integrity sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA== dependencies: - safer-buffer "~2.1.0" + "@types/body-parser" "*" + "@types/express-serve-static-core" "^4.17.18" + "@types/qs" "*" + "@types/serve-static" "*" -assert-plus@1.0.0, assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= +"@types/hast@^2.0.0": + version "2.3.4" + resolved "https://registry.yarnpkg.com/@types/hast/-/hast-2.3.4.tgz#8aa5ef92c117d20d974a82bdfb6a648b08c0bafc" + integrity sha512-wLEm0QvaoawEDoTRwzTXp4b4jpwiJDvR5KMnFnVodm3scufTlBOWRD6N1OBf9TZMhjlNsSfcO5V+7AF4+Vy+9g== + dependencies: + "@types/unist" "*" -assign-symbols@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" - integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= +"@types/history@^4.7.11": + version "4.7.11" + resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.11.tgz#56588b17ae8f50c53983a524fc3cc47437969d64" + integrity sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA== -async-each@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" - integrity sha1-GdOGodntxufByF04iu28xW0zYC0= +"@types/html-minifier-terser@^6.0.0": + version "6.1.0" + resolved "https://registry.yarnpkg.com/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz#4fc33a00c1d0c16987b1a20cf92d20614c55ac35" + integrity sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg== -async-limiter@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8" - integrity sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg== +"@types/http-proxy@^1.17.8": + version "1.17.9" + resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.9.tgz#7f0e7931343761efde1e2bf48c40f02f3f75705a" + integrity sha512-QsbSjA/fSk7xB+UXlCT3wHBy5ai9wOcNDWwZAtud+jXhwOM3l+EYZh8Lng4+/6n8uar0J7xILzqftJdJ/Wdfkw== + dependencies: + "@types/node" "*" -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= +"@types/json-schema@*", "@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9": + version "7.0.11" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.11.tgz#d421b6c527a3037f7c84433fd2c4229e016863d3" + integrity sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ== -atob@^2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" - integrity sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg== - -atomic-algolia@^0.3.15: - version "0.3.17" - resolved "https://registry.yarnpkg.com/atomic-algolia/-/atomic-algolia-0.3.17.tgz#be65c69448d792837c14cefa986ecce312851271" - integrity sha512-Pk4GrmOWuEdVDkKv2PNFjXyhzr/99T/69PQtoy0YfLrWA0RrQK4tfJ6r4hEU4zAFvB1aMjF7M6KdWoggFVbCfw== - dependencies: - "@babel/cli" "^7.2.0" - "@babel/core" "^7.2.0" - "@babel/plugin-transform-runtime" "^7.2.0" - "@babel/polyfill" "^7.0.0" - "@babel/preset-env" "^7.2.0" - "@babel/runtime" "^7.2.0" - algoliasearch "^3.24.9" - chalk "^2.3.0" - debug "^4.1.0" - dotenv "^5.0.0" - md5 "^2.2.1" +"@types/mdast@^3.0.0": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.10.tgz#4724244a82a4598884cbbe9bcfd73dff927ee8af" + integrity sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA== + dependencies: + "@types/unist" "*" -aws-sign2@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= +"@types/mime@^1": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" + integrity 
sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== -aws4@^1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" - integrity sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ== +"@types/node@*", "@types/node@^17.0.5": + version "17.0.41" + resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.41.tgz#1607b2fd3da014ae5d4d1b31bc792a39348dfb9b" + integrity sha512-xA6drNNeqb5YyV5fO3OAEsnXLfO7uF0whiOfPTz5AeDo8KeZFmODKnvwPymMNO8qE/an8pVY/O50tig2SQCrGw== -balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= - -base@^0.11.1: - version "0.11.2" - resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" - integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg== - dependencies: - cache-base "^1.0.1" - class-utils "^0.3.5" - component-emitter "^1.2.1" - define-property "^1.0.0" - isobject "^3.0.1" - mixin-deep "^1.2.0" - pascalcase "^0.1.1" +"@types/parse-json@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== -bcrypt-pbkdf@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= - dependencies: - tweetnacl "^0.14.3" +"@types/parse5@^5.0.0": + version "5.0.3" + resolved "https://registry.yarnpkg.com/@types/parse5/-/parse5-5.0.3.tgz#e7b5aebbac150f8b5fdd4a46e7f0bd8e65e19109" + integrity sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw== -binary-extensions@^1.0.0: - version "1.13.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.13.0.tgz#9523e001306a32444b907423f1de2164222f6ab1" - integrity sha512-EgmjVLMn22z7eGGv3kcnHwSnJXmFHjISTY9E/S5lIcTD3Oxw05QTcBLNkJFzcb3cNueUdF/IN4U+d78V0zO8Hw== +"@types/prop-types@*": + version "15.7.5" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf" + integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w== -brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== +"@types/qs@*": + version "6.9.7" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" + integrity 
sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== + +"@types/range-parser@*": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.4.tgz#cd667bcfdd025213aafb7ca5915a932590acdcdc" + integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw== + +"@types/react-router-config@*": + version "5.0.6" + resolved "https://registry.yarnpkg.com/@types/react-router-config/-/react-router-config-5.0.6.tgz#87c5c57e72d241db900d9734512c50ccec062451" + integrity sha512-db1mx37a1EJDf1XeX8jJN7R3PZABmJQXR8r28yUjVMFSjkmnQo6X6pOEEmNl+Tp2gYQOGPdYbFIipBtdElZ3Yg== dependencies: - balanced-match "^1.0.0" - concat-map "0.0.1" + "@types/history" "^4.7.11" + "@types/react" "*" + "@types/react-router" "*" -braces@^2.3.1, braces@^2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" - integrity sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w== +"@types/react-router-dom@*": + version "5.3.3" + resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-5.3.3.tgz#e9d6b4a66fcdbd651a5f106c2656a30088cc1e83" + integrity sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw== dependencies: - arr-flatten "^1.1.0" - array-unique "^0.3.2" - extend-shallow "^2.0.1" - fill-range "^4.0.0" - isobject "^3.0.1" - repeat-element "^1.1.2" - snapdragon "^0.8.1" - snapdragon-node "^2.0.1" - split-string "^3.0.2" - to-regex "^3.0.1" - -browser-process-hrtime@^0.1.2: - version "0.1.3" - resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz#616f00faef1df7ec1b5bf9cfe2bdc3170f26c7b4" - integrity sha512-bRFnI4NnjO6cnyLmOV/7PVoDEMJChlcfN0z4s1YMBY989/SvlfMI1lgCnkFUs53e9gQF+w7qu7XdllSTiSl8Aw== - -browserslist@^4.3.4: - version "4.4.1" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.4.1.tgz#42e828954b6b29a7a53e352277be429478a69062" - integrity sha512-pEBxEXg7JwaakBXjATYw/D1YZh4QUSCX/Mnd/wnqSRPPSi1U39iDhDoKGoBUcraKdxDlrYqJxSI5nNvD+dWP2A== + "@types/history" "^4.7.11" + "@types/react" "*" + "@types/react-router" "*" + +"@types/react-router@*": + version "5.1.18" + resolved "https://registry.yarnpkg.com/@types/react-router/-/react-router-5.1.18.tgz#c8851884b60bc23733500d86c1266e1cfbbd9ef3" + integrity sha512-YYknwy0D0iOwKQgz9v8nOzt2J6l4gouBmDnWqUUznltOTaon+r8US8ky8HvN0tXvc38U9m6z/t2RsVsnd1zM0g== dependencies: - caniuse-lite "^1.0.30000929" - electron-to-chromium "^1.3.103" - node-releases "^1.1.3" + "@types/history" "^4.7.11" + "@types/react" "*" -cache-base@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" - integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ== +"@types/react@*": + version "18.0.12" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.12.tgz#cdaa209d0a542b3fcf69cf31a03976ec4cdd8840" + integrity 
sha512-duF1OTASSBQtcigUvhuiTB1Ya3OvSy+xORCiEf20H0P0lzx+/KeVsA99U5UjLXSbyo1DRJDlLKqTeM1ngosqtg== dependencies: - collection-visit "^1.0.0" - component-emitter "^1.2.1" - get-value "^2.0.6" - has-value "^1.0.0" - isobject "^3.0.1" - set-value "^2.0.0" - to-object-path "^0.3.0" - union-value "^1.0.0" - unset-value "^1.0.0" + "@types/prop-types" "*" + "@types/scheduler" "*" + csstype "^3.0.2" -camelcase@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.0.0.tgz#03295527d58bd3cd4aa75363f35b2e8d97be2f42" - integrity sha512-faqwZqnWxbxn+F1d399ygeamQNy3lPp/H9H6rNrqYh4FSVCtcY+3cub1MxA8o9mDd55mM8Aghuu/kuyYA6VTsA== +"@types/retry@0.12.0": + version "0.12.0" + resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d" + integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA== -caniuse-lite@^1.0.30000929: - version "1.0.30000936" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000936.tgz#5d33b118763988bf721b9b8ad436d0400e4a116b" - integrity sha512-orX4IdpbFhdNO7bTBhSbahp1EBpqzBc+qrvTRVUFfZgA4zta7TdM6PN5ZxkEUgDnz36m+PfWGcdX7AVfFWItJw== +"@types/sax@^1.2.1": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@types/sax/-/sax-1.2.4.tgz#8221affa7f4f3cb21abd22f244cfabfa63e6a69e" + integrity sha512-pSAff4IAxJjfAXUG6tFkO7dsSbTmf8CtUpfhhZ5VhkRpC4628tJhh3+V6H1E+/Gs9piSzYKT5yzHO5M4GG9jkw== + dependencies: + "@types/node" "*" -caseless@~0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= +"@types/scheduler@*": + version "0.16.2" + resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39" + integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew== -chalk@^2.0.0, chalk@^2.3.0: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== +"@types/serve-index@^1.9.1": + version "1.9.1" + resolved "https://registry.yarnpkg.com/@types/serve-index/-/serve-index-1.9.1.tgz#1b5e85370a192c01ec6cec4735cf2917337a6278" + integrity sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg== dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" + "@types/express" "*" -charenc@~0.0.1: - version "0.0.2" - resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" - integrity sha1-wKHS86cJLgN3S/qD8UwPxXkKhmc= +"@types/serve-static@*", "@types/serve-static@^1.13.10": + version "1.13.10" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.10.tgz#f5e0ce8797d2d7cc5ebeda48a52c96c4fa47a8d9" + integrity sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ== + dependencies: + "@types/mime" 
"^1" + "@types/node" "*" -chokidar@^2.0.3: - version "2.1.0" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.0.tgz#5fcb70d0b28ebe0867eb0f09d5f6a08f29a1efa0" - integrity sha512-5t6G2SH8eO6lCvYOoUpaRnF5Qfd//gd7qJAkwRUw9qlGVkiQ13uwQngqbWWaurOsaAm9+kUGbITADxt6H0XFNQ== +"@types/sockjs@^0.3.33": + version "0.3.33" + resolved "https://registry.yarnpkg.com/@types/sockjs/-/sockjs-0.3.33.tgz#570d3a0b99ac995360e3136fd6045113b1bd236f" + integrity sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw== dependencies: - anymatch "^2.0.0" - async-each "^1.0.1" - braces "^2.3.2" - glob-parent "^3.1.0" - inherits "^2.0.3" - is-binary-path "^1.0.0" - is-glob "^4.0.0" - normalize-path "^3.0.0" - path-is-absolute "^1.0.0" - readdirp "^2.2.1" - upath "^1.1.0" - optionalDependencies: - fsevents "^1.2.7" + "@types/node" "*" -chownr@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.1.tgz#54726b8b8fff4df053c42187e801fb4412df1494" - integrity sha512-j38EvO5+LHX84jlo6h4UzmOwi0UgW61WRyPtJz4qaadK5eY3BTS5TY/S1Stc3Uk2lIM6TPevAlULiEJwie860g== +"@types/unist@*", "@types/unist@^2.0.0", "@types/unist@^2.0.2", "@types/unist@^2.0.3": + version "2.0.6" + resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.6.tgz#250a7b16c3b91f672a24552ec64678eeb1d3a08d" + integrity sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ== + +"@types/ws@^8.5.1": + version "8.5.3" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.3.tgz#7d25a1ffbecd3c4f2d35068d0b283c037003274d" + integrity sha512-6YOoWjruKj1uLf3INHH7D3qTXwFfEsg1kf3c0uDdSBJwfa/llkwIjrAGV7j7mVgGNbzTQ3HiHKKDXl6bJPD97w== + dependencies: + "@types/node" "*" + +"@webassemblyjs/ast@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.11.1.tgz#2bfd767eae1a6996f432ff7e8d7fc75679c0b6a7" + integrity sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw== + dependencies: + "@webassemblyjs/helper-numbers" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + +"@webassemblyjs/floating-point-hex-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz#f6c61a705f0fd7a6aecaa4e8198f23d9dc179e4f" + integrity sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ== + +"@webassemblyjs/helper-api-error@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz#1a63192d8788e5c012800ba6a7a46c705288fd16" + integrity sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg== + +"@webassemblyjs/helper-buffer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz#832a900eb444884cde9a7cad467f81500f5e5ab5" + integrity sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA== + +"@webassemblyjs/helper-numbers@1.11.1": + version "1.11.1" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz#64d81da219fbbba1e3bd1bfc74f6e8c4e10a62ae" + integrity sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ== + dependencies: + "@webassemblyjs/floating-point-hex-parser" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@xtuc/long" "4.2.2" + +"@webassemblyjs/helper-wasm-bytecode@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz#f328241e41e7b199d0b20c18e88429c4433295e1" + integrity sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q== + +"@webassemblyjs/helper-wasm-section@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz#21ee065a7b635f319e738f0dd73bfbda281c097a" + integrity sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + +"@webassemblyjs/ieee754@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz#963929e9bbd05709e7e12243a099180812992614" + integrity sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ== + dependencies: + "@xtuc/ieee754" "^1.2.0" + +"@webassemblyjs/leb128@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.11.1.tgz#ce814b45574e93d76bae1fb2644ab9cdd9527aa5" + integrity sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw== + dependencies: + "@xtuc/long" "4.2.2" + +"@webassemblyjs/utf8@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.1.tgz#d1f8b764369e7c6e6bae350e854dec9a59f0a3ff" + integrity sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ== + +"@webassemblyjs/wasm-edit@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz#ad206ebf4bf95a058ce9880a8c092c5dec8193d6" + integrity sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/helper-wasm-section" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-opt" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + "@webassemblyjs/wast-printer" "1.11.1" + +"@webassemblyjs/wasm-gen@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz#86c5ea304849759b7d88c47a32f4f039ae3c8f76" + integrity sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + 
"@webassemblyjs/utf8" "1.11.1" + +"@webassemblyjs/wasm-opt@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz#657b4c2202f4cf3b345f8a4c6461c8c2418985f2" + integrity sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + +"@webassemblyjs/wasm-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz#86ca734534f417e9bd3c67c7a1c75d8be41fb199" + integrity sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + "@webassemblyjs/utf8" "1.11.1" + +"@webassemblyjs/wast-printer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz#d0c73beda8eec5426f10ae8ef55cee5e7084c2f0" + integrity sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@xtuc/long" "4.2.2" + +"@xtuc/ieee754@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" + integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA== + +"@xtuc/long@4.2.2": + version "4.2.2" + resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" + integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== -class-utils@^0.3.5: - version "0.3.6" - resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" - integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg== +accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.8: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== dependencies: - arr-union "^3.1.0" - define-property "^0.2.5" - isobject "^3.0.0" - static-extend "^0.1.1" + mime-types "~2.1.34" + negotiator "0.6.3" + +acorn-import-assertions@^1.7.6: + version "1.8.0" + resolved "https://registry.yarnpkg.com/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz#ba2b5939ce62c238db6d93d81c9b111b29b855e9" + integrity sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw== + +acorn-walk@^8.0.0: + version "8.2.0" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.2.0.tgz#741210f2e2426454508853a2f44d0ab83b7f69c1" + integrity 
sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA== -classnames@^2.2.5: - version "2.2.6" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" - integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== +acorn@^8.0.4, acorn@^8.4.1, acorn@^8.5.0: + version "8.7.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.1.tgz#0197122c843d1bf6d0a5e83220a788f278f63c30" + integrity sha512-Xx54uLJQZ19lKygFXOWsscKUbsBZW0CPykPhVQdhIeIwrbPmJzqeASDInc8nKBnp/JT6igTs82qPXz069H8I/A== + +address@^1.0.1, address@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/address/-/address-1.2.0.tgz#d352a62c92fee90f89a693eccd2a8b2139ab02d9" + integrity sha512-tNEZYz5G/zYunxFm7sfhAxkXEuLj3K6BKwv6ZURlsF6yiUQ65z0Q2wZW9L5cPUl9ocofGvXOdFYbFHp0+6MOig== -cli-table3@^0.5.0: - version "0.5.1" - resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.5.1.tgz#0252372d94dfc40dbd8df06005f48f31f656f202" - integrity sha512-7Qg2Jrep1S/+Q3EceiZtQcDPWxhAvBw+ERf1162v4sikJrvojMHFqXt8QIVha8UlH9rgU0BeWPytZ9/TzYqlUw== +aggregate-error@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" + integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA== dependencies: - object-assign "^4.1.0" - string-width "^2.1.1" - optionalDependencies: - colors "^1.1.2" + clean-stack "^2.0.0" + indent-string "^4.0.0" -cliui@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49" - integrity sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ== +ajv-formats@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ajv-formats/-/ajv-formats-2.1.1.tgz#6e669400659eb74973bbf2e33327180a0996b520" + integrity sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA== dependencies: - string-width "^2.1.1" - strip-ansi "^4.0.0" - wrap-ansi "^2.0.0" + ajv "^8.0.0" -code-point-at@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" - integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= +ajv-keywords@^3.4.1, ajv-keywords@^3.5.2: + version "3.5.2" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d" + integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ== -collection-visit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" - integrity sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA= +ajv-keywords@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz#69d4d385a4733cdbeab44964a1170a88f87f0e16" + integrity 
sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw== dependencies: - map-visit "^1.0.0" - object-visit "^1.0.0" + fast-deep-equal "^3.1.3" -color-convert@^1.9.0: - version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" - integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== +ajv@^6.12.2, ajv@^6.12.4, ajv@^6.12.5: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== dependencies: - color-name "1.1.3" - -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" - integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" -colors@^1.1.2: - version "1.3.3" - resolved "https://registry.yarnpkg.com/colors/-/colors-1.3.3.tgz#39e005d546afe01e01f9c4ca8fa50f686a01205d" - integrity sha512-mmGt/1pZqYRjMxB1axhTo16/snVZ5krrKkcmMeVKxzECMMXoCgnvTPp10QgHfcbQZw8Dq2jMNG6je4JlWU0gWg== +ajv@^8.0.0, ajv@^8.8.0: + version "8.11.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.11.0.tgz#977e91dd96ca669f54a11e23e378e33b884a565f" + integrity sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg== + dependencies: + fast-deep-equal "^3.1.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.2.2" -combined-stream@^1.0.6, combined-stream@~1.0.6: - version "1.0.7" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.7.tgz#2d1d24317afb8abe95d6d2c0b07b57813539d828" - integrity sha512-brWl9y6vOB1xYPZcpZde3N9zDByXTosAeMDo4p1wzo6UMOX4vumB+TP1RZ76sfE6Md68Q0NJSrE/gbezd4Ul+w== +algoliasearch-helper@^3.8.2: + version "3.8.2" + resolved "https://registry.yarnpkg.com/algoliasearch-helper/-/algoliasearch-helper-3.8.2.tgz#35726dc6d211f49dbab0bf6d37b4658165539523" + integrity sha512-AXxiF0zT9oYwl8ZBgU/eRXvfYhz7cBA5YrLPlw9inZHdaYF0QEya/f1Zp1mPYMXc1v6VkHwBq4pk6/vayBLICg== + dependencies: + "@algolia/events" "^4.0.1" + +algoliasearch@^4.0.0, algoliasearch@^4.13.1: + version "4.13.1" + resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-4.13.1.tgz#54195c41c9e4bd13ed64982248cf49d4576974fe" + integrity sha512-dtHUSE0caWTCE7liE1xaL+19AFf6kWEcyn76uhcitWpntqvicFHXKFoZe5JJcv9whQOTRM6+B8qJz6sFj+rDJA== + dependencies: + "@algolia/cache-browser-local-storage" "4.13.1" + "@algolia/cache-common" "4.13.1" + "@algolia/cache-in-memory" "4.13.1" + "@algolia/client-account" "4.13.1" + "@algolia/client-analytics" "4.13.1" + "@algolia/client-common" "4.13.1" + "@algolia/client-personalization" "4.13.1" + "@algolia/client-search" "4.13.1" + "@algolia/logger-common" "4.13.1" + "@algolia/logger-console" "4.13.1" + "@algolia/requester-browser-xhr" "4.13.1" + "@algolia/requester-common" "4.13.1" + "@algolia/requester-node-http" "4.13.1" + "@algolia/transporter" "4.13.1" + +ansi-align@^3.0.0, 
ansi-align@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-3.0.1.tgz#0cdf12e111ace773a86e9a1fad1225c43cb19a59" + integrity sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w== dependencies: - delayed-stream "~1.0.0" + string-width "^4.1.0" -commander@^2.8.1: - version "2.19.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.19.0.tgz#f6198aa84e5b83c46054b94ddedbfed5ee9ff12a" - integrity sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg== +ansi-html-community@^0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/ansi-html-community/-/ansi-html-community-0.0.8.tgz#69fbc4d6ccbe383f9736934ae34c3f8290f1bf41" + integrity sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw== -component-emitter@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" - integrity sha1-E3kY1teCg/ffemt8WmPhQOaUJeY= +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -concat-map@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= +ansi-regex@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" + integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== -console-control-strings@^1.0.0, console-control-strings@~1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" - integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" -convert-source-map@^1.1.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20" - integrity sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A== +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== dependencies: - safe-buffer "~5.1.1" + color-convert "^2.0.1" -copy-descriptor@^0.1.0: - version "0.1.1" - resolved 
"https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" - integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= +ansi-styles@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.1.0.tgz#87313c102b8118abd57371afab34618bf7350ed3" + integrity sha512-VbqNsoz55SYGczauuup0MFUyXNQviSpFTj1RQtFzmQLk18qbVSpTFFGMT293rmDaQuKCT6InmbuEyUne4mTuxQ== -core-js@^2.5.7: - version "2.6.4" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.4.tgz#b8897c062c4d769dd30a0ac5c73976c47f92ea0d" - integrity sha512-05qQ5hXShcqGkPZpXEFLIpxayZscVD2kuMBZewxiIPPEagukO4mqgPA9CWhUvFBJfy3ODdK2p9xyHh7FTU9/7A== +anymatch@~3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" + integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" -core-util-is@1.0.2, core-util-is@~1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= - -cross-spawn@^6.0.0: - version "6.0.5" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" - integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== - dependencies: - nice-try "^1.0.4" - path-key "^2.0.1" - semver "^5.5.0" - shebang-command "^1.2.0" - which "^1.2.9" - -crypt@~0.0.1: - version "0.0.2" - resolved "https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" - integrity sha1-iNf/fsDfuG9xPch7u0LQRNPmxBs= - -cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0": - version "0.3.6" - resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.6.tgz#f85206cee04efa841f3c5982a74ba96ab20d65ad" - integrity sha512-DtUeseGk9/GBW0hl0vVPpU22iHL6YB5BUX7ml1hB+GMpo0NX5G4voX3kdWiMSEguFtcW3Vh3djqNF4aIe6ne0A== - -cssstyle@^1.0.0: +arg@^5.0.0: + version "5.0.2" + resolved "https://registry.yarnpkg.com/arg/-/arg-5.0.2.tgz#c81433cc427c92c4dcf4865142dbca6f15acd59c" + integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg== + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +array-flatten@1.1.1: version "1.1.1" - resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-1.1.1.tgz#18b038a9c44d65f7a8e428a653b9f6fe42faf5fb" - integrity 
sha512-364AI1l/M5TYcFH83JnOH/pSqgaNnKmYgKrm0didZMGKWjQB60dymwWy1rKUgL3J1ffdq9xVi2yGLHdSjjSNog== + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== + +array-flatten@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" + integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +asap@~2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA== + +at-least-node@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" + integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== + +autoprefixer@^10.3.7, autoprefixer@^10.4.7: + version "10.4.7" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-10.4.7.tgz#1db8d195f41a52ca5069b7593be167618edbbedf" + integrity sha512-ypHju4Y2Oav95SipEcCcI5J7CGPuvz8oat7sUtYj3ClK44bldfvtvcxK6IEK++7rqB7YchDGzweZIBG+SD0ZAA== + dependencies: + browserslist "^4.20.3" + caniuse-lite "^1.0.30001335" + fraction.js "^4.2.0" + normalize-range "^0.1.2" + picocolors "^1.0.0" + postcss-value-parser "^4.2.0" + +axios@^0.25.0: + version "0.25.0" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.25.0.tgz#349cfbb31331a9b4453190791760a8d35b093e0a" + integrity sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g== + dependencies: + follow-redirects "^1.14.7" + +babel-loader@^8.2.5: + version "8.2.5" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.2.5.tgz#d45f585e654d5a5d90f5350a779d7647c5ed512e" + integrity sha512-OSiFfH89LrEMiWd4pLNqGz4CwJDtbs2ZVc+iGu2HrkRfPxId9F2anQj38IxWpmRfsUY0aBZYi1EFcd3mhtRMLQ== + dependencies: + find-cache-dir "^3.3.1" + loader-utils "^2.0.0" + make-dir "^3.1.0" + schema-utils "^2.6.5" + +babel-plugin-apply-mdx-type-prop@1.6.22: + version "1.6.22" + resolved "https://registry.yarnpkg.com/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz#d216e8fd0de91de3f1478ef3231e05446bc8705b" + integrity sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ== + dependencies: + "@babel/helper-plugin-utils" "7.10.4" + "@mdx-js/util" "1.6.22" + +babel-plugin-dynamic-import-node@^2.3.3: + version "2.3.3" + resolved 
"https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz#84fda19c976ec5c6defef57f9427b3def66e17a3" + integrity sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ== dependencies: - cssom "0.3.x" + object.assign "^4.1.0" -dashdash@^1.12.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= +babel-plugin-extract-import-names@1.6.22: + version "1.6.22" + resolved "https://registry.yarnpkg.com/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz#de5f9a28eb12f3eb2578bf74472204e66d1a13dc" + integrity sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ== dependencies: - assert-plus "^1.0.0" + "@babel/helper-plugin-utils" "7.10.4" -data-urls@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-1.1.0.tgz#15ee0582baa5e22bb59c77140da8f9c76963bbfe" - integrity sha512-YTWYI9se1P55u58gL5GkQHW4P6VJBJ5iBT+B5a7i2Tjadhv52paJG0qHX4A0OR6/t52odI64KP2YvFpkDOi3eQ== +babel-plugin-polyfill-corejs2@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz#440f1b70ccfaabc6b676d196239b138f8a2cfba5" + integrity sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w== dependencies: - abab "^2.0.0" - whatwg-mimetype "^2.2.0" - whatwg-url "^7.0.0" + "@babel/compat-data" "^7.13.11" + "@babel/helper-define-polyfill-provider" "^0.3.1" + semver "^6.1.1" -debug@^2.1.2, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8: - version "2.6.9" - resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== +babel-plugin-polyfill-corejs3@^0.5.0: + version "0.5.2" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz#aabe4b2fa04a6e038b688c5e55d44e78cd3a5f72" + integrity sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ== dependencies: - ms "2.0.0" + "@babel/helper-define-polyfill-provider" "^0.3.1" + core-js-compat "^3.21.0" -debug@^4.1.0: - version "4.1.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" - integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== +babel-plugin-polyfill-regenerator@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz#2c0678ea47c75c8cc2fbb1852278d8fb68233990" + integrity sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A== dependencies: - ms "^2.1.1" + "@babel/helper-define-polyfill-provider" "^0.3.1" -decamelize@^1.2.0: - version "1.2.0" - resolved 
"https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" - integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= +bail@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.5.tgz#b6fa133404a392cbc1f8c4bf63f5953351e7a776" + integrity sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ== -decode-uri-component@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" - integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -deep-extend@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" - integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== +base16@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/base16/-/base16-1.0.0.tgz#e297f60d7ec1014a7a971a39ebc8a98c0b681e70" + integrity sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ== -deep-is@~0.1.3: - version "0.1.3" - resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" - integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= +batch@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" + integrity sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw== -define-properties@^1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" - integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== - dependencies: - object-keys "^1.0.12" +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== + +binary-extensions@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" + integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== + +body-parser@1.20.0: + version "1.20.0" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.0.tgz#3de69bd89011c11573d7bfee6a64f11b6bd27cc5" + integrity sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg== + dependencies: + bytes "3.1.2" + content-type "~1.0.4" + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + 
http-errors "2.0.0" + iconv-lite "0.4.24" + on-finished "2.4.1" + qs "6.10.3" + raw-body "2.5.1" + type-is "~1.6.18" + unpipe "1.0.0" + +bonjour-service@^1.0.11: + version "1.0.13" + resolved "https://registry.yarnpkg.com/bonjour-service/-/bonjour-service-1.0.13.tgz#4ac003dc1626023252d58adf2946f57e5da450c1" + integrity sha512-LWKRU/7EqDUC9CTAQtuZl5HzBALoCYwtLhffW3et7vZMwv3bWLpJf8bRYlMD5OCcDpTfnPgNCV4yo9ZIaJGMiA== + dependencies: + array-flatten "^2.1.2" + dns-equal "^1.0.0" + fast-deep-equal "^3.1.3" + multicast-dns "^7.2.5" + +boolbase@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + integrity sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww== + +boxen@^5.0.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-5.1.2.tgz#788cb686fc83c1f486dfa8a40c68fc2b831d2b50" + integrity sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ== + dependencies: + ansi-align "^3.0.0" + camelcase "^6.2.0" + chalk "^4.1.0" + cli-boxes "^2.2.1" + string-width "^4.2.2" + type-fest "^0.20.2" + widest-line "^3.1.0" + wrap-ansi "^7.0.0" + +boxen@^6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-6.2.1.tgz#b098a2278b2cd2845deef2dff2efc38d329b434d" + integrity sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw== + dependencies: + ansi-align "^3.0.1" + camelcase "^6.2.0" + chalk "^4.1.2" + cli-boxes "^3.0.0" + string-width "^5.0.1" + type-fest "^2.5.0" + widest-line "^4.0.1" + wrap-ansi "^8.0.1" -define-property@^0.2.5: - version "0.2.5" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" - integrity sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY= +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== dependencies: - is-descriptor "^0.1.0" + balanced-match "^1.0.0" + concat-map "0.0.1" -define-property@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" - integrity sha1-dp66rz9KY6rTr56NMEybvnm/sOY= +braces@^3.0.2, braces@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== dependencies: - is-descriptor "^1.0.0" + fill-range "^7.0.1" -define-property@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" - integrity sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ== +browserslist@^4.0.0, browserslist@^4.14.5, browserslist@^4.16.6, browserslist@^4.18.1, browserslist@^4.20.2, browserslist@^4.20.3: + version 
"4.20.4" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.20.4.tgz#98096c9042af689ee1e0271333dbc564b8ce4477" + integrity sha512-ok1d+1WpnU24XYN7oC3QWgTyMhY/avPJ/r9T00xxvUOIparA/gc+UPUMaod3i+G6s+nI2nUb9xZ5k794uIwShw== dependencies: - is-descriptor "^1.0.2" - isobject "^3.0.1" + caniuse-lite "^1.0.30001349" + electron-to-chromium "^1.4.147" + escalade "^3.1.1" + node-releases "^2.0.5" + picocolors "^1.0.0" -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= +buffer-from@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" + integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== -delegates@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" - integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + integrity sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw== -detect-libc@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" - integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups= +bytes@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== -dom-walk@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/dom-walk/-/dom-walk-0.1.1.tgz#672226dc74c8f799ad35307df936aba11acd6018" - integrity sha1-ZyIm3HTI95mtNTB9+TaroRrNYBg= +cacheable-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" + integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== + dependencies: + clone-response "^1.0.2" + get-stream "^5.1.0" + http-cache-semantics "^4.0.0" + keyv "^3.0.0" + lowercase-keys "^2.0.0" + normalize-url "^4.1.0" + responselike "^1.0.2" + +call-bind@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" + integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== + dependencies: + function-bind "^1.1.1" + get-intrinsic "^1.0.2" -domexception@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/domexception/-/domexception-1.0.1.tgz#937442644ca6a31261ef36e3ec677fe805582c90" - integrity sha512-raigMkn7CJNNo6Ihro1fzG7wr3fHuYVytzquZKX5n0yizGsTcYgzdIUwj1X9pK0VvjeihV+XiclP+DjwbsSKug== +callsites@^3.0.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camel-case@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.2.tgz#9728072a954f805228225a6deea6b38461e1bd5a" + integrity sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw== dependencies: - webidl-conversions "^4.0.2" + pascal-case "^3.1.2" + tslib "^2.0.3" -dotenv@^5.0.0: - version "5.0.1" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-5.0.1.tgz#a5317459bd3d79ab88cff6e44057a6a3fbb1fcef" - integrity sha512-4As8uPrjfwb7VXC+WnLCbXK7y+Ueb2B3zgNCePYfhxS1PYeaO1YTeplffTEcbfLhvFNGLAz90VvJs9yomG7bow== +camelcase-css@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5" + integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== -ecc-jsbn@~0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" - integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk= +camelcase@^6.2.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" + integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== + +caniuse-api@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" + integrity sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw== dependencies: - jsbn "~0.1.0" - safer-buffer "^2.1.0" + browserslist "^4.0.0" + caniuse-lite "^1.0.0" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" -electron-to-chromium@^1.3.103: - version "1.3.113" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.113.tgz#b1ccf619df7295aea17bc6951dc689632629e4a9" - integrity sha512-De+lPAxEcpxvqPTyZAXELNpRZXABRxf+uL/rSykstQhzj/B0l1150G/ExIIxKc16lI89Hgz81J0BHAcbTqK49g== +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001335, caniuse-lite@^1.0.30001349: + version "1.0.30001352" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001352.tgz#cc6f5da3f983979ad1e2cdbae0505dccaa7c6a12" + integrity sha512-GUgH8w6YergqPQDGWhJGt8GDRnY0L/iJVQcU3eJ46GYf52R8tk0Wxp0PymuFVZboJYXGiCqwozAYZNRjVj6IcA== -end-of-stream@^1.1.0: - version "1.4.1" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43" - integrity sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q== +ccount@^1.0.0, ccount@^1.0.3: + version "1.1.0" + resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.1.0.tgz#246687debb6014735131be8abab2d93898f8d043" + integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg== + 
+chalk@^2.0.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== dependencies: - once "^1.4.0" + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" -envify@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/envify/-/envify-4.1.0.tgz#f39ad3db9d6801b4e6b478b61028d3f0b6819f7e" - integrity sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw== +chalk@^4.1.0, chalk@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== dependencies: - esprima "^4.0.0" - through "~2.3.4" + ansi-styles "^4.1.0" + supports-color "^7.1.0" -es6-promise@^4.1.0: - version "4.2.5" - resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.5.tgz#da6d0d5692efb461e082c14817fe2427d8f5d054" - integrity sha512-n6wvpdE43VFtJq+lUDYDBFUwV8TZbuGXLV4D6wKafg13ldznKsyEvatubnmUe31zcvelSzOHF+XbaT+Bl9ObDg== +character-entities-legacy@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz#94bc1845dce70a5bb9d2ecc748725661293d8fc1" + integrity sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA== -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= +character-entities@^1.0.0: + version "1.2.4" + resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.4.tgz#e12c3939b7eaf4e5b15e7ad4c5e28e1d48c5b16b" + integrity sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw== -escodegen@^1.9.1: - version "1.11.0" - resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.11.0.tgz#b27a9389481d5bfd5bec76f7bb1eb3f8f4556589" - integrity sha512-IeMV45ReixHS53K/OmfKAIztN/igDHzTJUhZM3k1jMhIZWjk45SMwAtBsEXiJp3vSPmTcu6CXn7mDvFHRN66fw== - dependencies: - esprima "^3.1.3" - estraverse "^4.2.0" - esutils "^2.0.2" - optionator "^0.8.1" +character-reference-invalid@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560" + integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg== + +cheerio-select@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cheerio-select/-/cheerio-select-2.1.0.tgz#4d8673286b8126ca2a8e42740d5e3c4884ae21b4" + integrity sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g== + dependencies: + boolbase "^1.0.0" + css-select "^5.1.0" + css-what "^6.1.0" + domelementtype "^2.3.0" + domhandler "^5.0.3" + domutils "^3.0.1" + 
+cheerio@^1.0.0-rc.11: + version "1.0.0-rc.11" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.11.tgz#1be84be1a126958366bcc57a11648cd9b30a60c2" + integrity sha512-bQwNaDIBKID5ts/DsdhxrjqFXYfLw4ste+wMKqWA8DyKcS4qwsPP4Bk8ZNaTJjvpiX/qW3BT4sU7d6Bh5i+dag== + dependencies: + cheerio-select "^2.1.0" + dom-serializer "^2.0.0" + domhandler "^5.0.3" + domutils "^3.0.1" + htmlparser2 "^8.0.1" + parse5 "^7.0.0" + parse5-htmlparser2-tree-adapter "^7.0.0" + tslib "^2.4.0" + +chokidar@^3.4.2, chokidar@^3.5.3: + version "3.5.3" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.3.tgz#1cf37c8707b932bd1af1ae22c0432e2acd1903bd" + integrity sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw== + dependencies: + anymatch "~3.1.2" + braces "~3.0.2" + glob-parent "~5.1.2" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.6.0" optionalDependencies: - source-map "~0.6.1" + fsevents "~2.3.2" -esprima@^3.1.3: - version "3.1.3" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633" - integrity sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM= +chrome-trace-event@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz#1015eced4741e15d06664a957dbbf50d041e26ac" + integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg== -esprima@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" - integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== +ci-info@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" + integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== -estraverse@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" - integrity sha1-De4/7TH81GlhjOc0IJn8GvoL2xM= +clean-css@^5.2.2, clean-css@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-5.3.0.tgz#ad3d8238d5f3549e83d5f87205189494bc7cbb59" + integrity sha512-YYuuxv4H/iNb1Z/5IbMRoxgrzjWGhOEFfd+groZ5dMCVkpENiMZmwspdrzBo9286JjM1gZJPAyL7ZIdzuvu2AQ== + dependencies: + source-map "~0.6.0" -esutils@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" - integrity sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs= +clean-stack@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== -events@^1.1.0, events@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/events/-/events-1.1.1.tgz#9ebdb7635ad099c70dcc4c2a1f5004288e8bd924" - integrity 
sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ= +cli-boxes@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-2.2.1.tgz#ddd5035d25094fce220e9cab40a45840a440318f" + integrity sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw== -execa@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8" - integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA== - dependencies: - cross-spawn "^6.0.0" - get-stream "^4.0.0" - is-stream "^1.1.0" - npm-run-path "^2.0.0" - p-finally "^1.0.0" - signal-exit "^3.0.0" - strip-eof "^1.0.0" - -expand-brackets@^2.1.4: - version "2.1.4" - resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" - integrity sha1-t3c14xXOMPa27/D4OwQVGiJEliI= - dependencies: - debug "^2.3.3" - define-property "^0.2.5" - extend-shallow "^2.0.1" - posix-character-classes "^0.1.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" +cli-boxes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-3.0.0.tgz#71a10c716feeba005e4504f36329ef0b17cf3145" + integrity sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g== -extend-shallow@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" - integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8= +cli-table3@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.2.tgz#aaf5df9d8b5bf12634dc8b3040806a0c07120d2a" + integrity sha512-QyavHCaIC80cMivimWu4aWHilIpiDpfm3hGmqAmXVL1UsnbLuBSMd21hTX6VY4ZSDSM73ESLeF8TOYId3rBTbw== dependencies: - is-extendable "^0.1.0" + string-width "^4.2.0" + optionalDependencies: + "@colors/colors" "1.5.0" -extend-shallow@^3.0.0, extend-shallow@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" - integrity sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg= +clone-deep@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" + integrity sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ== dependencies: - assign-symbols "^1.0.0" - is-extendable "^1.0.1" - -extend@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + is-plain-object "^2.0.4" + kind-of "^6.0.2" + shallow-clone "^3.0.0" -extglob@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" - integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw== +clone-response@^1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + integrity sha512-yjLXh88P599UOyPTFX0POsd7WxnbsVsGohcwzHOLspIhhpalPw1BcqED8NblyZLKcGrL8dTgMlcaZxV2jAD41Q== dependencies: - array-unique "^0.3.2" - define-property "^1.0.0" - expand-brackets "^2.1.4" - extend-shallow "^2.0.1" - fragment-cache "^0.2.1" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" + mimic-response "^1.0.0" -extsprintf@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= +clsx@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-1.1.1.tgz#98b3134f9abbdf23b2663491ace13c5c03a73188" + integrity sha512-6/bPho624p3S2pMyvP5kKBPXnI3ufHLObBFCfgx+LkeR5lg2XYy2hqZqUf45ypD8COn2bhgGJSUE+l5dhNBieA== -extsprintf@^1.2.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" - integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= +collapse-white-space@^1.0.2: + version "1.0.6" + resolved "https://registry.yarnpkg.com/collapse-white-space/-/collapse-white-space-1.0.6.tgz#e63629c0016665792060dbbeb79c42239d2c5287" + integrity sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ== + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" -fast-deep-equal@^2.0.1: +color-convert@^2.0.1: version "2.0.1" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" - integrity sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk= + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" -fast-json-stable-stringify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" - integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== -fast-levenshtein@~2.0.4: - version "2.0.6" - resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" - integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + 
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -fill-range@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" - integrity sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc= - dependencies: - extend-shallow "^2.0.1" - is-number "^3.0.0" - repeat-string "^1.6.1" - to-regex-range "^2.1.0" +colord@^2.9.1: + version "2.9.2" + resolved "https://registry.yarnpkg.com/colord/-/colord-2.9.2.tgz#25e2bacbbaa65991422c07ea209e2089428effb1" + integrity sha512-Uqbg+J445nc1TKn4FoDPS6ZZqAvEDnwrH42yo8B40JSOgSLxMZ/gt3h4nmCtPLQeXhjJJkqBx7SCY35WnIixaQ== -find-up@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" - integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== - dependencies: - locate-path "^3.0.0" +colorette@^2.0.10: + version "2.0.17" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.17.tgz#5dd4c0d15e2984b7433cb4a9f2ead45063b80c47" + integrity sha512-hJo+3Bkn0NCHybn9Tu35fIeoOKGOk5OCC32y4Hz2It+qlCO2Q3DeQ1hRn/tDDMQKRYUEzqsl7jbF6dYKjlE60g== -for-in@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" - integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= +combine-promises@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/combine-promises/-/combine-promises-1.1.0.tgz#72db90743c0ca7aab7d0d8d2052fd7b0f674de71" + integrity sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg== -foreach@^2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" - integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k= +comma-separated-tokens@^1.0.0: + version "1.0.8" + resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz#632b80b6117867a158f1080ad498b2fbe7e3f5ea" + integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw== -forever-agent@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= +commander@^2.20.0: + version "2.20.3" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== -form-data@~2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" - integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== +commander@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" + integrity 
sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== + +commander@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== + +commander@^8.3.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" + integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg== + +compressible@~2.0.16: + version "2.0.18" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba" + integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg== dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types "^2.1.12" + mime-db ">= 1.43.0 < 2" -fragment-cache@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" - integrity sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk= +compression@^1.7.4: + version "1.7.4" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" + integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ== dependencies: - map-cache "^0.2.2" + accepts "~1.3.5" + bytes "3.0.0" + compressible "~2.0.16" + debug "2.6.9" + on-headers "~1.0.2" + safe-buffer "5.1.2" + vary "~1.1.2" -fs-minipass@^1.2.5: - version "1.2.5" - resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" - integrity sha512-JhBl0skXjUPCFH7x6x61gQxrKyXsxB5gcgePLZCwfyCGGsTISMoIeObbrvVeP6Xmyaudw4TT43qV2Gz+iyd2oQ== +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +configstore@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-5.0.1.tgz#d365021b5df4b98cdd187d6a3b0e3f6a7cc5ed96" + integrity sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA== dependencies: - minipass "^2.2.1" + dot-prop "^5.2.0" + graceful-fs "^4.1.2" + make-dir "^3.0.0" + unique-string "^2.0.0" + write-file-atomic "^3.0.0" + xdg-basedir "^4.0.0" -fs-readdir-recursive@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz#e32fc030a2ccee44a6b5371308da54be0b397d27" - integrity 
sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA== +connect-history-api-fallback@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc" + integrity sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg== -fs.realpath@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= +consola@^2.15.3: + version "2.15.3" + resolved "https://registry.yarnpkg.com/consola/-/consola-2.15.3.tgz#2e11f98d6a4be71ff72e0bdf07bd23e12cb61550" + integrity sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw== -fsevents@^1.2.7: - version "1.2.7" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.7.tgz#4851b664a3783e52003b3c66eb0eee1074933aa4" - integrity sha512-Pxm6sI2MeBD7RdD12RYsqaP0nMiwx8eZBXCa6z2L+mRHm2DYrOYwihmhjpkdjUHwQhslWQjRpEgNq4XvBmaAuw== +content-disposition@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + integrity sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA== + +content-disposition@0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe" + integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ== dependencies: - nan "^2.9.2" - node-pre-gyp "^0.10.0" + safe-buffer "5.2.1" -function-bind@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" - integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== +content-type@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== -gauge@~2.7.3: - version "2.7.4" - resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" - integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c= +convert-source-map@^1.7.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369" + integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA== dependencies: - aproba "^1.0.3" - console-control-strings "^1.0.0" - has-unicode "^2.0.0" - object-assign "^4.1.0" - signal-exit "^3.0.0" - string-width "^1.0.1" - strip-ansi "^3.0.1" - wide-align "^1.1.0" + safe-buffer "~5.1.1" + +cookie-signature@1.0.6: + version "1.0.6" + resolved 
"https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== + +cookie@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" + integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== + +copy-text-to-clipboard@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/copy-text-to-clipboard/-/copy-text-to-clipboard-3.0.1.tgz#8cbf8f90e0a47f12e4a24743736265d157bce69c" + integrity sha512-rvVsHrpFcL4F2P8ihsoLdFHmd404+CMg71S756oRSeQgqk51U3kicGdnvfkrxva0xXH92SjGS62B0XIJsbh+9Q== + +copy-webpack-plugin@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz#96d4dbdb5f73d02dd72d0528d1958721ab72e04a" + integrity sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ== + dependencies: + fast-glob "^3.2.11" + glob-parent "^6.0.1" + globby "^13.1.1" + normalize-path "^3.0.0" + schema-utils "^4.0.0" + serialize-javascript "^6.0.0" + +core-js-compat@^3.21.0, core-js-compat@^3.22.1: + version "3.22.8" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.22.8.tgz#46fa34ce1ddf742acd7f95f575f66bbb21e05d62" + integrity sha512-pQnwg4xtuvc2Bs/5zYQPaEYYSuTxsF7LBWF0SvnVhthZo/Qe+rJpcEekrdNK5DWwDJ0gv0oI9NNX5Mppdy0ctg== + dependencies: + browserslist "^4.20.3" + semver "7.0.0" -get-caller-file@^1.0.1: +core-js-pure@^3.20.2: + version "3.22.8" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.22.8.tgz#f2157793b58719196ccf9673cc14f3683adc0957" + integrity sha512-bOxbZIy9S5n4OVH63XaLVXZ49QKicjowDx/UELyJ68vxfCRpYsbyh/WNZNfEfAk+ekA8vSjt+gCDpvh672bc3w== + +core-js@^3.22.7: + version "3.22.8" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.22.8.tgz#23f860b1fe60797cc4f704d76c93fea8a2f60631" + integrity sha512-UoGQ/cfzGYIuiq6Z7vWL1HfkE9U9IZ4Ub+0XSiJTCzvbZzgPA69oDF2f+lgJ6dFFLEdjW5O6svvoKzXX23xFkA== + +core-util-is@~1.0.0: version "1.0.3" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" - integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" + integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== + +cosmiconfig@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" + integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.1.0" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.7.2" + +cosmiconfig@^7.0.0, cosmiconfig@^7.0.1: + version "7.0.1" + resolved 
"https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.0.1.tgz#714d756522cace867867ccb4474c5d01bbae5d6d" + integrity sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.2.1" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.10.0" + +cross-fetch@^3.1.5: + version "3.1.5" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.5.tgz#e1389f44d9e7ba767907f7af8454787952ab534f" + integrity sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw== + dependencies: + node-fetch "2.6.7" + +cross-spawn@^7.0.3: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +crypto-random-string@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5" + integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== + +css-declaration-sorter@^6.2.2: + version "6.3.0" + resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-6.3.0.tgz#72ebd995c8f4532ff0036631f7365cce9759df14" + integrity sha512-OGT677UGHJTAVMRhPO+HJ4oKln3wkBTwtDFH0ojbqm+MJm6xuDMHp2nkhh/ThaBqq20IbraBQSWKfSLNHQO9Og== + +css-loader@^6.7.1: + version "6.7.1" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-6.7.1.tgz#e98106f154f6e1baf3fc3bc455cb9981c1d5fd2e" + integrity sha512-yB5CNFa14MbPJcomwNh3wLThtkZgcNyI2bNMRt8iE5Z8Vwl7f8vQXFAzn2HDOJvtDq2NTZBUGMSUNNyrv3/+cw== + dependencies: + icss-utils "^5.1.0" + postcss "^8.4.7" + postcss-modules-extract-imports "^3.0.0" + postcss-modules-local-by-default "^4.0.0" + postcss-modules-scope "^3.0.0" + postcss-modules-values "^4.0.0" + postcss-value-parser "^4.2.0" + semver "^7.3.5" + +css-minimizer-webpack-plugin@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.0.0.tgz#e11800388c19c2b7442c39cc78ac8ae3675c9605" + integrity sha512-7ZXXRzRHvofv3Uac5Y+RkWRNo0ZMlcg8e9/OtrqUYmwDWJo+qs67GvdeFrXLsFb7czKNwjQhPkM0avlIYl+1nA== + dependencies: + cssnano "^5.1.8" + jest-worker "^27.5.1" + postcss "^8.4.13" + schema-utils "^4.0.0" + serialize-javascript "^6.0.0" + source-map "^0.6.1" -get-stream@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" - integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== +css-select@^4.1.3: + version "4.3.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-4.3.0.tgz#db7129b2846662fd8628cfc496abb2b59e41529b" + integrity sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ== dependencies: - pump "^3.0.0" + boolbase "^1.0.0" + css-what "^6.0.1" + domhandler 
"^4.3.1" + domutils "^2.8.0" + nth-check "^2.0.1" + +css-select@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-5.1.0.tgz#b8ebd6554c3637ccc76688804ad3f6a6fdaea8a6" + integrity sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg== + dependencies: + boolbase "^1.0.0" + css-what "^6.1.0" + domhandler "^5.0.2" + domutils "^3.0.1" + nth-check "^2.0.1" + +css-tree@^1.1.2, css-tree@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" + integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q== + dependencies: + mdn-data "2.0.14" + source-map "^0.6.1" + +css-what@^6.0.1, css-what@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-6.1.0.tgz#fb5effcf76f1ddea2c81bdfaa4de44e79bac70f4" + integrity sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw== + +cssesc@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== + +cssnano-preset-advanced@^5.3.5: + version "5.3.7" + resolved "https://registry.yarnpkg.com/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.7.tgz#5c00fd6603ab1b2d1dee5540bd3a5931ab275d36" + integrity sha512-VNOdTMRA60KhaURZhnkTGeluHQBHWDMwY7TIDu1Qydf88X6k8xZbV2I+Wlm8JRaj2oi18xvoIOAW17JneoZzEg== + dependencies: + autoprefixer "^10.3.7" + cssnano-preset-default "^5.2.11" + postcss-discard-unused "^5.1.0" + postcss-merge-idents "^5.1.1" + postcss-reduce-idents "^5.2.0" + postcss-zindex "^5.1.0" + +cssnano-preset-default@^5.2.11: + version "5.2.11" + resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-5.2.11.tgz#28350471bc1af9df14052472b61340347f453a53" + integrity sha512-4PadR1NtuaIK8MvLNuY7MznK4WJteldGlzCiMaaTiOUP+apeiIvUDIXykzUOoqgOOUAHrU64ncdD90NfZR3LSQ== + dependencies: + css-declaration-sorter "^6.2.2" + cssnano-utils "^3.1.0" + postcss-calc "^8.2.3" + postcss-colormin "^5.3.0" + postcss-convert-values "^5.1.2" + postcss-discard-comments "^5.1.2" + postcss-discard-duplicates "^5.1.0" + postcss-discard-empty "^5.1.1" + postcss-discard-overridden "^5.1.0" + postcss-merge-longhand "^5.1.5" + postcss-merge-rules "^5.1.2" + postcss-minify-font-values "^5.1.0" + postcss-minify-gradients "^5.1.1" + postcss-minify-params "^5.1.3" + postcss-minify-selectors "^5.2.1" + postcss-normalize-charset "^5.1.0" + postcss-normalize-display-values "^5.1.0" + postcss-normalize-positions "^5.1.0" + postcss-normalize-repeat-style "^5.1.0" + postcss-normalize-string "^5.1.0" + postcss-normalize-timing-functions "^5.1.0" + postcss-normalize-unicode "^5.1.0" + postcss-normalize-url "^5.1.0" + postcss-normalize-whitespace "^5.1.1" + postcss-ordered-values "^5.1.2" + postcss-reduce-initial "^5.1.0" + postcss-reduce-transforms "^5.1.0" + postcss-svgo "^5.1.0" + postcss-unique-selectors "^5.1.1" + +cssnano-utils@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/cssnano-utils/-/cssnano-utils-3.1.0.tgz#95684d08c91511edfc70d2636338ca37ef3a6861" + integrity sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA== + +cssnano@^5.1.8, cssnano@^5.1.9: + version "5.1.11" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-5.1.11.tgz#3bb003380718c7948ce3813493370e8946caf04b" + integrity sha512-2nx+O6LvewPo5EBtYrKc8762mMkZRk9cMGIOP4UlkmxHm7ObxH+zvsJJ+qLwPkUc4/yumL/qJkavYi9NlodWIQ== + dependencies: + cssnano-preset-default "^5.2.11" + lilconfig "^2.0.3" + yaml "^1.10.2" + +csso@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/csso/-/csso-4.2.0.tgz#ea3a561346e8dc9f546d6febedd50187cf389529" + integrity sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA== + dependencies: + css-tree "^1.1.2" + +csstype@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.0.tgz#4ddcac3718d787cf9df0d1b7d15033925c8f29f2" + integrity sha512-uX1KG+x9h5hIJsaKR9xHUeUraxf8IODOwq9JLNPq6BwB04a/xgpq3rcx47l5BZu5zBPlgD342tdke3Hom/nJRA== + +debug@2.6.9, debug@^2.6.0: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@^4.1.0, debug@^4.1.1: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== + dependencies: + ms "2.1.2" + +decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + integrity sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA== + dependencies: + mimic-response "^1.0.0" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deepmerge@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.2.2.tgz#44d2ea3679b8f4d4ffba33f03d865fc1e7bf4955" + integrity sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg== + +default-gateway@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/default-gateway/-/default-gateway-6.0.3.tgz#819494c888053bdb743edbf343d6cdf7f2943a71" + integrity sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg== + dependencies: + execa "^5.0.0" + +defer-to-connect@^1.0.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" + integrity 
sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== + +define-lazy-prop@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz#3f7ae421129bcaaac9bc74905c98a0009ec9ee7f" + integrity sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og== + +define-properties@^1.1.3: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.4.tgz#0b14d7bd7fbeb2f3572c3a7eda80ea5d57fb05b1" + integrity sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA== + dependencies: + has-property-descriptors "^1.0.0" + object-keys "^1.1.1" + +del@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/del/-/del-6.1.1.tgz#3b70314f1ec0aa325c6b14eb36b95786671edb7a" + integrity sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg== + dependencies: + globby "^11.0.1" + graceful-fs "^4.2.4" + is-glob "^4.0.1" + is-path-cwd "^2.2.0" + is-path-inside "^3.0.2" + p-map "^4.0.0" + rimraf "^3.0.2" + slash "^3.0.0" + +depd@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ== + +destroy@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== + +detab@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/detab/-/detab-2.0.4.tgz#b927892069aff405fbb9a186fe97a44a92a94b43" + integrity sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g== + dependencies: + repeat-string "^1.5.4" + +detect-node@^2.0.4: + version "2.1.0" + resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.1.0.tgz#c9c70775a49c3d03bc2c06d9a73be550f978f8b1" + integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g== + +detect-port-alt@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz#24707deabe932d4a3cf621302027c2b266568275" + integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q== + dependencies: + address "^1.0.1" + debug "^2.6.0" + +detect-port@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/detect-port/-/detect-port-1.3.0.tgz#d9c40e9accadd4df5cac6a782aefd014d573d1f1" + integrity sha512-E+B1gzkl2gqxt1IhUzwjrxBKRqx1UzC3WLONHinn8S3T6lwV/agVCyitiFOsGJ/eYuEUBvD71MZHy3Pv1G9doQ== + dependencies: + address "^1.0.1" + debug "^2.6.0" + +dir-glob@^3.0.1: + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +dns-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" + integrity sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg== + +dns-packet@^5.2.2: + version "5.3.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-5.3.1.tgz#eb94413789daec0f0ebe2fcc230bdc9d7c91b43d" + integrity sha512-spBwIj0TK0Ey3666GwIdWVfUpLyubpU53BTCu8iPn4r4oXd9O14Hjg3EHw3ts2oed77/SeckunUYCyRlSngqHw== + dependencies: + "@leichtgewicht/ip-codec" "^2.0.1" + +dom-converter@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" + integrity sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA== + dependencies: + utila "~0.4" + +dom-serializer@^1.0.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.4.1.tgz#de5d41b1aea290215dc45a6dae8adcf1d32e2d30" + integrity sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.2.0" + entities "^2.0.0" + +dom-serializer@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-2.0.0.tgz#e41b802e1eedf9f6cae183ce5e622d789d7d8e53" + integrity sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg== + dependencies: + domelementtype "^2.3.0" + domhandler "^5.0.2" + entities "^4.2.0" + +domelementtype@^2.0.1, domelementtype@^2.2.0, domelementtype@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d" + integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw== + +domhandler@^4.0.0, domhandler@^4.2.0, domhandler@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.3.1.tgz#8d792033416f59d68bc03a5aa7b018c1ca89279c" + integrity sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ== + dependencies: + domelementtype "^2.2.0" + +domhandler@^5.0.1, domhandler@^5.0.2, domhandler@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-5.0.3.tgz#cc385f7f751f1d1fc650c21374804254538c7d31" + integrity sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w== + dependencies: + domelementtype "^2.3.0" + +domutils@^2.5.2, domutils@^2.8.0: + version "2.8.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.8.0.tgz#4437def5db6e2d1f5d6ee859bd95ca7d02048135" + integrity sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A== + dependencies: + 
dom-serializer "^1.0.1" + domelementtype "^2.2.0" + domhandler "^4.2.0" + +domutils@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-3.0.1.tgz#696b3875238338cb186b6c0612bd4901c89a4f1c" + integrity sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q== + dependencies: + dom-serializer "^2.0.0" + domelementtype "^2.3.0" + domhandler "^5.0.1" + +dot-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/dot-case/-/dot-case-3.0.4.tgz#9b2b670d00a431667a8a75ba29cd1b98809ce751" + integrity sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w== + dependencies: + no-case "^3.0.4" + tslib "^2.0.3" + +dot-prop@^5.2.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-5.3.0.tgz#90ccce708cd9cd82cc4dc8c3ddd9abdd55b20e88" + integrity sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q== + dependencies: + is-obj "^2.0.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + integrity sha512-CEj8FwwNA4cVH2uFCoHUrmojhYh1vmCdOaneKJXwkeY1i9jnlslVo9dx+hQ5Hl9GnH/Bwy/IjxAyOePyPKYnzA== + +duplexer@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.2.tgz#3abe43aef3835f8ae077d136ddce0f276b0400e6" + integrity sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg== + +eastasianwidth@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" + integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== + +electron-to-chromium@^1.4.147: + version "1.4.151" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.151.tgz#d1c09dd3a06cb81ef03a3bbbff6905827c33ab4b" + integrity sha512-XaG2LpZi9fdiWYOqJh0dJy4SlVywCvpgYXhzOlZTp4JqSKqxn5URqOjbm9OMYB3aInA2GuHQiem1QUOc1yT0Pw== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +emoji-regex@^9.2.2: + version "9.2.2" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== + +emojis-list@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78" + integrity 
sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q== + +emoticon@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/emoticon/-/emoticon-3.2.0.tgz#c008ca7d7620fac742fe1bf4af8ff8fed154ae7f" + integrity sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== + +end-of-stream@^1.1.0: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +enhanced-resolve@^5.9.3: + version "5.9.3" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.9.3.tgz#44a342c012cbc473254af5cc6ae20ebd0aae5d88" + integrity sha512-Bq9VSor+kjvW3f9/MiiR4eE3XYgOl7/rS8lnSxbRbF3kS0B2r+Y9w5krBWxZgDxASVZbdYrn5wT4j/Wb0J9qow== + dependencies: + graceful-fs "^4.2.4" + tapable "^2.2.0" + +entities@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" + integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== + +entities@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/entities/-/entities-3.0.1.tgz#2b887ca62585e96db3903482d336c1006c3001d4" + integrity sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q== + +entities@^4.2.0, entities@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-4.3.0.tgz#62915f08d67353bb4eb67e3d62641a4059aec656" + integrity sha512-/iP1rZrSEJ0DTlPiX+jbzlA3eVkY/e8L8SozroF395fIqE3TYF/Nz7YOMAawta+vLmyJ/hkGNNPcSbMADCCXbg== + +error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +es-module-lexer@^0.9.0: + version "0.9.3" + resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-0.9.3.tgz#6f13db00cc38417137daf74366f535c8eb438f19" + integrity sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ== + +escalade@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" + integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== + +escape-goat@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/escape-goat/-/escape-goat-2.1.1.tgz#1b2dc77003676c457ec760b2dc68edb648188675" + integrity 
sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q== + +escape-html@^1.0.3, escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +eslint-scope@5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + +esprima@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +esrecurse@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== + dependencies: + estraverse "^5.2.0" + +estraverse@^4.1.1: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +estraverse@^5.2.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123" + integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +eta@^1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/eta/-/eta-1.12.3.tgz#2982d08adfbef39f9fa50e2fbd42d7337e7338b1" + integrity sha512-qHixwbDLtekO/d51Yr4glcaUJCIjGVJyTzuqV4GPlgZo1YpgOKG+avQynErZIYrfM6JIJdtiG2Kox8tbb+DoGg== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity 
sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== + +eval@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/eval/-/eval-0.1.8.tgz#2b903473b8cc1d1989b83a1e7923f883eb357f85" + integrity sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw== + dependencies: + "@types/node" "*" + require-like ">= 0.1.1" + +eventemitter3@^4.0.0: + version "4.0.7" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" + integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== + +events@^3.2.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + +execa@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + +express@^4.17.3: + version "4.18.1" + resolved "https://registry.yarnpkg.com/express/-/express-4.18.1.tgz#7797de8b9c72c857b9cd0e14a5eea80666267caf" + integrity sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q== + dependencies: + accepts "~1.3.8" + array-flatten "1.1.1" + body-parser "1.20.0" + content-disposition "0.5.4" + content-type "~1.0.4" + cookie "0.5.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "2.0.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.2.0" + fresh "0.5.2" + http-errors "2.0.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "2.4.1" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.7" + qs "6.10.3" + range-parser "~1.2.1" + safe-buffer "5.2.1" + send "0.18.0" + serve-static "1.15.0" + setprototypeof "1.2.0" + statuses "2.0.1" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + integrity sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug== + dependencies: + is-extendable "^0.1.0" + +extend@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-glob@^3.2.11, 
fast-glob@^3.2.9: + version "3.2.11" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.11.tgz#a1172ad95ceb8a16e20caa5c5e56480e5129c1d9" + integrity sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-url-parser@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/fast-url-parser/-/fast-url-parser-1.1.3.tgz#f4af3ea9f34d8a271cf58ad2b3759f431f0b318d" + integrity sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ== + dependencies: + punycode "^1.3.2" + +fastq@^1.6.0: + version "1.13.0" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.13.0.tgz#616760f88a7526bdfc596b7cab8c18938c36b98c" + integrity sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw== + dependencies: + reusify "^1.0.4" + +faye-websocket@^0.11.3: + version "0.11.4" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.4.tgz#7f0d9275cfdd86a1c963dc8b65fcc451edcbb1da" + integrity sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g== + dependencies: + websocket-driver ">=0.5.1" + +fbemitter@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/fbemitter/-/fbemitter-3.0.0.tgz#00b2a1af5411254aab416cd75f9e6289bee4bff3" + integrity sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw== + dependencies: + fbjs "^3.0.0" + +fbjs-css-vars@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz#216551136ae02fe255932c3ec8775f18e2c078b8" + integrity sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ== + +fbjs@^3.0.0, fbjs@^3.0.1: + version "3.0.4" + resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-3.0.4.tgz#e1871c6bd3083bac71ff2da868ad5067d37716c6" + integrity sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ== + dependencies: + cross-fetch "^3.1.5" + fbjs-css-vars "^1.0.0" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^0.7.30" + +feed@^4.2.2: + version "4.2.2" + resolved "https://registry.yarnpkg.com/feed/-/feed-4.2.2.tgz#865783ef6ed12579e2c44bbef3c9113bc4956a7e" + integrity sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ== + dependencies: + xml-js "^1.6.11" + +file-loader@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-6.2.0.tgz#baef7cf8e1840df325e4390b4484879480eebe4d" + integrity 
sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + +filesize@^8.0.6: + version "8.0.7" + resolved "https://registry.yarnpkg.com/filesize/-/filesize-8.0.7.tgz#695e70d80f4e47012c132d57a059e80c6b580bd8" + integrity sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ== + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +finalhandler@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" + integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "2.4.1" + parseurl "~1.3.3" + statuses "2.0.1" + unpipe "~1.0.0" + +find-cache-dir@^3.3.1: + version "3.3.2" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-3.3.2.tgz#b30c5b6eff0730731aea9bbd9dbecbd80256d64b" + integrity sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig== + dependencies: + commondir "^1.0.1" + make-dir "^3.0.2" + pkg-dir "^4.1.0" + +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + +find-up@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +find-up@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + +flux@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/flux/-/flux-4.0.3.tgz#573b504a24982c4768fdfb59d8d2ea5637d72ee7" + integrity sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw== + dependencies: + fbemitter "^3.0.0" + fbjs "^3.0.1" + +follow-redirects@^1.0.0, follow-redirects@^1.14.7: + version "1.15.1" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.1.tgz#0ca6a452306c9b276e4d3127483e29575e207ad5" + integrity sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA== + +fork-ts-checker-webpack-plugin@^6.5.0: + version "6.5.2" + resolved 
"https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.2.tgz#4f67183f2f9eb8ba7df7177ce3cf3e75cdafb340" + integrity sha512-m5cUmF30xkZ7h4tWUgTAcEaKmUW7tfyUyTqNNOz7OxWJ0v1VWKTcOvH8FWHUwSjlW/356Ijc9vi3XfcPstpQKA== + dependencies: + "@babel/code-frame" "^7.8.3" + "@types/json-schema" "^7.0.5" + chalk "^4.1.0" + chokidar "^3.4.2" + cosmiconfig "^6.0.0" + deepmerge "^4.2.2" + fs-extra "^9.0.0" + glob "^7.1.6" + memfs "^3.1.2" + minimatch "^3.0.4" + schema-utils "2.7.0" + semver "^7.3.2" + tapable "^1.0.0" + +forwarded@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" + integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== + +fraction.js@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/fraction.js/-/fraction.js-4.2.0.tgz#448e5109a313a3527f5a3ab2119ec4cf0e0e2950" + integrity sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA== + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== + +fs-extra@^10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-10.1.0.tgz#02873cfbc4084dde127eaa5f9905eef2325d1abf" + integrity sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs-extra@^9.0.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== + dependencies: + at-least-node "^1.0.0" + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs-monkey@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/fs-monkey/-/fs-monkey-1.0.3.tgz#ae3ac92d53bb328efe0e9a1d9541f6ad8d48e2d3" + integrity sha512-cybjIfiiE+pTWicSCLFHSrXZ6EilF30oh91FDP9S2B051prEa7QWfrVTQm10/dDpswBDXZugPa1Ogu8Yh+HV0Q== + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== + +fsevents@~2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +gensync@^1.0.0-beta.1, gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" 
+ resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-intrinsic@^1.0.2, get-intrinsic@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.2.tgz#336975123e05ad0b7ba41f152ee4aadbea6cf598" + integrity sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.3" + +get-own-enumerable-property-symbols@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" + integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== + +get-stream@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== + dependencies: + pump "^3.0.0" + +get-stream@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" + integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== + dependencies: + pump "^3.0.0" + +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + +github-slugger@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/github-slugger/-/github-slugger-1.4.0.tgz#206eb96cdb22ee56fdc53a28d5a302338463444e" + integrity sha512-w0dzqw/nt51xMVmlaV1+JRzN+oCa1KfcgGEWhxUG16wbdA+Xnt/yoFO8Z8x/V82ZcZ0wy6ln9QDup5avbhiDhQ== + +glob-parent@^5.1.2, glob-parent@~5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-parent@^6.0.1: + version "6.0.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" + integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== + dependencies: + is-glob "^4.0.3" + +glob-to-regexp@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz#c75297087c851b9a578bd217dd59a92f59fe546e" + integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== + +glob@^7.0.0, glob@^7.1.3, glob@^7.1.6: + version "7.2.3" + resolved 
"https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.1.1" + once "^1.3.0" + path-is-absolute "^1.0.0" + +global-dirs@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-3.0.0.tgz#70a76fe84ea315ab37b1f5576cbde7d48ef72686" + integrity sha512-v8ho2DS5RiCjftj1nD9NmnfaOzTdud7RRnVd9kFNOjqZbISlx5DQ+OrTkywgd0dIt7oFCvKetZSHoHcP3sDdiA== + dependencies: + ini "2.0.0" + +global-modules@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" + integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A== + dependencies: + global-prefix "^3.0.0" + +global-prefix@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97" + integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg== + dependencies: + ini "^1.3.5" + kind-of "^6.0.2" + which "^1.3.1" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globby@^11.0.1, globby@^11.0.4, globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" + +globby@^13.1.1: + version "13.1.1" + resolved "https://registry.yarnpkg.com/globby/-/globby-13.1.1.tgz#7c44a93869b0b7612e38f22ed532bfe37b25ea6f" + integrity sha512-XMzoDZbGZ37tufiv7g0N4F/zp3zkwdFtVbV3EHsVl1KQr4RPLfNoT068/97RPshz2J5xYNEjLKKBKaGHifBd3Q== + dependencies: + dir-glob "^3.0.1" + fast-glob "^3.2.11" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^4.0.0" + +got@^9.6.0: + version "9.6.0" + resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" + integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== + dependencies: + "@sindresorhus/is" "^0.14.0" + "@szmarczak/http-timer" "^1.1.2" + cacheable-request "^6.0.0" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^4.1.0" + lowercase-keys "^1.0.1" + mimic-response "^1.0.1" + p-cancelable "^1.0.0" + to-readable-stream "^1.0.0" + url-parse-lax "^3.0.0" + +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.6, graceful-fs@^4.2.9: + version "4.2.10" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" + integrity 
sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== + +gray-matter@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/gray-matter/-/gray-matter-4.0.3.tgz#e893c064825de73ea1f5f7d88c7a9f7274288798" + integrity sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q== + dependencies: + js-yaml "^3.13.1" + kind-of "^6.0.2" + section-matter "^1.0.0" + strip-bom-string "^1.0.0" + +gzip-size@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-6.0.0.tgz#065367fd50c239c0671cbcbad5be3e2eeb10e462" + integrity sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q== + dependencies: + duplexer "^0.1.2" + +handle-thing@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.1.tgz#857f79ce359580c340d43081cc648970d0bb234e" + integrity sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-property-descriptors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz#610708600606d36961ed04c196193b6a607fa861" + integrity sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ== + dependencies: + get-intrinsic "^1.1.1" + +has-symbols@^1.0.1, has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +has-yarn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-2.1.0.tgz#137e11354a7b5bf11aa5cb649cf0c6f3ff2b2e77" + integrity sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw== + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +hast-to-hyperscript@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz#9b67fd188e4c81e8ad66f803855334173920218d" + integrity sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA== + dependencies: + "@types/unist" "^2.0.3" + comma-separated-tokens "^1.0.0" + property-information "^5.3.0" + space-separated-tokens "^1.0.0" 
+ style-to-object "^0.3.0" + unist-util-is "^4.0.0" + web-namespaces "^1.0.0" + +hast-util-from-parse5@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-5.0.3.tgz#3089dc0ee2ccf6ec8bc416919b51a54a589e097c" + integrity sha512-gOc8UB99F6eWVWFtM9jUikjN7QkWxB3nY0df5Z0Zq1/Nkwl5V4hAAsl0tmwlgWl/1shlTF8DnNYLO8X6wRV9pA== + dependencies: + ccount "^1.0.3" + hastscript "^5.0.0" + property-information "^5.0.0" + web-namespaces "^1.1.2" + xtend "^4.0.1" + +hast-util-from-parse5@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz#554e34abdeea25ac76f5bd950a1f0180e0b3bc2a" + integrity sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA== + dependencies: + "@types/parse5" "^5.0.0" + hastscript "^6.0.0" + property-information "^5.0.0" + vfile "^4.0.0" + vfile-location "^3.2.0" + web-namespaces "^1.0.0" + +hast-util-parse-selector@^2.0.0: + version "2.2.5" + resolved "https://registry.yarnpkg.com/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz#d57c23f4da16ae3c63b3b6ca4616683313499c3a" + integrity sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ== + +hast-util-raw@6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/hast-util-raw/-/hast-util-raw-6.0.1.tgz#973b15930b7529a7b66984c98148b46526885977" + integrity sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig== + dependencies: + "@types/hast" "^2.0.0" + hast-util-from-parse5 "^6.0.0" + hast-util-to-parse5 "^6.0.0" + html-void-elements "^1.0.0" + parse5 "^6.0.0" + unist-util-position "^3.0.0" + vfile "^4.0.0" + web-namespaces "^1.0.0" + xtend "^4.0.0" + zwitch "^1.0.0" + +hast-util-to-parse5@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz#1ec44650b631d72952066cea9b1445df699f8479" + integrity sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ== + dependencies: + hast-to-hyperscript "^9.0.0" + property-information "^5.0.0" + web-namespaces "^1.0.0" + xtend "^4.0.0" + zwitch "^1.0.0" + +hastscript@^5.0.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-5.1.2.tgz#bde2c2e56d04c62dd24e8c5df288d050a355fb8a" + integrity sha512-WlztFuK+Lrvi3EggsqOkQ52rKbxkXL3RwB6t5lwoa8QLMemoWfBuL43eDrwOamJyR7uKQKdmKYaBH1NZBiIRrQ== + dependencies: + comma-separated-tokens "^1.0.0" + hast-util-parse-selector "^2.0.0" + property-information "^5.0.0" + space-separated-tokens "^1.0.0" + +hastscript@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/hastscript/-/hastscript-6.0.0.tgz#e8768d7eac56c3fdeac8a92830d58e811e5bf640" + integrity sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w== + dependencies: + "@types/hast" "^2.0.0" + comma-separated-tokens "^1.0.0" + hast-util-parse-selector "^2.0.0" + property-information "^5.0.0" + space-separated-tokens "^1.0.0" + +he@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity 
sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +history@^4.9.0: + version "4.10.1" + resolved "https://registry.yarnpkg.com/history/-/history-4.10.1.tgz#33371a65e3a83b267434e2b3f3b1b4c58aad4cf3" + integrity sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew== + dependencies: + "@babel/runtime" "^7.1.2" + loose-envify "^1.2.0" + resolve-pathname "^3.0.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + value-equal "^1.0.1" + +hoist-non-react-statics@^3.1.0: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + +hpack.js@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2" + integrity sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ== + dependencies: + inherits "^2.0.1" + obuf "^1.0.0" + readable-stream "^2.0.1" + wbuf "^1.1.0" + +html-entities@^2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-2.3.3.tgz#117d7626bece327fc8baace8868fa6f5ef856e46" + integrity sha512-DV5Ln36z34NNTDgnz0EWGBLZENelNAtkiFA4kyNOG2tDI6Mz1uSWiq1wAKdyjnJwyDiDO7Fa2SO1CTxPXL8VxA== + +html-minifier-terser@^6.0.2, html-minifier-terser@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz#bfc818934cc07918f6b3669f5774ecdfd48f32ab" + integrity sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw== + dependencies: + camel-case "^4.1.2" + clean-css "^5.2.2" + commander "^8.3.0" + he "^1.2.0" + param-case "^3.0.4" + relateurl "^0.2.7" + terser "^5.10.0" + +html-tags@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/html-tags/-/html-tags-3.2.0.tgz#dbb3518d20b726524e4dd43de397eb0a95726961" + integrity sha512-vy7ClnArOZwCnqZgvv+ddgHgJiAFXe3Ge9ML5/mBctVJoUoYPCdxVucOywjDARn6CVoh3dRSFdPHy2sX80L0Wg== + +html-void-elements@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/html-void-elements/-/html-void-elements-1.0.5.tgz#ce9159494e86d95e45795b166c2021c2cfca4483" + integrity sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w== + +html-webpack-plugin@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.5.0.tgz#c3911936f57681c1f9f4d8b68c158cd9dfe52f50" + integrity sha512-sy88PC2cRTVxvETRgUHFrL4No3UxvcH8G1NepGhqaTT+GXN2kTamqasot0inS5hXeg1cMbFDt27zzo9p35lZVw== + dependencies: + "@types/html-minifier-terser" "^6.0.0" + html-minifier-terser "^6.0.2" + lodash "^4.17.21" + pretty-error "^4.0.0" + tapable "^2.0.0" + +htmlparser2@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.1.0.tgz#c4d762b6c3371a05dbe65e94ae43a9f845fb8fb7" + integrity 
sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.0.0" + domutils "^2.5.2" + entities "^2.0.0" + +htmlparser2@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-8.0.1.tgz#abaa985474fcefe269bc761a779b544d7196d010" + integrity sha512-4lVbmc1diZC7GUJQtRQ5yBAeUCL1exyMwmForWkRLnwyzWBFxN633SALPMGYaWZvKe9j1pRZJpauvmxENSp/EA== + dependencies: + domelementtype "^2.3.0" + domhandler "^5.0.2" + domutils "^3.0.1" + entities "^4.3.0" + +http-cache-semantics@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" + integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + +http-deceiver@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87" + integrity sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw== + +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" + integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== + dependencies: + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" + +http-errors@~1.6.2: + version "1.6.3" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" + integrity sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A== + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.0" + statuses ">= 1.4.0 < 2" + +http-parser-js@>=0.5.1: + version "0.5.6" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.6.tgz#2e02406ab2df8af8a7abfba62e0da01c62b95afd" + integrity sha512-vDlkRPDJn93swjcjqMSaGSPABbIarsr1TLAui/gLDXzV5VsJNdXNzMYDyNBLQkjWQCJ1uizu8T2oDMhmGt0PRA== + +http-proxy-middleware@^2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz#e1a4dd6979572c7ab5a4e4b55095d1f32a74963f" + integrity sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw== + dependencies: + "@types/http-proxy" "^1.17.8" + http-proxy "^1.18.1" + is-glob "^4.0.1" + is-plain-obj "^3.0.0" + micromatch "^4.0.2" + +http-proxy@^1.18.1: + version "1.18.1" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549" + integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ== + dependencies: + eventemitter3 "^4.0.0" + follow-redirects "^1.0.0" + requires-port "^1.0.0" + +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity 
sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +iconv-lite@0.4.24: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +icss-utils@^5.0.0, icss-utils@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae" + integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA== + +ignore@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.0.tgz#6d3bac8fa7fe0d45d9f9be7bac2fc279577e345a" + integrity sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ== + +image-size@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.0.1.tgz#86d6cfc2b1d19eab5d2b368d4b9194d9e48541c5" + integrity sha512-VAwkvNSNGClRw9mDHhc5Efax8PLlsOGcUTh0T/LIriC8vPA3U5PdqXWqkz406MoYHMKW8Uf9gWr05T/rYB44kQ== + dependencies: + queue "6.0.2" + +immer@^9.0.7: + version "9.0.14" + resolved "https://registry.yarnpkg.com/immer/-/immer-9.0.14.tgz#e05b83b63999d26382bb71676c9d827831248a48" + integrity sha512-ubBeqQutOSLIFCUBN03jGeOS6a3DoYlSYwYJTa+gSKEZKU5redJIqkIdZ3JVv/4RZpfcXdAWH5zCNLWPRv2WDw== + +import-fresh@^3.1.0, import-fresh@^3.2.1, import-fresh@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +import-lazy@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-2.1.0.tgz#05698e3d45c88e8d7e9d92cb0584e77f096f3e43" + integrity sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A== + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +infima@0.2.0-alpha.39: + version "0.2.0-alpha.39" + resolved "https://registry.yarnpkg.com/infima/-/infima-0.2.0-alpha.39.tgz#054b13ac44f3e9a42bc083988f1a1586add2f59c" + integrity sha512-UyYiwD3nwHakGhuOUfpe3baJ8gkiPpRVx4a4sE/Ag+932+Y6swtLsdPoRR8ezhwqGnduzxmFkjumV9roz6QoLw== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity 
sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.0, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw== + +ini@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ini/-/ini-2.0.0.tgz#e5fd556ecdd5726be978fa1001862eacb0a94bc5" + integrity sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA== + +ini@^1.3.5, ini@~1.3.0: + version "1.3.8" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + +inline-style-parser@0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" + integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== + +interpret@^1.0.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" + integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA== + +invariant@^2.2.4: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== + dependencies: + loose-envify "^1.0.0" + +ipaddr.js@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +ipaddr.js@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-2.0.1.tgz#eca256a7a877e917aeb368b0a7497ddf42ef81c0" + integrity sha512-1qTgH9NG+IIJ4yfKs2e6Pp1bZg8wbDbKHT21HrLIeYBTRLgMYKnMTPAuI3Lcs61nfx5h1xlXnbJtH1kX5/d/ng== + +is-alphabetical@1.0.4, is-alphabetical@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.4.tgz#9e7d6b94916be22153745d184c298cbf986a686d" + integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg== + +is-alphanumerical@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz#7eb9a2431f855f6b1ef1a78e326df515696c4dbf" + integrity 
sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A== + dependencies: + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-buffer@^2.0.0: + version "2.0.5" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" + integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== + +is-ci@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" + integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== + dependencies: + ci-info "^2.0.0" + +is-core-module@^2.8.1: + version "2.9.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.9.0.tgz#e1c34429cd51c6dd9e09e0799e396e27b19a9c69" + integrity sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A== + dependencies: + has "^1.0.3" + +is-decimal@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.4.tgz#65a3a5958a1c5b63a706e1b333d7cd9f630d3fa5" + integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw== + +is-docker@^2.0.0, is-docker@^2.1.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== + +is-extendable@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + integrity sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw== + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: + version "4.0.3" + resolved 
"https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + +is-hexadecimal@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7" + integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw== + +is-installed-globally@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.4.0.tgz#9a0fd407949c30f86eb6959ef1b7994ed0b7b520" + integrity sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ== + dependencies: + global-dirs "^3.0.0" + is-path-inside "^3.0.2" + +is-npm@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-5.0.0.tgz#43e8d65cc56e1b67f8d47262cf667099193f45a8" + integrity sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA== + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + integrity sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg== + +is-obj@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982" + integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w== + +is-path-cwd@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-2.2.0.tgz#67d43b82664a7b5191fd9119127eb300048a9fdb" + integrity sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ== + +is-path-inside@^3.0.2: + version "3.0.3" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" + integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== + +is-plain-obj@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" + integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== + +is-plain-obj@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-3.0.0.tgz#af6f2ea14ac5a646183a5bbdb5baabbc156ad9d7" + integrity sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA== + +is-plain-object@^2.0.4: + version "2.0.4" + resolved 
"https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== + dependencies: + isobject "^3.0.1" + +is-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" + integrity sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA== + +is-root@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" + integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== + +is-stream@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== + +is-typedarray@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== + +is-whitespace-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz#0858edd94a95594c7c9dd0b5c174ec6e45ee4aa7" + integrity sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w== + +is-word-character@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-word-character/-/is-word-character-1.0.4.tgz#ce0e73216f98599060592f62ff31354ddbeb0230" + integrity sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA== + +is-wsl@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + +is-yarn-global@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/is-yarn-global/-/is-yarn-global-0.3.0.tgz#d502d3382590ea3004893746754c89139973e232" + integrity sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw== + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ== + +isexe@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== + +isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg== + +jest-worker@^27.4.5, jest-worker@^27.5.1: + version "27.5.1" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-27.5.1.tgz#8d146f0900e8973b106b6f73cc1e9a8cb86f8db0" + integrity sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg== + dependencies: + "@types/node" "*" + merge-stream "^2.0.0" + supports-color "^8.0.0" + +joi@^17.6.0: + version "17.6.0" + resolved "https://registry.yarnpkg.com/joi/-/joi-17.6.0.tgz#0bb54f2f006c09a96e75ce687957bd04290054b2" + integrity sha512-OX5dG6DTbcr/kbMFj0KGYxuew69HPcAE3K/sZpEV2nP6e/j/C0HV+HNiBPCASxdx5T7DMoa0s8UeHWMnb6n2zw== + dependencies: + "@hapi/hoek" "^9.0.0" + "@hapi/topo" "^5.0.0" + "@sideway/address" "^4.1.3" + "@sideway/formula" "^3.0.0" + "@sideway/pinpoint" "^2.0.0" + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-yaml@^3.13.1: + version "3.14.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + integrity sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA== + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ== + +json-parse-even-better-errors@^2.3.0, json-parse-even-better-errors@^2.3.1: + version "2.3.1" + resolved 
"https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema-traverse@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2" + integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== + +json5@^2.1.2, json5@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.1.tgz#655d50ed1e6f95ad1a3caababd2b0efda10b395c" + integrity sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA== + +jsonfile@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" + integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + dependencies: + universalify "^2.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== + dependencies: + json-buffer "3.0.0" + +kind-of@^6.0.0, kind-of@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" + integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== + +kleur@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" + integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== + +klona@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/klona/-/klona-2.0.5.tgz#d166574d90076395d9963aa7a928fabb8d76afbc" + integrity sha512-pJiBpiXMbt7dkzXe8Ghj/u4FfXOOa98fPW+bihOJ4SjnoijweJrNThJfd3ifXpXhREjpoF2mZVH1GfS9LV3kHQ== + +latest-version@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-5.1.0.tgz#119dfe908fe38d15dfa43ecd13fa12ec8832face" + integrity sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA== + dependencies: + package-json "^6.3.0" + +leven@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" + integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== + +lilconfig@^2.0.3: + version "2.0.5" + resolved 
"https://registry.yarnpkg.com/lilconfig/-/lilconfig-2.0.5.tgz#19e57fd06ccc3848fd1891655b5a447092225b25" + integrity sha512-xaYmXZtTHPAw5m+xLN8ab9C+3a8YmV3asNSPOATITbtwrfbwaLJj8h66H1WMIpALCkqsIzK3h7oQ+PdX+LQ9Eg== + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +loader-runner@^4.2.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-4.3.0.tgz#c1b4a163b99f614830353b16755e7149ac2314e1" + integrity sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg== + +loader-utils@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.2.tgz#d6e3b4fb81870721ae4e0868ab11dd638368c129" + integrity sha512-TM57VeHptv569d/GKh6TAYdzKblwDNiumOdkFnejjD0XwTH87K90w3O7AiJRqdQoXygvi1VQTJTLGhJl7WqA7A== + dependencies: + big.js "^5.2.2" + emojis-list "^3.0.0" + json5 "^2.1.2" + +loader-utils@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-3.2.0.tgz#bcecc51a7898bee7473d4bc6b845b23af8304d4f" + integrity sha512-HVl9ZqccQihZ7JM85dco1MvO9G+ONvxoGa9rkhzFsneGLKSUg1gJf9bWzhRhcvm2qChhWpebQhP44qxjKIUCaQ== + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +locate-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lodash.curry@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.curry/-/lodash.curry-4.1.1.tgz#248e36072ede906501d75966200a86dab8b23170" + integrity sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA== + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== + +lodash.flow@^3.3.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a" + integrity 
sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw== + +lodash.memoize@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + integrity sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag== + +lodash.uniq@4.5.0, lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + integrity sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ== + +lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lower-case@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-2.0.2.tgz#6fa237c63dbdc4a82ca0fd882e4722dc5e634e28" + integrity sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg== + dependencies: + tslib "^2.0.3" -get-value@^2.0.3, get-value@^2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" - integrity sha1-3BXKHGcjh8p2vTesCjlbogQqLCg= +lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== -getpass@^0.1.1: - version "0.1.7" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= +lowercase-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" + integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== dependencies: - assert-plus "^1.0.0" + yallist "^4.0.0" -glob-parent@^3.1.0: +make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0: version "3.1.0" - resolved 
"https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" - integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4= + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" + integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== dependencies: - is-glob "^3.1.0" - path-dirname "^1.0.0" + semver "^6.0.0" -glob@^7.0.0, glob@^7.1.3: - version "7.1.3" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1" - integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^3.0.4" - once "^1.3.0" - path-is-absolute "^1.0.0" +markdown-escapes@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/markdown-escapes/-/markdown-escapes-1.0.4.tgz#c95415ef451499d7602b91095f3c8e8975f78535" + integrity sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg== -global@^4.3.2: - version "4.3.2" - resolved "https://registry.yarnpkg.com/global/-/global-4.3.2.tgz#e76989268a6c74c38908b1305b10fc0e394e9d0f" - integrity sha1-52mJJopsdMOJCLEwWxD8DjlOnQ8= +mdast-squeeze-paragraphs@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz#7c4c114679c3bee27ef10b58e2e015be79f1ef97" + integrity sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ== dependencies: - min-document "^2.19.0" - process "~0.5.1" + unist-util-remove "^2.0.0" -globals@^11.1.0: - version "11.11.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.11.0.tgz#dcf93757fa2de5486fbeed7118538adf789e9c2e" - integrity sha512-WHq43gS+6ufNOEqlrDBxVEbb8ntfXrfAUU2ZOpCxrBdGKW3gyv8mCxAfIBD0DroPKGrJ2eSsXsLtY9MPntsyTw== +mdast-util-definitions@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz#c5c1a84db799173b4dcf7643cda999e440c24db2" + integrity sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ== + dependencies: + unist-util-visit "^2.0.0" + +mdast-util-to-hast@10.0.1: + version "10.0.1" + resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz#0cfc82089494c52d46eb0e3edb7a4eb2aea021eb" + integrity sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA== + dependencies: + "@types/mdast" "^3.0.0" + "@types/unist" "^2.0.0" + mdast-util-definitions "^4.0.0" + mdurl "^1.0.0" + unist-builder "^2.0.0" + unist-util-generated "^1.0.0" + unist-util-position "^3.0.0" + unist-util-visit "^2.0.0" + +mdast-util-to-string@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz#b8cfe6a713e1091cb5b728fc48885a4767f8b97b" + integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w== -graceful-fs@^4.1.11: - version "4.1.15" - 
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00" - integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA== +mdn-data@2.0.14: + version "2.0.14" + resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50" + integrity sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow== -har-schema@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" - integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= +mdurl@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e" + integrity sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g== -har-validator@~5.1.0: - version "5.1.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" - integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== + +memfs@^3.1.2, memfs@^3.4.3: + version "3.4.4" + resolved "https://registry.yarnpkg.com/memfs/-/memfs-3.4.4.tgz#e8973cd8060548916adcca58a248e7805c715e89" + integrity sha512-W4gHNUE++1oSJVn8Y68jPXi+mkx3fXR5ITE/Ubz6EQ3xRpCN5k2CQ4AUR8094Z7211F876TyoBACGsIveqgiGA== dependencies: - ajv "^6.5.5" - har-schema "^2.0.0" + fs-monkey "1.0.3" -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== -has-symbols@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" - integrity sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q= +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== -has-unicode@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" - integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= +merge2@^1.3.0, merge2@^1.4.1: + version "1.4.1" + resolved 
"https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== -has-value@^0.3.1: - version "0.3.1" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" - integrity sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8= - dependencies: - get-value "^2.0.3" - has-values "^0.1.4" - isobject "^2.0.0" +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== -has-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" - integrity sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc= +micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== dependencies: - get-value "^2.0.6" - has-values "^1.0.0" - isobject "^3.0.0" + braces "^3.0.2" + picomatch "^2.3.1" -has-values@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" - integrity sha1-bWHeldkd/Km5oCCJrThL/49it3E= +mime-db@1.52.0, "mime-db@>= 1.43.0 < 2": + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== -has-values@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" - integrity sha1-lbC2P+whRmGab+V/51Yo1aOe/k8= - dependencies: - is-number "^3.0.0" - kind-of "^4.0.0" +mime-db@~1.33.0: + version "1.33.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + integrity sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ== -hogan.js@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/hogan.js/-/hogan.js-3.0.2.tgz#4cd9e1abd4294146e7679e41d7898732b02c7bfd" - integrity sha1-TNnhq9QpQUbnZ55B14mHMrAse/0= +mime-types@2.1.18: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + integrity sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ== dependencies: - mkdirp "0.3.0" - nopt "1.0.10" + mime-db "~1.33.0" -html-encoding-sniffer@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8" - integrity 
sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw== +mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.17, mime-types@~2.1.24, mime-types@~2.1.34: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== dependencies: - whatwg-encoding "^1.0.1" + mime-db "1.52.0" -http-signature@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" - integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE= - dependencies: - assert-plus "^1.0.0" - jsprim "^1.2.2" - sshpk "^1.7.0" +mime@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== -iconv-lite@0.4.24, iconv-lite@^0.4.4: - version "0.4.24" - resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" - integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +mimic-response@^1.0.0, mimic-response@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" + integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== + +mini-create-react-context@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/mini-create-react-context/-/mini-create-react-context-0.4.1.tgz#072171561bfdc922da08a60c2197a497cc2d1d5e" + integrity sha512-YWCYEmd5CQeHGSAKrYvXgmzzkrvssZcuuQDDeqkT+PziKGMgE+0MCCtcKbROzocGBG1meBLl2FotlRwf4gAzbQ== dependencies: - safer-buffer ">= 2.1.2 < 3" + "@babel/runtime" "^7.12.1" + tiny-warning "^1.0.3" -ignore-walk@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" - integrity sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ== +mini-css-extract-plugin@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.6.0.tgz#578aebc7fc14d32c0ad304c2c34f08af44673f5e" + integrity sha512-ndG8nxCEnAemsg4FSgS+yNyHKgkTB4nPKqCOgh65j3/30qqC5RaSQQXMm++Y6sb6E1zRSxPkztj9fqxhS1Eo6w== dependencies: - minimatch "^3.0.4" + schema-utils "^4.0.0" + +minimalistic-assert@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" + integrity 
sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== -immutability-helper@^2.7.1: - version "2.9.1" - resolved "https://registry.yarnpkg.com/immutability-helper/-/immutability-helper-2.9.1.tgz#71c423ba387e67b6c6ceba0650572f2a2a6727df" - integrity sha512-r/RmRG8xO06s/k+PIaif2r5rGc3j4Yhc01jSBfwPCXDLYZwp/yxralI37Df1mwmuzcCsen/E/ITKcTEvc1PQmQ== +minimatch@3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== dependencies: - invariant "^2.2.0" + brace-expansion "^1.1.7" -inflight@^1.0.4: - version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= +minimatch@^3.0.4, minimatch@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== dependencies: - once "^1.3.0" - wrappy "1" + brace-expansion "^1.1.7" -inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" - integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= +minimist@^1.2.0, minimist@^1.2.5: + version "1.2.6" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" + integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -ini@~1.3.0: - version "1.3.5" - resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" - integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw== - -instantsearch.js@^2.8.0: - version "2.10.4" - resolved "https://registry.yarnpkg.com/instantsearch.js/-/instantsearch.js-2.10.4.tgz#446b1ce06eff52c86f195e761087950020cc7fee" - integrity sha512-hhGdYQJBejN4Xm1ElirNenD1BUsP6HE9HOoAII13psn1vXnKE89oQ7/3Z/fpVRBKM0P2KopXJZ5WVn2JFp7ZDQ== - dependencies: - algoliasearch "^3.27.0" - algoliasearch-helper "^2.26.0" - classnames "^2.2.5" - events "^1.1.0" - hogan.js "^3.0.2" - lodash "^4.17.5" - preact "^8.2.7" - preact-compat "^3.18.0" - preact-rheostat "^2.1.1" - prop-types "^15.5.10" - qs "^6.5.1" - to-factory "^1.0.0" - -invariant@^2.2.0, invariant@^2.2.2: - version "2.2.4" - resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" - integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== - dependencies: - loose-envify "^1.0.0" +mrmime@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mrmime/-/mrmime-1.0.1.tgz#5f90c825fad4bdd41dc914eff5d1a8cfdaf24f27" + integrity sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw== 
-invert-kv@^2.0.0: +ms@2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02" - integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA== + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== -ip-regex@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" - integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk= +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -is-accessor-descriptor@^0.1.6: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" - integrity sha1-qeEss66Nh2cn7u84Q/igiXtcmNY= - dependencies: - kind-of "^3.0.2" +ms@2.1.3: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== -is-accessor-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" - integrity sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ== +multicast-dns@^7.2.5: + version "7.2.5" + resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-7.2.5.tgz#77eb46057f4d7adbd16d9290fa7299f6fa64cced" + integrity sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg== dependencies: - kind-of "^6.0.0" + dns-packet "^5.2.2" + thunky "^1.0.2" -is-binary-path@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" - integrity sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg= - dependencies: - binary-extensions "^1.0.0" +nanoid@^3.3.4: + version "3.3.4" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab" + integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw== -is-buffer@^1.1.5, is-buffer@~1.1.1: - version "1.1.6" - resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" - integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity 
sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== -is-data-descriptor@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" - integrity sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y= - dependencies: - kind-of "^3.0.2" +neo-async@^2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" + integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== -is-data-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" - integrity sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ== +no-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-3.0.4.tgz#d361fd5c9800f558551a8369fc0dcd4662b6124d" + integrity sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg== dependencies: - kind-of "^6.0.0" + lower-case "^2.0.2" + tslib "^2.0.3" -is-descriptor@^0.1.0: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" - integrity sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg== +node-emoji@^1.10.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" + integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== dependencies: - is-accessor-descriptor "^0.1.6" - is-data-descriptor "^0.1.4" - kind-of "^5.0.0" + lodash "^4.17.21" -is-descriptor@^1.0.0, is-descriptor@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" - integrity sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg== +node-fetch@2.6.7: + version "2.6.7" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" + integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== dependencies: - is-accessor-descriptor "^1.0.0" - is-data-descriptor "^1.0.0" - kind-of "^6.0.2" - -is-extendable@^0.1.0, is-extendable@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" - integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= + whatwg-url "^5.0.0" -is-extendable@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" - integrity sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA== - dependencies: - is-plain-object "^2.0.4" +node-forge@^1: + version "1.3.1" + resolved 
"https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.1.tgz#be8da2af243b2417d5f646a770663a92b7e9ded3" + integrity sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA== -is-extglob@^2.1.0, is-extglob@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" - integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= +node-releases@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.5.tgz#280ed5bc3eba0d96ce44897d8aee478bfb3d9666" + integrity sha512-U9h1NLROZTq9uE1SNffn6WuPDg8icmi3ns4rEl/oTfIle4iLjTliCzgTsbaIFMq/Xn078/lfY/BL0GWZ+psK4Q== -is-fullwidth-code-point@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" - integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= - dependencies: - number-is-nan "^1.0.0" +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== -is-fullwidth-code-point@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" - integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA== -is-glob@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" - integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo= - dependencies: - is-extglob "^2.1.0" +normalize-url@^4.1.0: + version "4.5.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.1.tgz#0dd90cf1288ee1d1313b87081c9a5932ee48518a" + integrity sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA== -is-glob@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.0.tgz#9521c76845cc2610a85203ddf080a958c2ffabc0" - integrity sha1-lSHHaEXMJhCoUgPd8ICpWML/q8A= - dependencies: - is-extglob "^2.1.1" +normalize-url@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-6.1.0.tgz#40d0885b535deffe3f3147bec877d05fe4c5668a" + integrity sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A== -is-number@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" - integrity sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU= +npm-run-path@^4.0.1: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== dependencies: - kind-of "^3.0.2" + path-key "^3.0.0" -is-plain-obj@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" - integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= +nprogress@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/nprogress/-/nprogress-0.2.0.tgz#cb8f34c53213d895723fcbab907e9422adbcafb1" + integrity sha512-I19aIingLgR1fmhftnbWWO3dXc0hSxqHQHQb3H8m+K3TnEn/iSeTZZOyvKXWqQESMwuUVnatlCnZdLBZZt2VSA== -is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" - integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== +nth-check@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.1.1.tgz#c9eab428effce36cd6b92c924bdb000ef1f1ed1d" + integrity sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w== dependencies: - isobject "^3.0.1" - -is-stream@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" - integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= + boolbase "^1.0.0" -is-typedarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= +object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== -is-windows@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" - integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== +object-inspect@^1.9.0: + version "1.12.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.2.tgz#c0641f26394532f28ab8d796ab954e43c009a8ea" + integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ== -isarray@1.0.0, isarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" - integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= +object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== 
-isarray@^2.0.1: - version "2.0.4" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.4.tgz#38e7bcbb0f3ba1b7933c86ba1894ddfc3781bbb7" - integrity sha512-GMxXOiUirWg1xTKRipM0Ek07rX+ubx4nNVElTJdNLYmNO/2YrDkgJGw9CljXn+r4EWiDQg/8lsRdHyg2PJuUaA== +object.assign@^4.1.0: + version "4.1.2" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.2.tgz#0ed54a342eceb37b38ff76eb831a0e788cb63940" + integrity sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ== + dependencies: + call-bind "^1.0.0" + define-properties "^1.1.3" + has-symbols "^1.0.1" + object-keys "^1.1.1" -isexe@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= +obuf@^1.0.0, obuf@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e" + integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg== -isobject@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" - integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk= +on-finished@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== dependencies: - isarray "1.0.0" + ee-first "1.1.1" -isobject@^3.0.0, isobject@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" - integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= +on-headers@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== -isstream@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" -izimodal@^1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/izimodal/-/izimodal-1.5.1.tgz#a49145030d8d9edfc60dfc35ae1758e4cf8502f1" - integrity sha512-DqqGUd741tAqJnWZMQRgixlgtSB6tb/HhfddmlsFWE5P7sckF2SmKVyyttpAdBdN5LUzQiF/R/+IjJw0TS5oTA== +onetime@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" + integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== dependencies: - jquery 
"~2" + mimic-fn "^2.1.0" -jquery@^3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.5.0.tgz#9980b97d9e4194611c36530e7dc46a58d7340fc9" - integrity sha512-Xb7SVYMvygPxbFMpTFQiHh1J7HClEaThguL15N/Gg37Lri/qKyhRGZYzHRyLH8Stq3Aow0LsHO2O2ci86fCrNQ== +open@^8.0.9, open@^8.4.0: + version "8.4.0" + resolved "https://registry.yarnpkg.com/open/-/open-8.4.0.tgz#345321ae18f8138f82565a910fdc6b39e8c244f8" + integrity sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q== + dependencies: + define-lazy-prop "^2.0.0" + is-docker "^2.1.1" + is-wsl "^2.2.0" -jquery@~2: - version "2.2.4" - resolved "https://registry.yarnpkg.com/jquery/-/jquery-2.2.4.tgz#2c89d6889b5eac522a7eea32c14521559c6cbf02" - integrity sha1-LInWiJterFIqfuoywUUhVZxsvwI= +opener@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/opener/-/opener-1.5.2.tgz#5d37e1f35077b9dcac4301372271afdeb2a13598" + integrity sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A== -js-levenshtein@^1.1.3: - version "1.1.6" - resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" - integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g== +p-cancelable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" + integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== -"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" - integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== +p-limit@^2.0.0, p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" -jsbn@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= +p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" -jsdom@^11.11.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-11.12.0.tgz#1a80d40ddd378a1de59656e9e6dc5a3ba8657bc8" - integrity sha512-y8Px43oyiBM13Zc1z780FrfNLJCXTL40EWlty/LXUtcjykRBNgLlCjWXpfSPBl2iv+N7koQN+dvqszHZgT/Fjw== - dependencies: - abab "^2.0.0" - acorn "^5.5.3" - acorn-globals "^4.1.0" - array-equal "^1.0.0" - cssom ">= 0.3.2 < 0.4.0" - cssstyle "^1.0.0" - data-urls "^1.0.0" - domexception "^1.0.1" - escodegen "^1.9.1" - 
html-encoding-sniffer "^1.0.2" - left-pad "^1.3.0" - nwsapi "^2.0.7" - parse5 "4.0.0" - pn "^1.1.0" - request "^2.87.0" - request-promise-native "^1.0.5" - sax "^1.2.4" - symbol-tree "^3.2.2" - tough-cookie "^2.3.4" - w3c-hr-time "^1.0.1" - webidl-conversions "^4.0.2" - whatwg-encoding "^1.0.3" - whatwg-mimetype "^2.1.0" - whatwg-url "^6.4.1" - ws "^5.2.0" - xml-name-validator "^3.0.0" +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" -jsesc@^2.5.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" - integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" -jsesc@~0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" - integrity sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0= +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" -json-schema-traverse@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" - integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== +p-map@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" + integrity sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ== + dependencies: + aggregate-error "^3.0.0" -json-schema@0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" - integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM= +p-retry@^4.5.0: + version "4.6.2" + resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-4.6.2.tgz#9baae7184057edd4e17231cee04264106e092a16" + integrity sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ== + dependencies: + "@types/retry" "0.12.0" + retry "^0.13.1" -json-stringify-safe@~5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= +p-try@^2.0.0: + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== -json5@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.0.tgz#e7a0c62c48285c628d20a10b85c89bb807c32850" - integrity sha512-8Mh9h6xViijj36g7Dxi+Y4S6hNGV96vcJZr/SrlHh1LR/pEn/8j/+qIBbs44YKl69Lrfctp4QD+AdWLTMqEZAQ== +package-json@^6.3.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0" + integrity sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ== dependencies: - minimist "^1.2.0" + got "^9.6.0" + registry-auth-token "^4.0.0" + registry-url "^5.0.0" + semver "^6.2.0" -jsprim@^1.2.2: - version "1.4.1" - resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" - integrity sha1-MT5mvB5cwG5Di8G3SZwuXFastqI= +param-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-3.0.4.tgz#7d17fe4aa12bde34d4a77d91acfb6219caad01c5" + integrity sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A== dependencies: - assert-plus "1.0.0" - extsprintf "1.3.0" - json-schema "0.2.3" - verror "1.10.0" + dot-case "^3.0.4" + tslib "^2.0.3" -kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: - version "3.2.2" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" - integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== dependencies: - is-buffer "^1.1.5" + callsites "^3.0.0" -kind-of@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" - integrity sha1-IIE989cSkosgc3hpGkUGb65y3Vc= +parse-entities@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8" + integrity sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ== dependencies: - is-buffer "^1.1.5" + character-entities "^1.0.0" + character-entities-legacy "^1.0.0" + character-reference-invalid "^1.0.0" + is-alphanumerical "^1.0.0" + is-decimal "^1.0.0" + is-hexadecimal "^1.0.0" -kind-of@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" - integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw== +parse-json@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity 
sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" -kind-of@^6.0.0, kind-of@^6.0.2: - version "6.0.2" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" - integrity sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA== +parse-numeric-range@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/parse-numeric-range/-/parse-numeric-range-1.3.0.tgz#7c63b61190d61e4d53a1197f0c83c47bb670ffa3" + integrity sha512-twN+njEipszzlMJd4ONUYgSfZPDxgHhT9Ahed5uTigpQn90FggW4SA/AIPq/6a149fTbE9qBEcSwE3FAEp6wQQ== -lcid@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf" - integrity sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA== +parse5-htmlparser2-tree-adapter@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz#23c2cc233bcf09bb7beba8b8a69d46b08c62c2f1" + integrity sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g== dependencies: - invert-kv "^2.0.0" + domhandler "^5.0.2" + parse5 "^7.0.0" -left-pad@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/left-pad/-/left-pad-1.3.0.tgz#5b8a3a7765dfe001261dde915589e782f8c94d1e" - integrity sha512-XI5MPzVNApjAyhQzphX8BkmKsKUxD4LdyK24iZeQGinBN9yTQT3bFlCBy/aVx2HrNcqQGsdot8ghrjyrvMCoEA== +parse5@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178" + integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug== -levn@~0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" - integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= +parse5@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" + integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== + +parse5@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.0.0.tgz#51f74a5257f5fcc536389e8c2d0b3802e1bfa91a" + integrity sha512-y/t8IXSPWTuRZqXc0ajH/UwDj4mnqLEbSttNbThcFhGrZuOyoyvNBO85PBp2jQa55wY9d07PBNjsK8ZP3K5U6g== dependencies: - prelude-ls "~1.1.2" - type-check "~0.3.2" + entities "^4.3.0" -load-script@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/load-script/-/load-script-1.0.0.tgz#0491939e0bee5643ee494a7e3da3d2bac70c6ca4" - integrity sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ= +parseurl@~1.3.2, parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity 
sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== -locate-path@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" - integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== +pascal-case@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/pascal-case/-/pascal-case-3.1.2.tgz#b48e0ef2b98e205e7c1dae747d0b1508237660eb" + integrity sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g== dependencies: - p-locate "^3.0.0" - path-exists "^3.0.0" + no-case "^3.0.4" + tslib "^2.0.3" -lodash.sortby@^4.7.0: - version "4.7.0" - resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" - integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ== -lodash@^4.13.1, lodash@^4.17.10, lodash@^4.17.5: - version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" - integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== -loose-envify@^1.0.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" - integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== + +path-is-inside@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + integrity sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w== + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + 
+path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== + +path-to-regexp@2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-2.2.1.tgz#90b617025a16381a879bc82a38d4e8bdeb2bcf45" + integrity sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ== + +path-to-regexp@^1.7.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a" + integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA== dependencies: - js-tokens "^3.0.0 || ^4.0.0" + isarray "0.0.1" + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== -lory.js@^2.4.1: - version "2.5.3" - resolved "https://registry.yarnpkg.com/lory.js/-/lory.js-2.5.3.tgz#157d6bcf64105d56b1fdad763940e79912db19b4" - integrity sha512-9FKuaeLtSKupM9BNmcCY0W31yhloZv2vEMD/v0hnwsdajqzb8bQacD5ZxZw+WUD0dRAXM+qx65Vk1m++4qkcsQ== +picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== -map-age-cleaner@^0.1.1: - version "0.1.3" - resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" - integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w== +pkg-dir@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== dependencies: - p-defer "^1.0.0" - -map-cache@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" - integrity sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8= + find-up "^4.0.0" -map-visit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" - integrity sha1-7Nyo8TFE5mDxtb1B8S80edmN+48= +pkg-up@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-3.1.0.tgz#100ec235cc150e4fd42519412596a28512a0def5" + 
integrity sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA== dependencies: - object-visit "^1.0.0" + find-up "^3.0.0" -md5@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/md5/-/md5-2.2.1.tgz#53ab38d5fe3c8891ba465329ea23fac0540126f9" - integrity sha1-U6s41f48iJG6RlMp6iP6wFQBJvk= +postcss-calc@^8.2.3: + version "8.2.4" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-8.2.4.tgz#77b9c29bfcbe8a07ff6693dc87050828889739a5" + integrity sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q== dependencies: - charenc "~0.0.1" - crypt "~0.0.1" - is-buffer "~1.1.1" - -mem@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/mem/-/mem-4.1.0.tgz#aeb9be2d21f47e78af29e4ac5978e8afa2ca5b8a" - integrity sha512-I5u6Q1x7wxO0kdOpYBB28xueHADYps5uty/zg936CiG8NTe5sJL8EjrCuLneuDW3PlMdZBGDIn8BirEVdovZvg== - dependencies: - map-age-cleaner "^0.1.1" - mimic-fn "^1.0.0" - p-is-promise "^2.0.0" - -micromatch@^3.1.10, micromatch@^3.1.4: - version "3.1.10" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" - integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - braces "^2.3.1" - define-property "^2.0.2" - extend-shallow "^3.0.2" - extglob "^2.0.4" - fragment-cache "^0.2.1" - kind-of "^6.0.2" - nanomatch "^1.2.9" - object.pick "^1.3.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.2" - -mime-db@~1.37.0: - version "1.37.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.37.0.tgz#0b6a0ce6fdbe9576e25f1f2d2fde8830dc0ad0d8" - integrity sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg== + postcss-selector-parser "^6.0.9" + postcss-value-parser "^4.2.0" -mime-types@^2.1.12, mime-types@~2.1.19: - version "2.1.21" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.21.tgz#28995aa1ecb770742fe6ae7e58f9181c744b3f96" - integrity sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg== +postcss-colormin@^5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-5.3.0.tgz#3cee9e5ca62b2c27e84fce63affc0cfb5901956a" + integrity sha512-WdDO4gOFG2Z8n4P8TWBpshnL3JpmNmJwdnfP2gbk2qBA8PWwOYcmjmI/t3CmMeL72a7Hkd+x/Mg9O2/0rD54Pg== dependencies: - mime-db "~1.37.0" - -mimic-fn@^1.0.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" - integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + browserslist "^4.16.6" + caniuse-api "^3.0.0" + colord "^2.9.1" + postcss-value-parser "^4.2.0" -min-document@^2.19.0: - version "2.19.0" - resolved "https://registry.yarnpkg.com/min-document/-/min-document-2.19.0.tgz#7bd282e3f5842ed295bb748cdd9f1ffa2c824685" - integrity sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU= +postcss-convert-values@^5.1.2: + version "5.1.2" + resolved 
"https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-5.1.2.tgz#31586df4e184c2e8890e8b34a0b9355313f503ab" + integrity sha512-c6Hzc4GAv95B7suy4udszX9Zy4ETyMCgFPUDtWjdFTKH1SE9eFY/jEpHSwTH1QPuwxHpWslhckUQWbNRM4ho5g== dependencies: - dom-walk "^0.1.0" + browserslist "^4.20.3" + postcss-value-parser "^4.2.0" -minimatch@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== - dependencies: - brace-expansion "^1.1.7" +postcss-discard-comments@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz#8df5e81d2925af2780075840c1526f0660e53696" + integrity sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ== -minimist@0.0.8: - version "0.0.8" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" - integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= +postcss-discard-duplicates@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz#9eb4fe8456706a4eebd6d3b7b777d07bad03e848" + integrity sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw== -minimist@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" - integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ= +postcss-discard-empty@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz#e57762343ff7f503fe53fca553d18d7f0c369c6c" + integrity sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A== -minipass@^2.2.1, minipass@^2.3.4: - version "2.3.5" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.5.tgz#cacebe492022497f656b0f0f51e2682a9ed2d848" - integrity sha512-Gi1W4k059gyRbyVUZQ4mEqLm0YIUiGYfvxhF6SIlk3ui1WVxMTGfGdQ2SInh3PDrRTVvPKgULkpJtT4RH10+VA== - dependencies: - safe-buffer "^5.1.2" - yallist "^3.0.0" +postcss-discard-overridden@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz#7e8c5b53325747e9d90131bb88635282fb4a276e" + integrity sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw== -minizlib@^1.1.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.2.1.tgz#dd27ea6136243c7c880684e8672bb3a45fd9b614" - integrity sha512-7+4oTUOWKg7AuL3vloEWekXY2/D20cevzsrNT2kGWm+39J9hGTCBv8VI5Pm5lXZ/o3/mdR4f8rflAPhnQb8mPA== +postcss-discard-unused@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz#8974e9b143d887677304e558c1166d3762501142" + integrity sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw== 
dependencies: - minipass "^2.2.1" + postcss-selector-parser "^6.0.5" -mixin-deep@^1.2.0: - version "1.3.2" - resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566" - integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA== +postcss-loader@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-7.0.0.tgz#367d10eb1c5f1d93700e6b399683a6dc7c3af396" + integrity sha512-IDyttebFzTSY6DI24KuHUcBjbAev1i+RyICoPEWcAstZsj03r533uMXtDn506l6/wlsRYiS5XBdx7TpccCsyUg== dependencies: - for-in "^1.0.2" - is-extendable "^1.0.1" + cosmiconfig "^7.0.0" + klona "^2.0.5" + semver "^7.3.7" -mkdirp@0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e" - integrity sha1-G79asbqCevI1dRQ0kEJkVfSB/h4= - -mkdirp@^0.5.0, mkdirp@^0.5.1: - version "0.5.1" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" - integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= +postcss-merge-idents@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz#7753817c2e0b75d0853b56f78a89771e15ca04a1" + integrity sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw== dependencies: - minimist "0.0.8" + cssnano-utils "^3.1.0" + postcss-value-parser "^4.2.0" -moment-timezone@^0.5.26: - version "0.5.27" - resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.27.tgz#73adec8139b6fe30452e78f210f27b1f346b8877" - integrity sha512-EIKQs7h5sAsjhPCqN6ggx6cEbs94GK050254TIJySD1bzoM5JTYDwAU1IoVOeTOL6Gm27kYJ51/uuvq1kIlrbw== +postcss-merge-longhand@^5.1.5: + version "5.1.5" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-5.1.5.tgz#b0e03bee3b964336f5f33c4fc8eacae608e91c05" + integrity sha512-NOG1grw9wIO+60arKa2YYsrbgvP6tp+jqc7+ZD5/MalIw234ooH2C6KlR6FEn4yle7GqZoBxSK1mLBE9KPur6w== dependencies: - moment ">= 2.9.0" - -"moment@>= 2.9.0", moment@^2.20.1: - version "2.24.0" - resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b" - integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg== - -ms@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" - integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + postcss-value-parser "^4.2.0" + stylehacks "^5.1.0" -ms@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" - integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== - -nan@^2.9.2: - version "2.12.1" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.12.1.tgz#7b1aa193e9aa86057e3c7bbd0ac448e770925552" - integrity sha512-JY7V6lRkStKcKTvHO5NVSQRv+RV+FIL5pvDoLiAtSL9pKlC5x9PKQcZDsq7m4FO4d57mkhC6Z+QhAh3Jdk5JFw== - -nanomatch@^1.2.9: - version 
"1.2.13" - resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" - integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - define-property "^2.0.2" - extend-shallow "^3.0.2" - fragment-cache "^0.2.1" - is-windows "^1.0.2" - kind-of "^6.0.2" - object.pick "^1.3.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" +postcss-merge-rules@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-5.1.2.tgz#7049a14d4211045412116d79b751def4484473a5" + integrity sha512-zKMUlnw+zYCWoPN6yhPjtcEdlJaMUZ0WyVcxTAmw3lkkN/NDMRkOkiuctQEoWAOvH7twaxUUdvBWl0d4+hifRQ== + dependencies: + browserslist "^4.16.6" + caniuse-api "^3.0.0" + cssnano-utils "^3.1.0" + postcss-selector-parser "^6.0.5" -needle@^2.2.1: - version "2.2.4" - resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.4.tgz#51931bff82533b1928b7d1d69e01f1b00ffd2a4e" - integrity sha512-HyoqEb4wr/rsoaIDfTH2aVL9nWtQqba2/HvMv+++m8u0dz808MaagKILxtfeSN7QU7nvbQ79zk3vYOJp9zsNEA== +postcss-minify-font-values@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz#f1df0014a726083d260d3bd85d7385fb89d1f01b" + integrity sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA== dependencies: - debug "^2.1.2" - iconv-lite "^0.4.4" - sax "^1.2.4" + postcss-value-parser "^4.2.0" -nice-try@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" - integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== - -node-pre-gyp@^0.10.0: - version "0.10.3" - resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.3.tgz#3070040716afdc778747b61b6887bf78880b80fc" - integrity sha512-d1xFs+C/IPS8Id0qPTZ4bUT8wWryfR/OzzAFxweG+uLN85oPzyo2Iw6bVlLQ/JOdgNonXLCoRyqDzDWq4iw72A== - dependencies: - detect-libc "^1.0.2" - mkdirp "^0.5.1" - needle "^2.2.1" - nopt "^4.0.1" - npm-packlist "^1.1.6" - npmlog "^4.0.2" - rc "^1.2.7" - rimraf "^2.6.1" - semver "^5.3.0" - tar "^4" - -node-releases@^1.1.3: - version "1.1.7" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.7.tgz#b09a10394d0ed8f7778f72bb861dde68b146303b" - integrity sha512-bKdrwaqJUPHqlCzDD7so/R+Nk0jGv9a11ZhLrD9f6i947qGLrGAhU3OxRENa19QQmwzGy/g6zCDEuLGDO8HPvA== - dependencies: - semver "^5.3.0" - -nopt@1.0.10: - version "1.0.10" - resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee" - integrity sha1-bd0hvSoxQXuScn3Vhfim83YI6+4= +postcss-minify-gradients@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz#f1fe1b4f498134a5068240c2f25d46fcd236ba2c" + integrity sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw== dependencies: - abbrev "1" + colord "^2.9.1" + cssnano-utils "^3.1.0" + postcss-value-parser "^4.2.0" 
-nopt@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" - integrity sha1-0NRoWv1UFRk8jHUFYC0NF81kR00= +postcss-minify-params@^5.1.3: + version "5.1.3" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-5.1.3.tgz#ac41a6465be2db735099bbd1798d85079a6dc1f9" + integrity sha512-bkzpWcjykkqIujNL+EVEPOlLYi/eZ050oImVtHU7b4lFS82jPnsCb44gvC6pxaNt38Els3jWYDHTjHKf0koTgg== dependencies: - abbrev "1" - osenv "^0.1.4" + browserslist "^4.16.6" + cssnano-utils "^3.1.0" + postcss-value-parser "^4.2.0" -normalize-path@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" - integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk= +postcss-minify-selectors@^5.2.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz#d4e7e6b46147b8117ea9325a915a801d5fe656c6" + integrity sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg== dependencies: - remove-trailing-separator "^1.0.1" + postcss-selector-parser "^6.0.5" -normalize-path@^3.0.0: +postcss-modules-extract-imports@^3.0.0: version "3.0.0" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" - integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== - -npm-bundled@^1.0.1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.6.tgz#e7ba9aadcef962bb61248f91721cd932b3fe6bdd" - integrity sha512-8/JCaftHwbd//k6y2rEWp6k1wxVfpFzB6t1p825+cUb7Ym2XQfhwIC5KwhrvzZRJu+LtDE585zVaS32+CGtf0g== + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz#cda1f047c0ae80c97dbe28c3e76a43b88025741d" + integrity sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw== -npm-packlist@^1.1.6: - version "1.3.0" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.3.0.tgz#7f01e8e44408341379ca98cfd756e7b29bd2626c" - integrity sha512-qPBc6CnxEzpOcc4bjoIBJbYdy0D/LFFPUdxvfwor4/w3vxeE0h6TiOVurCEPpQ6trjN77u/ShyfeJGsbAfB3dA== +postcss-modules-local-by-default@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz#ebbb54fae1598eecfdf691a02b3ff3b390a5a51c" + integrity sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ== dependencies: - ignore-walk "^3.0.1" - npm-bundled "^1.0.1" + icss-utils "^5.0.0" + postcss-selector-parser "^6.0.2" + postcss-value-parser "^4.1.0" -npm-run-path@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" - integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8= +postcss-modules-scope@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz#9ef3151456d3bbfa120ca44898dfca6f2fa01f06" + integrity sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg== dependencies: - path-key "^2.0.0" + postcss-selector-parser "^6.0.4" -npmlog@^4.0.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" - integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== +postcss-modules-values@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz#d7c5e7e68c3bb3c9b27cbf48ca0bb3ffb4602c9c" + integrity sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ== dependencies: - are-we-there-yet "~1.1.2" - console-control-strings "~1.1.0" - gauge "~2.7.3" - set-blocking "~2.0.0" - -number-is-nan@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" - integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= - -nwsapi@^2.0.7: - version "2.1.0" - resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.1.0.tgz#781065940aed90d9bb01ca5d0ce0fcf81c32712f" - integrity sha512-ZG3bLAvdHmhIjaQ/Db1qvBxsGvFMLIRpQszyqbg31VJ53UP++uZX1/gf3Ut96pdwN9AuDwlMqIYLm0UPCdUeHg== + icss-utils "^5.0.0" -oauth-sign@~0.9.0: - version "0.9.0" - resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" - integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== +postcss-normalize-charset@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz#9302de0b29094b52c259e9b2cf8dc0879879f0ed" + integrity sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg== -object-assign@^4.1.0, object-assign@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" - integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= +postcss-normalize-display-values@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz#72abbae58081960e9edd7200fcf21ab8325c3da8" + integrity sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA== + dependencies: + postcss-value-parser "^4.2.0" -object-copy@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" - integrity sha1-fn2Fi3gb18mRpBupde04EnVOmYw= +postcss-normalize-positions@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-5.1.0.tgz#902a7cb97cf0b9e8b1b654d4a43d451e48966458" + integrity 
sha512-8gmItgA4H5xiUxgN/3TVvXRoJxkAWLW6f/KKhdsH03atg0cB8ilXnrB5PpSshwVu/dD2ZsRFQcR1OEmSBDAgcQ== dependencies: - copy-descriptor "^0.1.0" - define-property "^0.2.5" - kind-of "^3.0.3" + postcss-value-parser "^4.2.0" -object-keys@^1.0.11, object-keys@^1.0.12: - version "1.1.0" - resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.0.tgz#11bd22348dd2e096a045ab06f6c85bcc340fa032" - integrity sha512-6OO5X1+2tYkNyNEx6TsCxEqFfRWaqx6EtMiSbGrw8Ob8v9Ne+Hl8rBAgLBZn5wjEz3s/s6U1WXFUFOcxxAwUpg== +postcss-normalize-repeat-style@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.0.tgz#f6d6fd5a54f51a741cc84a37f7459e60ef7a6398" + integrity sha512-IR3uBjc+7mcWGL6CtniKNQ4Rr5fTxwkaDHwMBDGGs1x9IVRkYIT/M4NelZWkAOBdV6v3Z9S46zqaKGlyzHSchw== + dependencies: + postcss-value-parser "^4.2.0" -object-keys@~1.0.0: - version "1.0.12" - resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" - integrity sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag== +postcss-normalize-string@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz#411961169e07308c82c1f8c55f3e8a337757e228" + integrity sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w== + dependencies: + postcss-value-parser "^4.2.0" -object-visit@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" - integrity sha1-95xEk68MU3e1n+OdOV5BBC3QRbs= +postcss-normalize-timing-functions@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz#d5614410f8f0b2388e9f240aa6011ba6f52dafbb" + integrity sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg== dependencies: - isobject "^3.0.0" + postcss-value-parser "^4.2.0" -object.assign@^4.0.4: - version "4.1.0" - resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" - integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== +postcss-normalize-unicode@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.0.tgz#3d23aede35e160089a285e27bf715de11dc9db75" + integrity sha512-J6M3MizAAZ2dOdSjy2caayJLQT8E8K9XjLce8AUQMwOrCvjCHv24aLC/Lps1R1ylOfol5VIDMaM/Lo9NGlk1SQ== dependencies: - define-properties "^1.1.2" - function-bind "^1.1.1" - has-symbols "^1.0.0" - object-keys "^1.0.11" + browserslist "^4.16.6" + postcss-value-parser "^4.2.0" -object.pick@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" - integrity sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c= +postcss-normalize-url@^5.1.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz#ed9d88ca82e21abef99f743457d3729a042adcdc" + integrity sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew== dependencies: - isobject "^3.0.1" + normalize-url "^6.0.1" + postcss-value-parser "^4.2.0" -once@^1.3.0, once@^1.3.1, once@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= +postcss-normalize-whitespace@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz#08a1a0d1ffa17a7cc6efe1e6c9da969cc4493cfa" + integrity sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA== dependencies: - wrappy "1" + postcss-value-parser "^4.2.0" -optionator@^0.8.1: - version "0.8.2" - resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" - integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q= +postcss-ordered-values@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-5.1.2.tgz#daffacd4abf327d52d5ac570b59dfbcf4b836614" + integrity sha512-wr2avRbW4HS2XE2ZCqpfp4N/tDC6GZKZ+SVP8UBTOVS8QWrc4TD8MYrebJrvVVlGPKszmiSCzue43NDiVtgDmg== dependencies: - deep-is "~0.1.3" - fast-levenshtein "~2.0.4" - levn "~0.3.0" - prelude-ls "~1.1.2" - type-check "~0.3.2" - wordwrap "~1.0.0" + cssnano-utils "^3.1.0" + postcss-value-parser "^4.2.0" -os-homedir@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" - integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= +postcss-reduce-idents@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz#c89c11336c432ac4b28792f24778859a67dfba95" + integrity sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg== + dependencies: + postcss-value-parser "^4.2.0" -os-locale@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a" - integrity sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q== +postcss-reduce-initial@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-5.1.0.tgz#fc31659ea6e85c492fb2a7b545370c215822c5d6" + integrity sha512-5OgTUviz0aeH6MtBjHfbr57tml13PuedK/Ecg8szzd4XRMbYxH4572JFG067z+FqBIf6Zp/d+0581glkvvWMFw== dependencies: - execa "^1.0.0" - lcid "^2.0.0" - mem "^4.0.0" + browserslist "^4.16.6" + caniuse-api "^3.0.0" -os-tmpdir@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" - integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= +postcss-reduce-transforms@^5.1.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz#333b70e7758b802f3dd0ddfe98bb1ccfef96b6e9" + integrity sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ== + dependencies: + postcss-value-parser "^4.2.0" -osenv@^0.1.4: - version "0.1.5" - resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" - integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== +postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.4, postcss-selector-parser@^6.0.5, postcss-selector-parser@^6.0.9: + version "6.0.10" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz#79b61e2c0d1bfc2602d549e11d0876256f8df88d" + integrity sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w== dependencies: - os-homedir "^1.0.0" - os-tmpdir "^1.0.0" + cssesc "^3.0.0" + util-deprecate "^1.0.2" -output-file-sync@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/output-file-sync/-/output-file-sync-2.0.1.tgz#f53118282f5f553c2799541792b723a4c71430c0" - integrity sha512-mDho4qm7WgIXIGf4eYU1RHN2UU5tPfVYVSRwDJw0uTmj35DQUt/eNp19N7v6T3SrR0ESTEf2up2CGO73qI35zQ== +postcss-sort-media-queries@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/postcss-sort-media-queries/-/postcss-sort-media-queries-4.2.1.tgz#a99bae69ef1098ee3b64a5fa94d258ec240d0355" + integrity sha512-9VYekQalFZ3sdgcTjXMa0dDjsfBVHXlraYJEMiOJ/2iMmI2JGCMavP16z3kWOaRu8NSaJCTgVpB/IVpH5yT9YQ== dependencies: - graceful-fs "^4.1.11" - is-plain-obj "^1.1.0" - mkdirp "^0.5.1" + sort-css-media-queries "2.0.4" -p-defer@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" - integrity sha1-n26xgvbJqozXQwBKfU+WsZaw+ww= +postcss-svgo@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-5.1.0.tgz#0a317400ced789f233a28826e77523f15857d80d" + integrity sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA== + dependencies: + postcss-value-parser "^4.2.0" + svgo "^2.7.0" -p-finally@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" - integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= +postcss-unique-selectors@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz#a9f273d1eacd09e9aa6088f4b0507b18b1b541b6" + integrity sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA== + dependencies: + postcss-selector-parser "^6.0.5" -p-is-promise@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.0.0.tgz#7554e3d572109a87e1f3f53f6a7d85d1b194f4c5" - integrity sha512-pzQPhYMCAgLAKPWD2jC3Se9fEfrD9npNos0y150EeqZll7akhEgGhTW/slB6lHku8AvYGiJ+YJ5hfHKePPgFWg== +postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: + version "4.2.0" + 
resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" + integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -p-limit@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.1.0.tgz#1d5a0d20fb12707c758a655f6bbc4386b5930d68" - integrity sha512-NhURkNcrVB+8hNfLuysU8enY5xn2KXphsHBaC2YmRNTZRc7RWusw6apSpdEj3jo4CMb6W9nrF6tTnsJsJeyu6g== - dependencies: - p-try "^2.0.0" +postcss-zindex@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-5.1.0.tgz#4a5c7e5ff1050bd4c01d95b1847dfdcc58a496ff" + integrity sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A== -p-locate@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" - integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== +postcss@^8.3.11, postcss@^8.4.13, postcss@^8.4.14, postcss@^8.4.7: + version "8.4.14" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.14.tgz#ee9274d5622b4858c1007a74d76e42e56fd21caf" + integrity sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig== dependencies: - p-limit "^2.0.0" + nanoid "^3.3.4" + picocolors "^1.0.0" + source-map-js "^1.0.2" -p-try@^2.0.0: +prepend-http@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.0.0.tgz#85080bb87c64688fa47996fe8f7dfbe8211760b1" - integrity sha512-hMp0onDKIajHfIkdRk3P4CdCmErkYAxxDtP3Wx/4nZ3aGlau2VKh3mZpcuFkH27WQkL/3WBCPOktzA9ZOAnMQQ== + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA== -parse5@4.0.0: +pretty-error@^4.0.0: version "4.0.0" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-4.0.0.tgz#6d78656e3da8d78b4ec0b906f7c08ef1dfe3f608" - integrity sha512-VrZ7eOd3T1Fk4XWNXMgiGBK/z0MG48BWG2uQNU4I72fkQuKUTZpl+u9k+CxEG0twMVzSmXEEz12z5Fnw1jIQFA== - -pascalcase@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" - integrity sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ= + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-4.0.0.tgz#90a703f46dd7234adb46d0f84823e9d1cb8f10d6" + integrity sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw== + dependencies: + lodash "^4.17.20" + renderkid "^3.0.0" -path-dirname@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" - integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA= +pretty-time@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/pretty-time/-/pretty-time-1.1.0.tgz#ffb7429afabb8535c346a34e41873adf3d74dd0e" + 
integrity sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA== -path-exists@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" - integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= +prism-react-renderer@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/prism-react-renderer/-/prism-react-renderer-1.3.3.tgz#9b5a4211a6756eee3c96fee9a05733abc0b0805c" + integrity sha512-Viur/7tBTCH2HmYzwCHmt2rEFn+rdIWNIINXyg0StiISbDiIhHKhrFuEK8eMkKgvsIYSjgGqy/hNyucHp6FpoQ== -path-is-absolute@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= +prismjs@^1.28.0: + version "1.28.0" + resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.28.0.tgz#0d8f561fa0f7cf6ebca901747828b149147044b6" + integrity sha512-8aaXdYvl1F7iC7Xm1spqSaY/OJBpYW3v+KJ+F17iYxvdc8sfjW194COK5wVhMZX45tGteiBQgdvD/nhxcRwylw== -path-key@^2.0.0, path-key@^2.0.1: +process-nextick-args@~2.0.0: version "2.0.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" - integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A= - -path-parse@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" - integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== -performance-now@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" - integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= - -pn@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/pn/-/pn-1.1.0.tgz#e2f4cef0e219f463c179ab37463e4e1ecdccbafb" - integrity sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA== - -posix-character-classes@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" - integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs= - -preact-compat@^3.17.0, preact-compat@^3.18.0: - version "3.18.4" - resolved "https://registry.yarnpkg.com/preact-compat/-/preact-compat-3.18.4.tgz#fbe76ddd30356c68e3ccde608107104946f2cf8d" - integrity sha512-aR5CvCIDerE2Y201ERVkWQdTAQKhKGNYujEk4tbyfQDInFTrnCCa3KCeGtULZrwy0PNRBjdQa2/Za7qv7ALNFg== +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== dependencies: - 
immutability-helper "^2.7.1" - preact-render-to-string "^3.8.2" - preact-transition-group "^1.1.1" - prop-types "^15.6.2" - standalone-react-addons-pure-render-mixin "^0.1.1" + asap "~2.0.3" -preact-render-to-string@^3.8.2: - version "3.8.2" - resolved "https://registry.yarnpkg.com/preact-render-to-string/-/preact-render-to-string-3.8.2.tgz#bd72964d705a57da3a9e72098acaa073dd3ceff9" - integrity sha512-przuZPajiurStGgxMoJP0EJeC4xj5CgHv+M7GfF3YxAdhGgEWAkhOSE0xympAFN20uMayntBZpttIZqqLl77fw== +prompts@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" + integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== dependencies: - pretty-format "^3.5.1" + kleur "^3.0.3" + sisteransi "^1.0.5" -preact-rheostat@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/preact-rheostat/-/preact-rheostat-2.1.1.tgz#45fcb4c2f4f7beb6dbd5e0f18f744655fc16ac7c" - integrity sha512-d03JgkpbjknALYl+zfNiJQ60sFd4A0YjnLCe/DB+rqKQck7jXpsW9RqSN0R50/lV8fEezhVCjq2WMPDDOKmwaA== +prop-types@^15.6.2, prop-types@^15.7.2: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== dependencies: - object.assign "^4.0.4" - preact "^8.2.5" - preact-compat "^3.17.0" - prop-types "^15.5.10" - -preact-transition-group@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/preact-transition-group/-/preact-transition-group-1.1.1.tgz#f0a49327ea515ece34ea2be864c4a7d29e5d6e10" - integrity sha1-8KSTJ+pRXs406ivoZMSn0p5dbhA= - -preact@^8.2.5, preact@^8.2.7: - version "8.4.2" - resolved "https://registry.yarnpkg.com/preact/-/preact-8.4.2.tgz#1263b974a17d1ea80b66590e41ef786ced5d6a23" - integrity sha512-TsINETWiisfB6RTk0wh3/mvxbGRvx+ljeBccZ4Z6MPFKgu/KFGyf2Bmw3Z/jlXhL5JlNKY6QAbA9PVyzIy9//A== - -prelude-ls@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" - integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= - -pretty-format@^3.5.1: - version "3.8.0" - resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-3.8.0.tgz#bfbed56d5e9a776645f4b1ff7aa1a3ac4fa3c385" - integrity sha1-v77VbV6ad2ZF9LH/eqGjrE+jw4U= - -private@^0.1.6: - version "0.1.8" - resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" - integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg== - -process-nextick-args@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" - integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw== - -process@~0.5.1: - version "0.5.2" - resolved "https://registry.yarnpkg.com/process/-/process-0.5.2.tgz#1638d8a8e34c2f440a91db95ab9aeb677fc185cf" - integrity 
sha1-FjjYqONML0QKkduVq5rrZ3/Bhc8= + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.13.1" -prop-types@^15.5.10, prop-types@^15.6.2: - version "15.7.1" - resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.1.tgz#2fa61e0a699d428b40320127733ee2931f05d9d1" - integrity sha512-f8Lku2z9kERjOCcnDOPm68EBJAO2K00Q5mSgPAUE/gJuBgsYLbVy6owSrtcHj90zt8PvW+z0qaIIgsIhHOa1Qw== +property-information@^5.0.0, property-information@^5.3.0: + version "5.6.0" + resolved "https://registry.yarnpkg.com/property-information/-/property-information-5.6.0.tgz#61675545fb23002f245c6540ec46077d4da3ed69" + integrity sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA== dependencies: - object-assign "^4.1.1" - react-is "^16.8.1" + xtend "^4.0.0" -psl@^1.1.24, psl@^1.1.28: - version "1.1.31" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.1.31.tgz#e9aa86d0101b5b105cbe93ac6b784cd547276184" - integrity sha512-/6pt4+C+T+wZUieKR620OpzN/LlnNKuWjy1iFLQ/UG35JqHlR/89MP1d96dUfkf6Dne3TuLQzOYEYshJ+Hx8mw== +proxy-addr@~2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" + integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== + dependencies: + forwarded "0.2.0" + ipaddr.js "1.9.1" pump@^3.0.0: version "3.0.0" @@ -2607,36 +5789,75 @@ pump@^3.0.0: end-of-stream "^1.1.0" once "^1.3.1" -punycode@^1.4.1: +punycode@^1.3.2: version "1.4.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" - integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= + integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== -punycode@^2.1.0, punycode@^2.1.1: +punycode@^2.1.0: version "2.1.1" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== -qs@^6.5.1: - version "6.6.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.6.0.tgz#a99c0f69a8d26bf7ef012f871cdabb0aee4424c2" - integrity sha512-KIJqT9jQJDQx5h5uAVPimw6yVg2SekOKu959OCtktD3FjzbpvaPr8i4zzg07DOMz+igA4W/aNM7OV8H37pFYfA== +pupa@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/pupa/-/pupa-2.1.1.tgz#f5e8fd4afc2c5d97828faa523549ed8744a20d62" + integrity sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A== + dependencies: + escape-goat "^2.0.0" -qs@~6.5.2: - version "6.5.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" - integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== +pure-color@^1.2.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/pure-color/-/pure-color-1.3.0.tgz#1fe064fb0ac851f0de61320a8bf796836422f33e" + integrity sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA== -querystring-es3@^0.2.1: - version "0.2.1" - resolved 
"https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" - integrity sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM= +qs@6.10.3: + version "6.10.3" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.10.3.tgz#d6cde1b2ffca87b5aa57889816c5f81535e22e8e" + integrity sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ== + dependencies: + side-channel "^1.0.4" -"rancher-website-theme@https://github.com/rancherlabs/website-theme.git": - version "1.0.4" - resolved "https://github.com/rancherlabs/website-theme.git#008d29972e5c59fa3de46b54c71ffc46b27e395f" +queue-microtask@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +queue@6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/queue/-/queue-6.0.2.tgz#b91525283e2315c7553d2efa18d83e76432fed65" + integrity sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA== + dependencies: + inherits "~2.0.3" + +randombytes@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== + dependencies: + safe-buffer "^5.1.0" + +range-parser@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + integrity sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A== + +range-parser@^1.2.1, range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +raw-body@2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" + integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== + dependencies: + bytes "3.1.2" + http-errors "2.0.0" + iconv-lite "0.4.24" + unpipe "1.0.0" -rc@^1.2.7: +rc@^1.2.8: version "1.2.8" resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== @@ -2646,15 +5867,160 @@ rc@^1.2.7: minimist "^1.2.0" strip-json-comments "~2.0.1" -react-is@^16.8.1: - version "16.8.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.8.1.tgz#a80141e246eb894824fb4f2901c0c50ef31d4cdb" - integrity sha512-ioMCzVDWvCvKD8eeT+iukyWrBGrA3DiFYkXfBsVYIRdaREZuBjENG+KjrikavCLasozqRWTwFUagU/O4vPpRMA== +react-base16-styling@^0.6.0: + version "0.6.0" + resolved 
"https://registry.yarnpkg.com/react-base16-styling/-/react-base16-styling-0.6.0.tgz#ef2156d66cf4139695c8a167886cb69ea660792c" + integrity sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ== + dependencies: + base16 "^1.0.0" + lodash.curry "^4.0.1" + lodash.flow "^3.3.0" + pure-color "^1.2.0" + +react-dev-utils@^12.0.1: + version "12.0.1" + resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-12.0.1.tgz#ba92edb4a1f379bd46ccd6bcd4e7bc398df33e73" + integrity sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ== + dependencies: + "@babel/code-frame" "^7.16.0" + address "^1.1.2" + browserslist "^4.18.1" + chalk "^4.1.2" + cross-spawn "^7.0.3" + detect-port-alt "^1.1.6" + escape-string-regexp "^4.0.0" + filesize "^8.0.6" + find-up "^5.0.0" + fork-ts-checker-webpack-plugin "^6.5.0" + global-modules "^2.0.0" + globby "^11.0.4" + gzip-size "^6.0.0" + immer "^9.0.7" + is-root "^2.1.0" + loader-utils "^3.2.0" + open "^8.4.0" + pkg-up "^3.1.0" + prompts "^2.4.2" + react-error-overlay "^6.0.11" + recursive-readdir "^2.2.2" + shell-quote "^1.7.3" + strip-ansi "^6.0.1" + text-table "^0.2.0" + +react-dom@^17.0.2: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-17.0.2.tgz#ecffb6845e3ad8dbfcdc498f0d0a939736502c23" + integrity sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler "^0.20.2" + +react-error-overlay@^6.0.11: + version "6.0.11" + resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.11.tgz#92835de5841c5cf08ba00ddd2d677b6d17ff9adb" + integrity sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg== + +react-fast-compare@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/react-fast-compare/-/react-fast-compare-3.2.0.tgz#641a9da81b6a6320f270e89724fb45a0b39e43bb" + integrity sha512-rtGImPZ0YyLrscKI9xTpV8psd6I8VAtjKCzQDlzyDvqJA8XOW78TXYQwNRNd8g8JZnDu8q9Fu/1v4HPAVwVdHA== + +react-helmet-async@*, react-helmet-async@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/react-helmet-async/-/react-helmet-async-1.3.0.tgz#7bd5bf8c5c69ea9f02f6083f14ce33ef545c222e" + integrity sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg== + dependencies: + "@babel/runtime" "^7.12.5" + invariant "^2.2.4" + prop-types "^15.7.2" + react-fast-compare "^3.2.0" + shallowequal "^1.1.0" + +react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-json-view@^1.21.3: + version "1.21.3" + resolved "https://registry.yarnpkg.com/react-json-view/-/react-json-view-1.21.3.tgz#f184209ee8f1bf374fb0c41b0813cff54549c475" + integrity sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw== + dependencies: + flux "^4.0.1" + react-base16-styling "^0.6.0" + 
react-lifecycles-compat "^3.0.4" + react-textarea-autosize "^8.3.2" + +react-lifecycles-compat@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" + integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== + +react-loadable-ssr-addon-v5-slorber@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz#2cdc91e8a744ffdf9e3556caabeb6e4278689883" + integrity sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A== + dependencies: + "@babel/runtime" "^7.10.3" + +react-router-config@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/react-router-config/-/react-router-config-5.1.1.tgz#0f4263d1a80c6b2dc7b9c1902c9526478194a988" + integrity sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg== + dependencies: + "@babel/runtime" "^7.1.2" + +react-router-dom@^5.3.3: + version "5.3.3" + resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.3.3.tgz#8779fc28e6691d07afcaf98406d3812fe6f11199" + integrity sha512-Ov0tGPMBgqmbu5CDmN++tv2HQ9HlWDuWIIqn4b88gjlAN5IHI+4ZUZRcpz9Hl0azFIwihbLDYw1OiHGRo7ZIng== + dependencies: + "@babel/runtime" "^7.12.13" + history "^4.9.0" + loose-envify "^1.3.1" + prop-types "^15.6.2" + react-router "5.3.3" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-router@5.3.3, react-router@^5.3.3: + version "5.3.3" + resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.3.3.tgz#8e3841f4089e728cf82a429d92cdcaa5e4a3a288" + integrity sha512-mzQGUvS3bM84TnbtMYR8ZjKnuPJ71IjSzR+DE6UkUqvN4czWIqEs17yLL8xkAycv4ev0AiN+IGrWu88vJs/p2w== + dependencies: + "@babel/runtime" "^7.12.13" + history "^4.9.0" + hoist-non-react-statics "^3.1.0" + loose-envify "^1.3.1" + mini-create-react-context "^0.4.0" + path-to-regexp "^1.7.0" + prop-types "^15.6.2" + react-is "^16.6.0" + tiny-invariant "^1.0.2" + tiny-warning "^1.0.0" + +react-textarea-autosize@^8.3.2: + version "8.3.4" + resolved "https://registry.yarnpkg.com/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz#270a343de7ad350534141b02c9cb78903e553524" + integrity sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ== + dependencies: + "@babel/runtime" "^7.10.2" + use-composed-ref "^1.3.0" + use-latest "^1.2.1" + +react@^17.0.2: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react/-/react-17.0.2.tgz#d0b5cc516d29eb3eee383f75b62864cfb6800037" + integrity sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" -readable-stream@^2.0.2, readable-stream@^2.0.6: - version "2.3.6" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" - integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== +readable-stream@^2.0.1: + version "2.3.7" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" + integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== dependencies: core-util-is "~1.0.0" inherits "~2.0.3" @@ -2664,191 +6030,297 @@ readable-stream@^2.0.2, readable-stream@^2.0.6: string_decoder "~1.1.1" util-deprecate "~1.0.1" -readdirp@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525" - integrity sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ== +readable-stream@^3.0.6: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== dependencies: - graceful-fs "^4.1.11" - micromatch "^3.1.10" - readable-stream "^2.0.2" + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" -reduce@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/reduce/-/reduce-1.0.1.tgz#14fa2e5ff1fc560703a020cbb5fbaab691565804" - integrity sha1-FPouX/H8VgcDoCDLtfuqtpFWWAQ= +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== dependencies: - object-keys "~1.0.0" + picomatch "^2.2.1" -regenerate-unicode-properties@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-7.0.0.tgz#107405afcc4a190ec5ed450ecaa00ed0cafa7a4c" - integrity sha512-s5NGghCE4itSlUS+0WUj88G6cfMVMmH8boTPNvABf8od+2dhT9WDlWu8n01raQAJZMOK8Ch6jSexaRO7swd6aw== +reading-time@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/reading-time/-/reading-time-1.5.0.tgz#d2a7f1b6057cb2e169beaf87113cc3411b5bc5bb" + integrity sha512-onYyVhBNr4CmAxFsKS7bz+uTLRakypIe4R+5A824vBSkQy/hB3fZepoVEf8OVAxzLvK+H/jm9TzpI3ETSm64Kg== + +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + integrity sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw== dependencies: - regenerate "^1.4.0" + resolve "^1.1.6" -regenerate@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11" - integrity sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg== +recursive-readdir@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.2.tgz#9946fb3274e1628de6e36b2f6714953b4845094f" + integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg== + dependencies: + minimatch "3.0.4" + +regenerate-unicode-properties@^10.0.1: + version "10.0.1" + resolved 
"https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.0.1.tgz#7f442732aa7934a3740c779bb9b3340dccc1fb56" + integrity sha512-vn5DU6yg6h8hP/2OkQo3K7uVILvY4iu0oI4t3HFa81UPkhGJwkRwM10JEc3upjdhHjs/k8GJY1sRBhk5sr69Bw== + dependencies: + regenerate "^1.4.2" -regenerator-runtime@^0.12.0: - version "0.12.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.12.1.tgz#fa1a71544764c036f8c49b13a08b2594c9f8a0de" - integrity sha512-odxIc1/vDlo4iZcfXqRYFj0vpXFNoGdKMAUieAlFYO6m/nl5e9KR/beGf41z4a1FI+aQgtjhuaSlDxQ0hmkrHg== +regenerate@^1.4.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" + integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-transform@^0.13.3: - version "0.13.3" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.13.3.tgz#264bd9ff38a8ce24b06e0636496b2c856b57bcbb" - integrity sha512-5ipTrZFSq5vU2YoGoww4uaRVAK4wyYC4TSICibbfEPOruUu8FFP7ErV0BjmbIOEpn3O/k9na9UEdYR/3m7N6uA== +regenerator-runtime@^0.13.4: + version "0.13.9" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz#8925742a98ffd90814988d7566ad30ca3b263b52" + integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== + +regenerator-transform@^0.15.0: + version "0.15.0" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.0.tgz#cbd9ead5d77fae1a48d957cf889ad0586adb6537" + integrity sha512-LsrGtPmbYg19bcPHwdtmXwbW+TqNvtY4riE3P83foeHRroMbH6/2ddFBfab3t7kbzc7v7p4wbkIecHImqt0QNg== dependencies: - private "^0.1.6" + "@babel/runtime" "^7.8.4" -regex-not@^1.0.0, regex-not@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" - integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A== +regexpu-core@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-5.0.1.tgz#c531122a7840de743dcf9c83e923b5560323ced3" + integrity sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw== dependencies: - extend-shallow "^3.0.2" - safe-regex "^1.1.0" + regenerate "^1.4.2" + regenerate-unicode-properties "^10.0.1" + regjsgen "^0.6.0" + regjsparser "^0.8.2" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.0.0" -regexp-tree@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/regexp-tree/-/regexp-tree-0.1.1.tgz#27b455f9b138ca2e84c090e9aff1ffe2a04d97fa" - integrity sha512-HwRjOquc9QOwKTgbxvZTcddS5mlNlwePMQ3NFL8broajMLD5CXDAqas8Y5yxJH5QtZp5iRor3YCILd5pz71Cgw== - dependencies: - cli-table3 "^0.5.0" - colors "^1.1.2" - yargs "^12.0.5" - -regexpu-core@^4.1.3, regexpu-core@^4.2.0: - version "4.4.0" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.4.0.tgz#8d43e0d1266883969720345e70c275ee0aec0d32" - integrity 
sha512-eDDWElbwwI3K0Lo6CqbQbA6FwgtCz4kYTarrri1okfkRLZAqstU+B3voZBCjg8Fl6iq0gXrJG6MvRgLthfvgOA== - dependencies: - regenerate "^1.4.0" - regenerate-unicode-properties "^7.0.0" - regjsgen "^0.5.0" - regjsparser "^0.6.0" - unicode-match-property-ecmascript "^1.0.4" - unicode-match-property-value-ecmascript "^1.0.2" - -regjsgen@^0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.0.tgz#a7634dc08f89209c2049adda3525711fb97265dd" - integrity sha512-RnIrLhrXCX5ow/E5/Mh2O4e/oa1/jW0eaBKTSy3LaCj+M3Bqvm97GWDp2yUtzIs4LEn65zR2yiYGFqb2ApnzDA== +registry-auth-token@^4.0.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-4.2.1.tgz#6d7b4006441918972ccd5fedcd41dc322c79b250" + integrity sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw== + dependencies: + rc "^1.2.8" + +registry-url@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-5.1.0.tgz#e98334b50d5434b81136b44ec638d9c2009c5009" + integrity sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw== + dependencies: + rc "^1.2.8" -regjsparser@^0.6.0: +regjsgen@^0.6.0: version "0.6.0" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.0.tgz#f1e6ae8b7da2bae96c99399b868cd6c933a2ba9c" - integrity sha512-RQ7YyokLiQBomUJuUG8iGVvkgOLxwyZM8k6d3q5SAXpg4r5TZJZigKFvC6PpD+qQ98bCDC5YelPeA3EucDoNeQ== + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.6.0.tgz#83414c5354afd7d6627b16af5f10f41c4e71808d" + integrity sha512-ozE883Uigtqj3bx7OhL1KNbCzGyW2NQZPl6Hs09WTvCuZD5sTI4JY58bkbQWa/Y9hxIsvJ3M8Nbf7j54IqeZbA== + +regjsparser@^0.8.2: + version "0.8.4" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.8.4.tgz#8a14285ffcc5de78c5b95d62bbf413b6bc132d5f" + integrity sha512-J3LABycON/VNEu3abOviqGHuB/LOtOQj8SKmfP9anY5GfAVw/SPjwzSjxGjbZXIxbGfqTHtJw58C2Li/WkStmA== dependencies: jsesc "~0.5.0" -remove-trailing-separator@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" - integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8= +rehype-parse@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/rehype-parse/-/rehype-parse-6.0.2.tgz#aeb3fdd68085f9f796f1d3137ae2b85a98406964" + integrity sha512-0S3CpvpTAgGmnz8kiCyFLGuW5yA4OQhyNTm/nwPopZ7+PI11WnGl1TTWTGv/2hPEe/g2jRLlhVVSsoDH8waRug== + dependencies: + hast-util-from-parse5 "^5.0.0" + parse5 "^5.0.0" + xtend "^4.0.0" -repeat-element@^1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" - integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== +relateurl@^0.2.7: + version "0.2.7" + resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + integrity sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog== + +remark-admonitions@^1.2.1: + version 
"1.2.1" + resolved "https://registry.yarnpkg.com/remark-admonitions/-/remark-admonitions-1.2.1.tgz#87caa1a442aa7b4c0cafa04798ed58a342307870" + integrity sha512-Ji6p68VDvD+H1oS95Fdx9Ar5WA2wcDA4kwrrhVU7fGctC6+d3uiMICu7w7/2Xld+lnU7/gi+432+rRbup5S8ow== + dependencies: + rehype-parse "^6.0.2" + unified "^8.4.2" + unist-util-visit "^2.0.1" + +remark-emoji@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/remark-emoji/-/remark-emoji-2.2.0.tgz#1c702090a1525da5b80e15a8f963ef2c8236cac7" + integrity sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w== + dependencies: + emoticon "^3.2.0" + node-emoji "^1.10.0" + unist-util-visit "^2.0.3" + +remark-footnotes@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/remark-footnotes/-/remark-footnotes-2.0.0.tgz#9001c4c2ffebba55695d2dd80ffb8b82f7e6303f" + integrity sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ== + +remark-mdx@1.6.22: + version "1.6.22" + resolved "https://registry.yarnpkg.com/remark-mdx/-/remark-mdx-1.6.22.tgz#06a8dab07dcfdd57f3373af7f86bd0e992108bbd" + integrity sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ== + dependencies: + "@babel/core" "7.12.9" + "@babel/helper-plugin-utils" "7.10.4" + "@babel/plugin-proposal-object-rest-spread" "7.12.1" + "@babel/plugin-syntax-jsx" "7.12.1" + "@mdx-js/util" "1.6.22" + is-alphabetical "1.0.4" + remark-parse "8.0.3" + unified "9.2.0" + +remark-parse@8.0.3: + version "8.0.3" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-8.0.3.tgz#9c62aa3b35b79a486454c690472906075f40c7e1" + integrity sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q== + dependencies: + ccount "^1.0.0" + collapse-white-space "^1.0.2" + is-alphabetical "^1.0.0" + is-decimal "^1.0.0" + is-whitespace-character "^1.0.0" + is-word-character "^1.0.0" + markdown-escapes "^1.0.0" + parse-entities "^2.0.0" + repeat-string "^1.5.4" + state-toggle "^1.0.0" + trim "0.0.1" + trim-trailing-lines "^1.0.0" + unherit "^1.0.4" + unist-util-remove-position "^2.0.0" + vfile-location "^3.0.0" + xtend "^4.0.1" + +remark-squeeze-paragraphs@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz#76eb0e085295131c84748c8e43810159c5653ead" + integrity sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw== + dependencies: + mdast-squeeze-paragraphs "^4.0.0" -repeat-string@^1.6.1: +renderkid@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-3.0.0.tgz#5fd823e4d6951d37358ecc9a58b1f06836b6268a" + integrity sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg== + dependencies: + css-select "^4.1.3" + dom-converter "^0.2.0" + htmlparser2 "^6.1.0" + lodash "^4.17.21" + strip-ansi "^6.0.1" + +repeat-string@^1.5.4: version "1.6.1" resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" - integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= + integrity 
sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w== -request-promise-core@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.1.tgz#3eee00b2c5aa83239cfb04c5700da36f81cd08b6" - integrity sha1-Pu4AssWqgyOc+wTFcA2jb4HNCLY= +require-from-string@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + +"require-like@>= 0.1.1": + version "0.1.2" + resolved "https://registry.yarnpkg.com/require-like/-/require-like-0.1.2.tgz#ad6f30c13becd797010c468afa775c0c0a6b47fa" + integrity sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve-pathname@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-3.0.0.tgz#99d02224d3cf263689becbb393bc560313025dcd" + integrity sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng== + +resolve@^1.1.6, resolve@^1.14.2, resolve@^1.3.2: + version "1.22.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.0.tgz#5e0b8c67c15df57a89bdbabe603a002f21731198" + integrity sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw== dependencies: - lodash "^4.13.1" + is-core-module "^2.8.1" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" -request-promise-native@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.5.tgz#5281770f68e0c9719e5163fd3fab482215f4fda5" - integrity sha1-UoF3D2jgyXGeUWP9P6tIIhX0/aU= - dependencies: - request-promise-core "1.1.1" - stealthy-require "^1.1.0" - tough-cookie ">=2.3.3" - -request@^2.87.0: - version "2.88.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" - integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - har-validator "~5.1.0" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - oauth-sign "~0.9.0" - performance-now "^2.1.0" - qs "~6.5.2" - safe-buffer "^5.1.2" - tough-cookie "~2.4.3" - tunnel-agent "^0.6.0" - uuid "^3.3.2" - 
-require-directory@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" - integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= +responselike@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ== + dependencies: + lowercase-keys "^1.0.0" -require-main-filename@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" - integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= +retry@^0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658" + integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg== + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" -resolve-url@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" - integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= +rtl-detect@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/rtl-detect/-/rtl-detect-1.0.4.tgz#40ae0ea7302a150b96bc75af7d749607392ecac6" + integrity sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ== -resolve@^1.3.2, resolve@^1.8.1: - version "1.10.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.10.0.tgz#3bdaaeaf45cc07f375656dfd2e54ed0810b101ba" - integrity sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg== +rtlcss@^3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/rtlcss/-/rtlcss-3.5.0.tgz#c9eb91269827a102bac7ae3115dd5d049de636c3" + integrity sha512-wzgMaMFHQTnyi9YOwsx9LjOxYXJPzS8sYnFaKm6R5ysvTkwzHiB0vxnbHwchHQT65PTdBjDG21/kQBWI7q9O7A== dependencies: - path-parse "^1.0.6" + find-up "^5.0.0" + picocolors "^1.0.0" + postcss "^8.3.11" + strip-json-comments "^3.1.1" -ret@~0.1.10: - version "0.1.15" - resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" - integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity 
sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" -rimraf@^2.6.1: - version "2.6.3" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab" - integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA== +rxjs@^7.5.4: + version "7.5.5" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.5.5.tgz#2ebad89af0f560f460ad5cc4213219e1f7dd4e9f" + integrity sha512-sy+H0pQofO95VDmFLzyaw9xNJU4KTRSwQIGM6+iG3SypAtCiLDzpeG8sJrNCWn2Up9km+KhkvTdbkrdy+yzZdw== dependencies: - glob "^7.1.3" + tslib "^2.1.0" -safe-buffer@^5.0.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: +safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== -safe-regex@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" - integrity sha1-QKNmnzsHfR6UPURinhV91IAjvy4= - dependencies: - ret "~0.1.10" +safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: +"safer-buffer@>= 2.1.2 < 3": version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== @@ -2858,170 +6330,383 @@ sax@^1.2.4: resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== -semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1: - version "5.6.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.6.0.tgz#7e74256fbaa49c75aa7c7a205cc22799cac80004" - integrity sha512-RS9R6R35NYgQn++fkDWaOmqGoj4Ek9gGs+DPxNUZKuwE183xjJroKvyo1IzVFeXvUrvmALy6FWD5xrdJT25gMg== +scheduler@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" + integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" -set-blocking@^2.0.0, set-blocking@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" - integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= +schema-utils@2.7.0: + version "2.7.0" + resolved 
"https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.7.0.tgz#17151f76d8eae67fbbf77960c33c676ad9f4efc7" + integrity sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A== + dependencies: + "@types/json-schema" "^7.0.4" + ajv "^6.12.2" + ajv-keywords "^3.4.1" + +schema-utils@^2.6.5: + version "2.7.1" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.7.1.tgz#1ca4f32d1b24c590c203b8e7a50bf0ea4cd394d7" + integrity sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg== + dependencies: + "@types/json-schema" "^7.0.5" + ajv "^6.12.4" + ajv-keywords "^3.5.2" -set-value@^0.4.3: - version "0.4.3" - resolved "https://registry.yarnpkg.com/set-value/-/set-value-0.4.3.tgz#7db08f9d3d22dc7f78e53af3c3bf4666ecdfccf1" - integrity sha1-fbCPnT0i3H945Trzw79GZuzfzPE= +schema-utils@^3.0.0, schema-utils@^3.1.0, schema-utils@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.1.1.tgz#bc74c4b6b6995c1d88f76a8b77bea7219e0c8281" + integrity sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw== + dependencies: + "@types/json-schema" "^7.0.8" + ajv "^6.12.5" + ajv-keywords "^3.5.2" + +schema-utils@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.0.0.tgz#60331e9e3ae78ec5d16353c467c34b3a0a1d3df7" + integrity sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg== + dependencies: + "@types/json-schema" "^7.0.9" + ajv "^8.8.0" + ajv-formats "^2.1.1" + ajv-keywords "^5.0.0" + +section-matter@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/section-matter/-/section-matter-1.0.0.tgz#e9041953506780ec01d59f292a19c7b850b84167" + integrity sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA== dependencies: extend-shallow "^2.0.1" - is-extendable "^0.1.1" - is-plain-object "^2.0.1" - to-object-path "^0.3.0" + kind-of "^6.0.0" -set-value@^2.0.0: +select-hose@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.0.tgz#71ae4a88f0feefbbf52d1ea604f3fb315ebb6274" - integrity sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg== + resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" + integrity sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg== + +selfsigned@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-2.0.1.tgz#8b2df7fa56bf014d19b6007655fff209c0ef0a56" + integrity sha512-LmME957M1zOsUhG+67rAjKfiWFox3SBxE/yymatMZsAx+oMrJ0YQ8AToOnyCm7xbeg2ep37IHLxdu0o2MavQOQ== dependencies: - extend-shallow "^2.0.1" - is-extendable "^0.1.1" - is-plain-object "^2.0.3" - split-string "^3.0.1" + node-forge "^1" -shebang-command@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" - integrity 
sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= +semver-diff@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-3.1.1.tgz#05f77ce59f325e00e2706afd67bb506ddb1ca32b" + integrity sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg== dependencies: - shebang-regex "^1.0.0" + semver "^6.3.0" -shebang-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" - integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= +semver@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.0.0.tgz#5f3ca35761e47e05b206c6daff2cf814f0316b8e" + integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== + +semver@^5.4.1: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.2.0, semver@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + +semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7: + version "7.3.7" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.7.tgz#12c5b649afdbf9049707796e22a4028814ce523f" + integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g== + dependencies: + lru-cache "^6.0.0" + +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== + dependencies: + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "2.0.0" + mime "1.6.0" + ms "2.1.3" + on-finished "2.4.1" + range-parser "~1.2.1" + statuses "2.0.1" + +serialize-javascript@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" + integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== + dependencies: + randombytes "^2.1.0" + +serve-handler@^6.1.3: + version "6.1.3" + resolved "https://registry.yarnpkg.com/serve-handler/-/serve-handler-6.1.3.tgz#1bf8c5ae138712af55c758477533b9117f6435e8" + integrity sha512-FosMqFBNrLyeiIDvP1zgO6YoTzFYHxLDEIavhlmQ+knB2Z7l1t+kGLHkZIDN7UVWqQAmKI3D20A6F6jo3nDd4w== + dependencies: + bytes "3.0.0" + content-disposition "0.5.2" + fast-url-parser "1.1.3" + mime-types "2.1.18" + minimatch "3.0.4" + path-is-inside "1.0.2" + path-to-regexp "2.2.1" + range-parser "1.2.0" + +serve-index@^1.9.1: + version "1.9.1" + resolved 
"https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" + integrity sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw== + dependencies: + accepts "~1.3.4" + batch "0.6.1" + debug "2.6.9" + escape-html "~1.0.3" + http-errors "~1.6.2" + mime-types "~2.1.17" + parseurl "~1.3.2" + +serve-static@1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.18.0" + +setimmediate@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== -signal-exit@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" - integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= +setprototypeof@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" + integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ== + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + +shallow-clone@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-3.0.1.tgz#8f2981ad92531f55035b01fb230769a40e02efa3" + integrity sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA== + dependencies: + kind-of "^6.0.2" -slash@^2.0.0: +shallowequal@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== + +shebang-command@^2.0.0: version "2.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" - integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A== + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" -snapdragon-node@^2.0.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" - integrity 
sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw== +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +shell-quote@^1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.7.3.tgz#aa40edac170445b9a431e17bb62c0b881b9c4123" + integrity sha512-Vpfqwm4EnqGdlsBFNmHhxhElJYrdfcxPThu+ryKS5J8L/fhAwLazFZtq+S+TWZ9ANj2piSQLGj6NQg+lKPmxrw== + +shelljs@^0.8.5: + version "0.8.5" + resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.5.tgz#de055408d8361bed66c669d2f000538ced8ee20c" + integrity sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow== + dependencies: + glob "^7.0.0" + interpret "^1.0.0" + rechoir "^0.6.2" + +side-channel@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" + integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== dependencies: - define-property "^1.0.0" - isobject "^3.0.0" - snapdragon-util "^3.0.1" + call-bind "^1.0.0" + get-intrinsic "^1.0.2" + object-inspect "^1.9.0" -snapdragon-util@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" - integrity sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ== +signal-exit@^3.0.2, signal-exit@^3.0.3: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +sirv@^1.0.7: + version "1.0.19" + resolved "https://registry.yarnpkg.com/sirv/-/sirv-1.0.19.tgz#1d73979b38c7fe91fcba49c85280daa9c2363b49" + integrity sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ== dependencies: - kind-of "^3.2.0" + "@polka/url" "^1.0.0-next.20" + mrmime "^1.0.0" + totalist "^1.0.0" + +sisteransi@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" + integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== -snapdragon@^0.8.1: - version "0.8.2" - resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" - integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg== +sitemap@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/sitemap/-/sitemap-7.1.1.tgz#eeed9ad6d95499161a3eadc60f8c6dce4bea2bef" + integrity sha512-mK3aFtjz4VdJN0igpIJrinf3EO8U8mxOPsTBzSsy06UtjZQJ3YY3o3Xa7zSc5nMqcMrRwlChHZ18Kxg0caiPBg== dependencies: - base "^0.11.1" - debug "^2.2.0" - define-property "^0.2.5" - 
extend-shallow "^2.0.1" - map-cache "^0.2.2" - source-map "^0.5.6" - source-map-resolve "^0.5.0" - use "^3.1.0" + "@types/node" "^17.0.5" + "@types/sax" "^1.2.1" + arg "^5.0.0" + sax "^1.2.4" -source-map-resolve@^0.5.0: - version "0.5.2" - resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259" - integrity sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA== +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slash@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-4.0.0.tgz#2422372176c4c6c5addb5e2ada885af984b396a7" + integrity sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew== + +sockjs@^0.3.24: + version "0.3.24" + resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.24.tgz#c9bc8995f33a111bea0395ec30aa3206bdb5ccce" + integrity sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ== dependencies: - atob "^2.1.1" - decode-uri-component "^0.2.0" - resolve-url "^0.2.1" - source-map-url "^0.4.0" - urix "^0.1.0" + faye-websocket "^0.11.3" + uuid "^8.3.2" + websocket-driver "^0.7.4" -source-map-url@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" - integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM= +sort-css-media-queries@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/sort-css-media-queries/-/sort-css-media-queries-2.0.4.tgz#b2badfa519cb4a938acbc6d3aaa913d4949dc908" + integrity sha512-PAIsEK/XupCQwitjv7XxoMvYhT7EAfyzI3hsy/MyDgTvc+Ft55ctdkctJLOy6cQejaIC+zjpUL4djFVm2ivOOw== -source-map@^0.5.0, source-map@^0.5.6: +source-map-js@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" + integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== + +source-map-support@~0.5.20: + version "0.5.21" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map@^0.5.0: version "0.5.7" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" - integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= + integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== -source-map@~0.6.1: +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0: version "0.6.1" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity 
sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -split-string@^3.0.1, split-string@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" - integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw== - dependencies: - extend-shallow "^3.0.0" - -sshpk@^1.7.0: - version "1.16.1" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" - integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg== - dependencies: - asn1 "~0.2.3" - assert-plus "^1.0.0" - bcrypt-pbkdf "^1.0.0" - dashdash "^1.12.0" - ecc-jsbn "~0.1.1" - getpass "^0.1.1" - jsbn "~0.1.0" - safer-buffer "^2.0.2" - tweetnacl "~0.14.0" - -standalone-react-addons-pure-render-mixin@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/standalone-react-addons-pure-render-mixin/-/standalone-react-addons-pure-render-mixin-0.1.1.tgz#3c7409f4c79c40de9ac72c616cf679a994f37551" - integrity sha1-PHQJ9MecQN6axyxhbPZ5qZTzdVE= +space-separated-tokens@^1.0.0: + version "1.1.5" + resolved "https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz#85f32c3d10d9682007e917414ddc5c26d1aa6899" + integrity sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA== -static-extend@^0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" - integrity sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY= +spdy-transport@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-3.0.0.tgz#00d4863a6400ad75df93361a1608605e5dcdcf31" + integrity sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw== dependencies: - define-property "^0.2.5" - object-copy "^0.1.0" + debug "^4.1.0" + detect-node "^2.0.4" + hpack.js "^2.1.6" + obuf "^1.1.2" + readable-stream "^3.0.6" + wbuf "^1.7.3" -stealthy-require@^1.1.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b" - integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks= +spdy@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/spdy/-/spdy-4.0.2.tgz#b74f466203a3eda452c02492b91fb9e84a27677b" + integrity sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA== + dependencies: + debug "^4.1.0" + handle-thing "^2.0.0" + http-deceiver "^1.2.7" + select-hose "^2.0.0" + spdy-transport "^3.0.0" -string-width@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" - integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity 
sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== + +stable@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" + integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== + +state-toggle@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/state-toggle/-/state-toggle-1.0.3.tgz#e123b16a88e143139b09c6852221bc9815917dfe" + integrity sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ== + +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +"statuses@>= 1.4.0 < 2": + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== + +std-env@^3.0.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/std-env/-/std-env-3.1.1.tgz#1f19c4d3f6278c52efd08a94574a2a8d32b7d092" + integrity sha512-/c645XdExBypL01TpFKiG/3RAa/Qmu+zRi0MwAmrdEkwHNuN0ebo8ccAXBBDa5Z0QOJgBskUIbuCK91x0sCVEw== + +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== dependencies: - code-point-at "^1.0.0" - is-fullwidth-code-point "^1.0.0" - strip-ansi "^3.0.0" + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" -"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" - integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== +string-width@^5.0.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" + integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== dependencies: - is-fullwidth-code-point "^2.0.0" - strip-ansi "^4.0.0" + eastasianwidth "^0.2.0" + emoji-regex "^9.2.2" + strip-ansi "^7.0.1" + +string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" string_decoder@~1.1.1: version "1.1.1" @@ -3030,29 +6715,63 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -strip-ansi@^3.0.0, strip-ansi@^3.0.1: - version "3.0.1" - resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" - integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= +stringify-object@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" + integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw== dependencies: - ansi-regex "^2.0.0" + get-own-enumerable-property-symbols "^3.0.0" + is-obj "^1.0.1" + is-regexp "^1.0.0" -strip-ansi@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" - integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.0.1.tgz#61740a08ce36b61e50e65653f07060d000975fb2" + integrity sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw== dependencies: - ansi-regex "^3.0.0" + ansi-regex "^6.0.1" -strip-eof@^1.0.0: +strip-bom-string@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" - integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8= + resolved "https://registry.yarnpkg.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92" + integrity sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g== + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-json-comments@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== strip-json-comments@~2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== + +style-to-object@0.3.0, style-to-object@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.3.0.tgz#b1b790d205991cc783801967214979ee19a76e46" + integrity sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA== + dependencies: + inline-style-parser "0.1.1" + 
+stylehacks@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-5.1.0.tgz#a40066490ca0caca04e96c6b02153ddc39913520" + integrity sha512-SzLmvHQTrIWfSgljkQCw2++C9+Ne91d/6Sp92I8c5uHTcy/PgeHamwITIbBW9wnFTY/3ZfSXR9HIL6Ikqmcu6Q== + dependencies: + browserslist "^4.16.6" + postcss-selector-parser "^6.0.4" supports-color@^5.3.0: version "5.5.0" @@ -3061,337 +6780,704 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -symbol-tree@^3.2.2: - version "3.2.2" - resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.2.tgz#ae27db38f660a7ae2e1c3b7d1bc290819b8519e6" - integrity sha1-rifbOPZgp64uHDt9G8KQgZuFGeY= - -tar@^4: - version "4.4.8" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.8.tgz#b19eec3fde2a96e64666df9fdb40c5ca1bc3747d" - integrity sha512-LzHF64s5chPQQS0IYBn9IN5h3i98c12bo4NCO7e0sGM2llXQ3p2FGC5sdENN4cTW48O915Sh+x+EXx7XW96xYQ== - dependencies: - chownr "^1.1.1" - fs-minipass "^1.2.5" - minipass "^2.3.4" - minizlib "^1.1.1" - mkdirp "^0.5.0" - safe-buffer "^5.1.2" - yallist "^3.0.2" - -through@~2.3.4: - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= - -tingle.js@^0.13.2: - version "0.13.2" - resolved "https://registry.yarnpkg.com/tingle.js/-/tingle.js-0.13.2.tgz#516e28d77c7c0160d835fa12278856be1a8a7502" - integrity sha512-hbfHZZ/sMo+JXM47GQb57b+0lgdy+o8D1pNINSQG98cvTyH+AusafgnklVL983nFU6psjz0wr/OcHOgF/5f/Vw== - -to-factory@^1.0.0: +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +supports-color@^8.0.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +supports-preserve-symlinks-flag@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/to-factory/-/to-factory-1.0.0.tgz#8738af8bd97120ad1d4047972ada5563bf9479b1" - integrity sha1-hzivi9lxIK0dQEeXKtpVY7+UebE= + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +svg-parser@^2.0.2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.4.tgz#fdc2e29e13951736140b76cb122c8ee6630eb6b5" + integrity sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ== + +svgo@^2.5.0, svgo@^2.7.0: + version "2.8.0" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-2.8.0.tgz#4ff80cce6710dc2795f0c7c74101e6764cfccd24" + integrity 
sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg== + dependencies: + "@trysound/sax" "0.2.0" + commander "^7.2.0" + css-select "^4.1.3" + css-tree "^1.1.3" + csso "^4.2.0" + picocolors "^1.0.0" + stable "^0.1.8" + +tapable@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" + integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA== + +tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0" + integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ== + +terser-webpack-plugin@^5.1.3, terser-webpack-plugin@^5.3.1: + version "5.3.3" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.3.tgz#8033db876dd5875487213e87c627bca323e5ed90" + integrity sha512-Fx60G5HNYknNTNQnzQ1VePRuu89ZVYWfjRAeT5rITuCY/1b08s49e5kSQwHDirKZWuoKOBRFS98EUUoZ9kLEwQ== + dependencies: + "@jridgewell/trace-mapping" "^0.3.7" + jest-worker "^27.4.5" + schema-utils "^3.1.1" + serialize-javascript "^6.0.0" + terser "^5.7.2" + +terser@^5.10.0, terser@^5.7.2: + version "5.14.0" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.14.0.tgz#eefeec9af5153f55798180ee2617f390bdd285e2" + integrity sha512-JC6qfIEkPBd9j1SMO3Pfn+A6w2kQV54tv+ABQLgZr7dA3k/DL/OBoYSWxzVpZev3J+bUHXfr55L8Mox7AaNo6g== + dependencies: + "@jridgewell/source-map" "^0.3.2" + acorn "^8.5.0" + commander "^2.20.0" + source-map-support "~0.5.20" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== + +thunky@^1.0.2: + version "1.1.0" + resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.1.0.tgz#5abaf714a9405db0504732bbccd2cedd9ef9537d" + integrity sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA== + +tiny-invariant@^1.0.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.2.0.tgz#a1141f86b672a9148c72e978a19a73b9b94a15a9" + integrity sha512-1Uhn/aqw5C6RI4KejVeTg6mIS7IqxnLJ8Mv2tV5rTc0qWobay7pDUz6Wi392Cnc8ak1H0F2cjoRzb2/AW4+Fvg== + +tiny-warning@^1.0.0, tiny-warning@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.3.tgz#94a30db453df4c643d0fd566060d60a875d84754" + integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA== to-fast-properties@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" - integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= + integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== -to-object-path@^0.3.0: - version "0.3.0" - resolved 
"https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" - integrity sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68= +to-readable-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" + integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== dependencies: - kind-of "^3.0.2" + is-number "^7.0.0" -to-regex-range@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" - integrity sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg= +toidentifier@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + +totalist@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/totalist/-/totalist-1.1.0.tgz#a4d65a3e546517701e3e5c37a47a70ac97fe56df" + integrity sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g== + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= + +trim-trailing-lines@^1.0.0: + version "1.1.4" + resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0" + integrity sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ== + +trim@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" + integrity sha1-WFhUf2spB1fulczMZm+1AITEYN0= + +trough@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" + integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA== + +tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.0.tgz#7cecaa7f073ce680a05847aa77be941098f36dc3" + integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ== + +type-fest@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" + integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ== + +type-fest@^2.5.0: + version "2.13.0" + resolved 
"https://registry.yarnpkg.com/type-fest/-/type-fest-2.13.0.tgz#d1ecee38af29eb2e863b22299a3d68ef30d2abfb" + integrity sha512-lPfAm42MxE4/456+QyIaaVBAwgpJb6xZ8PRu09utnhPdWwcyj9vgy6Sq0Z5yNbJ21EdxB5dRU/Qg8bsyAMtlcw== + +type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== dependencies: - is-number "^3.0.0" - repeat-string "^1.6.1" + media-typer "0.3.0" + mime-types "~2.1.24" -to-regex@^3.0.1, to-regex@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" - integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw== +typedarray-to-buffer@^3.1.5: + version "3.1.5" + resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" + integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== dependencies: - define-property "^2.0.2" - extend-shallow "^3.0.2" - regex-not "^1.0.2" - safe-regex "^1.1.0" + is-typedarray "^1.0.0" -tough-cookie@>=2.3.3: - version "3.0.1" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-3.0.1.tgz#9df4f57e739c26930a018184887f4adb7dca73b2" - integrity sha512-yQyJ0u4pZsv9D4clxO69OEjLWYw+jbgspjTue4lTQZLfV0c5l1VmK2y1JK8E9ahdpltPOaAThPcp5nKPUgSnsg== +ua-parser-js@^0.7.30: + version "0.7.31" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6" + integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ== + +unherit@^1.0.4: + version "1.1.3" + resolved "https://registry.yarnpkg.com/unherit/-/unherit-1.1.3.tgz#6c9b503f2b41b262330c80e91c8614abdaa69c22" + integrity sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ== dependencies: - ip-regex "^2.1.0" - psl "^1.1.28" - punycode "^2.1.1" + inherits "^2.0.0" + xtend "^4.0.0" -tough-cookie@^2.3.4: - version "2.5.0" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" - integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== +unicode-canonical-property-names-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc" + integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ== + +unicode-match-property-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz#54fd16e0ecb167cf04cf1f756bdcc92eba7976c3" + integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q== dependencies: - psl 
"^1.1.28" - punycode "^2.1.1" + unicode-canonical-property-names-ecmascript "^2.0.0" + unicode-property-aliases-ecmascript "^2.0.0" + +unicode-match-property-value-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.0.0.tgz#1a01aa57247c14c568b89775a54938788189a714" + integrity sha512-7Yhkc0Ye+t4PNYzOGKedDhXbYIBe1XEQYQxOPyhcXNMJ0WCABqqj6ckydd6pWRZTHV4GuCPKdBAUiMc60tsKVw== -tough-cookie@~2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" - integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ== +unicode-property-aliases-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.0.0.tgz#0a36cb9a585c4f6abd51ad1deddb285c165297c8" + integrity sha512-5Zfuy9q/DFr4tfO7ZPeVXb1aPoeQSdeFMLpYuFebehDAhbuevLs5yxSZmIFN1tP5F9Wl4IpJrYojg85/zgyZHQ== + +unified@9.2.0: + version "9.2.0" + resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.0.tgz#67a62c627c40589edebbf60f53edfd4d822027f8" + integrity sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg== + dependencies: + bail "^1.0.0" + extend "^3.0.0" + is-buffer "^2.0.0" + is-plain-obj "^2.0.0" + trough "^1.0.0" + vfile "^4.0.0" + +unified@^8.4.2: + version "8.4.2" + resolved "https://registry.yarnpkg.com/unified/-/unified-8.4.2.tgz#13ad58b4a437faa2751a4a4c6a16f680c500fff1" + integrity sha512-JCrmN13jI4+h9UAyKEoGcDZV+i1E7BLFuG7OsaDvTXI5P0qhHX+vZO/kOhz9jn8HGENDKbwSeB0nVOg4gVStGA== dependencies: - psl "^1.1.24" - punycode "^1.4.1" + bail "^1.0.0" + extend "^3.0.0" + is-plain-obj "^2.0.0" + trough "^1.0.0" + vfile "^4.0.0" -tr46@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09" - integrity sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk= +unique-string@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d" + integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg== dependencies: - punycode "^2.1.0" + crypto-random-string "^2.0.0" -trim-right@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" - integrity sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM= +unist-builder@2.0.3, unist-builder@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/unist-builder/-/unist-builder-2.0.3.tgz#77648711b5d86af0942f334397a33c5e91516436" + integrity sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw== -tunnel-agent@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= - dependencies: - safe-buffer "^5.0.1" +unist-util-generated@^1.0.0: + 
version "1.1.6" + resolved "https://registry.yarnpkg.com/unist-util-generated/-/unist-util-generated-1.1.6.tgz#5ab51f689e2992a472beb1b35f2ce7ff2f324d4b" + integrity sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg== -tweetnacl@^0.14.3, tweetnacl@~0.14.0: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= +unist-util-is@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-4.1.0.tgz#976e5f462a7a5de73d94b706bac1b90671b57797" + integrity sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg== -type-check@~0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" - integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= - dependencies: - prelude-ls "~1.1.2" +unist-util-position@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/unist-util-position/-/unist-util-position-3.1.0.tgz#1c42ee6301f8d52f47d14f62bbdb796571fa2d47" + integrity sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA== -unicode-canonical-property-names-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" - integrity sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ== +unist-util-remove-position@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz#5d19ca79fdba712301999b2b73553ca8f3b352cc" + integrity sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA== + dependencies: + unist-util-visit "^2.0.0" -unicode-match-property-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" - integrity sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg== +unist-util-remove@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/unist-util-remove/-/unist-util-remove-2.1.0.tgz#b0b4738aa7ee445c402fda9328d604a02d010588" + integrity sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q== dependencies: - unicode-canonical-property-names-ecmascript "^1.0.4" - unicode-property-aliases-ecmascript "^1.0.4" + unist-util-is "^4.0.0" -unicode-match-property-value-ecmascript@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.0.2.tgz#9f1dc76926d6ccf452310564fd834ace059663d4" - integrity sha512-Rx7yODZC1L/T8XKo/2kNzVAQaRE88AaMvI1EF/Xnj3GW2wzN6fop9DDWuFAKUVFH7vozkz26DzP0qyWLKLIVPQ== +unist-util-stringify-position@^2.0.0: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz#cce3bfa1cdf85ba7375d1d5b17bdc4cada9bd9da" + integrity sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g== + dependencies: + "@types/unist" "^2.0.2" -unicode-property-aliases-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.4.tgz#5a533f31b4317ea76f17d807fa0d116546111dd0" - integrity sha512-2WSLa6OdYd2ng8oqiGIWnJqyFArvhn+5vgx5GTxMbUYjCYKUcuKS62YLFF0R/BDGlB1yzXjQOLtPAfHsgirEpg== +unist-util-visit-parents@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz#65a6ce698f78a6b0f56aa0e88f13801886cdaef6" + integrity sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg== + dependencies: + "@types/unist" "^2.0.0" + unist-util-is "^4.0.0" -union-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.0.tgz#5c71c34cb5bad5dcebe3ea0cd08207ba5aa1aea4" - integrity sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ= +unist-util-visit@2.0.3, unist-util-visit@^2.0.0, unist-util-visit@^2.0.1, unist-util-visit@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-2.0.3.tgz#c3703893146df47203bb8a9795af47d7b971208c" + integrity sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q== dependencies: - arr-union "^3.1.0" - get-value "^2.0.6" - is-extendable "^0.1.1" - set-value "^0.4.3" + "@types/unist" "^2.0.0" + unist-util-is "^4.0.0" + unist-util-visit-parents "^3.0.0" -unset-value@^1.0.0: +universalify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" + integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== + +unpipe@1.0.0, unpipe@~1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" - integrity sha1-g3aHP30jNRef+x5vw6jtDfyKtVk= - dependencies: - has-value "^0.3.1" - isobject "^3.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= -upath@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/upath/-/upath-1.1.0.tgz#35256597e46a581db4793d0ce47fa9aebfc9fabd" - integrity sha512-bzpH/oBhoS/QI/YtbkqCg6VEiPYjSZtrHQM6/QnJS6OL9pKUFLqb3aFh4Scvwm45+7iAgiMkLhSbaZxUqmrprw== +update-notifier@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-5.1.0.tgz#4ab0d7c7f36a231dd7316cf7729313f0214d9ad9" + integrity sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw== + dependencies: + boxen "^5.0.0" + chalk "^4.1.0" + configstore "^5.0.1" + has-yarn "^2.1.0" + import-lazy "^2.1.0" + is-ci "^2.0.0" + is-installed-globally "^0.4.0" + is-npm 
"^5.0.0" + is-yarn-global "^0.3.0" + latest-version "^5.1.0" + pupa "^2.1.1" + semver "^7.3.4" + semver-diff "^3.1.1" + xdg-basedir "^4.0.0" uri-js@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" - integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ== + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== dependencies: punycode "^2.1.0" -urix@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" - integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI= +url-loader@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-4.1.1.tgz#28505e905cae158cf07c92ca622d7f237e70a4e2" + integrity sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA== + dependencies: + loader-utils "^2.0.0" + mime-types "^2.1.27" + schema-utils "^3.0.0" -use@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f" - integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= + dependencies: + prepend-http "^2.0.0" + +use-composed-ref@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz#3d8104db34b7b264030a9d916c5e94fbe280dbda" + integrity sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ== + +use-isomorphic-layout-effect@^1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz#497cefb13d863d687b08477d9e5a164ad8c1a6fb" + integrity sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA== + +use-latest@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/use-latest/-/use-latest-1.2.1.tgz#d13dfb4b08c28e3e33991546a2cee53e14038cf2" + integrity sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw== + dependencies: + use-isomorphic-layout-effect "^1.1.1" -util-deprecate@~1.0.1: +util-deprecate@^1.0.1, util-deprecate@^1.0.2, util-deprecate@~1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= -util@^0.10.3: - version "0.10.4" - resolved "https://registry.yarnpkg.com/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901" - integrity sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A== - 
dependencies: - inherits "2.0.3" - -uuid@^3.3.2: - version "3.3.2" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" - integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== +utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + integrity sha1-ihagXURWV6Oupe7MWxKk+lN5dyw= -verror@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= - dependencies: - assert-plus "^1.0.0" - core-util-is "1.0.2" - extsprintf "^1.2.0" +utility-types@^3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/utility-types/-/utility-types-3.10.0.tgz#ea4148f9a741015f05ed74fd615e1d20e6bed82b" + integrity sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg== -w3c-hr-time@^1.0.1: +utils-merge@1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz#82ac2bff63d950ea9e3189a58a65625fedf19045" - integrity sha1-gqwr/2PZUOqeMYmlimViX+3xkEU= - dependencies: - browser-process-hrtime "^0.1.2" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= -webidl-conversions@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" - integrity sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg== +uuid@^8.3.2: + version "8.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" + integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== -whatwg-encoding@^1.0.1, whatwg-encoding@^1.0.3: - version "1.0.5" - resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0" - integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw== - dependencies: - iconv-lite "0.4.24" +value-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/value-equal/-/value-equal-1.0.1.tgz#1e0b794c734c5c0cade179c437d356d931a34d6c" + integrity sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw== -whatwg-mimetype@^2.1.0, whatwg-mimetype@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" - integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g== +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= -whatwg-url@^6.4.1: - 
version "6.5.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-6.5.0.tgz#f2df02bff176fd65070df74ad5ccbb5a199965a8" - integrity sha512-rhRZRqx/TLJQWUpQ6bmrt2UV4f0HCQ463yQuONJqC6fO2VoEb1pTYddbe59SkYq87aoM5A3bdhMZiUiVws+fzQ== - dependencies: - lodash.sortby "^4.7.0" - tr46 "^1.0.1" - webidl-conversions "^4.0.2" +vfile-location@^3.0.0, vfile-location@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/vfile-location/-/vfile-location-3.2.0.tgz#d8e41fbcbd406063669ebf6c33d56ae8721d0f3c" + integrity sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA== -whatwg-url@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.0.0.tgz#fde926fa54a599f3adf82dff25a9f7be02dc6edd" - integrity sha512-37GeVSIJ3kn1JgKyjiYNmSLP1yzbpb29jdmwBSgkD9h40/hyrR/OifpVUndji3tmwGgD8qpw7iQu3RSbCrBpsQ== - dependencies: - lodash.sortby "^4.7.0" - tr46 "^1.0.1" - webidl-conversions "^4.0.2" +vfile-message@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-2.0.4.tgz#5b43b88171d409eae58477d13f23dd41d52c371a" + integrity sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ== + dependencies: + "@types/unist" "^2.0.0" + unist-util-stringify-position "^2.0.0" + +vfile@^4.0.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.2.1.tgz#03f1dce28fc625c625bc6514350fbdb00fa9e624" + integrity sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA== + dependencies: + "@types/unist" "^2.0.0" + is-buffer "^2.0.0" + unist-util-stringify-position "^2.0.0" + vfile-message "^2.0.0" + +wait-on@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-6.0.1.tgz#16bbc4d1e4ebdd41c5b4e63a2e16dbd1f4e5601e" + integrity sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw== + dependencies: + axios "^0.25.0" + joi "^17.6.0" + lodash "^4.17.21" + minimist "^1.2.5" + rxjs "^7.5.4" + +watchpack@^2.3.1: + version "2.4.0" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.0.tgz#fa33032374962c78113f93c7f2fb4c54c9862a5d" + integrity sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg== + dependencies: + glob-to-regexp "^0.4.1" + graceful-fs "^4.1.2" + +wbuf@^1.1.0, wbuf@^1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.3.tgz#c1d8d149316d3ea852848895cb6a0bfe887b87df" + integrity sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA== + dependencies: + minimalistic-assert "^1.0.0" + +web-namespaces@^1.0.0, web-namespaces@^1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec" + integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw== + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE= + +webpack-bundle-analyzer@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.5.0.tgz#1b0eea2947e73528754a6f9af3e91b2b6e0f79d5" + integrity sha512-GUMZlM3SKwS8Z+CKeIFx7CVoHn3dXFcUAjT/dcZQQmfSZGvitPfMob2ipjai7ovFFqPvTqkEZ/leL4O0YOdAYQ== + dependencies: + acorn "^8.0.4" + acorn-walk "^8.0.0" + chalk "^4.1.0" + commander "^7.2.0" + gzip-size "^6.0.0" + lodash "^4.17.20" + opener "^1.5.2" + sirv "^1.0.7" + ws "^7.3.1" + +webpack-dev-middleware@^5.3.1: + version "5.3.3" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz#efae67c2793908e7311f1d9b06f2a08dcc97e51f" + integrity sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA== + dependencies: + colorette "^2.0.10" + memfs "^3.4.3" + mime-types "^2.1.31" + range-parser "^1.2.1" + schema-utils "^4.0.0" + +webpack-dev-server@^4.9.0: + version "4.9.2" + resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-4.9.2.tgz#c188db28c7bff12f87deda2a5595679ebbc3c9bc" + integrity sha512-H95Ns95dP24ZsEzO6G9iT+PNw4Q7ltll1GfJHV4fKphuHWgKFzGHWi4alTlTnpk1SPPk41X+l2RB7rLfIhnB9Q== + dependencies: + "@types/bonjour" "^3.5.9" + "@types/connect-history-api-fallback" "^1.3.5" + "@types/express" "^4.17.13" + "@types/serve-index" "^1.9.1" + "@types/serve-static" "^1.13.10" + "@types/sockjs" "^0.3.33" + "@types/ws" "^8.5.1" + ansi-html-community "^0.0.8" + bonjour-service "^1.0.11" + chokidar "^3.5.3" + colorette "^2.0.10" + compression "^1.7.4" + connect-history-api-fallback "^1.6.0" + default-gateway "^6.0.3" + express "^4.17.3" + graceful-fs "^4.2.6" + html-entities "^2.3.2" + http-proxy-middleware "^2.0.3" + ipaddr.js "^2.0.1" + open "^8.0.9" + p-retry "^4.5.0" + rimraf "^3.0.2" + schema-utils "^4.0.0" + selfsigned "^2.0.1" + serve-index "^1.9.1" + sockjs "^0.3.24" + spdy "^4.0.2" + webpack-dev-middleware "^5.3.1" + ws "^8.4.2" + +webpack-merge@^5.8.0: + version "5.8.0" + resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-5.8.0.tgz#2b39dbf22af87776ad744c390223731d30a68f61" + integrity sha512-/SaI7xY0831XwP6kzuwhKWVKDP9t1QY1h65lAFLbZqMPIuYcD9QAW4u9STIbU9kaJbPBB/geU/gLr1wDjOhQ+Q== + dependencies: + clone-deep "^4.0.1" + wildcard "^2.0.0" + +webpack-sources@^3.2.2, webpack-sources@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde" + integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== + +webpack@^5.72.1: + version "5.73.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.73.0.tgz#bbd17738f8a53ee5760ea2f59dce7f3431d35d38" + integrity sha512-svjudQRPPa0YiOYa2lM/Gacw0r6PvxptHj4FuEKQ2kX05ZLkjbVc5MnPs6its5j7IZljnIqSVo/OsY2X0IpHGA== + dependencies: + "@types/eslint-scope" "^3.7.3" + "@types/estree" "^0.0.51" + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/wasm-edit" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + acorn "^8.4.1" + acorn-import-assertions "^1.7.6" + browserslist "^4.14.5" 
+ chrome-trace-event "^1.0.2" + enhanced-resolve "^5.9.3" + es-module-lexer "^0.9.0" + eslint-scope "5.1.1" + events "^3.2.0" + glob-to-regexp "^0.4.1" + graceful-fs "^4.2.9" + json-parse-even-better-errors "^2.3.1" + loader-runner "^4.2.0" + mime-types "^2.1.27" + neo-async "^2.6.2" + schema-utils "^3.1.0" + tapable "^2.1.1" + terser-webpack-plugin "^5.1.3" + watchpack "^2.3.1" + webpack-sources "^3.2.3" + +webpackbar@^5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/webpackbar/-/webpackbar-5.0.2.tgz#d3dd466211c73852741dfc842b7556dcbc2b0570" + integrity sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ== + dependencies: + chalk "^4.1.0" + consola "^2.15.3" + pretty-time "^1.1.0" + std-env "^3.0.1" + +websocket-driver@>=0.5.1, websocket-driver@^0.7.4: + version "0.7.4" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.4.tgz#89ad5295bbf64b480abcba31e4953aca706f5760" + integrity sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg== + dependencies: + http-parser-js ">=0.5.1" + safe-buffer ">=5.1.0" + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.4" + resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42" + integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg== -which-module@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" - integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0= + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" -which@^1.2.9: +which@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== dependencies: isexe "^2.0.0" -wide-align@^1.1.0: - version "1.1.3" - resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" - integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== +which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== dependencies: - string-width "^1.0.2 || 2" + isexe "^2.0.0" -wordwrap@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" - integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= +widest-line@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" + integrity 
sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== + dependencies: + string-width "^4.0.0" -wrap-ansi@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" - integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= +widest-line@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-4.0.1.tgz#a0fc673aaba1ea6f0a0d35b3c2795c9a9cc2ebf2" + integrity sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig== dependencies: - string-width "^1.0.1" - strip-ansi "^3.0.1" + string-width "^5.0.1" + +wildcard@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/wildcard/-/wildcard-2.0.0.tgz#a77d20e5200c6faaac979e4b3aadc7b3dd7f8fec" + integrity sha512-JcKqAHLPxcdb9KM49dufGXn2x3ssnfjbcaQdLlfZsL9rH9wgDQjUtDxbo8NE0F6SFvydeu1VhZe7hZuHsB2/pw== + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.0.1.tgz#2101e861777fec527d0ea90c57c6b03aac56a5b3" + integrity sha512-QFF+ufAqhoYHvoHdajT/Po7KoXVBPXS2bgjIam5isfWJPfIOnQZ50JtUiVvCv/sjgacf3yRrt2ZKUZ/V4itN4g== + dependencies: + ansi-styles "^6.1.0" + string-width "^5.0.1" + strip-ansi "^7.0.1" wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= -ws@^5.2.0: - version "5.2.2" - resolved "https://registry.yarnpkg.com/ws/-/ws-5.2.2.tgz#dffef14866b8e8dc9133582514d1befaf96e980f" - integrity sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA== +write-file-atomic@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8" + integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q== dependencies: - async-limiter "~1.0.0" + imurmurhash "^0.1.4" + is-typedarray "^1.0.0" + signal-exit "^3.0.2" + typedarray-to-buffer "^3.1.5" -xml-name-validator@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" - integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw== +ws@^7.3.1: + version "7.5.8" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.8.tgz#ac2729881ab9e7cbaf8787fe3469a48c5c7f636a" + integrity sha512-ri1Id1WinAX5Jqn9HejiGb8crfRio0Qgu8+MtL36rlTA6RLsMdWt1Az/19A2Qij6uSHUMphEFaTKa4WG+UNHNw== -"y18n@^3.2.1 || ^4.0.0": - version "4.0.0" - resolved 
"https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" - integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w== +ws@^8.4.2: + version "8.8.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.8.0.tgz#8e71c75e2f6348dbf8d78005107297056cb77769" + integrity sha512-JDAgSYQ1ksuwqfChJusw1LSJ8BizJ2e/vVu5Lxjq3YvNJNlROv1ui4i+c/kUUrPheBvQl4c5UbERhTwKa6QBJQ== -yallist@^3.0.0, yallist@^3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.3.tgz#b4b049e314be545e3ce802236d6cd22cd91c3de9" - integrity sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A== +xdg-basedir@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13" + integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q== -yargs-parser@^11.1.1: - version "11.1.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-11.1.1.tgz#879a0865973bca9f6bab5cbdf3b1c67ec7d3bcf4" - integrity sha512-C6kB/WJDiaxONLJQnF8ccx9SEeoTTLek8RVbaOIsrAUS8VrBEXfmeSnCZxygc+XC2sNMBIwOOnfcxiynjHsVSQ== +xml-js@^1.6.11: + version "1.6.11" + resolved "https://registry.yarnpkg.com/xml-js/-/xml-js-1.6.11.tgz#927d2f6947f7f1c19a316dd8eea3614e8b18f8e9" + integrity sha512-7rVi2KMfwfWFl+GpPg6m80IVMWXLRjO+PxTq7V2CDhoGak0wzYzFgUY2m4XJ47OGdXd8eLE8EmwfAmdjw7lC1g== dependencies: - camelcase "^5.0.0" - decamelize "^1.2.0" + sax "^1.2.4" -yargs@^12.0.5: - version "12.0.5" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-12.0.5.tgz#05f5997b609647b64f66b81e3b4b10a368e7ad13" - integrity sha512-Lhz8TLaYnxq/2ObqHDql8dX8CJi97oHxrjUcYtzKbbykPtVW9WB+poxI+NM2UIzsMgNCZTIf0AQwsjK5yMAqZw== - dependencies: - cliui "^4.0.0" - decamelize "^1.2.0" - find-up "^3.0.0" - get-caller-file "^1.0.1" - os-locale "^3.0.0" - require-directory "^2.1.1" - require-main-filename "^1.0.1" - set-blocking "^2.0.0" - string-width "^2.0.0" - which-module "^2.0.0" - y18n "^3.2.1 || ^4.0.0" - yargs-parser "^11.1.1" +xtend@^4.0.0, xtend@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity 
sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + +zwitch@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920" + integrity sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==